diff --git a/.gitignore b/.gitignore
index a187011..b5474f1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,14 +6,14 @@ datadir/
 broker/broker
 client/client
-server-webrtc/server-webrtc
 server/server
-proxy-go/proxy-go
+proxy/proxy
+probetest/probetest
 snowflake.log
-proxy/test
-proxy/build
-proxy/node_modules
-proxy/spec/support
-proxy/webext/snowflake.js
 ignore/
-npm-debug.log
+
+# from running the vagrant setup
+/.vagrant/
+/sdk-tools-linux-*.zip*
+/android-ndk-*
+/tools/
\ No newline at end of file
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index b7fd956..3d433c8 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,29 +1,408 @@
-image: golang:1.10-stretch
-cache:
-  paths:
-    - .gradle/wrapper
-    - .gradle/caches
+stages:
+  - test
+  - deploy
+  - container-build
+  - container-mirror
-before_script:
-  # Create symbolic links under $GOPATH, this is needed for local build
-  - export src=$GOPATH/src
-  - mkdir -p $src/git.torproject.org/pluggable-transports
-  - mkdir -p $src/gitlab.com/$CI_PROJECT_NAMESPACE
-  - ln -s $CI_PROJECT_DIR $src/git.torproject.org/pluggable-transports/snowflake.git
-  - ln -s $CI_PROJECT_DIR $src/gitlab.com/$CI_PROJECT_PATH
+variables:
+  DEBIAN_FRONTEND: noninteractive
+  DEBIAN_OLD_STABLE: bullseye
+  DEBIAN_STABLE: bookworm
+  REPRODUCIBLE_FLAGS: -trimpath -ldflags=-buildid=
+  # Don't fail pulling images if dependency_proxy.yml is not included
+  DOCKER_REGISTRY_URL: "docker.io"
-build:
-  script:
-    - apt-get -qy update
-    - apt-get -qy install libx11-dev
-    - cd $src/gitlab.com/$CI_PROJECT_PATH/client
-    - go get ./...
-    - go build ./...
-    - go vet ./...
-    - go test -v -race ./...
-after_script:
+
+# set up apt for automated use
+.apt-template: &apt-template
+- export LC_ALL=C.UTF-8
+- export DEBIAN_FRONTEND=noninteractive
+- ln -fs /usr/share/zoneinfo/Etc/UTC /etc/localtime
+- echo 'quiet "1";'
+       'APT::Install-Recommends "0";'
+       'APT::Install-Suggests "0";'
+       'APT::Acquire::Retries "20";'
+       'APT::Get::Assume-Yes "true";'
+       'Dpkg::Use-Pty "0";'
+       > /etc/apt/apt.conf.d/99gitlab
+- apt-get update
+- apt-get dist-upgrade
+
+
+# Set things up to use the OS-native packages for Go.  Anything that
+# is downloaded by go during the `go fmt` stage is not coming from the
+# Debian/Ubuntu repo. So those would need to be packaged for this to
+# make it into Debian and/or Ubuntu.
+.debian-native-template: &debian-native-template
+  variables:
+    GOPATH: /usr/share/gocode
+  before_script:
+    - apt-get update
+    - apt-get -qy install --no-install-recommends
+      build-essential
+      ca-certificates
+      git
+      golang
+      golang-github-cheekybits-genny-dev
+      golang-github-jtolds-gls-dev
+      golang-github-klauspost-reedsolomon-dev
+      golang-github-lucas-clemente-quic-go-dev
+      golang-github-smartystreets-assertions-dev
+      golang-github-smartystreets-goconvey-dev
+      golang-github-tjfoc-gmsm-dev
+      golang-github-xtaci-kcp-dev
+      golang-github-xtaci-smux-dev
+      golang-golang-x-crypto-dev
+      golang-golang-x-net-dev
+      golang-goptlib-dev
+      golang-golang-x-sys-dev
+      golang-golang-x-text-dev
+      golang-golang-x-xerrors-dev
+
+# use Go installed as part of the official, Debian-based Docker images
+.golang-docker-debian-template: &golang-docker-debian-template
+  before_script:
+    - apt-get update
+    - apt-get -qy install --no-install-recommends
+      ca-certificates
+      git
+
+.go-test: &go-test
+  - gofmt -d .
+  - test -z "$(go fmt ./...)"
+  - go vet ./...
+  - go test -v -race ./...
+ + - cd $CI_PROJECT_DIR/client/ + - go get + - go build $REPRODUCIBLE_FLAGS + +.test-template: &test-template + artifacts: + name: "${CI_PROJECT_PATH}_${CI_JOB_STAGE}_${CI_JOB_ID}_${CI_COMMIT_REF_NAME}_${CI_COMMIT_SHA}" + paths: + - client/*.aar + - client/*.jar + - client/client + expire_in: 1 week + when: on_success + after_script: + - echo "Download debug artifacts from https://gitlab.com/${CI_PROJECT_PATH}/-/jobs" # this file changes every time but should not be cached - rm -f $GRADLE_USER_HOME/caches/modules-2/modules-2.lock - - rm -fr $GRADLE_USER_HOME/caches/*/plugin-resolution/ + - rm -rf $GRADLE_USER_HOME/caches/*/plugin-resolution/ + +# -- jobs ------------------------------------------------------------ + +android: + image: ${DOCKER_REGISTRY_URL}/golang:1.24-$DEBIAN_OLD_STABLE + variables: + ANDROID_HOME: /usr/lib/android-sdk + LANG: C.UTF-8 + cache: + paths: + - .gradle/wrapper + - .gradle/caches + <<: *test-template + before_script: + - *apt-template + - apt-get install + android-sdk-platform-23 + android-sdk-platform-tools + build-essential + curl + default-jdk-headless + git + gnupg + unzip + wget + ca-certificates + + - ndk=android-ndk-r21e-linux-x86_64.zip + - wget --continue --no-verbose https://dl.google.com/android/repository/$ndk + - echo "ad7ce5467e18d40050dc51b8e7affc3e635c85bd8c59be62de32352328ed467e $ndk" > $ndk.sha256 + - sha256sum -c $ndk.sha256 + - unzip -q $ndk + - rm ${ndk}* + - mv android-ndk-* $ANDROID_HOME/ndk-bundle/ + + - chmod -R a+rX $ANDROID_HOME + script: + - *go-test + - export GRADLE_USER_HOME=$CI_PROJECT_DIR/.gradle + - go version + - go env + + - go get golang.org/x/mobile/cmd/gomobile + - go get golang.org/x/mobile/cmd/gobind + - go install golang.org/x/mobile/cmd/gobind + - go install golang.org/x/mobile/cmd/gomobile + - gomobile init + + - cd $CI_PROJECT_DIR/client + # gomobile builds a shared library not a CLI executable + - sed -i 's,^package main$,package snowflakeclient,' *.go + - go get golang.org/x/mobile/bind + - gomobile bind -v -target=android $REPRODUCIBLE_FLAGS . + +go-1.23: + image: ${DOCKER_REGISTRY_URL}/golang:1.23-$DEBIAN_STABLE + <<: *golang-docker-debian-template + <<: *test-template + script: + - *go-test + +go-1.24: + image: ${DOCKER_REGISTRY_URL}/golang:1.24-$DEBIAN_STABLE + <<: *golang-docker-debian-template + <<: *test-template + script: + - *go-test + +debian-testing: + image: containers.torproject.org/tpo/tpa/base-images/debian:testing + <<: *debian-native-template + <<: *test-template + script: + - *go-test + +shadow-integration: + image: ${DOCKER_REGISTRY_URL}/golang:1.23-$DEBIAN_STABLE + variables: + SHADOW_VERSION: "27d0bcf2cf1c7f0d403b6ad3efd575e45ae93126" + TGEN_VERSION: "v1.1.2" + cache: + - key: sf-integration-shadow-$SHADOW_VERSION + paths: + - opt/shadow + - key: sf-integration-tgen-$TGEN_VERSION + paths: + - opt/tgen + artifacts: + paths: + - shadow.data.tar.gz + when: on_failure + tags: + - amd64 + - tpa + script: + - apt-get update + - apt-get install -y git tor libglib2.0-0 libigraph3 + - mkdir -p ~/.local/bin + - mkdir -p ~/.local/src + - export PATH=$PATH:$CI_PROJECT_DIR/opt/shadow/bin/:$CI_PROJECT_DIR/opt/tgen/bin/ + + # Install shadow and tgen + - pushd ~/.local/src + - | + if [ ! 
-f $CI_PROJECT_DIR/opt/shadow/bin/shadow ] + then + echo "The required version of shadow was not cached, building from source" + git clone --shallow-since=2021-08-01 https://github.com/shadow/shadow.git + pushd shadow/ + git checkout $SHADOW_VERSION + CONTAINER=debian:stable-slim ci/container_scripts/install_deps.sh + CC=gcc CONTAINER=debian:stable-slim ci/container_scripts/install_extra_deps.sh + export PATH="$HOME/.cargo/bin:${PATH}" + ./setup build --jobs $(nproc) --prefix $CI_PROJECT_DIR/opt/shadow + ./setup install + popd + fi + - | + if [ ! -f $CI_PROJECT_DIR/opt/tgen/bin/tgen ] + then + echo "The required version of tgen was not cached, building from source" + git clone --branch $TGEN_VERSION --depth 1 https://github.com/shadow/tgen.git + pushd tgen/ + apt-get install -y cmake libglib2.0-dev libigraph-dev + mkdir build && cd build + cmake .. -DCMAKE_INSTALL_PREFIX=$CI_PROJECT_DIR/opt/tgen + make + make install + popd + fi + install $CI_PROJECT_DIR/opt/tgen/bin/tgen ~/.local/bin/tgen + - popd + + # Apply snowflake patch(es) + - | + git clone --depth 1 https://github.com/cohosh/shadow-snowflake-minimal + git am -3 shadow-snowflake-minimal/*.patch + + # Install snowflake binaries to .local folder + - | + for app in "proxy" "client" "server" "broker" "probetest"; do + pushd $app + go build + install $app ~/.local/bin/snowflake-$app + popd + done + + # Install stun server + - GOBIN=~/.local/bin go install github.com/gortc/stund@latest + + # Run a minimal snowflake shadow experiment + - pushd shadow-snowflake-minimal/ + - shadow --log-level=debug --model-unblocked-syscall-latency=true snowflake-minimal.yaml > shadow.log + + # Check to make sure streams succeeded + - | + if [ $(grep -c "stream-success" shadow.data/hosts/snowflakeclient/tgen.*.stdout) = 10 ] + then + echo "All streams in shadow completed successfully" + else + echo "Shadow simulation failed" + exit 1 + fi + after_script: + - tar -czvf $CI_PROJECT_DIR/shadow.data.tar.gz shadow-snowflake-minimal/shadow.data/ shadow-snowflake-minimal/shadow.log + +generate_tarball: + stage: deploy + image: ${DOCKER_REGISTRY_URL}/golang:1.22-$DEBIAN_STABLE + rules: + - if: $CI_COMMIT_TAG + script: + - go mod vendor + - tar czf ${CI_PROJECT_NAME}-${CI_COMMIT_TAG}.tar.gz --transform "s,^,${CI_PROJECT_NAME}-${CI_COMMIT_TAG}/," * + after_script: + - echo TAR_JOB_ID=$CI_JOB_ID >> generate_tarball.env + artifacts: + paths: + - ${CI_PROJECT_NAME}-${CI_COMMIT_TAG}.tar.gz + reports: + dotenv: generate_tarball.env + +release-job: + stage: deploy + image: registry.gitlab.com/gitlab-org/release-cli:latest + rules: + - if: $CI_COMMIT_TAG + needs: + - job: generate_tarball + artifacts: true + script: + - echo "running release_job" + release: + name: 'Release $CI_COMMIT_TAG' + description: 'Created using the release-cli' + tag_name: '$CI_COMMIT_TAG' + ref: '$CI_COMMIT_TAG' + assets: + links: + - name: '${CI_PROJECT_NAME}-${CI_COMMIT_TAG}.tar.gz' + url: '${CI_PROJECT_URL}/-/jobs/${TAR_JOB_ID}/artifacts/file/${CI_PROJECT_NAME}-${CI_COMMIT_TAG}.tar.gz' + +# Build the container only if the commit is to main, or it is a tag. +# If the commit is to main, then the docker image tag should be set to `nightly`. +# If it is a tag, then the docker image tag should be set to the tag name. 
+build-container: + variables: + TAG: $CI_COMMIT_TAG # Will not be set on a non-tag build, will be set later + stage: container-build + parallel: + matrix: + - ARCH: amd64 + - ARCH: arm64 + tags: + - $ARCH + image: + name: gcr.io/kaniko-project/executor:debug + entrypoint: [""] + script: + - if [ $CI_COMMIT_REF_NAME == "main" ]; then export TAG='nightly'; fi + - >- + /kaniko/executor + --context "${CI_PROJECT_DIR}" + --dockerfile "${CI_PROJECT_DIR}/Dockerfile" + --destination "${CI_REGISTRY_IMAGE}:${TAG}_${ARCH}" + rules: + - if: $CI_COMMIT_REF_NAME == "main" + - if: $CI_COMMIT_TAG + +merge-manifests: + variables: + TAG: $CI_COMMIT_TAG + stage: container-build + needs: + - job: build-container + artifacts: false + image: + name: ${DOCKER_REGISTRY_URL}/mplatform/manifest-tool:alpine + entrypoint: [""] + script: + - if [ $CI_COMMIT_REF_NAME == "main" ]; then export TAG='nightly'; fi + - >- + manifest-tool + --username="${CI_REGISTRY_USER}" + --password="${CI_REGISTRY_PASSWORD}" + push from-args + --platforms linux/amd64,linux/arm64 + --template "${CI_REGISTRY_IMAGE}:${TAG}_ARCH" + --target "${CI_REGISTRY_IMAGE}:${TAG}" + rules: + - if: $CI_COMMIT_REF_NAME == "main" + when: always + - if: $CI_COMMIT_TAG + when: always + +# If this is a tag, then we want to additionally tag the image as `latest` +tag-container-release: + stage: container-build + needs: + - job: merge-manifests + artifacts: false + image: + name: gcr.io/go-containerregistry/crane:debug + entrypoint: [""] + allow_failure: false + variables: + CI_REGISTRY: $CI_REGISTRY + IMAGE_TAG: $CI_REGISTRY_IMAGE:$CI_COMMIT_TAG + RELEASE_TAG: $CI_REGISTRY_IMAGE:latest + script: + - echo "Tagging docker image with stable tag with crane" + - echo -n "$CI_JOB_TOKEN" | crane auth login $CI_REGISTRY -u gitlab-ci-token --password-stdin + - crane cp $IMAGE_TAG $RELEASE_TAG + rules: + - if: $CI_COMMIT_TAG + when: always + +clean-image-tags: + stage: container-build + needs: + - job: merge-manifests + artifacts: false + image: containers.torproject.org/tpo/tpa/base-images/debian:bookworm + before_script: + - *apt-template + - apt-get install -y jq curl + script: + - "REGISTRY_ID=$(curl --silent --request GET --header \"JOB-TOKEN: ${CI_JOB_TOKEN}\" \"https://gitlab.torproject.org/api/v4/projects/${CI_PROJECT_ID}/registry/repositories\" | jq '.[].id')" + - "curl --request DELETE --data \"name_regex_delete=(latest|${CI_COMMIT_TAG})_.*\" --header \"JOB-TOKEN: ${CI_JOB_TOKEN}\" \"https://gitlab.torproject.org/api/v4/projects/${CI_PROJECT_ID}/registry/repositories/${REGISTRY_ID}/tags\"" + rules: + - if: $CI_COMMIT_REF_NAME == "main" + when: always + - if: $CI_COMMIT_TAG + when: always + +mirror-image-to-dockerhub: + stage: container-mirror + variables: + DOCKERHUB_MIRROR_REPOURL: $DOCKERHUB_MIRROR_REPOURL + DOCKERHUB_USERNAME: $DOCKERHUB_MIRROR_USERNAME + DOCKERHUB_PASSWORD: $DOCKERHUB_MIRROR_PASSWORD + image: + name: gcr.io/go-containerregistry/crane:debug + entrypoint: [""] + rules: + - if: $CI_COMMIT_REF_NAME == "main" + when: always + - if: $CI_COMMIT_TAG + when: always + script: + - echo "$DOCKERHUB_PASSWORD" | crane auth login docker.io -u $DOCKERHUB_MIRROR_USERNAME --password-stdin + - crane cp -a containers.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake $DOCKERHUB_MIRROR_REPOURL diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..e69de29 diff --git a/.travis.yml b/.travis.yml index 072f073..a2c8880 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,42 +2,12 @@ language: go dist: xenial -go_import_path: 
git.torproject.org/pluggable-transports/snowflake.git - -addons: - apt: - sources: - - ubuntu-toolchain-r-test - packages: - - g++-5 - - gcc-5 +go_import_path: git.torproject.org/pluggable-transports/snowflake.git/v2 go: - - 1.10.x - -env: - - TRAVIS_NODE_VERSION="8" CC="gcc-5" CXX="g++-5" - -before_install: - - nvm install $TRAVIS_NODE_VERSION - -install: - - go get -u github.com/smartystreets/goconvey - - go get -u github.com/keroserene/go-webrtc - - go get -u github.com/dchest/uniuri - - go get -u git.torproject.org/pluggable-transports/goptlib.git - - go get -u git.torproject.org/pluggable-transports/websocket.git/websocket - - go get -u google.golang.org/appengine - - go get -u golang.org/x/crypto/acme/autocert - - go get -u golang.org/x/net/http2 - - pushd proxy - - npm install - - popd + - 1.13.x script: - test -z "$(go fmt ./...)" - go vet ./... - go test -v -race ./... - - cd proxy - - npm run lint - - npm test diff --git a/ChangeLog b/ChangeLog new file mode 100644 index 0000000..5de42c7 --- /dev/null +++ b/ChangeLog @@ -0,0 +1,274 @@ +Changes in version v2.11.0 - 2025-03-18 +- Fix data race warnings for tokens_t +- Fix race condition in proxy connection count stats +- Make NATPolicy thread-safe +- Fix race conditions with error scope +- Fix race condition with proxy isClosing variable +- Issue 40454: Update broker metrics to count matches, denials, and timeouts +- Add proxy event and metrics for failed connections +- Issue 40377: Create CI artifact if shadow fails +- Issue 40438: Copy base client config for each SOCKS connection +- Fix minor data race in Snowflake broker metrics +- Issue 40363: Process and read broker SQS messages more quickly +- Issue 40419: delay before calling dc.Close() to improve NAT test on proxy +- Add country stats to proxy prometheus metrics +- Issue 40381: Avoid snowflake client dependency in proxy +- Issue 40446: Lower broker ClientTimeout to 5 seconds in line with CDN77 defaults +- Refactor out utls library into ptutil/utls +- Issue 40414: Use /etc/localtime for CI +- Issue 40440: Add LE self-signed ISRG Root X1 to cert pool +- Proxy refactor to simplify tokens.ret() on error +- Clarify ephemeral-ports-range proxy option +- Issue 40417: Fixes and updates to CI containers +- Issue 40178: Handle unknown client type better +- Issue 40304: Update STUN server list +- Issue 40210: Remove proxy log when offer is nil +- Issue 40413: Log EventOnCurrentNATTypeDetermined for proxy +- Use named return for some functions to improve readability +- Issue 40271: Use pion SetIPFilter rather than our own StripLocalAddress +- Issue 40413: Suppress logs of proxy events by default +- Add IsLinkLocalUnicast in IsLocal +- Fix comments +- Bump versions of dependencies + +Changes in version v2.10.1 - 2024-11-11 +- Issue 40406: Update version string + +Changes in version v2.10.0 - 2024-11-07 +- Issue 40402: Add proxy event for when client has connected +- Issue 40405: Prevent panic for duplicate SnowflakeConn.Close() calls +- Enable local time for proxy logging +- Have proxy summary statistics log average transfer rate +- Issue 40210: Remove duplicate poll interval loop in proxy +- Issue 40371: Prevent broker and proxy from rejecting clients without ICE candidates +- Issue 40392: Allow the proxy and probetest to set multiple STUN URLs +- Issue 40387: Fix error in probetest NAT check +- Fix proxy panic on invalid relayURL +- Set empty pattern if broker bridge-list is empty +- Improve documentation of Ephemeral[Min,Max]Port +- Fix resource leak and NAT check in probetest +- Fix 
memory leak from failed NAT check
+- Improve NAT check logging
+- Issue 40230: Send answer even if ICE gathering is not complete
+- Improve broker error message on unknown bridge fingerprint
+- Don't proxy private IP addresses
+- Only accept ws:// and wss:// relay addresses
+- Issue 40373: Add cli flag and SnowflakeProxy field to modify proxy poll interval
+- Use %w not %v in fmt.Errorf
+- Updates to documentation
+- Adjust copy buffer size to improve proxy performance
+- Improve descriptions of cli flags
+- Cosmetic changes for code readability
+- Issue 40367: Deduplicate prometheus metrics names
+- Report the version of snowflake to the tor process
+- Issue 40365: Indicate whether the repo was modified in the version string
+- Simplify NAT checking logic
+- Issue 40354: Use ptutil library for safelog and prometheus metrics
+- Add cli flag to set a listen address for proxy prometheus metrics
+- Issue 40345: Integrate docker image with release process
+- Bump versions of dependencies
+
+Changes in version v2.9.2 - 2024-03-18
+- Issue 40288: Add integration testing with Shadow
+- Issue 40345: Automatically build and push containers to our registry
+- Issue 40339: Fix client ID reuse bug in SQS rendezvous
+- Issue 40341: Modify SQS rendezvous arguments to use b64 encoded parameters
+- Issue 40330: Add new metrics at the broker for per-country rendezvous stats
+- Issue 40345: Update docker container tags
+- Bump versions of dependencies
+
+Changes in version v2.9.1 - 2024-02-27
+- Issue 40335: Fix release job
+- Change deprecated io/ioutil package to io package
+- Bump versions of dependencies
+
+Changes in version v2.9.0 - 2024-02-05
+- Issue 40285: Add vcs revision to version string
+- Issue 40294: Update recommended torrc options in client README
+- Issue 40306: Scrub space-separated IP addresses
+- Add proxy commandline option for probe server URL
+- Use SetNet setting in probetest to ignore net.Interfaces error
+- Add probetest commandline option for STUN URL
+- Issue 26151: Implement SQS rendezvous in client and broker
+- Add broker metrics to track rendezvous method
+- Cosmetic code quality fixes
+- Bump versions of dependencies
+
+Changes in version v2.8.1 - 2023-12-21
+- Issue 40276: Reduce allocations in encapsulation.ReadData
+- Issue 40310: Remove excessive logging for closed proxy connections
+- Issue 40278: Add network fix for old version of android to proxy
+- Bump versions of dependencies
+
+Changes in version v2.8.0 - 2023-11-20
+- Issue 40069: Add outbound proxy support
+- Issue 40301: Fix for a bug in domain fronting configurations
+- Issue 40302: Remove throughput summary from proxy logger
+- Issue 40302: Change proxy stats logging to only log stats for traffic that occurred in the summary interval
+- Update renovate bot configuration to use Go 1.21
+- Bump versions of dependencies
+
+Changes in version v2.7.0 - 2023-10-16
+7142fa3 fix(proxy): Correctly close connection pipe when dealing with error
+6393af6 Remove proxy churn measurements from broker.
+a615e8b fix(proxy): remove _potential_ deadlock +d434549 Maintain backward compatability with old clients +9fdfb3d Randomly select front domain from comma-separated list +5cdf52c Update dependencies +1559963 chore(deps): update module github.com/xtaci/kcp-go/v5 to v5.6.3 +60e66be Remove Golang 1.20 from CI Testing +1d069ca Update CI targets to test android from golang 1.21 +3a050c6 Use ShouldBeNil to check for nil values +e45e8e5 chore(deps): update module github.com/smartystreets/goconvey to v1.8.1 +f47ca18 chore(deps): update module gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/goptlib to v1.5.0 +106da49 chore(deps): update module github.com/pion/webrtc/v3 to v3.2.20 +2844ac6 Update CI targets to include only Go 1.20 and 1.21 +f4e1ab9 chore(deps): update module golang.org/x/net to v0.15.0 +caaff70 Update module golang.org/x/sys to v0.12.0 + +Changes in version v2.6.1 - 2023-09-11 +- a3bfc28 Update module golang.org/x/crypto to v0.12.0 +- e37e15a Update golang Docker tag to v1.21 +- b632c7d Workaround for shadow in lieu of AF_NETLINK support +- 0cb2975 Update module golang.org/x/net to v0.13.0 [SECURITY] +- f73fe6e Keep the 'v' from the tag on the released .tar.gz +- 8104732 Change DefaultRelayURL back to wss://snowflake.torproject.net/. +- d932cb2 feat: add option to expose the stats by using metrics +- af73ab7 Add renovate config +- aaeab3f Update dependencies +- 58c3121 Close temporary UDPSession in TestQueuePacketConnWriteToKCP. +- 80980a3 Fix a comment left over from turbotunnel-quic. +- 08d1c6d Bump minimum required version of go + +Changes in version v2.6.0 - 2023-06-19 +- Issue 40243: Implement datachannel flow control at proxy +- Issue 40087: Append Let's Encrypt ISRG Root X1 to cert pool +- Issue 40198: Use IP_BIND_ADDRESS_NO_PORT when dialing the ORPort on linux +- Move from gitweb to gitlab +- Add warning log at broker when proxy does not connect with client +- Fix unit tests after SDP validation +- Soften non-critical log from error to warning +- Issue 40231: Validate SDP offers and answers +- Add scanner error check to ClusterCounter.Count +- Fix server benchmark tests +- Issue 40260: Use a sync.Pool to reuse QueuePacketConn buffers +- Issue 40043: Restore ListenAndServe error in server +- Update pion webrtc library versions +- Issue 40108: Add outbound address config option to proxy +- Issue 40260: Fix a data race in the Snowflake server +- Issue 40216: Add utls-imitate, utls-nosni documentation to the README +- Fix up/down traffic stats in standalone proxy +- Issue 40226: Filter out ICE servers that are not STUN +- Issue 40226: Update README to reflect the type of ICE servers we support +- Issue 40226: Parse ICE servers using the pion/ice library function +- Bring client torrc up to date with Tor Browser + +Changes in version v2.5.1 - 2023-01-18 +- Issue 40249: Fix issue with Skip Hello Verify patch + +Changes in version v2.5.0 - 2023-01-18 +- Issue 40249: Apply Skip Hello Verify Migration + +Changes in version v2.4.3 - 2023-01-16 +- Fix version number in version.go + +Changes in version v2.4.2 - 2023-01-13 +- Issue 40208: Enhance help info for capacity flag +- Issue 40232: Update README and fix help output +- Issue 40173: Increase clientIDAddrMapCapacity +- Issue 40177: Manually unlock mutex in ClientMap.SendQueue +- Issue 40177: Have SnowflakeClientConn implement io.WriterTo +- Issue 40179: Reduce turbotunnel queueSize from 2048 to 512 +- Issue 40187/40199: Take ownership of buffer in QueuePacketConn QueueIncoming/WriteTo +- Add more tests for URL 
encoded IPs (safelog) +- Fix server flag name +- Issue 40200: Use multiple parallel KCP state machines in the server +- Add a num-turbotunnel server transport option +- Issue: 40241: Switch default proxy STUN server to stun.l.google.com + +Changes in version v2.4.1 - 2022-12-01 +- Issue 40224: Bug fix in utls roundtripper + +Changes in version v2.4.0 - 2022-11-29 +- Fix proxy command line help output +- Issue 40123: Reduce multicast DNS candidates +- Add ICE ephemeral ports range setting +- Reformat using Go 1.19 +- Update CI tests to include latest and minimum Go versions +- Issue 40184: Use fixed unit for bandwidth logging +- Update gorilla/websocket to v1.5.0 +- Issue 40175: Server performance improvements +- Issue 40183: Change snowflake proxy log verbosity +- Issue 40117: Display proxy NAT type in logs +- Issue 40198: Add a `orport-srcaddr` server transport option +- Add gofmt output to CI test +- Issue 40185: Change bandwidth type from int to int64 to prevent overflow +- Add version output support to snowflake +- Issue 40229: Change regexes for ipv6 addresses to catch url-encoded addresses +- Issue 40220: Close stale connections in standalone proxy + +Changes in version v2.3.0 - 2022-06-23 +- Issue 40146: Avoid performing two NAT probe tests at startup +- Issue 40134: Log messages from client NAT check failures are confusing +- Issue 34075: Implement metrics to measure snowflake churn +- Issue 28651: Prepare all pieces of the snowflake pipeline for a second snowflake bridge +- Issue 40129: Distributed Snowflake Server Support + +Changes in version v2.2.0 - 2022-05-25 + +- Issue 40099: Initialize SnowflakeListener.closed +- Add connection failure events for proxy timeouts +- Issue 40103: Fix proxy logging verb tense +- Fix up and downstream metrics output for proxy +- Issue 40054: uTLS for broker negotiation +- Forward bridge fingerprint from client to broker (WIP, Issue 28651) +- Issue 40104: Make it easier to configure proxy type +- Remove version from ClientPollRequest +- Issue 40124: Move tor-specific code out of library +- Issue 40115: Scrub pt event logs +- Issue 40127: Bump webrtc and dtls library versions +- Bump version of webrtc and dtls to fix dtls CVEs +- Issue 40141: Ensure library calls of events can be scrubbed + +Changes in version v2.1.0 - 2022-02-08 + +- Issue 40098: Remove support for legacy one shot mode +- Issue 40079: Make connection summary at proxy privacy preserving +- Issue 40076: Add snowflake event API for notifications of connection events +- Issue 40084: Increase capacity of client address map at the server +- Issue 40060: Further clean up snowflake server logs +- Issue 40089: Validate proxy and client supplied strings at broker +- Issue 40014: Update version of DTLS library to include fingerprinting fixes +- Issue 40075: Support recurring NAT type check in standalone proxy + + +Changes in version v2.0.0 - 2021-11-04 + +- Turn the standalone snowflake proxy code into a library +- Clean up and reworked the snowflake client and server library code +- Unify broker/bridge domains to *.torproject.net +- Updates to the snowflake library documentation +- New package functions to define and set a rendezvous method with the +broker +- Factor out the broker geoip code into its own external library +- Bug fix to check error calls in preparePeerConnection +- Bug fixes in snowflake tests +- Issue 40059: add the ability to pass in snowflake arguments through SOCKS +- Increase buffer sizes for sending and receiving snowflake data +- Issue 25985: rendezvous with the 
broker using AMP cache +- Issue 40055: wait for the full poll interval between proxy polls + +Changes in version v1.1.0 - 2021-07-13 + +- Refactors of the Snowflake broker code +- Refactors of the Snowflake proxy code +- Issue 40048: assign proxies based on self-reported client load +- Issue 40052: fixed a memory leak in the server accept loop +- Version bump of kcp and smux libraries +- Bug fix to pass the correct client address to the Snowflake bridge metrics +counter +- Bug fixes to prevent race conditions in the Snowflake client + +Changes in version v1.0.0 - 2021-06-07 + +- Initial release. diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..77673dd --- /dev/null +++ b/Dockerfile @@ -0,0 +1,50 @@ +FROM docker.io/library/golang:latest AS build + + +ADD . /app + +WORKDIR /app/proxy +RUN go get +RUN CGO_ENABLED=0 go build -o proxy -ldflags '-extldflags "-static" -w -s' . + +FROM containers.torproject.org/tpo/tpa/base-images/debian:bookworm as debian-base + +# Install dependencies to add Tor's repository. +RUN apt-get update && apt-get install -y \ + curl \ + gpg \ + gpg-agent \ + ca-certificates \ + libcap2-bin \ + --no-install-recommends + +# See: +RUN curl https://deb.torproject.org/torproject.org/A3C4F0F979CAA22CDBA8F512EE8CBC9E886DDD89.asc | gpg --import +RUN gpg --export A3C4F0F979CAA22CDBA8F512EE8CBC9E886DDD89 | apt-key add - + +RUN printf "deb https://deb.torproject.org/torproject.org bookworm main\n" >> /etc/apt/sources.list.d/tor.list + +# Install remaining dependencies. +RUN apt-get update && apt-get install -y \ + tor \ + tor-geoipdb \ + --no-install-recommends + + +FROM scratch + +COPY --from=debian-base /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt +COPY --from=debian-base /usr/share/zoneinfo /usr/share/zoneinfo +COPY --from=debian-base /usr/share/tor/geoip* /usr/share/tor/ +COPY --from=build /app/proxy/proxy /bin/proxy + +ENTRYPOINT [ "/bin/proxy" ] + +# Set some labels +# io.containers.autoupdate label will instruct podman to reach out to the +# corresponding registry to check if the image has been updated. If an image +# must be updated, Podman pulls it down and restarts the systemd unit executing +# the container. See podman-auto-update(1) for more details, or +# https://docs.podman.io/en/latest/markdown/podman-auto-update.1.html +LABEL io.containers.autoupdate=registry +LABEL org.opencontainers.image.authors="anti-censorship-team@lists.torproject.org" diff --git a/LICENSE b/LICENSE index f700c98..42f6296 100644 --- a/LICENSE +++ b/LICENSE @@ -3,7 +3,7 @@ ================================================================================ Copyright (c) 2016, Serene Han, Arlo Breault -All rights reserved. +Copyright (c) 2019-2020, The Tor Project, Inc Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/README.md b/README.md index cd2641b..b2b0c4f 100644 --- a/README.md +++ b/README.md @@ -1,119 +1,59 @@ # Snowflake -[![Build Status](https://travis-ci.org/keroserene/snowflake.svg?branch=master)](https://travis-ci.org/keroserene/snowflake) - -Pluggable Transport using WebRTC, inspired by Flashproxy. - -### Status - -- [x] Transport: Successfully connects using WebRTC. -- [x] Rendezvous: HTTP signaling (with optional domain fronting) to the Broker - arranges peer-to-peer connections with multitude of volunteer "snowflakes". -- [x] Client multiplexes remote snowflakes. -- [x] Can browse using Tor over Snowflake. 
-- [ ] Reproducible build with TBB. +Snowflake is a censorship-evasion pluggable transport using WebRTC, inspired by Flashproxy. **Table of Contents** +- [Structure of this Repository](#structure-of-this-repository) - [Usage](#usage) - - [Dependencies](#dependencies) - - [More Info](#more-info) - - [Building](#building) - - [Test Environment](#test-environment) + - [Using Snowflake with Tor](#using-snowflake-with-tor) + - [Running a Snowflake Proxy](#running-a-snowflake-proxy) + - [Using the Snowflake Library with Other Applications](#using-the-snowflake-library-with-other-applications) +- [Test Environment](#test-environment) - [FAQ](#faq) -- [Appendix](#appendix) - - [-- Testing directly via WebRTC Server --](#---testing-directly-via-webrtc-server---) +- [More info and links](#more-info-and-links) +### Structure of this Repository + +- `broker/` contains code for the Snowflake broker +- `doc/` contains Snowflake documentation and manpages +- `client/` contains the Tor pluggable transport client and client library code +- `common/` contains generic libraries used by multiple pieces of Snowflake +- `proxy/` contains code for the Go standalone Snowflake proxy +- `probetest/` contains code for a NAT probetesting service +- `server/` contains the Tor pluggable transport server and server library code + ### Usage -``` -cd client/ -go get -go build -tor -f torrc -``` -This should start the client plugin, bootstrapping to 100% using WebRTC. +Snowflake is currently deployed as a pluggable transport for Tor. -#### Dependencies +#### Using Snowflake with Tor -Client: -- [go-webrtc](https://github.com/keroserene/go-webrtc) -- Go 1.5+ +To use the Snowflake client with Tor, you will need to add the appropriate `Bridge` and `ClientTransportPlugin` lines to your [torrc](https://2019.www.torproject.org/docs/tor-manual.html.en) file. See the [client README](client) for more information on building and running the Snowflake client. -Proxy: -- JavaScript +#### Running a Snowflake Proxy ---- +You can contribute to Snowflake by running a Snowflake proxy. We have the option to run a proxy in your browser or as a standalone Go program. See our [community documentation](https://community.torproject.org/relay/setup/snowflake/) for more details. -#### More Info +#### Using the Snowflake Library with Other Applications -Tor can plug in the Snowflake client via a correctly configured `torrc`. -For example: +Snowflake can be used as a Go API, and adheres to the [v2.1 pluggable transports specification](). For more information on using the Snowflake Go library, see the [Snowflake library documentation](doc/using-the-snowflake-library.md). -``` -ClientTransportPlugin snowflake exec ./client \ --url https://snowflake-broker.azureedge.net/ \ --front ajax.aspnetcdn.com \ --ice stun:stun.l.google.com:19302 --max 3 -``` - -The flags `-url` and `-front` allow the Snowflake client to speak to the Broker, -in order to get connected with some volunteer's browser proxy. `-ice` is a -comma-separated list of ICE servers, which are required for NAT traversal. - -For logging, run `tail -F snowflake.log` in a second terminal. - -You can modify the `torrc` to use your own broker: - -``` -ClientTransportPlugin snowflake exec ./client --meek -``` - - -#### Building - -This describes how to build the in-browser snowflake. For the client, see Usage, -above. - -The client will only work if there are browser snowflakes available. 
-To run your own:
-
-```
-cd proxy/
-npm run build
-```
-
-Then, start a local http server in the `proxy/build/` in any way you like.
-For instance:
-
-```
-cd build/
-python -m http.server
-```
-
-Then, open a browser tab to `http://127.0.0.1:8000/snowflake.html` to view
-the debug-console of the snowflake.,
-So long as that tab is open, you are an ephemeral Tor bridge.
-
-
-#### Test Environment
+### Test Environment
 
 There is a Docker-based test environment at https://github.com/cohosh/snowbox.
 
-
 ### FAQ
 
 **Q: How does it work?**
 
 In the Tor use-case:
 
-1. Volunteers visit websites which host the "snowflake" proxy. (just
-like flashproxy)
+1. Volunteers visit websites that host the 'snowflake' proxy, run a snowflake [web extension](https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake-webext), or use a standalone proxy.
 2. Tor clients automatically find available browser proxies via the Broker
 (the domain fronted signaling channel).
 3. Tor client and browser proxy establish a WebRTC peer connection.
@@ -141,22 +81,26 @@ manual port forwarding! It utilizes the "ICE" negotiation via WebRTC, and also
 involves a great abundance of ephemeral and short-lived (and special!)
 volunteer proxies...
 
-### Appendix
+### More info and links
 
-##### -- Testing with Standalone Proxy --
+We have more documentation in the [Snowflake wiki](https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/wikis/home) and at https://snowflake.torproject.org/.
 
-```
-cd proxy-go
-go build
-./proxy-go
-```
-##### -- Testing directly via WebRTC Server --
+##### -- Android AAR Reproducible Build Setup --
 
-See server-webrtc/README.md for information on connecting directly to a
-WebRTC server transport plugin, bypassing the Broker and browser proxy.
+Using `gomobile` it is possible to build snowflake as shared libraries for all
+the architectures supported by Android. This is in the _.gitlab-ci.yml_, which
+runs in GitLab CI. It is also possible to run this setup in a Virtual Machine
+using [vagrant](https://www.vagrantup.com/). Just run `vagrant up` and it will
+create and provision the VM. Run `vagrant ssh` to get into the VM and use it
+as a development environment.
 
-More documentation on the way.
+##### uTLS Settings
 
-Also available at:
-[torproject.org/pluggable-transports/snowflake](https://gitweb.torproject.org/pluggable-transports/snowflake.git/)
+Snowflake communicates with the broker, which serves as its signaling server, over a TLS-based domain-fronted connection. This connection may be identified by its use of the Go TLS stack.
+
+uTLS is a software library designed to imitate the TLS Client Hello fingerprints of browsers and other popular software, in order to evade censorship based on Client Hello fingerprinting. Select the fingerprint to imitate with `-utls-imitate`. You can use `-version` to see a list of supported values.
+
+Depending on client and server configuration, it may not always work as expected, because not all TLS extensions are correctly implemented.
+
+You can also remove the SNI (Server Name Indication) from the Client Hello with `-utls-nosni` to evade SNI-based censorship; note that not all servers support this.
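
The broker rendezvous in step 2 of the FAQ above rides on domain fronting: DNS and the TLS SNI see only the front domain, while the HTTP Host header carries the real broker host. The following is a minimal Go sketch of that idea, not the project's actual client code; the hostnames and the `/client` path are illustrative placeholders drawn from the old torrc example and the broker's routes.

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// The URL names only the front domain; this is all that DNS and
	// TLS (SNI) reveal to an observer. Placeholder hostname.
	req, err := http.NewRequest("POST",
		"https://ajax.aspnetcdn.com/client",
		strings.NewReader("SDP offer goes here"))
	if err != nil {
		panic(err)
	}
	// The Host header names the real broker; the CDN routes on it.
	// Placeholder hostname.
	req.Host = "snowflake-broker.azureedge.net"

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // on success, the body carries the SDP answer
}
```
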
diff --git a/Vagrantfile b/Vagrantfile
new file mode 100644
index 0000000..739fbc8
--- /dev/null
+++ b/Vagrantfile
@@ -0,0 +1,67 @@
+require 'pathname'
+require 'tempfile'
+require 'yaml'
+
+srvpath = Pathname.new(File.dirname(__FILE__)).realpath
+configfile = YAML.load_file(File.join(srvpath, "/.gitlab-ci.yml"))
+remote_url = 'https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake'
+
+# set up essential environment variables
+env = configfile['variables']
+env = env.merge(configfile['android']['variables'])
+env['CI_PROJECT_DIR'] = '/builds/tpo/anti-censorship/pluggable-transports/snowflake'
+env_file = Tempfile.new('env')
+File.chmod(0644, env_file.path)
+env.each do |k,v|
+  env_file.write("export #{k}='#{v}'\n")
+end
+env_file.rewind
+
+sourcepath = '/etc/profile.d/env.sh'
+header = "#!/bin/bash -ex\nsource #{sourcepath}\ncd $CI_PROJECT_DIR\n"
+
+before_script_file = Tempfile.new('before_script')
+File.chmod(0755, before_script_file.path)
+before_script_file.write(header)
+configfile['android']['before_script'].flatten.each do |line|
+  before_script_file.write(line)
+  before_script_file.write("\n")
+end
+before_script_file.rewind
+
+script_file = Tempfile.new('script')
+File.chmod(0755, script_file.path)
+script_file.write(header)
+configfile['android']['script'].flatten.each do |line|
+  script_file.write(line)
+  script_file.write("\n")
+end
+script_file.rewind
+
+Vagrant.configure("2") do |config|
+  config.vm.box = "debian/bullseye64"
+  config.vm.synced_folder '.', '/vagrant', disabled: true
+  config.vm.provision "file", source: env_file.path, destination: 'env.sh'
+  config.vm.provision :shell, inline: <<-SHELL
+    set -ex
+    mv ~vagrant/env.sh #{sourcepath}
+    source #{sourcepath}
+    test -d /go || mkdir /go
+    mkdir -p $(dirname $CI_PROJECT_DIR)
+    chown -R vagrant.vagrant $(dirname $CI_PROJECT_DIR)
+    apt-get update
+    apt-get -qy install --no-install-recommends git
+    git clone #{remote_url} $CI_PROJECT_DIR
+    chmod -R a+rX,u+w /go $CI_PROJECT_DIR
+    chown -R vagrant.vagrant /go $CI_PROJECT_DIR
+SHELL
+  config.vm.provision "file", source: before_script_file.path, destination: 'before_script.sh'
+  config.vm.provision "file", source: script_file.path, destination: 'script.sh'
+  config.vm.provision :shell, inline: '/home/vagrant/before_script.sh'
+  config.vm.provision :shell, privileged: false, inline: '/home/vagrant/script.sh'
+
+  # remove this or comment it out to use VirtualBox instead of libvirt
+  config.vm.provider :libvirt do |libvirt|
+    libvirt.memory = 1536
+  end
+end
diff --git a/appengine/README b/appengine/README
deleted file mode 100644
index 797b0f9..0000000
--- a/appengine/README
+++ /dev/null
@@ -1,28 +0,0 @@
-This component runs on Google App Engine. It reflects domain-fronted
-requests from a client to the Snowflake broker.
-
-You need the Go App Engine SDK in order to deploy the app.
-  https://cloud.google.com/sdk/docs/#linux
-After unpacking, install the app-engine-go component:
-  google-cloud-sdk/bin/gcloud components install app-engine-go
-
-To test locally, run
-  google-cloud-sdk/bin/dev_appserver.py app.yaml
-The app will be running at http://127.0.0.1:8080/.
-
-To deploy to App Engine, first create a new project and app. You have to
-think of a unique name (marked as "<appname>" in the commands). You only
-have to do the "create" step once; subsequent times you can go straight
-to the "deploy" step. The "gcloud auth login" command will open a
-browser window so you can log in to a Google account.
-  google-cloud-sdk/bin/gcloud auth login
-  google-cloud-sdk/bin/gcloud projects create <appname>
-  google-cloud-sdk/bin/gcloud app create --project=<appname>
-Then to deploy the project, run:
-  google-cloud-sdk/bin/gcloud app deploy --project=<appname>
-
-To configure the Snowflake client to talk to the App Engine app, provide
-"https://<appname>.appspot.com/" as the --url option.
-  UseBridges 1
-  Bridge snowflake 0.0.2.0:1
-  ClientTransportPlugin snowflake exec ./client -url https://<appname>.appspot.com/ -front www.google.com
diff --git a/appengine/app.yaml b/appengine/app.yaml
deleted file mode 100644
index 44df436..0000000
--- a/appengine/app.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-runtime: go
-api_version: go1
-
-handlers:
-- url: /.*
-  script: _go_app
-  secure: always
diff --git a/appengine/reflect.go b/appengine/reflect.go
deleted file mode 100644
index 58d8a67..0000000
--- a/appengine/reflect.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// A web app for Google App Engine that proxies HTTP requests and responses to
-// the Snowflake broker.
-package reflect
-
-import (
-	"context"
-	"io"
-	"net/http"
-	"net/url"
-	"time"
-
-	"google.golang.org/appengine"
-	"google.golang.org/appengine/log"
-	"google.golang.org/appengine/urlfetch"
-)
-
-const (
-	forwardURL = "https://snowflake-broker.bamsoftware.com/"
-	// A timeout of 0 means to use the App Engine default (5 seconds).
-	urlFetchTimeout = 20 * time.Second
-)
-
-var ctx context.Context
-
-// Join two URL paths.
-func pathJoin(a, b string) string {
-	if len(a) > 0 && a[len(a)-1] == '/' {
-		a = a[:len(a)-1]
-	}
-	if len(b) == 0 || b[0] != '/' {
-		b = "/" + b
-	}
-	return a + b
-}
-
-// We reflect only a whitelisted set of header fields. Otherwise, we may copy
-// headers like Transfer-Encoding that interfere with App Engine's own
-// hop-by-hop headers.
-var reflectedHeaderFields = []string{
-	"Content-Type",
-	"X-Session-Id",
-}
-
-// Make a copy of r, with the URL being changed to be relative to forwardURL,
-// and including only the headers in reflectedHeaderFields.
-func copyRequest(r *http.Request) (*http.Request, error) {
-	u, err := url.Parse(forwardURL)
-	if err != nil {
-		return nil, err
-	}
-	// Append the requested path to the path in forwardURL, so that
-	// forwardURL can be something like "https://example.com/reflect".
-	u.Path = pathJoin(u.Path, r.URL.Path)
-	c, err := http.NewRequest(r.Method, u.String(), r.Body)
-	if err != nil {
-		return nil, err
-	}
-	for _, key := range reflectedHeaderFields {
-		values, ok := r.Header[key]
-		if ok {
-			for _, value := range values {
-				c.Header.Add(key, value)
-			}
-		}
-	}
-	return c, nil
-}
-
-func handler(w http.ResponseWriter, r *http.Request) {
-	ctx = appengine.NewContext(r)
-	fr, err := copyRequest(r)
-	if err != nil {
-		log.Errorf(ctx, "copyRequest: %s", err)
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-		return
-	}
-	if urlFetchTimeout != 0 {
-		var cancel context.CancelFunc
-		ctx, cancel = context.WithTimeout(ctx, urlFetchTimeout)
-		defer cancel()
-	}
-	// Use urlfetch.Transport directly instead of urlfetch.Client because we
-	// want only a single HTTP transaction, not following redirects.
- transport := urlfetch.Transport{ - Context: ctx, - } - resp, err := transport.RoundTrip(fr) - if err != nil { - log.Errorf(ctx, "RoundTrip: %s", err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - defer resp.Body.Close() - for _, key := range reflectedHeaderFields { - values, ok := resp.Header[key] - if ok { - for _, value := range values { - w.Header().Add(key, value) - } - } - } - w.WriteHeader(resp.StatusCode) - n, err := io.Copy(w, resp.Body) - if err != nil { - log.Errorf(ctx, "io.Copy after %d bytes: %s", n, err) - } -} - -func init() { - http.HandleFunc("/", handler) -} diff --git a/broker/README.md b/broker/README.md index fb6181e..1e0a763 100644 --- a/broker/README.md +++ b/broker/README.md @@ -1,3 +1,12 @@ + + +**Table of Contents** + +- [Overview](#overview) +- [Running your own](#running-your-own) + + + This is the Broker component of Snowflake. ### Overview diff --git a/broker/amp.go b/broker/amp.go new file mode 100644 index 0000000..4d19a6c --- /dev/null +++ b/broker/amp.go @@ -0,0 +1,83 @@ +package main + +import ( + "context" + "log" + "net/http" + "strings" + "time" + + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/amp" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages" +) + +// ampClientOffers is the AMP-speaking endpoint for client poll messages, +// intended for access via an AMP cache. In contrast to the other clientOffers, +// the client's encoded poll message is stored in the URL path rather than the +// HTTP request body (because an AMP cache does not support POST), and the +// encoded client poll response is sent back as AMP-armored HTML. +func ampClientOffers(i *IPC, w http.ResponseWriter, r *http.Request) { + ctx, cancel := context.WithTimeout(r.Context(), ClientTimeout*time.Second) + defer cancel() + + // The encoded client poll message immediately follows the /amp/client/ + // path prefix, so this function unfortunately needs to be aware of and + // remove its own routing prefix. + path := strings.TrimPrefix(r.URL.Path, "/amp/client/") + if path == r.URL.Path { + // The path didn't start with the expected prefix. This probably + // indicates an internal bug. + log.Println("ampClientOffers: unexpected prefix in path") + w.WriteHeader(http.StatusInternalServerError) + return + } + + var encPollReq []byte + var response []byte + var err error + + encPollReq, err = amp.DecodePath(path) + if err == nil { + arg := messages.Arg{ + Body: encPollReq, + RemoteAddr: "", + RendezvousMethod: messages.RendezvousAmpCache, + Context: ctx, + } + err = i.ClientOffers(arg, &response) + } else { + response, err = (&messages.ClientPollResponse{ + Error: "cannot decode URL path", + }).EncodePollResponse() + } + + if err != nil { + // We couldn't even construct a JSON object containing an error + // message :( Nothing to do but signal an error at the HTTP + // layer. The AMP cache will translate this 500 status into a + // 404 status. + // https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/#redirect-%26-error-handling + log.Printf("ampClientOffers: %v", err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "text/html") + // Attempt to hint to an AMP cache not to waste resources caching this + // document. "The Google AMP Cache considers any document fresh for at + // least 15 seconds." 
+ // https://developers.google.com/amp/cache/overview#google-amp-cache-updates + w.Header().Set("Cache-Control", "max-age=15") + w.WriteHeader(http.StatusOK) + + enc, err := amp.NewArmorEncoder(w) + if err != nil { + log.Printf("amp.NewArmorEncoder: %v", err) + return + } + defer enc.Close() + + if _, err := enc.Write(response); err != nil { + log.Printf("ampClientOffers: unable to write answer: %v", err) + } +} diff --git a/broker/bridge-list.go b/broker/bridge-list.go new file mode 100644 index 0000000..00fc3a9 --- /dev/null +++ b/broker/bridge-list.go @@ -0,0 +1,94 @@ +/* (*BridgeListHolderFileBased).LoadBridgeInfo loads a Snowflake Server bridge info description file, + its format is as follows: + + This file should be in newline-delimited JSON format(https://jsonlines.org/). + For each line, the format of json data should be in the format of: + {"displayName":"default", "webSocketAddress":"wss://snowflake.torproject.net/", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80A72"} + + displayName:string is the name of this bridge. This value is not currently used programmatically. + + webSocketAddress:string is the WebSocket URL of this bridge. + This will be the address proxy used to connect to this snowflake server. + + fingerprint:string is the identifier of the bridge. + This will be used by a client to identify the bridge it wishes to connect to. + + The existence of ANY other fields is NOT permitted. + + The file will be considered invalid if there is at least one invalid json record. + In this case, an error will be returned, and none of the records will be loaded. +*/ + +package main + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "io" + "sync" + + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/bridgefingerprint" +) + +var ErrBridgeNotFound = errors.New("bridge with requested fingerprint is unknown to the broker") + +func NewBridgeListHolder() BridgeListHolderFileBased { + return &bridgeListHolder{} +} + +type bridgeListHolder struct { + bridgeInfo map[bridgefingerprint.Fingerprint]BridgeInfo + accessBridgeInfo sync.RWMutex +} + +type BridgeListHolder interface { + GetBridgeInfo(bridgefingerprint.Fingerprint) (BridgeInfo, error) +} + +type BridgeListHolderFileBased interface { + BridgeListHolder + LoadBridgeInfo(reader io.Reader) error +} + +type BridgeInfo struct { + DisplayName string `json:"displayName"` + WebSocketAddress string `json:"webSocketAddress"` + Fingerprint string `json:"fingerprint"` +} + +func (h *bridgeListHolder) GetBridgeInfo(fingerprint bridgefingerprint.Fingerprint) (BridgeInfo, error) { + h.accessBridgeInfo.RLock() + defer h.accessBridgeInfo.RUnlock() + if bridgeInfo, ok := h.bridgeInfo[fingerprint]; ok { + return bridgeInfo, nil + } + return BridgeInfo{}, ErrBridgeNotFound +} + +func (h *bridgeListHolder) LoadBridgeInfo(reader io.Reader) error { + bridgeInfoMap := map[bridgefingerprint.Fingerprint]BridgeInfo{} + inputScanner := bufio.NewScanner(reader) + for inputScanner.Scan() { + inputLine := inputScanner.Bytes() + bridgeInfo := BridgeInfo{} + decoder := json.NewDecoder(bytes.NewReader(inputLine)) + decoder.DisallowUnknownFields() + if err := decoder.Decode(&bridgeInfo); err != nil { + return err + } + + var bridgeFingerprint bridgefingerprint.Fingerprint + var err error + if bridgeFingerprint, err = bridgefingerprint.FingerprintFromHexString(bridgeInfo.Fingerprint); err != nil { + return err + } + + bridgeInfoMap[bridgeFingerprint] = bridgeInfo + } + h.accessBridgeInfo.Lock() + defer 
h.accessBridgeInfo.Unlock() + h.bridgeInfo = bridgeInfoMap + return nil +} diff --git a/broker/bridge-list_test.go b/broker/bridge-list_test.go new file mode 100644 index 0000000..d74a4be --- /dev/null +++ b/broker/bridge-list_test.go @@ -0,0 +1,64 @@ +package main + +import ( + "bytes" + "encoding/hex" + . "github.com/smartystreets/goconvey/convey" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/bridgefingerprint" + "testing" +) + +const DefaultBridges = `{"displayName":"default", "webSocketAddress":"wss://snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80A72"} +` + +const ImaginaryBridges = `{"displayName":"default", "webSocketAddress":"wss://snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80A72"} +{"displayName":"imaginary-1", "webSocketAddress":"wss://imaginary-1-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B00"} +{"displayName":"imaginary-2", "webSocketAddress":"wss://imaginary-2-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B01"} +{"displayName":"imaginary-3", "webSocketAddress":"wss://imaginary-3-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B02"} +{"displayName":"imaginary-4", "webSocketAddress":"wss://imaginary-4-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B03"} +{"displayName":"imaginary-5", "webSocketAddress":"wss://imaginary-5-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B04"} +{"displayName":"imaginary-6", "webSocketAddress":"wss://imaginary-6-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B05"} +{"displayName":"imaginary-7", "webSocketAddress":"wss://imaginary-7-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B06"} +{"displayName":"imaginary-8", "webSocketAddress":"wss://imaginary-8-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B07"} +{"displayName":"imaginary-9", "webSocketAddress":"wss://imaginary-9-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B08"} +{"displayName":"imaginary-10", "webSocketAddress":"wss://imaginary-10-snowflake.torproject.org", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80B09"} +` + +func TestBridgeLoad(t *testing.T) { + Convey("load default list", t, func() { + bridgeList := NewBridgeListHolder() + So(bridgeList.LoadBridgeInfo(bytes.NewReader([]byte(DefaultBridges))), ShouldBeNil) + { + bridgeFingerprint := [20]byte{} + { + n, err := hex.Decode(bridgeFingerprint[:], []byte("2B280B23E1107BB62ABFC40DDCC8824814F80A72")) + So(n, ShouldEqual, 20) + So(err, ShouldBeNil) + } + Fingerprint, err := bridgefingerprint.FingerprintFromBytes(bridgeFingerprint[:]) + So(err, ShouldBeNil) + bridgeInfo, err := bridgeList.GetBridgeInfo(Fingerprint) + So(err, ShouldBeNil) + So(bridgeInfo.DisplayName, ShouldEqual, "default") + So(bridgeInfo.WebSocketAddress, ShouldEqual, "wss://snowflake.torproject.org") + } + }) + Convey("load imaginary list", t, func() { + bridgeList := NewBridgeListHolder() + So(bridgeList.LoadBridgeInfo(bytes.NewReader([]byte(ImaginaryBridges))), ShouldBeNil) + { + bridgeFingerprint := [20]byte{} + { + n, err := hex.Decode(bridgeFingerprint[:], []byte("2B280B23E1107BB62ABFC40DDCC8824814F80B07")) + So(n, ShouldEqual, 20) + So(err, ShouldBeNil) + } + Fingerprint, err := bridgefingerprint.FingerprintFromBytes(bridgeFingerprint[:]) + So(err, ShouldBeNil) + 
bridgeInfo, err := bridgeList.GetBridgeInfo(Fingerprint) + So(err, ShouldBeNil) + So(bridgeInfo.DisplayName, ShouldEqual, "imaginary-8") + So(bridgeInfo.WebSocketAddress, ShouldEqual, "wss://imaginary-8-snowflake.torproject.org") + } + }) +} diff --git a/broker/broker.go b/broker/broker.go index 0a79b8f..8351d8f 100644 --- a/broker/broker.go +++ b/broker/broker.go @@ -6,43 +6,60 @@ SessionDescriptions in order to negotiate a WebRTC connection. package main import ( + "bytes" "container/heap" + "context" "crypto/tls" "flag" - "fmt" "io" - "io/ioutil" "log" - "net" "net/http" "os" "os/signal" "strings" + "sync" "syscall" "time" - "git.torproject.org/pluggable-transports/snowflake.git/common/safelog" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/bridgefingerprint" + + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/sqs" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil/safelog" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/namematcher" "golang.org/x/crypto/acme/autocert" ) -const ( - ClientTimeout = 10 - ProxyTimeout = 10 - readLimit = 100000 //Maximum number of bytes to be read from an HTTP request -) - type BrokerContext struct { - snowflakes *SnowflakeHeap - // Map keeping track of snowflakeIDs required to match SDP answers from - // the second http POST. + snowflakes *SnowflakeHeap + restrictedSnowflakes *SnowflakeHeap + // Maps keeping track of snowflakeIDs required to match SDP answers from + // the second http POST. Restricted snowflakes can only be matched up with + // clients behind an unrestricted NAT. 
idToSnowflake map[string]*Snowflake + // Synchronization for the snowflake map and heap + snowflakeLock sync.Mutex proxyPolls chan *ProxyPoll metrics *Metrics + + bridgeList BridgeListHolderFileBased + allowedRelayPattern string } -func NewBrokerContext(metricsLogger *log.Logger) *BrokerContext { +func (ctx *BrokerContext) GetBridgeInfo(fingerprint bridgefingerprint.Fingerprint) (BridgeInfo, error) { + return ctx.bridgeList.GetBridgeInfo(fingerprint) +} + +func NewBrokerContext( + metricsLogger *log.Logger, + allowedRelayPattern string, +) *BrokerContext { snowflakes := new(SnowflakeHeap) heap.Init(snowflakes) + rSnowflakes := new(SnowflakeHeap) + heap.Init(rSnowflakes) metrics, err := NewMetrics(metricsLogger) if err != nil { @@ -53,42 +70,41 @@ func NewBrokerContext(metricsLogger *log.Logger) *BrokerContext { panic("Failed to create metrics") } + bridgeListHolder := NewBridgeListHolder() + + const DefaultBridges = `{"displayName":"default", "webSocketAddress":"wss://snowflake.torproject.net/", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80A72"} +` + bridgeListHolder.LoadBridgeInfo(bytes.NewReader([]byte(DefaultBridges))) + return &BrokerContext{ - snowflakes: snowflakes, - idToSnowflake: make(map[string]*Snowflake), - proxyPolls: make(chan *ProxyPoll), - metrics: metrics, + snowflakes: snowflakes, + restrictedSnowflakes: rSnowflakes, + idToSnowflake: make(map[string]*Snowflake), + proxyPolls: make(chan *ProxyPoll), + metrics: metrics, + bridgeList: bridgeListHolder, + allowedRelayPattern: allowedRelayPattern, } } -// Implements the http.Handler interface -type SnowflakeHandler struct { - *BrokerContext - handle func(*BrokerContext, http.ResponseWriter, *http.Request) -} - -func (sh SnowflakeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") - w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Session-ID") - // Return early if it's CORS preflight. - if "OPTIONS" == r.Method { - return - } - sh.handle(sh.BrokerContext, w, r) -} - // Proxies may poll for client offers concurrently. type ProxyPoll struct { id string - offerChannel chan []byte + proxyType string + natType string + clients int + offerChannel chan *ClientOffer } // Registers a Snowflake and waits for some Client to send an offer, // as part of the polling logic of the proxy handler. -func (ctx *BrokerContext) RequestOffer(id string) []byte { +func (ctx *BrokerContext) RequestOffer(id string, proxyType string, natType string, clients int) *ClientOffer { request := new(ProxyPoll) request.id = id - request.offerChannel = make(chan []byte) + request.proxyType = proxyType + request.natType = natType + request.clients = clients + request.offerChannel = make(chan *ClientOffer) ctx.proxyPolls <- request // Block until an offer is available, or timeout which sends a nil offer. offer := <-request.offerChannel @@ -100,19 +116,26 @@ func (ctx *BrokerContext) RequestOffer(id string) []byte { // client offer or nil on timeout / none are available. func (ctx *BrokerContext) Broker() { for request := range ctx.proxyPolls { - snowflake := ctx.AddSnowflake(request.id) + snowflake := ctx.AddSnowflake(request.id, request.proxyType, request.natType, request.clients) // Wait for a client to avail an offer to the snowflake. 
go func(request *ProxyPoll) { select { case offer := <-snowflake.offerChannel: - log.Println("Passing client offer to snowflake proxy.") request.offerChannel <- offer case <-time.After(time.Second * ProxyTimeout): // This snowflake is no longer available to serve clients. - // TODO: Fix race using a delete channel - heap.Remove(ctx.snowflakes, snowflake.index) - delete(ctx.idToSnowflake, snowflake.id) - request.offerChannel <- nil + ctx.snowflakeLock.Lock() + defer ctx.snowflakeLock.Unlock() + if snowflake.index != -1 { + if request.natType == NATUnrestricted { + heap.Remove(ctx.snowflakes, snowflake.index) + } else { + heap.Remove(ctx.restrictedSnowflakes, snowflake.index) + } + ctx.metrics.promMetrics.AvailableProxies.With(prometheus.Labels{"nat": request.natType, "type": request.proxyType}).Dec() + delete(ctx.idToSnowflake, snowflake.id) + close(request.offerChannel) + } } }(request) } @@ -121,134 +144,47 @@ func (ctx *BrokerContext) Broker() { // Create and add a Snowflake to the heap. // Required to keep track of proxies between providing them // with an offer and awaiting their second POST with an answer. -func (ctx *BrokerContext) AddSnowflake(id string) *Snowflake { +func (ctx *BrokerContext) AddSnowflake(id string, proxyType string, natType string, clients int) *Snowflake { snowflake := new(Snowflake) snowflake.id = id - snowflake.clients = 0 - snowflake.offerChannel = make(chan []byte) - snowflake.answerChannel = make(chan []byte) - heap.Push(ctx.snowflakes, snowflake) + snowflake.clients = clients + snowflake.proxyType = proxyType + snowflake.natType = natType + snowflake.offerChannel = make(chan *ClientOffer) + snowflake.answerChannel = make(chan string) + ctx.snowflakeLock.Lock() + if natType == NATUnrestricted { + heap.Push(ctx.snowflakes, snowflake) + } else { + heap.Push(ctx.restrictedSnowflakes, snowflake) + } + ctx.metrics.promMetrics.AvailableProxies.With(prometheus.Labels{"nat": natType, "type": proxyType}).Inc() ctx.idToSnowflake[id] = snowflake + ctx.snowflakeLock.Unlock() return snowflake } -/* -For snowflake proxies to request a client from the Broker. -*/ -func proxyPolls(ctx *BrokerContext, w http.ResponseWriter, r *http.Request) { - id := r.Header.Get("X-Session-ID") - body, err := ioutil.ReadAll(http.MaxBytesReader(w, r.Body, readLimit)) - if nil != err { - log.Println("Invalid data.") - w.WriteHeader(http.StatusBadRequest) - return +func (ctx *BrokerContext) InstallBridgeListProfile(reader io.Reader) error { + if err := ctx.bridgeList.LoadBridgeInfo(reader); err != nil { + return err } - if string(body) != id { - log.Println("Mismatched IDs!") - w.WriteHeader(http.StatusBadRequest) - return - } - log.Println("Received snowflake: ", id) - - // Log geoip stats - remoteIP, _, err := net.SplitHostPort(r.RemoteAddr) - if err != nil { - log.Println("Error processing proxy IP: ", err.Error()) - } else { - ctx.metrics.UpdateCountryStats(remoteIP) - } - - // Wait for a client to avail an offer to the snowflake, or timeout if nil. - offer := ctx.RequestOffer(id) - if nil == offer { - log.Println("Proxy " + id + " did not receive a Client offer.") - ctx.metrics.proxyIdleCount++ - w.WriteHeader(http.StatusGatewayTimeout) - return - } - log.Println("Passing client offer to snowflake.") - w.Write(offer) + return nil } -/* -Expects a WebRTC SDP offer in the Request to give to an assigned -snowflake proxy, which responds with the SDP answer to be sent in -the HTTP response back to the client. 
-*/ -func clientOffers(ctx *BrokerContext, w http.ResponseWriter, r *http.Request) { - startTime := time.Now() - offer, err := ioutil.ReadAll(http.MaxBytesReader(w, r.Body, readLimit)) - if nil != err { - log.Println("Invalid data.") - w.WriteHeader(http.StatusBadRequest) - return - } - // Immediately fail if there are no snowflakes available. - if ctx.snowflakes.Len() <= 0 { - log.Println("Client: No snowflake proxies available.") - ctx.metrics.clientDeniedCount++ - w.WriteHeader(http.StatusServiceUnavailable) - return - } - // Otherwise, find the most available snowflake proxy, and pass the offer to it. - // Delete must be deferred in order to correctly process answer request later. - snowflake := heap.Pop(ctx.snowflakes).(*Snowflake) - defer delete(ctx.idToSnowflake, snowflake.id) - snowflake.offerChannel <- offer - - // Wait for the answer to be returned on the channel or timeout. - select { - case answer := <-snowflake.answerChannel: - log.Println("Client: Retrieving answer") - ctx.metrics.clientProxyMatchCount++ - w.Write(answer) - // Initial tracking of elapsed time. - ctx.metrics.clientRoundtripEstimate = time.Since(startTime) / - time.Millisecond - case <-time.After(time.Second * ClientTimeout): - log.Println("Client: Timed out.") - w.WriteHeader(http.StatusGatewayTimeout) - w.Write([]byte("timed out waiting for answer!")) +func (ctx *BrokerContext) CheckProxyRelayPattern(pattern string, nonSupported bool) bool { + if nonSupported { + return false } + proxyPattern := namematcher.NewNameMatcher(pattern) + brokerPattern := namematcher.NewNameMatcher(ctx.allowedRelayPattern) + return proxyPattern.IsSupersetOf(brokerPattern) } -/* -Expects snowflake proxes which have previously successfully received -an offer from proxyHandler to respond with an answer in an HTTP POST, -which the broker will pass back to the original client. -*/ -func proxyAnswers(ctx *BrokerContext, w http.ResponseWriter, r *http.Request) { - id := r.Header.Get("X-Session-ID") - snowflake, ok := ctx.idToSnowflake[id] - if !ok || nil == snowflake { - // The snowflake took too long to respond with an answer, so its client - // disappeared / the snowflake is no longer recognized by the Broker. 
- w.WriteHeader(http.StatusGone) - return - } - body, err := ioutil.ReadAll(http.MaxBytesReader(w, r.Body, readLimit)) - if nil != err || nil == body || len(body) <= 0 { - log.Println("Invalid data.") - w.WriteHeader(http.StatusBadRequest) - return - } - - log.Println("Received answer.") - snowflake.answerChannel <- body -} - -func debugHandler(ctx *BrokerContext, w http.ResponseWriter, r *http.Request) { - s := fmt.Sprintf("current snowflakes available: %d\n", ctx.snowflakes.Len()) - for _, snowflake := range ctx.idToSnowflake { - s += fmt.Sprintf("\nsnowflake %d: %s", snowflake.index, snowflake.id) - } - s += fmt.Sprintf("\n\nroundtrip avg: %d", ctx.metrics.clientRoundtripEstimate) - w.Write([]byte(s)) -} - -func robotsTxtHandler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - w.Write([]byte("User-agent: *\nDisallow: /\n")) +// Client offer contains an SDP, bridge fingerprint and the NAT type of the client +type ClientOffer struct { + natType string + sdp []byte + fingerprint []byte } func main() { @@ -258,10 +194,13 @@ func main() { var addr string var geoipDatabase string var geoip6Database string + var bridgeListFilePath, allowedRelayPattern string + var brokerSQSQueueName, brokerSQSQueueRegion string var disableTLS bool var certFilename, keyFilename string var disableGeoip bool var metricsFilename string + var unsafeLogging bool flag.StringVar(&acmeEmail, "acme-email", "", "optional contact email for Let's Encrypt notifications") flag.StringVar(&acmeHostnamesCommas, "acme-hostnames", "", "comma-separated hostnames for TLS certificate") @@ -271,20 +210,29 @@ func main() { flag.StringVar(&addr, "addr", ":443", "address to listen on") flag.StringVar(&geoipDatabase, "geoipdb", "/usr/share/tor/geoip", "path to correctly formatted geoip database mapping IPv4 address ranges to country codes") flag.StringVar(&geoip6Database, "geoip6db", "/usr/share/tor/geoip6", "path to correctly formatted geoip database mapping IPv6 address ranges to country codes") + flag.StringVar(&bridgeListFilePath, "bridge-list-path", "", "file path for bridgeListFile") + flag.StringVar(&allowedRelayPattern, "allowed-relay-pattern", "", "allowed pattern for relay host name. 
The broker will reject proxies whose AcceptedRelayPattern is more restrictive than this") + flag.StringVar(&brokerSQSQueueName, "broker-sqs-name", "", "name of broker SQS queue to listen for incoming messages on") + flag.StringVar(&brokerSQSQueueRegion, "broker-sqs-region", "", "name of AWS region of broker SQS queue") flag.BoolVar(&disableTLS, "disable-tls", false, "don't use HTTPS") flag.BoolVar(&disableGeoip, "disable-geoip", false, "don't use geoip for stats collection") flag.StringVar(&metricsFilename, "metrics-log", "", "path to metrics logging output") + flag.BoolVar(&unsafeLogging, "unsafe-logging", false, "prevent logs from being scrubbed") flag.Parse() - var err error - var metricsFile io.Writer = os.Stdout + var metricsFile io.Writer var logOutput io.Writer = os.Stderr - //We want to send the log output through our scrubber first - log.SetOutput(&safelog.LogScrubber{Output: logOutput}) + if unsafeLogging { + log.SetOutput(logOutput) + } else { + // We want to send the log output through our scrubber first + log.SetOutput(&safelog.LogScrubber{Output: logOutput}) + } log.SetFlags(log.LstdFlags | log.LUTC) if metricsFilename != "" { + var err error metricsFile, err = os.OpenFile(metricsFilename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) if err != nil { @@ -296,7 +244,18 @@ func main() { metricsLogger := log.New(metricsFile, "", 0) - ctx := NewBrokerContext(metricsLogger) + ctx := NewBrokerContext(metricsLogger, allowedRelayPattern) + + if bridgeListFilePath != "" { + bridgeListFile, err := os.Open(bridgeListFilePath) + if err != nil { + log.Fatal(err.Error()) + } + err = ctx.InstallBridgeListProfile(bridgeListFile) + if err != nil { + log.Fatal(err.Error()) + } + } if !disableGeoip { err := ctx.metrics.LoadGeoipDatabases(geoipDatabase, geoip6Database) @@ -307,17 +266,39 @@ func main() { go ctx.Broker() + i := &IPC{ctx} + http.HandleFunc("/robots.txt", robotsTxtHandler) - http.Handle("/proxy", SnowflakeHandler{ctx, proxyPolls}) - http.Handle("/client", SnowflakeHandler{ctx, clientOffers}) - http.Handle("/answer", SnowflakeHandler{ctx, proxyAnswers}) - http.Handle("/debug", SnowflakeHandler{ctx, debugHandler}) + http.Handle("/proxy", SnowflakeHandler{i, proxyPolls}) + http.Handle("/client", SnowflakeHandler{i, clientOffers}) + http.Handle("/answer", SnowflakeHandler{i, proxyAnswers}) + http.Handle("/debug", SnowflakeHandler{i, debugHandler}) + http.Handle("/metrics", MetricsHandler{metricsFilename, metricsHandler}) + http.Handle("/prometheus", promhttp.HandlerFor(ctx.metrics.promMetrics.registry, promhttp.HandlerOpts{})) + + http.Handle("/amp/client/", SnowflakeHandler{i, ampClientOffers}) server := http.Server{ Addr: addr, } + // Run SQS Handler to continuously poll and process messages from SQS + if brokerSQSQueueName != "" && brokerSQSQueueRegion != "" { + log.Printf("Loading SQSHandler using SQS Queue %s in region %s\n", brokerSQSQueueName, brokerSQSQueueRegion) + sqsHandlerContext := context.Background() + cfg, err := config.LoadDefaultConfig(sqsHandlerContext, config.WithRegion(brokerSQSQueueRegion)) + if err != nil { + log.Fatal(err) + } + client := sqs.NewFromConfig(cfg) + sqsHandler, err := newSQSHandler(sqsHandlerContext, client, brokerSQSQueueName, brokerSQSQueueRegion, i) + if err != nil { + log.Fatal(err) + } + go sqsHandler.PollAndHandleMessages(sqsHandlerContext) + } + sigChan := make(chan os.Signal, 1) signal.Notify(sigChan, syscall.SIGHUP) @@ -327,8 +308,10 @@ func main() { go func() { for { signal := <-sigChan - log.Println("Received signal:", signal, ". 
Reloading geoip databases.") - ctx.metrics.LoadGeoipDatabases(geoipDatabase, geoip6Database) + log.Printf("Received signal: %s. Reloading geoip databases.", signal) + if err := ctx.metrics.LoadGeoipDatabases(geoipDatabase, geoip6Database); err != nil { + log.Fatalf("reload of Geo IP databases on signal %s returned error: %v", signal, err) + } } }() @@ -339,12 +322,13 @@ func main() { // --disable-tls // The outputs of this block of code are the disableTLS, // needHTTP01Listener, certManager, and getCertificate variables. + var err error if acmeHostnamesCommas != "" { acmeHostnames := strings.Split(acmeHostnamesCommas, ",") log.Printf("ACME hostnames: %q", acmeHostnames) var cache autocert.Cache - if err = os.MkdirAll(acmeCertCacheDir, 0700); err != nil { + if err := os.MkdirAll(acmeCertCacheDir, 0700); err != nil { log.Printf("Warning: Couldn't create cache directory %q (reason: %s) so we're *not* using our certificate cache.", acmeCertCacheDir, err) } else { cache = autocert.DirCache(acmeCertCacheDir) diff --git a/broker/geoip.go b/broker/geoip.go deleted file mode 100644 index 5a16a8a..0000000 --- a/broker/geoip.go +++ /dev/null @@ -1,240 +0,0 @@ -/* -This code is for loading database data that maps ip addresses to countries -for collecting and presenting statistics on snowflake use that might alert us -to censorship events. - -The functions here are heavily based off of how tor maintains and searches their -geoip database - -The tables used for geoip data must be structured as follows: - -Recognized line format for IPv4 is: - INTIPLOW,INTIPHIGH,CC - where INTIPLOW and INTIPHIGH are IPv4 addresses encoded as big-endian 4-byte unsigned - integers, and CC is a country code. - -Note that the IPv4 line format - "INTIPLOW","INTIPHIGH","CC","CC3","COUNTRY NAME" -is not currently supported. - -Recognized line format for IPv6 is: - IPV6LOW,IPV6HIGH,CC - where IPV6LOW and IPV6HIGH are IPv6 addresses and CC is a country code. - -It also recognizes, and skips over, blank lines and lines that start -with '#' (comments). 
- -*/ -package main - -import ( - "bufio" - "bytes" - "crypto/sha1" - "encoding/hex" - "fmt" - "io" - "log" - "net" - "os" - "sort" - "strconv" - "strings" - "sync" -) - -type GeoIPTable interface { - parseEntry(string) (*GeoIPEntry, error) - Len() int - Append(GeoIPEntry) - ElementAt(int) GeoIPEntry - Lock() - Unlock() -} - -type GeoIPEntry struct { - ipLow net.IP - ipHigh net.IP - country string -} - -type GeoIPv4Table struct { - table []GeoIPEntry - - lock sync.Mutex // synchronization for geoip table accesses and reloads -} - -type GeoIPv6Table struct { - table []GeoIPEntry - - lock sync.Mutex // synchronization for geoip table accesses and reloads -} - -func (table *GeoIPv4Table) Len() int { return len(table.table) } -func (table *GeoIPv6Table) Len() int { return len(table.table) } - -func (table *GeoIPv4Table) Append(entry GeoIPEntry) { - (*table).table = append(table.table, entry) -} -func (table *GeoIPv6Table) Append(entry GeoIPEntry) { - (*table).table = append(table.table, entry) -} - -func (table *GeoIPv4Table) ElementAt(i int) GeoIPEntry { return table.table[i] } -func (table *GeoIPv6Table) ElementAt(i int) GeoIPEntry { return table.table[i] } - -func (table *GeoIPv4Table) Lock() { (*table).lock.Lock() } -func (table *GeoIPv6Table) Lock() { (*table).lock.Lock() } - -func (table *GeoIPv4Table) Unlock() { (*table).lock.Unlock() } -func (table *GeoIPv6Table) Unlock() { (*table).lock.Unlock() } - -// Convert a geoip IP address represented as a big-endian unsigned integer to net.IP -func geoipStringToIP(ipStr string) (net.IP, error) { - ip, err := strconv.ParseUint(ipStr, 10, 32) - if err != nil { - return net.IPv4(0, 0, 0, 0), fmt.Errorf("Error parsing IP %s", ipStr) - } - var bytes [4]byte - bytes[0] = byte(ip & 0xFF) - bytes[1] = byte((ip >> 8) & 0xFF) - bytes[2] = byte((ip >> 16) & 0xFF) - bytes[3] = byte((ip >> 24) & 0xFF) - - return net.IPv4(bytes[3], bytes[2], bytes[1], bytes[0]), nil -} - -//Parses a line in the provided geoip file that corresponds -//to an address range and a two character country code -func (table *GeoIPv4Table) parseEntry(candidate string) (*GeoIPEntry, error) { - - if candidate[0] == '#' { - return nil, nil - } - - parsedCandidate := strings.Split(candidate, ",") - - if len(parsedCandidate) != 3 { - return nil, fmt.Errorf("Provided geoip file is incorrectly formatted. 
Could not parse line:\n%s", parsedCandidate) - } - - low, err := geoipStringToIP(parsedCandidate[0]) - if err != nil { - return nil, err - } - high, err := geoipStringToIP(parsedCandidate[1]) - if err != nil { - return nil, err - } - - geoipEntry := &GeoIPEntry{ - ipLow: low, - ipHigh: high, - country: parsedCandidate[2], - } - - return geoipEntry, nil -} - -//Parses a line in the provided geoip file that corresponds -//to an address range and a two character country code -func (table *GeoIPv6Table) parseEntry(candidate string) (*GeoIPEntry, error) { - - if candidate[0] == '#' { - return nil, nil - } - - parsedCandidate := strings.Split(candidate, ",") - - if len(parsedCandidate) != 3 { - return nil, fmt.Errorf("") - } - - low := net.ParseIP(parsedCandidate[0]) - if low == nil { - return nil, fmt.Errorf("") - } - high := net.ParseIP(parsedCandidate[1]) - if high == nil { - return nil, fmt.Errorf("") - } - - geoipEntry := &GeoIPEntry{ - ipLow: low, - ipHigh: high, - country: parsedCandidate[2], - } - - return geoipEntry, nil -} - -//Loads provided geoip file into our tables -//Entries are stored in a table -func GeoIPLoadFile(table GeoIPTable, pathname string) error { - //open file - geoipFile, err := os.Open(pathname) - if err != nil { - return err - } - defer geoipFile.Close() - - hash := sha1.New() - - table.Lock() - defer table.Unlock() - - hashedFile := io.TeeReader(geoipFile, hash) - - //read in strings and call parse function - scanner := bufio.NewScanner(hashedFile) - for scanner.Scan() { - entry, err := table.parseEntry(scanner.Text()) - if err != nil { - return fmt.Errorf("Provided geoip file is incorrectly formatted. Line is: %+q", scanner.Text()) - } - - if entry != nil { - table.Append(*entry) - } - - } - if err := scanner.Err(); err != nil { - return err - } - - sha1Hash := hex.EncodeToString(hash.Sum(nil)) - log.Println("Using geoip file ", pathname, " with checksum", sha1Hash) - log.Println("Loaded ", table.Len(), " entries into table") - - return nil -} - -//Returns the country location of an IPv4 or IPv6 address, and a boolean value -//that indicates whether the IP address was present in the geoip database -func GetCountryByAddr(table GeoIPTable, ip net.IP) (string, bool) { - - table.Lock() - defer table.Unlock() - - //look IP up in database - index := sort.Search(table.Len(), func(i int) bool { - entry := table.ElementAt(i) - return (bytes.Compare(ip.To16(), entry.ipHigh.To16()) <= 0) - }) - - if index == table.Len() { - return "", false - } - - // check to see if addr is in the range specified by the returned index - // search on IPs in invalid ranges (e.g., 127.0.0.0/8) will return the - //country code of the next highest range - entry := table.ElementAt(index) - if !(bytes.Compare(ip.To16(), entry.ipLow.To16()) >= 0 && - bytes.Compare(ip.To16(), entry.ipHigh.To16()) <= 0) { - return "", false - } - - return table.ElementAt(index).country, true - -} diff --git a/broker/http.go b/broker/http.go new file mode 100644 index 0000000..ed8e24a --- /dev/null +++ b/broker/http.go @@ -0,0 +1,259 @@ +package main + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "log" + "net/http" + "os" + "time" + + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/util" +) + +const ( + readLimit = 100000 // Maximum number of bytes to be read from an HTTP request +) + +// Implements the http.Handler interface +type SnowflakeHandler struct { + *IPC + handle 
func(*IPC, http.ResponseWriter, *http.Request) +} + +func (sh SnowflakeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Session-ID") + // Return early if it's CORS preflight. + if "OPTIONS" == r.Method { + return + } + sh.handle(sh.IPC, w, r) +} + +// Implements the http.Handler interface +type MetricsHandler struct { + logFilename string + handle func(string, http.ResponseWriter, *http.Request) +} + +func (mh MetricsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Session-ID") + // Return early if it's CORS preflight. + if "OPTIONS" == r.Method { + return + } + mh.handle(mh.logFilename, w, r) +} + +func robotsTxtHandler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + if _, err := w.Write([]byte("User-agent: *\nDisallow: /\n")); err != nil { + log.Printf("robotsTxtHandler unable to write, with this error: %v", err) + } +} + +func metricsHandler(metricsFilename string, w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + + if metricsFilename == "" { + http.NotFound(w, r) + return + } + metricsFile, err := os.OpenFile(metricsFilename, os.O_RDONLY, 0644) + if err != nil { + log.Println("Error opening metrics file for reading") + http.NotFound(w, r) + return + } + + if _, err := io.Copy(w, metricsFile); err != nil { + log.Printf("copying metricsFile returned error: %v", err) + } +} + +func debugHandler(i *IPC, w http.ResponseWriter, r *http.Request) { + var response string + + err := i.Debug(new(interface{}), &response) + if err != nil { + log.Println(err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + if _, err := w.Write([]byte(response)); err != nil { + log.Printf("writing proxy information returned error: %v ", err) + } +} + +/* +For snowflake proxies to request a client from the Broker. +*/ +func proxyPolls(i *IPC, w http.ResponseWriter, r *http.Request) { + body, err := io.ReadAll(http.MaxBytesReader(w, r.Body, readLimit)) + if err != nil { + log.Println("Invalid data.", err.Error()) + w.WriteHeader(http.StatusBadRequest) + return + } + + arg := messages.Arg{ + Body: body, + RemoteAddr: util.GetClientIp(r), + } + + var response []byte + err = i.ProxyPolls(arg, &response) + switch { + case err == nil: + case errors.Is(err, messages.ErrBadRequest): + w.WriteHeader(http.StatusBadRequest) + return + case errors.Is(err, messages.ErrInternal): + fallthrough + default: + log.Println(err) + w.WriteHeader(http.StatusInternalServerError) + return + } + + if _, err := w.Write(response); err != nil { + log.Printf("proxyPolls unable to write offer with error: %v", err) + } +} + +/* +Expects a WebRTC SDP offer in the Request to give to an assigned +snowflake proxy, which responds with the SDP answer to be sent in +the HTTP response back to the client. +*/ +func clientOffers(i *IPC, w http.ResponseWriter, r *http.Request) { + ctx, cancel := context.WithTimeout(r.Context(), ClientTimeout*time.Second) + defer cancel() + + body, err := io.ReadAll(http.MaxBytesReader(w, r.Body, readLimit)) + if err != nil { + log.Printf("Error reading client request: %s", err.Error()) + w.WriteHeader(http.StatusBadRequest) + return + } + + // Handle the legacy version + // + // We support two client message formats. 
The legacy format is for backwards
+	// compatibility and relies heavily on HTTP headers and status codes to convey
+	// information.
+	isLegacy := false
+	if len(body) > 0 && body[0] == '{' {
+		isLegacy = true
+		req := messages.ClientPollRequest{
+			Offer: string(body),
+			NAT:   r.Header.Get("Snowflake-NAT-Type"),
+		}
+		body, err = req.EncodeClientPollRequest()
+		if err != nil {
+			log.Printf("Error shimming the legacy request: %s", err.Error())
+			w.WriteHeader(http.StatusInternalServerError)
+			return
+		}
+	}
+
+	arg := messages.Arg{
+		Body:             body,
+		RemoteAddr:       util.GetClientIp(r),
+		RendezvousMethod: messages.RendezvousHttp,
+		Context:          ctx,
+	}
+
+	var response []byte
+	err = i.ClientOffers(arg, &response)
+	if err != nil {
+		log.Println(err)
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+
+	if isLegacy {
+		resp, err := messages.DecodeClientPollResponse(response)
+		if err != nil {
+			log.Println(err)
+			w.WriteHeader(http.StatusInternalServerError)
+			return
+		}
+		switch resp.Error {
+		case "":
+			response = []byte(resp.Answer)
+		case messages.StrNoProxies:
+			w.WriteHeader(http.StatusServiceUnavailable)
+			return
+		case messages.StrTimedOut:
+			w.WriteHeader(http.StatusGatewayTimeout)
+			return
+		default:
+			panic("unknown error")
+		}
+	}
+
+	if _, err := w.Write(response); err != nil {
+		log.Printf("clientOffers unable to write answer with error: %v", err)
+	}
+}
+
+/*
+Expects snowflake proxies which have previously successfully received
+an offer from proxyHandler to respond with an answer in an HTTP POST,
+which the broker will pass back to the original client.
+*/
+func proxyAnswers(i *IPC, w http.ResponseWriter, r *http.Request) {
+	body, err := io.ReadAll(http.MaxBytesReader(w, r.Body, readLimit))
+	if err != nil {
+		log.Println("Invalid data.", err.Error())
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	err = validateSDP(body)
+	if err != nil {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	arg := messages.Arg{
+		Body:       body,
+		RemoteAddr: util.GetClientIp(r),
+	}
+
+	var response []byte
+	err = i.ProxyAnswers(arg, &response)
+	switch {
+	case err == nil:
+	case errors.Is(err, messages.ErrBadRequest):
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	case errors.Is(err, messages.ErrInternal):
+		fallthrough
+	default:
+		log.Println(err)
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+
+	if _, err := w.Write(response); err != nil {
+		log.Printf("proxyAnswers unable to write answer response with error: %v", err)
+	}
+}
+
+func validateSDP(SDP []byte) error {
+	// TODO: more validation likely needed
+	if !bytes.Contains(SDP, []byte("a=candidate")) {
+		return fmt.Errorf("SDP contains no candidate")
+	}
+
+	return nil
+}
diff --git a/broker/ipc.go b/broker/ipc.go
new file mode 100644
index 0000000..d58baec
--- /dev/null
+++ b/broker/ipc.go
@@ -0,0 +1,272 @@
+package main
+
+import (
+	"container/heap"
+	"encoding/hex"
+	"fmt"
+	"log"
+
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/bridgefingerprint"
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/constants"
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/util"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages"
+)
+
+const (
+	ClientTimeout = constants.BrokerClientTimeout
+	ProxyTimeout  = 10
+
+	NATUnknown      = "unknown"
+	NATRestricted   = "restricted"
+	NATUnrestricted = "unrestricted"
+)
+
+type
IPC struct { + ctx *BrokerContext +} + +func (i *IPC) Debug(_ interface{}, response *string) error { + var unknowns int + var natRestricted, natUnrestricted, natUnknown int + proxyTypes := make(map[string]int) + + i.ctx.snowflakeLock.Lock() + s := fmt.Sprintf("current snowflakes available: %d\n", len(i.ctx.idToSnowflake)) + for _, snowflake := range i.ctx.idToSnowflake { + if messages.KnownProxyTypes[snowflake.proxyType] { + proxyTypes[snowflake.proxyType]++ + } else { + unknowns++ + } + + switch snowflake.natType { + case NATRestricted: + natRestricted++ + case NATUnrestricted: + natUnrestricted++ + default: + natUnknown++ + } + + } + i.ctx.snowflakeLock.Unlock() + + for pType, num := range proxyTypes { + s += fmt.Sprintf("\t%s proxies: %d\n", pType, num) + } + s += fmt.Sprintf("\tunknown proxies: %d", unknowns) + + s += fmt.Sprintf("\nNAT Types available:") + s += fmt.Sprintf("\n\trestricted: %d", natRestricted) + s += fmt.Sprintf("\n\tunrestricted: %d", natUnrestricted) + s += fmt.Sprintf("\n\tunknown: %d", natUnknown) + + *response = s + return nil +} + +func (i *IPC) ProxyPolls(arg messages.Arg, response *[]byte) error { + sid, proxyType, natType, clients, relayPattern, relayPatternSupported, err := messages.DecodeProxyPollRequestWithRelayPrefix(arg.Body) + if err != nil { + return messages.ErrBadRequest + } + + if !relayPatternSupported { + i.ctx.metrics.IncrementCounter("proxy-poll-without-relay-url") + i.ctx.metrics.promMetrics.ProxyPollWithoutRelayURLExtensionTotal.With(prometheus.Labels{"nat": natType, "type": proxyType}).Inc() + } else { + i.ctx.metrics.IncrementCounter("proxy-poll-with-relay-url") + i.ctx.metrics.promMetrics.ProxyPollWithRelayURLExtensionTotal.With(prometheus.Labels{"nat": natType, "type": proxyType}).Inc() + } + + if !i.ctx.CheckProxyRelayPattern(relayPattern, !relayPatternSupported) { + i.ctx.metrics.IncrementCounter("proxy-poll-rejected-relay-url") + i.ctx.metrics.promMetrics.ProxyPollRejectedForRelayURLExtensionTotal.With(prometheus.Labels{"nat": natType, "type": proxyType}).Inc() + + b, err := messages.EncodePollResponseWithRelayURL("", false, "", "", "incorrect relay pattern") + *response = b + if err != nil { + return messages.ErrInternal + } + return nil + } + + // Log geoip stats + remoteIP := arg.RemoteAddr + if err != nil { + log.Println("Warning: cannot process proxy IP: ", err.Error()) + } else { + i.ctx.metrics.UpdateProxyStats(remoteIP, proxyType, natType) + } + + var b []byte + + // Wait for a client to avail an offer to the snowflake, or timeout if nil. 
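The promMetrics calls in this function use client_golang's labelled counters, where With selects one time series per label combination. A minimal sketch of that API; the registry wiring mirrors the /prometheus handler installed in main, but the counter name and port here are illustrative:

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        polls := prometheus.NewCounterVec(
            prometheus.CounterOpts{
                Namespace: "snowflake",
                Name:      "example_poll_total",
                Help:      "Illustrative counter; one time series per label combination.",
            },
            []string{"nat", "type", "status"},
        )
        reg := prometheus.NewRegistry()
        reg.MustRegister(polls)

        // Each distinct label set is its own series.
        polls.With(prometheus.Labels{"nat": "unrestricted", "type": "webext", "status": "matched"}).Inc()
        polls.With(prometheus.Labels{"nat": "restricted", "type": "standalone", "status": "idle"}).Inc()

        // Exposed the same way main() wires up the /prometheus endpoint.
        http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
        http.ListenAndServe("localhost:2112", nil)
    }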
+ offer := i.ctx.RequestOffer(sid, proxyType, natType, clients) + + if offer == nil { + i.ctx.metrics.IncrementCounter("proxy-idle") + i.ctx.metrics.promMetrics.ProxyPollTotal.With(prometheus.Labels{"nat": natType, "type": proxyType, "status": "idle"}).Inc() + + b, err = messages.EncodePollResponse("", false, "") + if err != nil { + return messages.ErrInternal + } + + *response = b + return nil + } + + i.ctx.metrics.promMetrics.ProxyPollTotal.With(prometheus.Labels{"nat": natType, "type": proxyType, "status": "matched"}).Inc() + var relayURL string + bridgeFingerprint, err := bridgefingerprint.FingerprintFromBytes(offer.fingerprint) + if err != nil { + return messages.ErrBadRequest + } + if info, err := i.ctx.bridgeList.GetBridgeInfo(bridgeFingerprint); err != nil { + return err + } else { + relayURL = info.WebSocketAddress + } + b, err = messages.EncodePollResponseWithRelayURL(string(offer.sdp), true, offer.natType, relayURL, "") + if err != nil { + return messages.ErrInternal + } + *response = b + + return nil +} + +func sendClientResponse(resp *messages.ClientPollResponse, response *[]byte) error { + data, err := resp.EncodePollResponse() + if err != nil { + log.Printf("error encoding answer") + return messages.ErrInternal + } else { + *response = []byte(data) + return nil + } +} + +func (i *IPC) ClientOffers(arg messages.Arg, response *[]byte) error { + + req, err := messages.DecodeClientPollRequest(arg.Body) + if err != nil { + return sendClientResponse(&messages.ClientPollResponse{Error: err.Error()}, response) + } + + // If we couldn't extract the remote IP from the rendezvous method + // pull it from the offer SDP + remoteAddr := arg.RemoteAddr + if remoteAddr == "" { + sdp, err := util.DeserializeSessionDescription(req.Offer) + if err == nil { + candidateAddrs := util.GetCandidateAddrs(sdp.SDP) + if len(candidateAddrs) > 0 { + remoteAddr = candidateAddrs[0].String() + } + } + } + + offer := &ClientOffer{ + natType: req.NAT, + sdp: []byte(req.Offer), + } + + fingerprint, err := hex.DecodeString(req.Fingerprint) + if err != nil { + return sendClientResponse(&messages.ClientPollResponse{Error: err.Error()}, response) + } + + BridgeFingerprint, err := bridgefingerprint.FingerprintFromBytes(fingerprint) + if err != nil { + return sendClientResponse(&messages.ClientPollResponse{Error: err.Error()}, response) + } + + if _, err := i.ctx.GetBridgeInfo(BridgeFingerprint); err != nil { + return sendClientResponse( + &messages.ClientPollResponse{Error: err.Error()}, + response, + ) + } + + offer.fingerprint = BridgeFingerprint.ToBytes() + + snowflake := i.matchSnowflake(offer.natType) + if snowflake != nil { + snowflake.offerChannel <- offer + } else { + i.ctx.metrics.UpdateClientStats(remoteAddr, arg.RendezvousMethod, offer.natType, "denied") + resp := &messages.ClientPollResponse{Error: messages.StrNoProxies} + return sendClientResponse(resp, response) + } + + // Wait for the answer to be returned on the channel or timeout. 
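The timeout below is driven entirely by the caller's context: clientOffers in broker/http.go derives it with context.WithTimeout(r.Context(), ClientTimeout*time.Second), so this layer never owns a timer of its own. The same select pattern in isolation:

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    func main() {
        // The HTTP handler derives the deadline; the IPC layer only
        // selects on ctx.Done().
        ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
        defer cancel()

        answer := make(chan string) // stands in for snowflake.answerChannel

        select {
        case a := <-answer:
            fmt.Println("matched:", a)
        case <-ctx.Done():
            fmt.Println("timed out:", ctx.Err()) // context.DeadlineExceeded
        }
    }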
+	select {
+	case answer := <-snowflake.answerChannel:
+		i.ctx.metrics.UpdateClientStats(remoteAddr, arg.RendezvousMethod, offer.natType, "matched")
+		resp := &messages.ClientPollResponse{Answer: answer}
+		err = sendClientResponse(resp, response)
+	case <-arg.Context.Done():
+		i.ctx.metrics.UpdateClientStats(remoteAddr, arg.RendezvousMethod, offer.natType, "timeout")
+		resp := &messages.ClientPollResponse{Error: messages.StrTimedOut}
+		err = sendClientResponse(resp, response)
+	}
+
+	i.ctx.snowflakeLock.Lock()
+	i.ctx.metrics.promMetrics.AvailableProxies.With(prometheus.Labels{"nat": snowflake.natType, "type": snowflake.proxyType}).Dec()
+	delete(i.ctx.idToSnowflake, snowflake.id)
+	i.ctx.snowflakeLock.Unlock()
+
+	return err
+}
+
+func (i *IPC) matchSnowflake(natType string) *Snowflake {
+	i.ctx.snowflakeLock.Lock()
+	defer i.ctx.snowflakeLock.Unlock()
+
+	// Prioritize known restricted snowflakes for unrestricted clients
+	if natType == NATUnrestricted && i.ctx.restrictedSnowflakes.Len() > 0 {
+		return heap.Pop(i.ctx.restrictedSnowflakes).(*Snowflake)
+	}
+
+	if i.ctx.snowflakes.Len() > 0 {
+		return heap.Pop(i.ctx.snowflakes).(*Snowflake)
+	}
+
+	return nil
+}
+
+func (i *IPC) ProxyAnswers(arg messages.Arg, response *[]byte) error {
+	answer, id, err := messages.DecodeAnswerRequest(arg.Body)
+	if err != nil || answer == "" {
+		return messages.ErrBadRequest
+	}
+
+	var success = true
+	i.ctx.snowflakeLock.Lock()
+	snowflake, ok := i.ctx.idToSnowflake[id]
+	i.ctx.snowflakeLock.Unlock()
+	if !ok || snowflake == nil {
+		// The snowflake took too long to respond with an answer, so its client
+		// disappeared / the snowflake is no longer recognized by the Broker.
+		success = false
+		i.ctx.metrics.promMetrics.ProxyAnswerTotal.With(prometheus.Labels{"type": "", "status": "timeout"}).Inc()
+	}
+
+	b, err := messages.EncodeAnswerResponse(success)
+	if err != nil {
+		log.Printf("Error encoding answer: %s", err.Error())
+		return messages.ErrInternal
+	}
+	*response = b
+
+	if success {
+		i.ctx.metrics.promMetrics.ProxyAnswerTotal.With(prometheus.Labels{"type": snowflake.proxyType, "status": "success"}).Inc()
+		snowflake.answerChannel <- answer
+	}
+
+	return nil
+}
diff --git a/broker/metrics.go b/broker/metrics.go
index ffa61e2..c1d4e4a 100644
--- a/broker/metrics.go
+++ b/broker/metrics.go
@@ -1,198 +1,390 @@
 /*
-We export metrics in the following format:
-
-	"snowflake-stats-end" YYYY-MM-DD HH:MM:SS (NSEC s) NL
-		[At most once.]
-
-		YYYY-MM-DD HH:MM:SS defines the end of the included measurement
-		interval of length NSEC seconds (86400 seconds by default).
-
-	"snowflake-ips" CC=NUM,CC=NUM,... NL
-		[At most once.]
-
-		List of mappings from two-letter country codes to the number of
-		unique IP addresses of snowflake proxies that have polled.
-
-	"snowflake-ips-total" NUM NL
-		[At most once.]
-
-		A count of the total number of unique IP addresses of snowflake
-		proxies that have polled.
-
-	"snowflake-idle-count" NUM NL
-		[At most once.]
-
-		A count of the number of times a proxy has polled but received
-		no client offer, rounded up to the nearest multiple of 8.
-
-	"client-denied-count" NUM NL
-		[At most once.]
-
-		A count of the number of times a client has requested a proxy
-		from the broker but no proxies were available, rounded up to
-		the nearest multiple of 8.
-
-	"client-snowflake-match-count" NUM NL
-		[At most once.]
-
-		A count of the number of times a client successfully received a
-		proxy from the broker, rounded up to the nearest multiple of 8.
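Both the old format above and the new one round sensitive counts up to a multiple of 8 before they are published. A standalone sketch of that rounding, mirroring the binCount helper defined near the end of this file, with a few worked values:

    package main

    import "fmt"

    // binCount, as defined later in this file: round up to the next
    // multiple of 8, so exact counts are never reported.
    func binCount(count uint64) uint64 {
        return (count + 7) / 8 * 8
    }

    func main() {
        for _, n := range []uint64{0, 1, 7, 8, 9, 100} {
            fmt.Printf("binCount(%d) = %d\n", n, binCount(n))
        }
        // binCount(0)=0, binCount(1)=8, binCount(7)=8,
        // binCount(8)=8, binCount(9)=16, binCount(100)=104
    }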
+We export metrics in the format specified in our broker spec: +https://gitweb.torproject.org/pluggable-transports/snowflake.git/tree/doc/broker-spec.txt */ package main import ( - // "golang.org/x/net/internal/timeseries" "fmt" "log" - "math" "net" + "sort" + "strings" "sync" + "sync/atomic" "time" + + "github.com/prometheus/client_golang/prometheus" + "gitlab.torproject.org/tpo/anti-censorship/geoip" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil/safeprom" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages" ) -var ( - once sync.Once +const ( + prometheusNamespace = "snowflake" + metricsResolution = 60 * 60 * 24 * time.Second //86400 seconds ) -const metricsResolution = 60 * 60 * 24 * time.Second //86400 seconds - -type CountryStats struct { - addrs map[string]bool - counts map[string]int -} - -// Implements Observable type Metrics struct { logger *log.Logger - tablev4 *GeoIPv4Table - tablev6 *GeoIPv6Table + geoipdb *geoip.Geoip - countryStats CountryStats - clientRoundtripEstimate time.Duration - proxyIdleCount uint - clientDeniedCount uint - clientProxyMatchCount uint -} + ips *sync.Map // proxy IP addresses we've seen before + counters *sync.Map // counters for ip-based metrics -func (s CountryStats) Display() string { - output := "" - for cc, count := range s.counts { - output += fmt.Sprintf("%s=%d,", cc, count) - } + // counters for country-based metrics + proxies *sync.Map // ip-based counts of proxy country codes + clientHTTPPolls *sync.Map // poll-based counts of client HTTP rendezvous + clientAMPPolls *sync.Map // poll-based counts of client AMP cache rendezvous + clientSQSPolls *sync.Map // poll-based counts of client SQS rendezvous - // cut off trailing "," - if len(output) > 0 { - return output[:len(output)-1] - } - - return output -} - -func (m *Metrics) UpdateCountryStats(addr string) { - - var country string - var ok bool - - if m.countryStats.addrs[addr] { - return - } - - ip := net.ParseIP(addr) - if ip.To4() != nil { - //This is an IPv4 address - if m.tablev4 == nil { - return - } - country, ok = GetCountryByAddr(m.tablev4, ip) - } else { - if m.tablev6 == nil { - return - } - country, ok = GetCountryByAddr(m.tablev6, ip) - } - - if !ok { - country = "??" 
- log.Println("Unknown geoip") - } - - //update map of unique ips and counts - m.countryStats.counts[country]++ - m.countryStats.addrs[addr] = true - - return -} - -func (m *Metrics) LoadGeoipDatabases(geoipDB string, geoip6DB string) error { - - // Load geoip databases - log.Println("Loading geoip databases") - tablev4 := new(GeoIPv4Table) - err := GeoIPLoadFile(tablev4, geoipDB) - if err != nil { - m.tablev4 = nil - return err - } else { - m.tablev4 = tablev4 - } - - tablev6 := new(GeoIPv6Table) - err = GeoIPLoadFile(tablev6, geoip6DB) - if err != nil { - m.tablev6 = nil - return err - } else { - m.tablev6 = tablev6 - } - - return nil + promMetrics *PromMetrics } func NewMetrics(metricsLogger *log.Logger) (*Metrics, error) { m := new(Metrics) - m.countryStats = CountryStats{ - counts: make(map[string]int), - addrs: make(map[string]bool), - } - m.logger = metricsLogger + m.promMetrics = initPrometheus() + m.ips = new(sync.Map) + m.counters = new(sync.Map) + m.proxies = new(sync.Map) + m.clientHTTPPolls = new(sync.Map) + m.clientAMPPolls = new(sync.Map) + m.clientSQSPolls = new(sync.Map) - // Write to log file every hour with updated metrics - go once.Do(m.logMetrics) + // Write to log file every day with updated metrics + go m.logMetrics() return m, nil } +func incrementMapCounter(counters *sync.Map, key string) { + start := uint64(1) + val, loaded := counters.LoadOrStore(key, &start) + if loaded { + ptr := val.(*uint64) + atomic.AddUint64(ptr, 1) + } +} + +func (m *Metrics) IncrementCounter(key string) { + incrementMapCounter(m.counters, key) +} + +func (m *Metrics) UpdateProxyStats(addr string, proxyType string, natType string) { + + // perform geolocation of IP address + ip := net.ParseIP(addr) + if m.geoipdb == nil { + return + } + country, ok := m.geoipdb.GetCountryByAddr(ip) + if !ok { + country = "??" + } + + // check whether we've seen this proxy ip before + if _, loaded := m.ips.LoadOrStore(addr, true); !loaded { + m.IncrementCounter("proxy-total") + incrementMapCounter(m.proxies, country) + m.promMetrics.ProxyTotal.With(prometheus.Labels{ + "nat": natType, + "type": proxyType, + "cc": country, + }).Inc() + } + + // update unique IP proxy NAT metrics + key := fmt.Sprintf("%s-%s", addr, natType) + if _, loaded := m.ips.LoadOrStore(key, true); !loaded { + switch natType { + case NATRestricted: + m.IncrementCounter("proxy-nat-restricted") + case NATUnrestricted: + m.IncrementCounter("proxy-nat-unrestricted") + default: + m.IncrementCounter("proxy-nat-unknown") + } + } + // update unique IP proxy type metrics + key = fmt.Sprintf("%s-%s", addr, proxyType) + if _, loaded := m.ips.LoadOrStore(key, true); !loaded { + switch proxyType { + case "standalone": + m.IncrementCounter("proxy-standalone") + case "badge": + m.IncrementCounter("proxy-badge") + case "iptproxy": + m.IncrementCounter("proxy-iptproxy") + case "webext": + m.IncrementCounter("proxy-webext") + } + } +} + +func (m *Metrics) UpdateClientStats(addr string, rendezvousMethod messages.RendezvousMethod, natType, status string) { + ip := net.ParseIP(addr) + country := "??" 
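Country lookups in this file go through the shared geoip package; a short sketch of that API as it is used by LoadGeoipDatabases and the stats updaters. The database paths are the Debian defaults from the broker's flags, and the address is a documentation IP:

    package main

    import (
        "fmt"
        "net"

        "gitlab.torproject.org/tpo/anti-censorship/geoip"
    )

    func main() {
        // Same call pattern as LoadGeoipDatabases below.
        db, err := geoip.New("/usr/share/tor/geoip", "/usr/share/tor/geoip6")
        if err != nil {
            panic(err)
        }
        country, ok := db.GetCountryByAddr(net.ParseIP("203.0.113.5"))
        if !ok {
            country = "??" // the same sentinel the metrics code falls back to
        }
        fmt.Println(country)
    }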
+ if m.geoipdb != nil { + country_by_addr, ok := m.geoipdb.GetCountryByAddr(ip) + if ok { + country = country_by_addr + } + } + + switch status { + case "denied": + m.IncrementCounter("client-denied") + if natType == NATUnrestricted { + m.IncrementCounter("client-unrestricted-denied") + } else { + m.IncrementCounter("client-restricted-denied") + } + case "matched": + m.IncrementCounter("client-match") + case "timeout": + m.IncrementCounter("client-timeout") + default: + log.Printf("Unknown rendezvous status: %s", status) + } + + switch rendezvousMethod { + case messages.RendezvousHttp: + m.IncrementCounter("client-http") + incrementMapCounter(m.clientHTTPPolls, country) + case messages.RendezvousAmpCache: + m.IncrementCounter("client-amp") + incrementMapCounter(m.clientAMPPolls, country) + case messages.RendezvousSqs: + m.IncrementCounter("client-sqs") + incrementMapCounter(m.clientSQSPolls, country) + } + m.promMetrics.ClientPollTotal.With(prometheus.Labels{ + "nat": natType, + "status": status, + "rendezvous_method": string(rendezvousMethod), + "cc": country, + }).Inc() +} + +// Types to facilitate sorting in formatAndClearCountryStats. +type record struct { + cc string + count uint64 +} +type records []record + +// Implementation of sort.Interface for records. The ordering is lexicographic: +// first by count (descending), then by cc (ascending). +func (r records) Len() int { return len(r) } +func (r records) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r records) Less(i, j int) bool { + return r[i].count > r[j].count || (r[i].count == r[j].count && r[i].cc < r[j].cc) +} + +// formatAndClearCountryStats takes a map from country codes to counts, and +// returns a formatted string of comma-separated CC=COUNT. Entries are sorted by +// count from largest to smallest. When counts are equal, entries are sorted by +// country code in ascending order. +// +// formatAndClearCountryStats has the side effect of deleting all entries in m. +func formatAndClearCountryStats(m *sync.Map, binned bool) string { + // Extract entries from the map into a slice of records, binning counts + // if asked to. + rs := records{} + m.Range(func(cc, countPtr any) bool { + count := *countPtr.(*uint64) + if binned { + count = binCount(count) + } + rs = append(rs, record{cc: cc.(string), count: count}) + m.Delete(cc) + return true + }) + // Sort the records. + sort.Sort(rs) + // Format and concatenate. 
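The function drains the map as it goes, sorts by count descending with the country code as a tiebreaker, and joins the entries as CC=COUNT (the test in broker/metrics_test.go pins the exact expected strings). The drain-and-sort step in isolation, using sort.Slice in place of the records type defined above:

    package main

    import (
        "fmt"
        "sort"
        "strings"
        "sync"
    )

    func main() {
        // Country code -> *uint64, the shape incrementMapCounter stores.
        stats := new(sync.Map)
        for cc, n := range map[string]uint64{"FR": 200, "RU": 200, "TZ": 100, "IT": 50} {
            count := n
            stats.Store(cc, &count)
        }

        type record struct {
            cc    string
            count uint64
        }
        var rs []record
        stats.Range(func(cc, countPtr any) bool {
            rs = append(rs, record{cc.(string), *countPtr.(*uint64)})
            stats.Delete(cc) // the real function clears the map as it drains it
            return true
        })

        // Count descending, country code ascending on ties.
        sort.Slice(rs, func(i, j int) bool {
            if rs[i].count != rs[j].count {
                return rs[i].count > rs[j].count
            }
            return rs[i].cc < rs[j].cc
        })

        parts := make([]string, len(rs))
        for i, r := range rs {
            parts[i] = fmt.Sprintf("%s=%d", r.cc, r.count)
        }
        fmt.Println(strings.Join(parts, ",")) // FR=200,RU=200,TZ=100,IT=50
    }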
+ var output strings.Builder + for i, r := range rs { + if i != 0 { + output.WriteString(",") + } + fmt.Fprintf(&output, "%s=%d", r.cc, r.count) + } + return output.String() +} + +func (m *Metrics) LoadGeoipDatabases(geoipDB string, geoip6DB string) error { + + // Load geoip databases + var err error + log.Println("Loading geoip databases") + m.geoipdb, err = geoip.New(geoipDB, geoip6DB) + return err +} + // Logs metrics in intervals specified by metricsResolution func (m *Metrics) logMetrics() { heartbeat := time.Tick(metricsResolution) for range heartbeat { m.printMetrics() - m.zeroMetrics() } } +func (m *Metrics) loadAndZero(key string) uint64 { + count, loaded := m.counters.LoadAndDelete(key) + if !loaded { + count = new(uint64) + } + ptr := count.(*uint64) + return *ptr +} + func (m *Metrics) printMetrics() { - m.logger.Println("snowflake-stats-end", time.Now().UTC().Format("2006-01-02 15:04:05"), fmt.Sprintf("(%d s)", int(metricsResolution.Seconds()))) - m.logger.Println("snowflake-ips", m.countryStats.Display()) - m.logger.Println("snowflake-ips-total", len(m.countryStats.addrs)) - m.logger.Println("snowflake-idle-count", binCount(m.proxyIdleCount)) - m.logger.Println("client-denied-count", binCount(m.clientDeniedCount)) - m.logger.Println("client-snowflake-match-count", binCount(m.clientProxyMatchCount)) + m.logger.Println( + "snowflake-stats-end", + time.Now().UTC().Format("2006-01-02 15:04:05"), + fmt.Sprintf("(%d s)", int(metricsResolution.Seconds())), + ) + m.logger.Println("snowflake-ips", formatAndClearCountryStats(m.proxies, false)) + m.logger.Printf("snowflake-ips-iptproxy %d\n", m.loadAndZero("proxy-iptproxy")) + m.logger.Printf("snowflake-ips-standalone %d\n", m.loadAndZero("proxy-standalone")) + m.logger.Printf("snowflake-ips-webext %d\n", m.loadAndZero("proxy-webext")) + m.logger.Printf("snowflake-ips-badge %d\n", m.loadAndZero("proxy-badge")) + m.logger.Println("snowflake-ips-total", m.loadAndZero("proxy-total")) + m.logger.Println("snowflake-idle-count", binCount(m.loadAndZero("proxy-idle"))) + m.logger.Println("snowflake-proxy-poll-with-relay-url-count", binCount(m.loadAndZero("proxy-poll-with-relay-url"))) + m.logger.Println("snowflake-proxy-poll-without-relay-url-count", binCount(m.loadAndZero("proxy-poll-without-relay-url"))) + m.logger.Println("snowflake-proxy-rejected-for-relay-url-count", binCount(m.loadAndZero("proxy-poll-rejected-relay-url"))) + + m.logger.Println("client-denied-count", binCount(m.loadAndZero("client-denied"))) + m.logger.Println("client-restricted-denied-count", binCount(m.loadAndZero("client-restricted-denied"))) + m.logger.Println("client-unrestricted-denied-count", binCount(m.loadAndZero("client-unrestricted-denied"))) + m.logger.Println("client-snowflake-match-count", binCount(m.loadAndZero("client-match"))) + m.logger.Println("client-snowflake-timeout-count", binCount(m.loadAndZero("client-timeout"))) + + m.logger.Printf("client-http-count %d\n", binCount(m.loadAndZero("client-http"))) + m.logger.Printf("client-http-ips %s\n", formatAndClearCountryStats(m.clientHTTPPolls, true)) + m.logger.Printf("client-ampcache-count %d\n", binCount(m.loadAndZero("client-amp"))) + m.logger.Printf("client-ampcache-ips %s\n", formatAndClearCountryStats(m.clientAMPPolls, true)) + m.logger.Printf("client-sqs-count %d\n", binCount(m.loadAndZero("client-sqs"))) + m.logger.Printf("client-sqs-ips %s\n", formatAndClearCountryStats(m.clientSQSPolls, true)) + + m.logger.Println("snowflake-ips-nat-restricted", m.loadAndZero("proxy-nat-restricted")) + 
m.logger.Println("snowflake-ips-nat-unrestricted", m.loadAndZero("proxy-nat-unrestricted")) + m.logger.Println("snowflake-ips-nat-unknown", m.loadAndZero("proxy-nat-unknown")) + + m.ips.Clear() } -// Restores all metrics to original values -func (m *Metrics) zeroMetrics() { - m.proxyIdleCount = 0 - m.clientDeniedCount = 0 - m.clientProxyMatchCount = 0 - m.countryStats.counts = make(map[string]int) - m.countryStats.addrs = make(map[string]bool) +// binCount rounds count up to the next multiple of 8. Returns 0 on integer +// overflow. +func binCount(count uint64) uint64 { + return (count + 7) / 8 * 8 } -// Rounds up a count to the nearest multiple of 8. -func binCount(count uint) uint { - return uint((math.Ceil(float64(count) / 8)) * 8) +type PromMetrics struct { + registry *prometheus.Registry + ProxyTotal *prometheus.CounterVec + ProxyPollTotal *safeprom.CounterVec + ClientPollTotal *safeprom.CounterVec + ProxyAnswerTotal *safeprom.CounterVec + AvailableProxies *prometheus.GaugeVec + + ProxyPollWithRelayURLExtensionTotal *safeprom.CounterVec + ProxyPollWithoutRelayURLExtensionTotal *safeprom.CounterVec + + ProxyPollRejectedForRelayURLExtensionTotal *safeprom.CounterVec +} + +// Initialize metrics for prometheus exporter +func initPrometheus() *PromMetrics { + promMetrics := &PromMetrics{} + + promMetrics.registry = prometheus.NewRegistry() + + promMetrics.ProxyTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "proxy_total", + Help: "The number of unique snowflake IPs", + }, + []string{"type", "nat", "cc"}, + ) + + promMetrics.AvailableProxies = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: prometheusNamespace, + Name: "available_proxies", + Help: "The number of currently available snowflake proxies", + }, + []string{"type", "nat"}, + ) + + promMetrics.ProxyPollTotal = safeprom.NewCounterVec( + prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "rounded_proxy_poll_total", + Help: "The number of snowflake proxy polls, rounded up to a multiple of 8", + }, + []string{"nat", "type", "status"}, + ) + + promMetrics.ProxyAnswerTotal = safeprom.NewCounterVec( + prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "rounded_proxy_answer_total", + Help: "The number of snowflake proxy answers, rounded up to a multiple of 8", + }, + []string{"type", "status"}, + ) + + promMetrics.ProxyPollWithRelayURLExtensionTotal = safeprom.NewCounterVec( + prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "rounded_proxy_poll_with_relay_url_extension_total", + Help: "The number of snowflake proxy polls with Relay URL Extension, rounded up to a multiple of 8", + }, + []string{"nat", "type"}, + ) + + promMetrics.ProxyPollWithoutRelayURLExtensionTotal = safeprom.NewCounterVec( + prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "rounded_proxy_poll_without_relay_url_extension_total", + Help: "The number of snowflake proxy polls without Relay URL Extension, rounded up to a multiple of 8", + }, + []string{"nat", "type"}, + ) + + promMetrics.ProxyPollRejectedForRelayURLExtensionTotal = safeprom.NewCounterVec( + prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: "rounded_proxy_poll_rejected_relay_url_extension_total", + Help: "The number of snowflake proxy polls rejected by Relay URL Extension, rounded up to a multiple of 8", + }, + []string{"nat", "type"}, + ) + + promMetrics.ClientPollTotal = safeprom.NewCounterVec( + prometheus.CounterOpts{ + Namespace: prometheusNamespace, + Name: 
"rounded_client_poll_total", + Help: "The number of snowflake client polls, rounded up to a multiple of 8", + }, + []string{"nat", "status", "cc", "rendezvous_method"}, + ) + + // We need to register our metrics so they can be exported. + promMetrics.registry.MustRegister( + promMetrics.ClientPollTotal, promMetrics.ProxyPollTotal, + promMetrics.ProxyTotal, promMetrics.ProxyAnswerTotal, promMetrics.AvailableProxies, + promMetrics.ProxyPollWithRelayURLExtensionTotal, + promMetrics.ProxyPollWithoutRelayURLExtensionTotal, + promMetrics.ProxyPollRejectedForRelayURLExtensionTotal, + ) + + return promMetrics } diff --git a/broker/metrics_test.go b/broker/metrics_test.go new file mode 100644 index 0000000..fad1958 --- /dev/null +++ b/broker/metrics_test.go @@ -0,0 +1,47 @@ +package main + +import ( + "sync" + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func TestFormatAndClearCountryStats(t *testing.T) { + Convey("given a mapping of country stats", t, func() { + stats := new(sync.Map) + for _, record := range []struct { + cc string + count uint64 + }{ + {"IT", 50}, + {"FR", 200}, + {"TZ", 100}, + {"CN", 250}, + {"RU", 150}, + {"CA", 1}, + {"BE", 1}, + {"PH", 1}, + // The next 3 bin to the same value, 112. When not + // binned, they should go in the order MY,ZA,AT (ordered + // by count). When binned, they should go in the order + // AT,MY,ZA (ordered by country code). + {"AT", 105}, + {"MY", 112}, + {"ZA", 108}, + } { + stats.Store(record.cc, &record.count) + } + + Convey("the order should be correct with binned=false", func() { + So(formatAndClearCountryStats(stats, false), ShouldEqual, "CN=250,FR=200,RU=150,MY=112,ZA=108,AT=105,TZ=100,IT=50,BE=1,CA=1,PH=1") + }) + + Convey("the order should be correct with binned=true", func() { + So(formatAndClearCountryStats(stats, true), ShouldEqual, "CN=256,FR=200,RU=152,AT=112,MY=112,ZA=112,TZ=104,IT=56,BE=8,CA=8,PH=8") + }) + + // The map should be cleared on return. + stats.Range(func(_, _ any) bool { panic("map was not cleared") }) + }) +} diff --git a/broker/snowflake-broker_test.go b/broker/snowflake-broker_test.go index 5afbe33..3eb4ba5 100644 --- a/broker/snowflake-broker_test.go +++ b/broker/snowflake-broker_test.go @@ -3,32 +3,103 @@ package main import ( "bytes" "container/heap" - . "github.com/smartystreets/goconvey/convey" - "io/ioutil" + "crypto/rand" + "encoding/base64" + "encoding/hex" + "fmt" + "io" "log" - "net" "net/http" "net/http/httptest" "os" + "strings" + "sync" "testing" "time" + + . 
"github.com/smartystreets/goconvey/convey" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/amp" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages" ) func NullLogger() *log.Logger { logger := log.New(os.Stdout, "", 0) - logger.SetOutput(ioutil.Discard) + logger.SetOutput(io.Discard) return logger } +var promOnce sync.Once + +var ( + sdp = "v=0\r\n" + + "o=- 123456789 987654321 IN IP4 0.0.0.0\r\n" + + "s=-\r\n" + + "t=0 0\r\n" + + "a=fingerprint:sha-256 12:34\r\n" + + "a=extmap-allow-mixed\r\n" + + "a=group:BUNDLE 0\r\n" + + "m=application 9 UDP/DTLS/SCTP webrtc-datachannel\r\n" + + "c=IN IP4 0.0.0.0\r\n" + + "a=setup:actpass\r\n" + + "a=mid:0\r\n" + + "a=sendrecv\r\n" + + "a=sctp-port:5000\r\n" + + "a=ice-ufrag:CoVEaiFXRGVzshXG\r\n" + + "a=ice-pwd:aOrOZXraTfFKzyeBxIXYYKjSgRVPGhUx\r\n" + + "a=candidate:1000 1 udp 2000 8.8.8.8 3000 typ host\r\n" + + "a=end-of-candidates\r\n" + + rawOffer = `{"type":"offer","sdp":"v=0\r\no=- 4358805017720277108 2 IN IP4 0.0.0.0\r\ns=-\r\nt=0 0\r\na=group:BUNDLE data\r\na=msid-semantic: WMS\r\nm=application 56688 DTLS/SCTP 5000\r\nc=IN IP4 0.0.0.0\r\na=candidate:3769337065 1 udp 2122260223 129.97.208.23 56688 typ host generation 0 network-id 1 network-cost 50\r\na=candidate:2921887769 1 tcp 1518280447 129.97.208.23 35441 typ host tcptype passive generation 0 network-id 1 network-cost 50\r\na=ice-ufrag:aMAZ\r\na=ice-pwd:jcHb08Jjgrazp2dzjdrvPPvV\r\na=ice-options:trickle\r\na=fingerprint:sha-256 C8:88:EE:B9:E7:02:2E:21:37:ED:7A:D1:EB:2B:A3:15:A2:3B:5B:1C:3D:D4:D5:1F:06:CF:52:40:03:F8:DD:66\r\na=setup:actpass\r\na=mid:data\r\na=sctpmap:5000 webrtc-datachannel 1024\r\n"}` + sid = "ymbcCMto7KHNGYlp" +) + +func createClientOffer(sdp, nat, fingerprint string) (*bytes.Reader, error) { + clientRequest := &messages.ClientPollRequest{ + Offer: sdp, + NAT: nat, + Fingerprint: fingerprint, + } + encOffer, err := clientRequest.EncodeClientPollRequest() + if err != nil { + return nil, err + } + offer := bytes.NewReader(encOffer) + return offer, nil +} + +func createProxyAnswer(sdp, sid string) (*bytes.Reader, error) { + proxyRequest, err := messages.EncodeAnswerRequest(sdp, sid) + if err != nil { + return nil, err + } + answer := bytes.NewReader(proxyRequest) + return answer, nil +} + +func decodeAMPArmorToString(r io.Reader) (string, error) { + dec, err := amp.NewArmorDecoder(r) + if err != nil { + return "", err + } + p, err := io.ReadAll(dec) + return string(p), err +} + func TestBroker(t *testing.T) { + defaultBridgeValue, _ := hex.DecodeString("2B280B23E1107BB62ABFC40DDCC8824814F80A72") + var defaultBridge [20]byte + copy(defaultBridge[:], defaultBridgeValue) + Convey("Context", t, func() { - ctx := NewBrokerContext(NullLogger()) + buf := new(bytes.Buffer) + ctx := NewBrokerContext(log.New(buf, "", 0), "snowflake.torproject.net") + i := &IPC{ctx} Convey("Adds Snowflake", func() { So(ctx.snowflakes.Len(), ShouldEqual, 0) So(len(ctx.idToSnowflake), ShouldEqual, 0) - ctx.AddSnowflake("foo") + ctx.AddSnowflake("foo", "", NATUnrestricted, 0) So(ctx.snowflakes.Len(), ShouldEqual, 1) So(len(ctx.idToSnowflake), ShouldEqual, 1) }) @@ -36,7 +107,8 @@ func TestBroker(t *testing.T) { Convey("Broker goroutine matches clients with proxies", func() { p := new(ProxyPoll) p.id = "test" - p.offerChannel = make(chan []byte) + p.natType = "unrestricted" + p.offerChannel = make(chan *ClientOffer) go func(ctx *BrokerContext) { ctx.proxyPolls <- p close(ctx.proxyPolls) @@ -44,51 +116,105 @@ func TestBroker(t 
*testing.T) { ctx.Broker() So(ctx.snowflakes.Len(), ShouldEqual, 1) snowflake := heap.Pop(ctx.snowflakes).(*Snowflake) - snowflake.offerChannel <- []byte("test offer") + snowflake.offerChannel <- &ClientOffer{sdp: []byte("test offer")} offer := <-p.offerChannel So(ctx.idToSnowflake["test"], ShouldNotBeNil) - So(offer, ShouldResemble, []byte("test offer")) + So(offer.sdp, ShouldResemble, []byte("test offer")) So(ctx.snowflakes.Len(), ShouldEqual, 0) }) Convey("Request an offer from the Snowflake Heap", func() { - done := make(chan []byte) + done := make(chan *ClientOffer) go func() { - offer := ctx.RequestOffer("test") + offer := ctx.RequestOffer("test", "", NATUnrestricted, 0) done <- offer }() request := <-ctx.proxyPolls - request.offerChannel <- []byte("test offer") + request.offerChannel <- &ClientOffer{sdp: []byte("test offer")} offer := <-done - So(offer, ShouldResemble, []byte("test offer")) + So(offer.sdp, ShouldResemble, []byte("test offer")) }) - Convey("Responds to client offers...", func() { + Convey("Responds to HTTP client offers...", func() { w := httptest.NewRecorder() - data := bytes.NewReader([]byte("test")) + data, err := createClientOffer(sdp, NATUnknown, "") r, err := http.NewRequest("POST", "snowflake.broker/client", data) So(err, ShouldBeNil) - Convey("with 503 when no snowflakes are available.", func() { - clientOffers(ctx, w, r) - So(w.Code, ShouldEqual, http.StatusServiceUnavailable) - So(w.Body.String(), ShouldEqual, "") + Convey("with error when no snowflakes are available.", func() { + clientOffers(i, w, r) + So(w.Code, ShouldEqual, http.StatusOK) + So(w.Body.String(), ShouldEqual, `{"error":"no snowflake proxies currently available"}`) + + // Ensure that denial is correctly recorded in metrics + ctx.metrics.printMetrics() + So(buf.String(), ShouldContainSubstring, `client-denied-count 8 +client-restricted-denied-count 8 +client-unrestricted-denied-count 0 +client-snowflake-match-count 0 +client-snowflake-timeout-count 0 +client-http-count 8 +client-http-ips ??=8 +client-ampcache-count 0 +client-ampcache-ips +client-sqs-count 0 +client-sqs-ips +`) }) Convey("with a proxy answer if available.", func() { done := make(chan bool) // Prepare a fake proxy to respond with. 
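These tests drive the handlers with no network listener: httptest.NewRecorder captures whatever the handler writes, while a goroutine plays the proxy's side of the channel rendezvous. The recorder pattern by itself, with a stand-in handler:

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "net/http/httptest"
    )

    func main() {
        // Build a request, let the handler write into a recorder, then
        // inspect the recorded status and body.
        handler := func(w http.ResponseWriter, r *http.Request) {
            fmt.Fprint(w, `{"answer":"test answer"}`)
        }

        w := httptest.NewRecorder()
        r, _ := http.NewRequest("POST", "/client", nil)
        handler(w, r)

        body, _ := io.ReadAll(w.Result().Body)
        fmt.Println(w.Code, string(body)) // 200 {"answer":"test answer"}
    }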
- snowflake := ctx.AddSnowflake("fake") + snowflake := ctx.AddSnowflake("test", "", NATUnrestricted, 0) go func() { - clientOffers(ctx, w, r) + clientOffers(i, w, r) done <- true }() offer := <-snowflake.offerChannel - So(offer, ShouldResemble, []byte("test")) - snowflake.answerChannel <- []byte("fake answer") + So(offer.sdp, ShouldResemble, []byte(sdp)) + snowflake.answerChannel <- "test answer" <-done - So(w.Body.String(), ShouldEqual, "fake answer") + So(w.Body.String(), ShouldEqual, `{"answer":"test answer"}`) So(w.Code, ShouldEqual, http.StatusOK) + + // Ensure that match is correctly recorded in metrics + ctx.metrics.printMetrics() + So(buf.String(), ShouldContainSubstring, `client-denied-count 0 +client-restricted-denied-count 0 +client-unrestricted-denied-count 0 +client-snowflake-match-count 8 +client-snowflake-timeout-count 0 +client-http-count 8 +client-http-ips ??=8 +client-ampcache-count 0 +client-ampcache-ips +client-sqs-count 0 +client-sqs-ips +`) + }) + + Convey("with unrestricted proxy to unrestricted client if there are no restricted proxies", func() { + snowflake := ctx.AddSnowflake("test", "", NATUnrestricted, 0) + offerData, err := createClientOffer(sdp, NATUnrestricted, "") + So(err, ShouldBeNil) + r, err := http.NewRequest("POST", "snowflake.broker/client", offerData) + + done := make(chan bool) + go func() { + clientOffers(i, w, r) + done <- true + }() + + select { + case <-snowflake.offerChannel: + case <-time.After(250 * time.Millisecond): + So(false, ShouldBeTrue) + return + } + snowflake.answerChannel <- "test answer" + + <-done + So(w.Body.String(), ShouldEqual, `{"answer":"test answer"}`) }) Convey("Times out when no proxy responds.", func() { @@ -96,154 +222,418 @@ func TestBroker(t *testing.T) { return } done := make(chan bool) - snowflake := ctx.AddSnowflake("fake") + snowflake := ctx.AddSnowflake("fake", "", NATUnrestricted, 0) go func() { - clientOffers(ctx, w, r) + clientOffers(i, w, r) // Takes a few seconds here... done <- true }() offer := <-snowflake.offerChannel - So(offer, ShouldResemble, []byte("test")) + So(offer.sdp, ShouldResemble, []byte(sdp)) + <-done + So(w.Code, ShouldEqual, http.StatusOK) + So(w.Body.String(), ShouldEqual, `{"error":"timed out waiting for answer!"}`) + }) + }) + + Convey("Responds to HTTP legacy client offers...", func() { + w := httptest.NewRecorder() + // legacy offer starts with { + offer := bytes.NewReader([]byte(fmt.Sprintf(`{%v}`, sdp))) + r, err := http.NewRequest("POST", "snowflake.broker/client", offer) + So(err, ShouldBeNil) + r.Header.Set("Snowflake-NAT-TYPE", "restricted") + + Convey("with 503 when no snowflakes are available.", func() { + clientOffers(i, w, r) + So(w.Code, ShouldEqual, http.StatusServiceUnavailable) + So(w.Body.String(), ShouldEqual, "") + + // Ensure that denial is correctly recorded in metrics + ctx.metrics.printMetrics() + So(buf.String(), ShouldContainSubstring, `client-denied-count 8 +client-restricted-denied-count 8 +client-unrestricted-denied-count 0 +client-snowflake-match-count 0 +client-snowflake-timeout-count 0 +client-http-count 8 +client-http-ips ??=8 +client-ampcache-count 0 +client-ampcache-ips +client-sqs-count 0 +client-sqs-ips +`) + }) + + Convey("with a proxy answer if available.", func() { + done := make(chan bool) + // Prepare a fake proxy to respond with. 
+ snowflake := ctx.AddSnowflake("fake", "", NATUnrestricted, 0) + go func() { + clientOffers(i, w, r) + done <- true + }() + offer := <-snowflake.offerChannel + So(offer.sdp, ShouldResemble, []byte(fmt.Sprintf(`{%v}`, sdp))) + snowflake.answerChannel <- "fake answer" + <-done + So(w.Body.String(), ShouldEqual, "fake answer") + So(w.Code, ShouldEqual, http.StatusOK) + + // Ensure that match is correctly recorded in metrics + ctx.metrics.printMetrics() + So(buf.String(), ShouldContainSubstring, `client-denied-count 0 +client-restricted-denied-count 0 +client-unrestricted-denied-count 0 +client-snowflake-match-count 8 +client-snowflake-timeout-count 0 +client-http-count 8 +client-http-ips ??=8 +client-ampcache-count 0 +client-ampcache-ips +client-sqs-count 0 +client-sqs-ips +`) + }) + + Convey("Times out when no proxy responds.", func() { + if testing.Short() { + return + } + done := make(chan bool) + snowflake := ctx.AddSnowflake("fake", "", NATUnrestricted, 0) + go func() { + clientOffers(i, w, r) + // Takes a few seconds here... + done <- true + }() + offer := <-snowflake.offerChannel + So(offer.sdp, ShouldResemble, []byte(fmt.Sprintf(`{%v}`, sdp))) <-done So(w.Code, ShouldEqual, http.StatusGatewayTimeout) }) + + }) + + Convey("Responds to AMP client offers...", func() { + w := httptest.NewRecorder() + encPollReq := []byte("1.0\n{\"offer\": \"fake\", \"nat\": \"unknown\"}") + r, err := http.NewRequest("GET", "/amp/client/"+amp.EncodePath(encPollReq), nil) + So(err, ShouldBeNil) + + Convey("with status 200 when request is badly formatted.", func() { + r, err := http.NewRequest("GET", "/amp/client/bad", nil) + So(err, ShouldBeNil) + ampClientOffers(i, w, r) + body, err := decodeAMPArmorToString(w.Body) + So(err, ShouldBeNil) + So(body, ShouldEqual, `{"error":"cannot decode URL path"}`) + }) + + Convey("with error when no snowflakes are available.", func() { + ampClientOffers(i, w, r) + So(w.Code, ShouldEqual, http.StatusOK) + body, err := decodeAMPArmorToString(w.Body) + So(err, ShouldBeNil) + So(body, ShouldEqual, `{"error":"no snowflake proxies currently available"}`) + + // Ensure that denial is correctly recorded in metrics + ctx.metrics.printMetrics() + So(buf.String(), ShouldContainSubstring, `client-denied-count 8 +client-restricted-denied-count 8 +client-unrestricted-denied-count 0 +client-snowflake-match-count 0 +client-snowflake-timeout-count 0 +client-http-count 0 +client-http-ips +client-ampcache-count 8 +client-ampcache-ips ??=8 +client-sqs-count 0 +client-sqs-ips +`) + }) + + Convey("with a proxy answer if available.", func() { + done := make(chan bool) + // Prepare a fake proxy to respond with. 
+ snowflake := ctx.AddSnowflake("fake", "", NATUnrestricted, 0) + go func() { + ampClientOffers(i, w, r) + done <- true + }() + offer := <-snowflake.offerChannel + So(offer.sdp, ShouldResemble, []byte("fake")) + snowflake.answerChannel <- "fake answer" + <-done + body, err := decodeAMPArmorToString(w.Body) + So(err, ShouldBeNil) + So(body, ShouldEqual, `{"answer":"fake answer"}`) + So(w.Code, ShouldEqual, http.StatusOK) + + // Ensure that match is correctly recorded in metrics + ctx.metrics.printMetrics() + So(buf.String(), ShouldContainSubstring, `client-denied-count 0 +client-restricted-denied-count 0 +client-unrestricted-denied-count 0 +client-snowflake-match-count 8 +client-snowflake-timeout-count 0 +client-http-count 0 +client-http-ips +client-ampcache-count 8 +client-ampcache-ips ??=8 +client-sqs-count 0 +client-sqs-ips +`) + }) + + Convey("Times out when no proxy responds.", func() { + if testing.Short() { + return + } + done := make(chan bool) + snowflake := ctx.AddSnowflake("fake", "", NATUnrestricted, 0) + go func() { + ampClientOffers(i, w, r) + // Takes a few seconds here... + done <- true + }() + offer := <-snowflake.offerChannel + So(offer.sdp, ShouldResemble, []byte("fake")) + <-done + So(w.Code, ShouldEqual, http.StatusOK) + body, err := decodeAMPArmorToString(w.Body) + So(err, ShouldBeNil) + So(body, ShouldEqual, `{"error":"timed out waiting for answer!"}`) + }) + + Convey("and correctly geolocates remote addr.", func() { + err := ctx.metrics.LoadGeoipDatabases("test_geoip", "test_geoip6") + So(err, ShouldBeNil) + clientRequest := &messages.ClientPollRequest{ + Offer: rawOffer, + NAT: NATUnknown, + Fingerprint: "", + } + encOffer, err := clientRequest.EncodeClientPollRequest() + So(err, ShouldBeNil) + r, err = http.NewRequest("GET", "/amp/client/"+amp.EncodePath(encOffer), nil) + So(err, ShouldBeNil) + ampClientOffers(i, w, r) + So(w.Code, ShouldEqual, http.StatusOK) + body, err := decodeAMPArmorToString(w.Body) + So(err, ShouldBeNil) + So(body, ShouldEqual, `{"error":"no snowflake proxies currently available"}`) + + ctx.metrics.printMetrics() + So(buf.String(), ShouldContainSubstring, `client-denied-count 8 +client-restricted-denied-count 8 +client-unrestricted-denied-count 0 +client-snowflake-match-count 0 +client-snowflake-timeout-count 0 +client-http-count 0 +client-http-ips +client-ampcache-count 8 +client-ampcache-ips CA=8 +client-sqs-count 0 +client-sqs-ips +`) + }) + }) Convey("Responds to proxy polls...", func() { done := make(chan bool) w := httptest.NewRecorder() - data := bytes.NewReader([]byte("test")) + data := bytes.NewReader([]byte(`{"Sid":"ymbcCMto7KHNGYlp","Version":"1.0", "AcceptedRelayPattern": "snowflake.torproject.net"}`)) r, err := http.NewRequest("POST", "snowflake.broker/proxy", data) - r.Header.Set("X-Session-ID", "test") So(err, ShouldBeNil) Convey("with a client offer if available.", func() { - go func(ctx *BrokerContext) { - proxyPolls(ctx, w, r) + go func(i *IPC) { + proxyPolls(i, w, r) done <- true - }(ctx) + }(i) // Pass a fake client offer to this proxy p := <-ctx.proxyPolls - So(p.id, ShouldEqual, "test") - p.offerChannel <- []byte("fake offer") + So(p.id, ShouldEqual, "ymbcCMto7KHNGYlp") + p.offerChannel <- &ClientOffer{sdp: []byte("fake offer"), fingerprint: defaultBridge[:]} <-done So(w.Code, ShouldEqual, http.StatusOK) - So(w.Body.String(), ShouldEqual, "fake offer") + So(w.Body.String(), ShouldEqual, `{"Status":"client match","Offer":"fake offer","NAT":"","RelayURL":"wss://snowflake.torproject.net/"}`) }) - Convey("times out when no 
client offer is available.", func() { - go func(ctx *BrokerContext) { - proxyPolls(ctx, w, r) + Convey("return empty 200 OK when no client offer is available.", func() { + go func(i *IPC) { + proxyPolls(i, w, r) done <- true - }(ctx) + }(i) p := <-ctx.proxyPolls - So(p.id, ShouldEqual, "test") + So(p.id, ShouldEqual, "ymbcCMto7KHNGYlp") // nil means timeout p.offerChannel <- nil <-done - So(w.Body.String(), ShouldEqual, "") - So(w.Code, ShouldEqual, http.StatusGatewayTimeout) + So(w.Body.String(), ShouldEqual, `{"Status":"no match","Offer":"","NAT":"","RelayURL":""}`) + So(w.Code, ShouldEqual, http.StatusOK) }) }) Convey("Responds to proxy answers...", func() { - s := ctx.AddSnowflake("test") + done := make(chan bool) + s := ctx.AddSnowflake(sid, "", NATUnrestricted, 0) w := httptest.NewRecorder() - data := bytes.NewReader([]byte("fake answer")) + + data, err := createProxyAnswer(sdp, sid) + So(err, ShouldBeNil) Convey("by passing to the client if valid.", func() { r, err := http.NewRequest("POST", "snowflake.broker/answer", data) So(err, ShouldBeNil) - r.Header.Set("X-Session-ID", "test") - go func(ctx *BrokerContext) { - proxyAnswers(ctx, w, r) - }(ctx) + go func(i *IPC) { + proxyAnswers(i, w, r) + done <- true + }(i) answer := <-s.answerChannel + <-done So(w.Code, ShouldEqual, http.StatusOK) - So(answer, ShouldResemble, []byte("fake answer")) + So(answer, ShouldResemble, sdp) }) - Convey("with error if the proxy is not recognized", func() { - r, err := http.NewRequest("POST", "snowflake.broker/answer", nil) + Convey("with client gone status if the proxy ID is not recognized", func() { + data, err := createProxyAnswer(sdp, "invalid") + r, err := http.NewRequest("POST", "snowflake.broker/answer", data) So(err, ShouldBeNil) - r.Header.Set("X-Session-ID", "invalid") - proxyAnswers(ctx, w, r) - So(w.Code, ShouldEqual, http.StatusGone) + proxyAnswers(i, w, r) + So(w.Code, ShouldEqual, http.StatusOK) + b, err := io.ReadAll(w.Body) + So(err, ShouldBeNil) + So(b, ShouldResemble, []byte(`{"Status":"client gone"}`)) }) Convey("with error if the proxy gives invalid answer", func() { data := bytes.NewReader(nil) r, err := http.NewRequest("POST", "snowflake.broker/answer", data) - r.Header.Set("X-Session-ID", "test") So(err, ShouldBeNil) - proxyAnswers(ctx, w, r) + proxyAnswers(i, w, r) So(w.Code, ShouldEqual, http.StatusBadRequest) }) Convey("with error if the proxy writes too much data", func() { - data := bytes.NewReader(make([]byte, 100001, 100001)) + data := bytes.NewReader(make([]byte, 100001)) r, err := http.NewRequest("POST", "snowflake.broker/answer", data) - r.Header.Set("X-Session-ID", "test") So(err, ShouldBeNil) - proxyAnswers(ctx, w, r) + proxyAnswers(i, w, r) So(w.Code, ShouldEqual, http.StatusBadRequest) }) }) + }) Convey("End-To-End", t, func() { - done := make(chan bool) - polled := make(chan bool) - ctx := NewBrokerContext(NullLogger()) + ctx := NewBrokerContext(NullLogger(), "snowflake.torproject.net") + i := &IPC{ctx} - // Proxy polls with its ID first... - dataP := bytes.NewReader([]byte("test")) - wP := httptest.NewRecorder() - rP, err := http.NewRequest("POST", "snowflake.broker/proxy", dataP) - So(err, ShouldBeNil) - rP.Header.Set("X-Session-ID", "test") - go func() { - proxyPolls(ctx, wP, rP) - polled <- true - }() + Convey("Check for client/proxy data race", func() { + proxy_done := make(chan bool) + client_done := make(chan bool) - // Manually do the Broker goroutine action here for full control. 
- p := <-ctx.proxyPolls - So(p.id, ShouldEqual, "test") - s := ctx.AddSnowflake(p.id) - go func() { - offer := <-s.offerChannel - p.offerChannel <- offer - }() - So(ctx.idToSnowflake["test"], ShouldNotBeNil) + go ctx.Broker() - // Client request blocks until proxy answer arrives. - dataC := bytes.NewReader([]byte("fake offer")) - wC := httptest.NewRecorder() - rC, err := http.NewRequest("POST", "snowflake.broker/client", dataC) - So(err, ShouldBeNil) - go func() { - clientOffers(ctx, wC, rC) - done <- true - }() + // Make proxy poll + wp := httptest.NewRecorder() + datap := bytes.NewReader([]byte(`{"Sid":"ymbcCMto7KHNGYlp","Version":"1.0","AcceptedRelayPattern":"snowflake.torproject.net"}`)) + rp, err := http.NewRequest("POST", "snowflake.broker/proxy", datap) + So(err, ShouldBeNil) - <-polled - So(wP.Code, ShouldEqual, http.StatusOK) - So(wP.Body.String(), ShouldResemble, "fake offer") - So(ctx.idToSnowflake["test"], ShouldNotBeNil) - // Follow up with the answer request afterwards - wA := httptest.NewRecorder() - dataA := bytes.NewReader([]byte("fake answer")) - rA, err := http.NewRequest("POST", "snowflake.broker/proxy", dataA) - So(err, ShouldBeNil) - rA.Header.Set("X-Session-ID", "test") - proxyAnswers(ctx, wA, rA) - So(wA.Code, ShouldEqual, http.StatusOK) + go func(i *IPC) { + proxyPolls(i, wp, rp) + proxy_done <- true + }(i) - <-done - So(wC.Code, ShouldEqual, http.StatusOK) - So(wC.Body.String(), ShouldEqual, "fake answer") + // Client offer + wc := httptest.NewRecorder() + datac, err := createClientOffer(sdp, NATUnknown, "") + So(err, ShouldBeNil) + rc, err := http.NewRequest("POST", "snowflake.broker/client", datac) + So(err, ShouldBeNil) + + go func() { + clientOffers(i, wc, rc) + client_done <- true + }() + + <-proxy_done + So(wp.Code, ShouldEqual, http.StatusOK) + + // Proxy answers + wp = httptest.NewRecorder() + datap, err = createProxyAnswer(sdp, sid) + So(err, ShouldBeNil) + rp, err = http.NewRequest("POST", "snowflake.broker/answer", datap) + So(err, ShouldBeNil) + go func(i *IPC) { + proxyAnswers(i, wp, rp) + proxy_done <- true + }(i) + + <-proxy_done + <-client_done + + }) + + Convey("Ensure correct snowflake brokering", func() { + done := make(chan bool) + polled := make(chan bool) + + // Proxy polls with its ID first... + dataP := bytes.NewReader([]byte(`{"Sid":"ymbcCMto7KHNGYlp","Version":"1.0","AcceptedRelayPattern":"snowflake.torproject.net"}`)) + wP := httptest.NewRecorder() + rP, err := http.NewRequest("POST", "snowflake.broker/proxy", dataP) + So(err, ShouldBeNil) + go func() { + proxyPolls(i, wP, rP) + polled <- true + }() + + // Manually do the Broker goroutine action here for full control. + p := <-ctx.proxyPolls + So(p.id, ShouldEqual, "ymbcCMto7KHNGYlp") + s := ctx.AddSnowflake(p.id, "", NATUnrestricted, 0) + go func() { + offer := <-s.offerChannel + p.offerChannel <- offer + }() + So(ctx.idToSnowflake["ymbcCMto7KHNGYlp"], ShouldNotBeNil) + + // Client request blocks until proxy answer arrives. 
+ wC := httptest.NewRecorder() + dataC, err := createClientOffer(sdp, NATUnknown, "") + So(err, ShouldBeNil) + rC, err := http.NewRequest("POST", "snowflake.broker/client", dataC) + So(err, ShouldBeNil) + go func() { + clientOffers(i, wC, rC) + done <- true + }() + + <-polled + So(wP.Code, ShouldEqual, http.StatusOK) + So(wP.Body.String(), ShouldResemble, fmt.Sprintf(`{"Status":"client match","Offer":%#q,"NAT":"unknown","RelayURL":"wss://snowflake.torproject.net/"}`, sdp)) + So(ctx.idToSnowflake[sid], ShouldNotBeNil) + + // Follow up with the answer request afterwards + wA := httptest.NewRecorder() + dataA, err := createProxyAnswer(sdp, sid) + So(err, ShouldBeNil) + rA, err := http.NewRequest("POST", "snowflake.broker/answer", dataA) + So(err, ShouldBeNil) + proxyAnswers(i, wA, rA) + So(wA.Code, ShouldEqual, http.StatusOK) + + <-done + So(wC.Code, ShouldEqual, http.StatusOK) + So(wC.Body.String(), ShouldEqual, fmt.Sprintf(`{"answer":%#q}`, sdp)) + }) }) } @@ -290,232 +680,457 @@ func TestSnowflakeHeap(t *testing.T) { }) } -func TestGeoip(t *testing.T) { +func TestInvalidGeoipFile(t *testing.T) { Convey("Geoip", t, func() { - tv4 := new(GeoIPv4Table) - err := GeoIPLoadFile(tv4, "test_geoip") - So(err, ShouldEqual, nil) - tv6 := new(GeoIPv6Table) - err = GeoIPLoadFile(tv6, "test_geoip6") - So(err, ShouldEqual, nil) - - Convey("IPv4 Country Mapping Tests", func() { - for _, test := range []struct { - addr, cc string - ok bool - }{ - { - "129.97.208.23", //uwaterloo - "CA", - true, - }, - { - "127.0.0.1", - "", - false, - }, - { - "255.255.255.255", - "", - false, - }, - { - "0.0.0.0", - "", - false, - }, - { - "223.252.127.255", //test high end of range - "JP", - true, - }, - { - "223.252.127.255", //test low end of range - "JP", - true, - }, - } { - country, ok := GetCountryByAddr(tv4, net.ParseIP(test.addr)) - So(country, ShouldEqual, test.cc) - So(ok, ShouldResemble, test.ok) - } - }) - - Convey("IPv6 Country Mapping Tests", func() { - for _, test := range []struct { - addr, cc string - ok bool - }{ - { - "2620:101:f000:0:250:56ff:fe80:168e", //uwaterloo - "CA", - true, - }, - { - "fd00:0:0:0:0:0:0:1", - "", - false, - }, - { - "0:0:0:0:0:0:0:0", - "", - false, - }, - { - "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", - "", - false, - }, - { - "2a07:2e47:ffff:ffff:ffff:ffff:ffff:ffff", //test high end of range - "FR", - true, - }, - { - "2a07:2e40::", //test low end of range - "FR", - true, - }, - } { - country, ok := GetCountryByAddr(tv6, net.ParseIP(test.addr)) - So(country, ShouldEqual, test.cc) - So(ok, ShouldResemble, test.ok) - } - }) - // Make sure things behave properly if geoip file fails to load - ctx := NewBrokerContext(NullLogger()) - ctx.metrics.LoadGeoipDatabases("invalid_filename", "invalid_filename6") - ctx.metrics.UpdateCountryStats("127.0.0.1") - So(ctx.metrics.tablev4, ShouldEqual, nil) + ctx := NewBrokerContext(NullLogger(), "") + if err := ctx.metrics.LoadGeoipDatabases("invalid_filename", "invalid_filename6"); err != nil { + log.Printf("loading geo ip databases returned error: %v", err) + } + ctx.metrics.UpdateProxyStats("127.0.0.1", "", NATUnrestricted) + So(ctx.metrics.geoipdb, ShouldBeNil) }) } func TestMetrics(t *testing.T) { - Convey("Test metrics...", t, func() { done := make(chan bool) buf := new(bytes.Buffer) - ctx := NewBrokerContext(log.New(buf, "", 0)) + ctx := NewBrokerContext(log.New(buf, "", 0), "snowflake.torproject.net") + i := &IPC{ctx} err := ctx.metrics.LoadGeoipDatabases("test_geoip", "test_geoip6") - So(err, ShouldEqual, nil) + So(err, ShouldBeNil) 
//Test addition of proxy polls Convey("for proxy polls", func() { w := httptest.NewRecorder() - data := bytes.NewReader([]byte("test")) + data := bytes.NewReader([]byte("{\"Sid\":\"ymbcCMto7KHNGYlp\",\"Version\":\"1.0\",\"AcceptedRelayPattern\":\"snowflake.torproject.net\"}")) r, err := http.NewRequest("POST", "snowflake.broker/proxy", data) - r.Header.Set("X-Session-ID", "test") - r.RemoteAddr = "129.97.208.23:8888" //CA geoip + r.RemoteAddr = "129.97.208.23" //CA geoip So(err, ShouldBeNil) - go func(ctx *BrokerContext) { - proxyPolls(ctx, w, r) + go func(i *IPC) { + proxyPolls(i, w, r) done <- true - }(ctx) + }(i) p := <-ctx.proxyPolls //manually unblock poll p.offerChannel <- nil <-done + w = httptest.NewRecorder() + data = bytes.NewReader([]byte(`{"Sid":"ymbcCMto7KHNGYlp","Version":"1.0","Type":"standalone","AcceptedRelayPattern":"snowflake.torproject.net"}`)) + r, err = http.NewRequest("POST", "snowflake.broker/proxy", data) + r.RemoteAddr = "129.97.208.24" //CA geoip + So(err, ShouldBeNil) + go func(i *IPC) { + proxyPolls(i, w, r) + done <- true + }(i) + p = <-ctx.proxyPolls //manually unblock poll + p.offerChannel <- nil + <-done + + w = httptest.NewRecorder() + data = bytes.NewReader([]byte(`{"Sid":"ymbcCMto7KHNGYlp","Version":"1.0","Type":"badge","AcceptedRelayPattern":"snowflake.torproject.net"}`)) + r, err = http.NewRequest("POST", "snowflake.broker/proxy", data) + r.RemoteAddr = "129.97.208.25" //CA geoip + So(err, ShouldBeNil) + go func(i *IPC) { + proxyPolls(i, w, r) + done <- true + }(i) + p = <-ctx.proxyPolls //manually unblock poll + p.offerChannel <- nil + <-done + + w = httptest.NewRecorder() + data = bytes.NewReader([]byte(`{"Sid":"ymbcCMto7KHNGYlp","Version":"1.0","Type":"webext","AcceptedRelayPattern":"snowflake.torproject.net"}`)) + r, err = http.NewRequest("POST", "snowflake.broker/proxy", data) + r.RemoteAddr = "129.97.208.26" //CA geoip + So(err, ShouldBeNil) + go func(i *IPC) { + proxyPolls(i, w, r) + done <- true + }(i) + p = <-ctx.proxyPolls //manually unblock poll + p.offerChannel <- nil + <-done ctx.metrics.printMetrics() - So(buf.String(), ShouldResemble, "snowflake-stats-end "+time.Now().UTC().Format("2006-01-02 15:04:05")+" (86400 s)\nsnowflake-ips CA=1\nsnowflake-ips-total 1\nsnowflake-idle-count 8\nclient-denied-count 0\nclient-snowflake-match-count 0\n") + + metricsStr := buf.String() + So(metricsStr, ShouldStartWith, "snowflake-stats-end "+time.Now().UTC().Format("2006-01-02 15:04:05")+" (86400 s)\nsnowflake-ips CA=4\n") + So(metricsStr, ShouldContainSubstring, "\nsnowflake-ips-standalone 1\n") + So(metricsStr, ShouldContainSubstring, "\nsnowflake-ips-badge 1\n") + So(metricsStr, ShouldContainSubstring, "\nsnowflake-ips-webext 1\n") + So(metricsStr, ShouldEndWith, `snowflake-ips-total 4 +snowflake-idle-count 8 +snowflake-proxy-poll-with-relay-url-count 8 +snowflake-proxy-poll-without-relay-url-count 0 +snowflake-proxy-rejected-for-relay-url-count 0 +client-denied-count 0 +client-restricted-denied-count 0 +client-unrestricted-denied-count 0 +client-snowflake-match-count 0 +client-snowflake-timeout-count 0 +client-http-count 0 +client-http-ips +client-ampcache-count 0 +client-ampcache-ips +client-sqs-count 0 +client-sqs-ips +snowflake-ips-nat-restricted 0 +snowflake-ips-nat-unrestricted 0 +snowflake-ips-nat-unknown 4 +`) }) //Test addition of client failures Convey("for no proxies available", func() { w := httptest.NewRecorder() - data := bytes.NewReader([]byte("test")) + data, err := createClientOffer(sdp, NATUnknown, "") + So(err, ShouldBeNil) r, err 
:= http.NewRequest("POST", "snowflake.broker/client", data) + r.RemoteAddr = "129.97.208.23:8888" //CA geoip So(err, ShouldBeNil) - clientOffers(ctx, w, r) + clientOffers(i, w, r) ctx.metrics.printMetrics() - So(buf.String(), ShouldResemble, "snowflake-stats-end "+time.Now().UTC().Format("2006-01-02 15:04:05")+" (86400 s)\nsnowflake-ips \nsnowflake-ips-total 0\nsnowflake-idle-count 0\nclient-denied-count 8\nclient-snowflake-match-count 0\n") + So(buf.String(), ShouldContainSubstring, `client-denied-count 8 +client-restricted-denied-count 8 +client-unrestricted-denied-count 0 +client-snowflake-match-count 0 +client-snowflake-timeout-count 0 +client-http-count 8 +client-http-ips CA=8 +client-ampcache-count 0 +client-ampcache-ips +client-sqs-count 0 +client-sqs-ips `) // Test reset buf.Reset() - ctx.metrics.zeroMetrics() ctx.metrics.printMetrics() - So(buf.String(), ShouldResemble, "snowflake-stats-end "+time.Now().UTC().Format("2006-01-02 15:04:05")+" (86400 s)\nsnowflake-ips \nsnowflake-ips-total 0\nsnowflake-idle-count 0\nclient-denied-count 0\nclient-snowflake-match-count 0\n") + So(buf.String(), ShouldContainSubstring, "\nsnowflake-ips \n") + So(buf.String(), ShouldContainSubstring, "\nsnowflake-ips-standalone 0\n") + So(buf.String(), ShouldContainSubstring, "\nsnowflake-ips-badge 0\n") + So(buf.String(), ShouldContainSubstring, "\nsnowflake-ips-webext 0\n") + So(buf.String(), ShouldContainSubstring, `snowflake-ips-total 0 +snowflake-idle-count 0 +snowflake-proxy-poll-with-relay-url-count 0 +snowflake-proxy-poll-without-relay-url-count 0 +snowflake-proxy-rejected-for-relay-url-count 0 +client-denied-count 0 +client-restricted-denied-count 0 +client-unrestricted-denied-count 0 +client-snowflake-match-count 0 +client-snowflake-timeout-count 0 +client-http-count 0 +client-http-ips +client-ampcache-count 0 +client-ampcache-ips +client-sqs-count 0 +client-sqs-ips +snowflake-ips-nat-restricted 0 +snowflake-ips-nat-unrestricted 0 +snowflake-ips-nat-unknown 0 +`) }) //Test addition of client matches Convey("for client-proxy match", func() { w := httptest.NewRecorder() - data := bytes.NewReader([]byte("test")) + data, err := createClientOffer(sdp, NATUnknown, "") + So(err, ShouldBeNil) r, err := http.NewRequest("POST", "snowflake.broker/client", data) So(err, ShouldBeNil) // Prepare a fake proxy to respond with. 
- snowflake := ctx.AddSnowflake("fake") + snowflake := ctx.AddSnowflake("fake", "", NATUnrestricted, 0) go func() { - clientOffers(ctx, w, r) + clientOffers(i, w, r) done <- true }() offer := <-snowflake.offerChannel - So(offer, ShouldResemble, []byte("test")) - snowflake.answerChannel <- []byte("fake answer") + So(offer.sdp, ShouldResemble, []byte(sdp)) + snowflake.answerChannel <- "fake answer" <-done ctx.metrics.printMetrics() - So(buf.String(), ShouldResemble, "snowflake-stats-end "+time.Now().UTC().Format("2006-01-02 15:04:05")+" (86400 s)\nsnowflake-ips \nsnowflake-ips-total 0\nsnowflake-idle-count 0\nclient-denied-count 0\nclient-snowflake-match-count 8\n") + So(buf.String(), ShouldContainSubstring, "client-denied-count 0\nclient-restricted-denied-count 0\nclient-unrestricted-denied-count 0\nclient-snowflake-match-count 8") }) //Test rounding boundary Convey("binning boundary", func() { w := httptest.NewRecorder() - data := bytes.NewReader([]byte("test")) + data, err := createClientOffer(sdp, NATRestricted, "") + So(err, ShouldBeNil) r, err := http.NewRequest("POST", "snowflake.broker/client", data) So(err, ShouldBeNil) + clientOffers(i, w, r) - clientOffers(ctx, w, r) - clientOffers(ctx, w, r) - clientOffers(ctx, w, r) - clientOffers(ctx, w, r) - clientOffers(ctx, w, r) - clientOffers(ctx, w, r) - clientOffers(ctx, w, r) - clientOffers(ctx, w, r) + w = httptest.NewRecorder() + data, err = createClientOffer(sdp, NATRestricted, "") + So(err, ShouldBeNil) + r, err = http.NewRequest("POST", "snowflake.broker/client", data) + So(err, ShouldBeNil) + clientOffers(i, w, r) - ctx.metrics.printMetrics() - So(buf.String(), ShouldResemble, "snowflake-stats-end "+time.Now().UTC().Format("2006-01-02 15:04:05")+" (86400 s)\nsnowflake-ips \nsnowflake-ips-total 0\nsnowflake-idle-count 0\nclient-denied-count 8\nclient-snowflake-match-count 0\n") + w = httptest.NewRecorder() + data, err = createClientOffer(sdp, NATRestricted, "") + So(err, ShouldBeNil) + r, err = http.NewRequest("POST", "snowflake.broker/client", data) + So(err, ShouldBeNil) + clientOffers(i, w, r) + + w = httptest.NewRecorder() + data, err = createClientOffer(sdp, NATRestricted, "") + So(err, ShouldBeNil) + r, err = http.NewRequest("POST", "snowflake.broker/client", data) + So(err, ShouldBeNil) + clientOffers(i, w, r) + + w = httptest.NewRecorder() + data, err = createClientOffer(sdp, NATRestricted, "") + So(err, ShouldBeNil) + r, err = http.NewRequest("POST", "snowflake.broker/client", data) + So(err, ShouldBeNil) + clientOffers(i, w, r) + + w = httptest.NewRecorder() + data, err = createClientOffer(sdp, NATRestricted, "") + So(err, ShouldBeNil) + r, err = http.NewRequest("POST", "snowflake.broker/client", data) + So(err, ShouldBeNil) + clientOffers(i, w, r) + + w = httptest.NewRecorder() + data, err = createClientOffer(sdp, NATRestricted, "") + So(err, ShouldBeNil) + r, err = http.NewRequest("POST", "snowflake.broker/client", data) + So(err, ShouldBeNil) + clientOffers(i, w, r) + + w = httptest.NewRecorder() + data, err = createClientOffer(sdp, NATRestricted, "") + So(err, ShouldBeNil) + r, err = http.NewRequest("POST", "snowflake.broker/client", data) + So(err, ShouldBeNil) + clientOffers(i, w, r) + + w = httptest.NewRecorder() + data, err = createClientOffer(sdp, NATRestricted, "") + So(err, ShouldBeNil) + r, err = http.NewRequest("POST", "snowflake.broker/client", data) + So(err, ShouldBeNil) + clientOffers(i, w, r) - clientOffers(ctx, w, r) buf.Reset() ctx.metrics.printMetrics() - So(buf.String(), ShouldResemble, 
"snowflake-stats-end "+time.Now().UTC().Format("2006-01-02 15:04:05")+" (86400 s)\nsnowflake-ips \nsnowflake-ips-total 0\nsnowflake-idle-count 0\nclient-denied-count 16\nclient-snowflake-match-count 0\n") + So(buf.String(), ShouldContainSubstring, "client-denied-count 16\nclient-restricted-denied-count 16\nclient-unrestricted-denied-count 0\n") }) //Test unique ip Convey("proxy counts by unique ip", func() { w := httptest.NewRecorder() - data := bytes.NewReader([]byte("test")) + data := bytes.NewReader([]byte(`{"Sid":"ymbcCMto7KHNGYlp","Version":"1.0","AcceptedRelayPattern":"snowflake.torproject.net"}`)) r, err := http.NewRequest("POST", "snowflake.broker/proxy", data) - r.Header.Set("X-Session-ID", "test") r.RemoteAddr = "129.97.208.23:8888" //CA geoip So(err, ShouldBeNil) - go func(ctx *BrokerContext) { - proxyPolls(ctx, w, r) + go func(i *IPC) { + proxyPolls(i, w, r) done <- true - }(ctx) + }(i) p := <-ctx.proxyPolls //manually unblock poll p.offerChannel <- nil <-done - data = bytes.NewReader([]byte("test")) + data = bytes.NewReader([]byte(`{"Sid":"ymbcCMto7KHNGYlp","Version":"1.0","AcceptedRelayPattern":"snowflake.torproject.net"}`)) r, err = http.NewRequest("POST", "snowflake.broker/proxy", data) - r.Header.Set("X-Session-ID", "test") + if err != nil { + log.Printf("unable to get NewRequest with error: %v", err) + } r.RemoteAddr = "129.97.208.23:8888" //CA geoip - go func(ctx *BrokerContext) { - proxyPolls(ctx, w, r) + go func(i *IPC) { + proxyPolls(i, w, r) done <- true - }(ctx) + }(i) p = <-ctx.proxyPolls //manually unblock poll p.offerChannel <- nil <-done ctx.metrics.printMetrics() - So(buf.String(), ShouldResemble, "snowflake-stats-end "+time.Now().UTC().Format("2006-01-02 15:04:05")+" (86400 s)\nsnowflake-ips CA=1\nsnowflake-ips-total 1\nsnowflake-idle-count 8\nclient-denied-count 0\nclient-snowflake-match-count 0\n") + metricsStr := buf.String() + So(metricsStr, ShouldContainSubstring, "snowflake-ips CA=1\n") + So(metricsStr, ShouldContainSubstring, "snowflake-ips-total 1\n") + }) + //Test NAT types + Convey("proxy counts by NAT type", func() { + w := httptest.NewRecorder() + data := bytes.NewReader([]byte(`{"Sid":"ymbcCMto7KHNGYlp","Version":"1.2","Type":"unknown","NAT":"restricted","AcceptedRelayPattern":"snowflake.torproject.net"}`)) + r, err := http.NewRequest("POST", "snowflake.broker/proxy", data) + r.RemoteAddr = "129.97.208.23:8888" //CA geoip + So(err, ShouldBeNil) + go func(i *IPC) { + proxyPolls(i, w, r) + done <- true + }(i) + p := <-ctx.proxyPolls //manually unblock poll + p.offerChannel <- nil + <-done + + data = bytes.NewReader([]byte(`{"Sid":"ymbcCMto7KHNGYlp","Version":"1.2","Type":"unknown","NAT":"unrestricted","AcceptedRelayPattern":"snowflake.torproject.net"}`)) + r, err = http.NewRequest("POST", "snowflake.broker/proxy", data) + if err != nil { + log.Printf("unable to get NewRequest with error: %v", err) + } + r.RemoteAddr = "129.97.208.24:8888" //CA geoip + go func(i *IPC) { + proxyPolls(i, w, r) + done <- true + }(i) + p = <-ctx.proxyPolls //manually unblock poll + p.offerChannel <- nil + <-done + + ctx.metrics.printMetrics() + So(buf.String(), ShouldContainSubstring, "snowflake-ips-nat-restricted 1\nsnowflake-ips-nat-unrestricted 1\nsnowflake-ips-nat-unknown 0") + }) + + Convey("client failures by NAT type", func() { + w := httptest.NewRecorder() + data, err := createClientOffer(sdp, NATRestricted, "") + So(err, ShouldBeNil) + r, err := http.NewRequest("POST", "snowflake.broker/client", data) + So(err, ShouldBeNil) + + clientOffers(i, w, r) + + 
ctx.metrics.printMetrics()
+			So(buf.String(), ShouldContainSubstring, "client-denied-count 8\nclient-restricted-denied-count 8\nclient-unrestricted-denied-count 0\nclient-snowflake-match-count 0")
+
+			buf.Reset()
+
+			data, err = createClientOffer(sdp, NATUnrestricted, "")
+			So(err, ShouldBeNil)
+			r, err = http.NewRequest("POST", "snowflake.broker/client", data)
+			So(err, ShouldBeNil)
+
+			clientOffers(i, w, r)
+
+			ctx.metrics.printMetrics()
+			So(buf.String(), ShouldContainSubstring, "client-denied-count 8\nclient-restricted-denied-count 0\nclient-unrestricted-denied-count 8\nclient-snowflake-match-count 0")
+
+			buf.Reset()
+
+			data, err = createClientOffer(sdp, NATUnknown, "")
+			So(err, ShouldBeNil)
+			r, err = http.NewRequest("POST", "snowflake.broker/client", data)
+			So(err, ShouldBeNil)
+
+			clientOffers(i, w, r)
+
+			ctx.metrics.printMetrics()
+			So(buf.String(), ShouldContainSubstring, "client-denied-count 8\nclient-restricted-denied-count 8\nclient-unrestricted-denied-count 0\nclient-snowflake-match-count 0")
+		})
+		Convey("that seen IPs map is cleared after each print", func() {
+			w := httptest.NewRecorder()
+			data := bytes.NewReader([]byte("{\"Sid\":\"ymbcCMto7KHNGYlp\",\"Version\":\"1.0\",\"AcceptedRelayPattern\":\"snowflake.torproject.net\"}"))
+			r, err := http.NewRequest("POST", "snowflake.broker/proxy", data)
+			r.RemoteAddr = "129.97.208.23" //CA geoip
+			So(err, ShouldBeNil)
+			go func(i *IPC) {
+				proxyPolls(i, w, r)
+				done <- true
+			}(i)
+			p := <-ctx.proxyPolls //manually unblock poll
+			p.offerChannel <- nil
+			<-done
+
+			ctx.metrics.printMetrics()
+			So(buf.String(), ShouldContainSubstring, "snowflake-ips CA=1")
+			So(buf.String(), ShouldContainSubstring, "snowflake-ips-total 1")
+			buf.Reset()
+
+			w = httptest.NewRecorder()
+			data = bytes.NewReader([]byte("{\"Sid\":\"ymbcCMto7KHNGYlp\",\"Version\":\"1.0\",\"AcceptedRelayPattern\":\"snowflake.torproject.net\"}"))
+			r, err = http.NewRequest("POST", "snowflake.broker/proxy", data)
+			r.RemoteAddr = "129.97.208.23" //CA geoip
+			So(err, ShouldBeNil)
+			go func(i *IPC) {
+				proxyPolls(i, w, r)
+				done <- true
+			}(i)
+			p = <-ctx.proxyPolls //manually unblock poll
+			p.offerChannel <- nil
+			<-done
+
+			ctx.metrics.printMetrics()
+			So(buf.String(), ShouldContainSubstring, "snowflake-ips CA=1")
+			So(buf.String(), ShouldContainSubstring, "snowflake-ips-total 1")
+			buf.Reset()
+
+		})
+	})
+}
+
+func TestConcurrency(t *testing.T) {
+	Convey("Test concurrency with", t, func() {
+		ctx := NewBrokerContext(NullLogger(), "snowflake.torproject.net")
+		i := &IPC{ctx}
+		Convey("multiple simultaneous polls", func(c C) {
+			go ctx.Broker()
+
+			var proxies sync.WaitGroup
+			var wg sync.WaitGroup
+
+			proxies.Add(1000)
+			// Multiple proxy polls
+			for x := 0; x < 1000; x++ {
+				wp := httptest.NewRecorder()
+				buf := make([]byte, 16)
+				_, err := rand.Read(buf)
+				So(err, ShouldBeNil)
+				id := strings.TrimRight(base64.StdEncoding.EncodeToString(buf), "=")
+
+				datap := bytes.NewReader([]byte(fmt.Sprintf("{\"Sid\": \"%s\",\"Version\":\"1.0\",\"AcceptedRelayPattern\":\"snowflake.torproject.net\"}", id)))
+				rp, err := http.NewRequest("POST", "snowflake.broker/proxy", datap)
+				So(err, ShouldBeNil)
+
+				go func() {
+					proxies.Done()
+					proxyPolls(i, wp, rp)
+					c.So(wp.Code, ShouldEqual, http.StatusOK)
+
+					// Proxy answers
+					wp = httptest.NewRecorder()
+					datap, err = createProxyAnswer(sdp, id)
+					c.So(err, ShouldBeNil)
+					rp, err = http.NewRequest("POST", "snowflake.broker/answer", datap)
+					c.So(err, ShouldBeNil)
+					go func() {
+						proxyAnswers(i, wp, rp)
+					}()
+				}()
+			}
+			// Wait for all proxies to poll before
sending client offers + proxies.Wait() + + // Multiple client offers + for x := 0; x < 500; x++ { + wg.Add(1) + wc := httptest.NewRecorder() + datac, err := createClientOffer(sdp, NATUnrestricted, "") + So(err, ShouldBeNil) + rc, err := http.NewRequest("POST", "snowflake.broker/client", datac) + So(err, ShouldBeNil) + + go func() { + clientOffers(i, wc, rc) + c.So(wc.Code, ShouldEqual, http.StatusOK) + c.So(wc.Body.String(), ShouldContainSubstring, "8.8.8.8") + wg.Done() + }() + } + wg.Wait() }) }) } diff --git a/broker/snowflake-heap.go b/broker/snowflake-heap.go index 419956f..80c1f57 100644 --- a/broker/snowflake-heap.go +++ b/broker/snowflake-heap.go @@ -10,8 +10,10 @@ over the offer and answer channels. */ type Snowflake struct { id string - offerChannel chan []byte - answerChannel chan []byte + proxyType string + natType string + offerChannel chan *ClientOffer + answerChannel chan string clients int index int } diff --git a/broker/sqs.go b/broker/sqs.go new file mode 100644 index 0000000..16a97c9 --- /dev/null +++ b/broker/sqs.go @@ -0,0 +1,217 @@ +package main + +import ( + "context" + "log" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/sqs" + "github.com/aws/aws-sdk-go-v2/service/sqs/types" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/sqsclient" +) + +const ( + cleanupThreshold = -2 * time.Minute +) + +type sqsHandler struct { + SQSClient sqsclient.SQSClient + SQSQueueURL *string + IPC *IPC + cleanupInterval time.Duration +} + +func (r *sqsHandler) pollMessages(ctx context.Context, chn chan<- *types.Message) { + for { + select { + case <-ctx.Done(): + // if context is cancelled + return + default: + res, err := r.SQSClient.ReceiveMessage(ctx, &sqs.ReceiveMessageInput{ + QueueUrl: r.SQSQueueURL, + MaxNumberOfMessages: 10, + WaitTimeSeconds: 15, + MessageAttributeNames: []string{ + string(types.QueueAttributeNameAll), + }, + }) + + if err != nil { + log.Printf("SQSHandler: encountered error while polling for messages: %v\n", err) + continue + } + + for _, message := range res.Messages { + chn <- &message + } + } + } +} + +func (r *sqsHandler) cleanupClientQueues(ctx context.Context) { + for range time.NewTicker(r.cleanupInterval).C { + // Runs at fixed intervals to clean up any client queues that were last changed more than 2 minutes ago + select { + case <-ctx.Done(): + // if context is cancelled + return + default: + queueURLsList := []string{} + var nextToken *string + for { + res, err := r.SQSClient.ListQueues(ctx, &sqs.ListQueuesInput{ + QueueNamePrefix: aws.String("snowflake-client-"), + MaxResults: aws.Int32(1000), + NextToken: nextToken, + }) + if err != nil { + log.Printf("SQSHandler: encountered error while retrieving client queues to clean up: %v\n", err) + // client queues will be cleaned up the next time the cleanup operation is triggered automatically + break + } + queueURLsList = append(queueURLsList, res.QueueUrls...) 
+				if res.NextToken == nil {
+					break
+				} else {
+					nextToken = res.NextToken
+				}
+			}
+
+			numDeleted := 0
+			cleanupCutoff := time.Now().Add(cleanupThreshold)
+			for _, queueURL := range queueURLsList {
+				if !strings.Contains(queueURL, "snowflake-client-") {
+					continue
+				}
+				res, err := r.SQSClient.GetQueueAttributes(ctx, &sqs.GetQueueAttributesInput{
+					QueueUrl:       aws.String(queueURL),
+					AttributeNames: []types.QueueAttributeName{types.QueueAttributeNameLastModifiedTimestamp},
+				})
+				if err != nil {
+					// According to the AWS SQS docs, the deletion process for a queue can take up to 60 seconds. So the queue
+					// can be in the process of being deleted, but will still be returned by the ListQueues operation, but
+					// fail when we try to GetQueueAttributes for the queue
+					log.Printf("SQSHandler: encountered error while getting attribute of client queue %s. queue may already be deleted.\n", queueURL)
+					continue
+				}
+				lastModifiedInt64, err := strconv.ParseInt(res.Attributes[string(types.QueueAttributeNameLastModifiedTimestamp)], 10, 64)
+				if err != nil {
+					log.Printf("SQSHandler: encountered invalid lastModifiedTimestamp value from client queue %s: %v\n", queueURL, err)
+					continue
+				}
+				lastModified := time.Unix(lastModifiedInt64, 0)
+				if lastModified.Before(cleanupCutoff) {
+					_, err := r.SQSClient.DeleteQueue(ctx, &sqs.DeleteQueueInput{
+						QueueUrl: aws.String(queueURL),
+					})
+					if err != nil {
+						log.Printf("SQSHandler: encountered error when deleting client queue %s: %v\n", queueURL, err)
+						continue
+					} else {
+						numDeleted += 1
+					}
+
+				}
+			}
+			log.Printf("SQSHandler: finished cleaning up client queues. deleted %d queue(s).\n", numDeleted)
+		}
+	}
+}
+
+func (r *sqsHandler) handleMessage(mainCtx context.Context, message *types.Message) {
+	var encPollReq []byte
+	var response []byte
+	var err error
+
+	ctx, cancel := context.WithTimeout(mainCtx, ClientTimeout*time.Second)
+	defer cancel()
+
+	clientID := message.MessageAttributes["ClientID"].StringValue
+	if clientID == nil {
+		log.Println("SQSHandler: got SDP offer in SQS message with no client ID. ignoring this message.")
+		return
+	}
+
+	res, err := r.SQSClient.CreateQueue(ctx, &sqs.CreateQueueInput{
+		QueueName: aws.String("snowflake-client-" + *clientID),
+	})
+	if err != nil {
+		log.Printf("SQSHandler: error encountered when creating answer queue for client %s: %v\n", *clientID, err)
+		return
+	}
+	answerSQSURL := res.QueueUrl
+
+	encPollReq = []byte(*message.Body)
+
+	arg := messages.Arg{
+		Body:             encPollReq,
+		RemoteAddr:       "",
+		RendezvousMethod: messages.RendezvousSqs,
+		Context:          ctx,
+	}
+	err = r.IPC.ClientOffers(arg, &response)
+
+	if err != nil {
+		log.Printf("SQSHandler: error encountered when handling message: %v\n", err)
+		return
+	}
+
+	r.SQSClient.SendMessage(ctx, &sqs.SendMessageInput{
+		QueueUrl:    answerSQSURL,
+		MessageBody: aws.String(string(response)),
+	})
+}
+
+func (r *sqsHandler) deleteMessage(context context.Context, message *types.Message) {
+	r.SQSClient.DeleteMessage(context, &sqs.DeleteMessageInput{
+		QueueUrl:      r.SQSQueueURL,
+		ReceiptHandle: message.ReceiptHandle,
+	})
+}
+
+func newSQSHandler(context context.Context, client sqsclient.SQSClient, sqsQueueName string, region string, i *IPC) (*sqsHandler, error) {
+	// Creates the queue if a queue with the same name doesn't exist. If a queue with the same name and attributes
+	// already exists, then nothing will happen.
If a queue with the same name, but different attributes exists, then + // an error will be returned + res, err := client.CreateQueue(context, &sqs.CreateQueueInput{ + QueueName: aws.String(sqsQueueName), + Attributes: map[string]string{ + "MessageRetentionPeriod": strconv.FormatInt(int64((5 * time.Minute).Seconds()), 10), + }, + }) + + if err != nil { + return nil, err + } + + return &sqsHandler{ + SQSClient: client, + SQSQueueURL: res.QueueUrl, + IPC: i, + cleanupInterval: time.Second * 30, + }, nil +} + +func (r *sqsHandler) PollAndHandleMessages(ctx context.Context) { + log.Println("SQSHandler: Starting to poll for messages at: " + *r.SQSQueueURL) + messagesChn := make(chan *types.Message, 20) + go r.pollMessages(ctx, messagesChn) + go r.cleanupClientQueues(ctx) + + for message := range messagesChn { + select { + case <-ctx.Done(): + // if context is cancelled + return + default: + go func(msg *types.Message) { + r.handleMessage(ctx, msg) + r.deleteMessage(ctx, msg) + }(message) + } + } +} diff --git a/broker/sqs_test.go b/broker/sqs_test.go new file mode 100644 index 0000000..59fe701 --- /dev/null +++ b/broker/sqs_test.go @@ -0,0 +1,307 @@ +package main + +import ( + "bytes" + "context" + "errors" + "log" + "strconv" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/sqs" + "github.com/aws/aws-sdk-go-v2/service/sqs/types" + "github.com/golang/mock/gomock" + . "github.com/smartystreets/goconvey/convey" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/sqsclient" +) + +func TestSQS(t *testing.T) { + + Convey("Context", t, func() { + buf := new(bytes.Buffer) + ipcCtx := NewBrokerContext(log.New(buf, "", 0), "") + i := &IPC{ipcCtx} + + Convey("Responds to SQS client offers...", func() { + ctrl := gomock.NewController(t) + mockSQSClient := sqsclient.NewMockSQSClient(ctrl) + + brokerSQSQueueName := "example-name" + responseQueueURL := aws.String("https://sqs.us-east-1.amazonaws.com/testing") + + runSQSHandler := func(sqsHandlerContext context.Context) { + mockSQSClient.EXPECT().CreateQueue(sqsHandlerContext, &sqs.CreateQueueInput{ + QueueName: aws.String(brokerSQSQueueName), + Attributes: map[string]string{ + "MessageRetentionPeriod": strconv.FormatInt(int64((5 * time.Minute).Seconds()), 10), + }, + }).Return(&sqs.CreateQueueOutput{ + QueueUrl: responseQueueURL, + }, nil).Times(1) + sqsHandler, err := newSQSHandler(sqsHandlerContext, mockSQSClient, brokerSQSQueueName, "example-region", i) + So(err, ShouldBeNil) + go sqsHandler.PollAndHandleMessages(sqsHandlerContext) + } + + messageBody := aws.String("1.0\n{\"offer\": \"fake\", \"nat\": \"unknown\"}") + receiptHandle := "fake-receipt-handle" + sqsReceiveMessageInput := sqs.ReceiveMessageInput{ + QueueUrl: responseQueueURL, + MaxNumberOfMessages: 10, + WaitTimeSeconds: 15, + MessageAttributeNames: []string{ + string(types.QueueAttributeNameAll), + }, + } + sqsDeleteMessageInput := sqs.DeleteMessageInput{ + QueueUrl: responseQueueURL, + ReceiptHandle: &receiptHandle, + } + + Convey("by ignoring it if no client id specified", func(c C) { + sqsHandlerContext, sqsCancelFunc := context.WithCancel(context.Background()) + mockSQSClient.EXPECT().ReceiveMessage(sqsHandlerContext, &sqsReceiveMessageInput).MinTimes(1).DoAndReturn( + func(ctx context.Context, input *sqs.ReceiveMessageInput, optFns ...func(*sqs.Options)) (*sqs.ReceiveMessageOutput, error) { + return &sqs.ReceiveMessageOutput{ + Messages: []types.Message{ + { + Body: messageBody, + 
ReceiptHandle: &receiptHandle, + }, + }, + }, nil + }, + ) + mockSQSClient.EXPECT().DeleteMessage(sqsHandlerContext, &sqsDeleteMessageInput).MinTimes(1).Do( + func(ctx context.Context, input *sqs.DeleteMessageInput, optFns ...func(*sqs.Options)) { + sqsCancelFunc() + }, + ) + // We expect no queues to be created + mockSQSClient.EXPECT().CreateQueue(gomock.Any(), gomock.Any()).Times(0) + runSQSHandler(sqsHandlerContext) + <-sqsHandlerContext.Done() + }) + + Convey("by doing nothing if an error occurs upon receipt of the message", func(c C) { + + sqsHandlerContext, sqsCancelFunc := context.WithCancel(context.Background()) + + mockSQSClient.EXPECT().ReceiveMessage(sqsHandlerContext, &sqsReceiveMessageInput).MinTimes(1).DoAndReturn( + func(ctx context.Context, input *sqs.ReceiveMessageInput, optFns ...func(*sqs.Options)) (*sqs.ReceiveMessageOutput, error) { + sqsCancelFunc() + return nil, errors.New("error") + }, + ) + // We expect no queues to be created or deleted + mockSQSClient.EXPECT().CreateQueue(gomock.Any(), gomock.Any()).Times(0) + mockSQSClient.EXPECT().DeleteMessage(gomock.Any(), gomock.Any()).Times(0) + runSQSHandler(sqsHandlerContext) + <-sqsHandlerContext.Done() + }) + + Convey("by attempting to create a new sqs queue...", func() { + clientId := "fake-id" + sqsCreateQueueInput := sqs.CreateQueueInput{ + QueueName: aws.String("snowflake-client-fake-id"), + } + validMessage := &sqs.ReceiveMessageOutput{ + Messages: []types.Message{ + { + Body: messageBody, + MessageAttributes: map[string]types.MessageAttributeValue{ + "ClientID": {StringValue: &clientId}, + }, + ReceiptHandle: &receiptHandle, + }, + }, + } + Convey("and does not attempt to send a message via SQS if queue creation fails.", func(c C) { + sqsHandlerContext, sqsCancelFunc := context.WithCancel(context.Background()) + + mockSQSClient.EXPECT().ReceiveMessage(sqsHandlerContext, &sqsReceiveMessageInput).AnyTimes().DoAndReturn( + func(ctx context.Context, input *sqs.ReceiveMessageInput, optFns ...func(*sqs.Options)) (*sqs.ReceiveMessageOutput, error) { + sqsCancelFunc() + return validMessage, nil + }) + mockSQSClient.EXPECT().CreateQueue(sqsHandlerContext, &sqsCreateQueueInput).Return(nil, errors.New("error")).AnyTimes() + mockSQSClient.EXPECT().DeleteMessage(sqsHandlerContext, &sqsDeleteMessageInput).AnyTimes() + runSQSHandler(sqsHandlerContext) + <-sqsHandlerContext.Done() + }) + + Convey("and responds with a proxy answer if available.", func(c C) { + sqsHandlerContext, sqsCancelFunc := context.WithCancel(context.Background()) + var numTimes atomic.Uint32 + + mockSQSClient.EXPECT().ReceiveMessage(gomock.Any(), &sqsReceiveMessageInput).AnyTimes().DoAndReturn( + func(ctx context.Context, input *sqs.ReceiveMessageInput, optFns ...func(*sqs.Options)) (*sqs.ReceiveMessageOutput, error) { + + n := numTimes.Add(1) + if n == 1 { + snowflake := ipcCtx.AddSnowflake("fake", "", NATUnrestricted, 0) + go func(c C) { + <-snowflake.offerChannel + snowflake.answerChannel <- "fake answer" + }(c) + return validMessage, nil + } + return nil, errors.New("error") + + }) + mockSQSClient.EXPECT().CreateQueue(gomock.Any(), &sqsCreateQueueInput).Return(&sqs.CreateQueueOutput{ + QueueUrl: responseQueueURL, + }, nil).AnyTimes() + mockSQSClient.EXPECT().DeleteMessage(gomock.Any(), gomock.Any()).AnyTimes() + mockSQSClient.EXPECT().SendMessage(gomock.Any(), gomock.Any()).Times(1).DoAndReturn( + func(ctx context.Context, input *sqs.SendMessageInput, optFns ...func(*sqs.Options)) (*sqs.SendMessageOutput, error) { + c.So(input.MessageBody, ShouldEqual, 
aws.String("{\"answer\":\"fake answer\"}")) + // Ensure that match is correctly recorded in metrics + ipcCtx.metrics.printMetrics() + c.So(buf.String(), ShouldContainSubstring, `client-denied-count 0 +client-restricted-denied-count 0 +client-unrestricted-denied-count 0 +client-snowflake-match-count 8 +client-snowflake-timeout-count 0 +client-http-count 0 +client-http-ips +client-ampcache-count 0 +client-ampcache-ips +client-sqs-count 8 +client-sqs-ips ??=8 +`) + sqsCancelFunc() + return &sqs.SendMessageOutput{}, nil + }, + ) + runSQSHandler(sqsHandlerContext) + + <-sqsHandlerContext.Done() + }) + }) + }) + + Convey("Cleans up SQS client queues...", func() { + brokerSQSQueueName := "example-name" + responseQueueURL := aws.String("https://sqs.us-east-1.amazonaws.com/testing") + + ctrl := gomock.NewController(t) + mockSQSClient := sqsclient.NewMockSQSClient(ctrl) + + runSQSHandler := func(sqsHandlerContext context.Context) { + + mockSQSClient.EXPECT().CreateQueue(sqsHandlerContext, &sqs.CreateQueueInput{ + QueueName: aws.String(brokerSQSQueueName), + Attributes: map[string]string{ + "MessageRetentionPeriod": strconv.FormatInt(int64((5 * time.Minute).Seconds()), 10), + }, + }).Return(&sqs.CreateQueueOutput{ + QueueUrl: responseQueueURL, + }, nil).Times(1) + + mockSQSClient.EXPECT().ReceiveMessage(sqsHandlerContext, gomock.Any()).AnyTimes().Return( + &sqs.ReceiveMessageOutput{ + Messages: []types.Message{}, + }, nil, + ) + + sqsHandler, err := newSQSHandler(sqsHandlerContext, mockSQSClient, brokerSQSQueueName, "example-region", i) + So(err, ShouldBeNil) + // Set the cleanup interval to 1 ns so we can immediately test the cleanup logic + sqsHandler.cleanupInterval = time.Nanosecond + + go sqsHandler.PollAndHandleMessages(sqsHandlerContext) + } + + Convey("does nothing if there are no open queues.", func() { + var wg sync.WaitGroup + wg.Add(1) + sqsHandlerContext, sqsCancelFunc := context.WithCancel(context.Background()) + defer wg.Wait() + + mockSQSClient.EXPECT().ListQueues(sqsHandlerContext, &sqs.ListQueuesInput{ + QueueNamePrefix: aws.String("snowflake-client-"), + MaxResults: aws.Int32(1000), + NextToken: nil, + }).DoAndReturn(func(ctx context.Context, input *sqs.ListQueuesInput, optFns ...func(*sqs.Options)) (*sqs.ListQueuesOutput, error) { + wg.Done() + // Cancel the handler context since we are only interested in testing one iteration of the cleanup + sqsCancelFunc() + return &sqs.ListQueuesOutput{ + QueueUrls: []string{}, + }, nil + }) + + runSQSHandler(sqsHandlerContext) + }) + + Convey("deletes open queue when there is one open queue.", func(c C) { + var wg sync.WaitGroup + wg.Add(1) + sqsHandlerContext, sqsCancelFunc := context.WithCancel(context.Background()) + + clientQueueUrl1 := "https://sqs.us-east-1.amazonaws.com/snowflake-client-1" + clientQueueUrl2 := "https://sqs.us-east-1.amazonaws.com/snowflake-client-2" + + gomock.InOrder( + mockSQSClient.EXPECT().ListQueues(sqsHandlerContext, &sqs.ListQueuesInput{ + QueueNamePrefix: aws.String("snowflake-client-"), + MaxResults: aws.Int32(1000), + NextToken: nil, + }).Times(1).Return(&sqs.ListQueuesOutput{ + QueueUrls: []string{ + clientQueueUrl1, + clientQueueUrl2, + }, + }, nil), + mockSQSClient.EXPECT().ListQueues(sqsHandlerContext, &sqs.ListQueuesInput{ + QueueNamePrefix: aws.String("snowflake-client-"), + MaxResults: aws.Int32(1000), + NextToken: nil, + }).Times(1).DoAndReturn(func(ctx context.Context, input *sqs.ListQueuesInput, optFns ...func(*sqs.Options)) (*sqs.ListQueuesOutput, error) { + // Executed on second iteration of 
cleanupClientQueues loop. This means that one full iteration has completed and we can verify the results of that iteration + wg.Done() + sqsCancelFunc() + return &sqs.ListQueuesOutput{ + QueueUrls: []string{}, + }, nil + }), + ) + + gomock.InOrder( + mockSQSClient.EXPECT().GetQueueAttributes(sqsHandlerContext, &sqs.GetQueueAttributesInput{ + QueueUrl: aws.String(clientQueueUrl1), + AttributeNames: []types.QueueAttributeName{types.QueueAttributeNameLastModifiedTimestamp}, + }).Times(1).Return(&sqs.GetQueueAttributesOutput{ + Attributes: map[string]string{ + string(types.QueueAttributeNameLastModifiedTimestamp): "0", + }}, nil), + + mockSQSClient.EXPECT().GetQueueAttributes(sqsHandlerContext, &sqs.GetQueueAttributesInput{ + QueueUrl: aws.String(clientQueueUrl2), + AttributeNames: []types.QueueAttributeName{types.QueueAttributeNameLastModifiedTimestamp}, + }).Times(1).Return(&sqs.GetQueueAttributesOutput{ + Attributes: map[string]string{ + string(types.QueueAttributeNameLastModifiedTimestamp): "0", + }}, nil), + ) + + gomock.InOrder( + mockSQSClient.EXPECT().DeleteQueue(sqsHandlerContext, &sqs.DeleteQueueInput{ + QueueUrl: aws.String(clientQueueUrl1), + }).Return(&sqs.DeleteQueueOutput{}, nil), + mockSQSClient.EXPECT().DeleteQueue(sqsHandlerContext, &sqs.DeleteQueueInput{ + QueueUrl: aws.String(clientQueueUrl2), + }).Return(&sqs.DeleteQueueOutput{}, nil), + ) + + runSQSHandler(sqsHandlerContext) + wg.Wait() + }) + }) + }) +} diff --git a/broker/test_bridgeList.txt b/broker/test_bridgeList.txt new file mode 100644 index 0000000..5213318 --- /dev/null +++ b/broker/test_bridgeList.txt @@ -0,0 +1,2 @@ +{"displayName":"flakey", "webSocketAddress":"wss://snowflake.torproject.net", "fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80A72"} +{"displayName":"second", "webSocketAddress":"wss://02.snowflake.torproject.net", "fingerprint":"8838024498816A039FCBBAB14E6F40A0843051FA"} diff --git a/client/README.md b/client/README.md index 50bdba3..2cbfb8f 100644 --- a/client/README.md +++ b/client/README.md @@ -1,20 +1,122 @@ + + +**Table of Contents** + +- [Dependencies](#dependencies) +- [Building the Snowflake client](#building-the-snowflake-client) +- [Running the Snowflake client with Tor](#running-the-snowflake-client-with-tor) + + + This is the Tor client component of Snowflake. -It is based on goptlib. +It is based on the [goptlib](https://gitweb.torproject.org/pluggable-transports/goptlib.git/) pluggable transports library for Tor. -### Flags -The client uses these following `torrc` options by default: +### Dependencies + +- Go 1.15+ +- We use the [pion/webrtc](https://github.com/pion/webrtc) library for WebRTC communication with Snowflake proxies. Note: running `go get` will fetch this dependency automatically during the build process. + +### Building the Snowflake client + +To build the Snowflake client, make sure you are in the `client/` directory, and then run: + ``` -ClientTransportPlugin snowflake exec ./client \ --url https://snowflake-broker.azureedge.net/ \ --front ajax.aspnetcdn.com \ --ice stun:stun.l.google.com:19302 +go get +go build ``` -`-url` should be the URL of a Broker instance. +### Running the Snowflake client with Tor -`-front` is an optional front domain for the Broker request. +The Snowflake client can be configured with SOCKS options. We have a few example `torrc` files in this directory. We recommend the following `torrc` options by default: +``` +UseBridges 1 -`-ice` is a comma-separated list of ICE servers. These can be STUN or TURN -servers. 
+ClientTransportPlugin snowflake exec ./client -log snowflake.log
+
+# CDN77
+
+Bridge snowflake 192.0.2.4:80 8838024498816A039FCBBAB14E6F40A0843051FA fingerprint=8838024498816A039FCBBAB14E6F40A0843051FA url=https://1098762253.rsc.cdn77.org/ fronts=www.cdn77.com,www.phpmyadmin.net ice=stun:stun.antisip.com:3478,stun:stun.epygi.com:3478,stun:stun.uls.co.za:3478,stun:stun.voipgate.com:3478,stun:stun.mixvoip.com:3478,stun:stun.nextcloud.com:3478,stun:stun.bethesda.net:3478,stun:stun.nextcloud.com:443 utls-imitate=hellorandomizedalpn
+Bridge snowflake 192.0.2.3:80 2B280B23E1107BB62ABFC40DDCC8824814F80A72 fingerprint=2B280B23E1107BB62ABFC40DDCC8824814F80A72 url=https://1098762253.rsc.cdn77.org/ fronts=www.cdn77.com,www.phpmyadmin.net ice=stun:stun.antisip.com:3478,stun:stun.epygi.com:3478,stun:stun.uls.co.za:3478,stun:stun.voipgate.com:3478,stun:stun.mixvoip.com:3478,stun:stun.nextcloud.com:3478,stun:stun.bethesda.net:3478,stun:stun.nextcloud.com:443 utls-imitate=hellorandomizedalpn
+
+# ampcache
+#Bridge snowflake 192.0.2.5:80 2B280B23E1107BB62ABFC40DDCC8824814F80A72 fingerprint=2B280B23E1107BB62ABFC40DDCC8824814F80A72 url=https://snowflake-broker.torproject.net/ ampcache=https://cdn.ampproject.org/ front=www.google.com ice=stun:stun.antisip.com:3478,stun:stun.epygi.com:3478,stun:stun.uls.co.za:3478,stun:stun.voipgate.com:3478,stun:stun.mixvoip.com:3478,stun:stun.nextcloud.com:3478,stun:stun.bethesda.net:3478,stun:stun.nextcloud.com:443 utls-imitate=hellorandomizedalpn
+#Bridge snowflake 192.0.2.6:80 8838024498816A039FCBBAB14E6F40A0843051FA fingerprint=8838024498816A039FCBBAB14E6F40A0843051FA url=https://snowflake-broker.torproject.net/ ampcache=https://cdn.ampproject.org/ front=www.google.com ice=stun:stun.antisip.com:3478,stun:stun.epygi.com:3478,stun:stun.uls.co.za:3478,stun:stun.voipgate.com:3478,stun:stun.mixvoip.com:3478,stun:stun.nextcloud.com:3478,stun:stun.bethesda.net:3478,stun:stun.nextcloud.com:443 utls-imitate=hellorandomizedalpn
+
+# sqs
+#Bridge snowflake 192.0.2.5:80 2B280B23E1107BB62ABFC40DDCC8824814F80A72 fingerprint=2B280B23E1107BB62ABFC40DDCC8824814F80A72 sqsqueue=https://sqs.us-east-1.amazonaws.com/893902434899/snowflake-broker sqscreds=eyJhd3MtYWNjZXNzLWtleS1pZCI6IkFLSUE1QUlGNFdKSlhTN1lIRUczIiwiYXdzLXNlY3JldC1rZXkiOiI3U0RNc0pBNHM1RitXZWJ1L3pMOHZrMFFXV0lsa1c2Y1dOZlVsQ0tRIn0= ice=stun:stun.antisip.com:3478,stun:stun.epygi.com:3478,stun:stun.uls.co.za:3478,stun:stun.voipgate.com:3478,stun:stun.nextcloud.com:3478,stun:stun.bethesda.net:3478,stun:stun.nextcloud.com:443 utls-imitate=hellorandomizedalpn
+#Bridge snowflake 192.0.2.6:80 8838024498816A039FCBBAB14E6F40A0843051FA fingerprint=8838024498816A039FCBBAB14E6F40A0843051FA sqsqueue=https://sqs.us-east-1.amazonaws.com/893902434899/snowflake-broker sqscreds=eyJhd3MtYWNjZXNzLWtleS1pZCI6IkFLSUE1QUlGNFdKSlhTN1lIRUczIiwiYXdzLXNlY3JldC1rZXkiOiI3U0RNc0pBNHM1RitXZWJ1L3pMOHZrMFFXV0lsa1c2Y1dOZlVsQ0tRIn0= ice=stun:stun.antisip.com:3478,stun:stun.epygi.com:3478,stun:stun.uls.co.za:3478,stun:stun.voipgate.com:3478,stun:stun.nextcloud.com:3478,stun:stun.bethesda.net:3478,stun:stun.nextcloud.com:443 utls-imitate=hellorandomizedalpn
+```
+
+`fingerprint=` is the fingerprint of the bridge that the client will ultimately be connecting to.
+
+`url=` is the URL of a broker instance. If you would like to try out Snowflake with your own broker, simply provide the URL of your broker instance with this option.
+
+`fronts=` is an optional, comma-separated list of front domains for the broker request.
+
+`ice=` is a comma-separated list of ICE servers. These must be STUN (over UDP) servers with the form `stun:host[:port]`. We recommend using servers that have implemented NAT discovery. See our wiki page on [NAT traversal](https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/wikis/NAT-matching) for more information.
+
+`utls-imitate=` instructs the client to use TLS fingerprinting resistance when rendezvousing with the broker.
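+
+If you run your own broker, the same rendezvous settings can also be passed to the client as command-line flags in the `ClientTransportPlugin` line instead of per-`Bridge` options; the `-url`, `-front`, and `-ice` flags are described under Registration methods below. A minimal sketch (the broker URL and front domain here are illustrative placeholders, not real endpoints):
+```
+ClientTransportPlugin snowflake exec ./client \
+-url https://snowflake-broker.example.com/ \
+-front front.example.com \
+-ice stun:stun.l.google.com:19302 \
+-log snowflake.log
+```
+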
These must be STUN (over UDP) servers with the form `stun:host[:port]`. We recommend using servers that have implemented NAT discovery. See our wiki page on [NAT traversal](https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/wikis/NAT-matching) for more information.
+
+`utls-imitate=` instructs the client to use fingerprinting resistance when rendezvousing with the broker.
+
+To bootstrap Tor, run:
+```
+tor -f torrc
+```
+This should start the client plugin, bootstrapping to 100% using WebRTC.
+
+### Registration methods
+
+The Snowflake client supports a few different ways of communicating with the broker.
+This initial step is sometimes called rendezvous.
+
+#### Domain fronting HTTPS
+
+For domain fronting rendezvous, use the `-url` and `-front` command-line options together.
+[Domain fronting](https://www.bamsoftware.com/papers/fronting/)
+hides the externally visible domain name from an external observer,
+making it appear that the Snowflake client is communicating with some server
+other than the Snowflake broker.
+
+* `-url` is the HTTPS URL of a forwarder to the broker, on some service that supports domain fronting, such as a CDN.
+* `-front` is the domain name to show externally. It must be another domain on the same service.
+
+Example:
+```
+-url https://snowflake-broker.torproject.net.global.prod.fastly.net/ \
+-front cdn.sstatic.net \
+```
+
+#### AMP cache
+
+For AMP cache rendezvous, use the `-url`, `-ampcache`, and `-front` command-line options together.
+[AMP](https://amp.dev/documentation/) is a standard for web pages for mobile computers.
+An [AMP cache](https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/how_amp_pages_are_cached/)
+is a cache and proxy specialized for AMP pages.
+The Snowflake broker has the ability to make its client registration responses look like AMP pages,
+so it can be accessed through an AMP cache.
+When you use AMP cache rendezvous, it appears to an observer that the Snowflake client
+is accessing an AMP cache, or some other domain operated by the same organization.
+You still need to use the `-front` command-line option, because the
+[format of AMP cache URLs](https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/)
+would otherwise reveal the domain name of the broker.
+
+There is only one AMP cache that works with this option,
+the Google AMP cache at https://cdn.ampproject.org/.
+
+* `-url` is the HTTPS URL of the broker.
+* `-ampcache` is `https://cdn.ampproject.org/`.
+* `-front` is any Google domain, such as `www.google.com`.
+
+Example:
+```
+-url https://snowflake-broker.torproject.net/ \
+-ampcache https://cdn.ampproject.org/ \
+-front www.google.com \
+```
+
+#### Direct access
+
+It is also possible to access the broker directly using HTTPS, without domain fronting,
+for testing purposes. This mode is not suitable for circumvention, because the
+broker is easily blocked by its address.
diff --git a/client/lib/interfaces.go b/client/lib/interfaces.go
index f62d4f5..e8a5cf6 100644
--- a/client/lib/interfaces.go
+++ b/client/lib/interfaces.go
@@ -1,56 +1,24 @@
-package lib
+package snowflake_client
 
-import (
-	"io"
-	"net"
-)
-
-type Connector interface {
-	Connect() error
-}
-
-type Resetter interface {
-	Reset()
-	WaitForReset()
-}
-
-// Interface for a single remote WebRTC peer.
-// In the Client context, "Snowflake" refers to the remote browser proxy. 
-type Snowflake interface { - io.ReadWriteCloser - Resetter - Connector -} - -// Interface for catching Snowflakes. (aka the remote dialer) +// Tongue is an interface for catching Snowflakes. (aka the remote dialer) type Tongue interface { - Catch() (Snowflake, error) + // Catch makes a connection to a new snowflake. + Catch() (*WebRTCPeer, error) + + // GetMax returns the maximum number of snowflakes a client can have. + GetMax() int } -// Interface for collecting some number of Snowflakes, for passing along -// ultimately to the SOCKS handler. +// SnowflakeCollector is an interface for managing a client's collection of snowflakes. type SnowflakeCollector interface { + // Collect adds a snowflake to the collection. + // The implementation of Collect should decide how to connect to and maintain + // the connection to the WebRTCPeer. + Collect() (*WebRTCPeer, error) - // Add a Snowflake to the collection. - // Implementation should decide how to connect and maintain the webRTCConn. - Collect() (Snowflake, error) + // Pop removes and returns the most available snowflake from the collection. + Pop() *WebRTCPeer - // Remove and return the most available Snowflake from the collection. - Pop() Snowflake - - // Signal when the collector has stopped collecting. + // Melted returns a channel that will signal when the collector has stopped. Melted() <-chan struct{} } - -// Interface to adapt to goptlib's SocksConn struct. -type SocksConnector interface { - Grant(*net.TCPAddr) error - Reject() error - net.Conn -} - -// Interface for the Snowflake's transport. (Typically just webrtc.DataChannel) -type SnowflakeDataChannel interface { - io.Closer - Send([]byte) -} diff --git a/client/lib/lib_test.go b/client/lib/lib_test.go index 4f74cb3..5eab4a0 100644 --- a/client/lib/lib_test.go +++ b/client/lib/lib_test.go @@ -1,57 +1,26 @@ -package lib +package snowflake_client import ( - "bytes" "fmt" - "io/ioutil" "net" - "net/http" "testing" + "time" - "github.com/keroserene/go-webrtc" . "github.com/smartystreets/goconvey/convey" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/event" ) -type MockDataChannel struct { - destination bytes.Buffer - done chan bool +type FakeDialer struct { + max int } -func (m *MockDataChannel) Send(data []byte) { - m.destination.Write(data) - m.done <- true -} - -func (*MockDataChannel) Close() error { return nil } - -type MockResponse struct{} - -func (m *MockResponse) Read(p []byte) (int, error) { - p = []byte(`{"type":"answer","sdp":"fake"}`) - return 0, nil -} -func (m *MockResponse) Close() error { return nil } - -type MockTransport struct { - statusOverride int - body []byte -} - -// Just returns a response with fake SDP answer. 
-func (m *MockTransport) RoundTrip(req *http.Request) (*http.Response, error) { - s := ioutil.NopCloser(bytes.NewReader(m.body)) - r := &http.Response{ - StatusCode: m.statusOverride, - Body: s, - } - return r, nil -} - -type FakeDialer struct{} - -func (w FakeDialer) Catch() (Snowflake, error) { +func (w FakeDialer) Catch() (*WebRTCPeer, error) { fmt.Println("Caught a dummy snowflake.") - return &WebRTCPeer{}, nil + return &WebRTCPeer{closed: make(chan struct{})}, nil +} + +func (w FakeDialer) GetMax() int { + return w.max } type FakeSocksConn struct { @@ -65,29 +34,23 @@ func (f FakeSocksConn) Reject() error { } func (f FakeSocksConn) Grant(addr *net.TCPAddr) error { return nil } -type FakePeers struct{ toRelease *WebRTCPeer } - -func (f FakePeers) Collect() (Snowflake, error) { return &WebRTCPeer{}, nil } -func (f FakePeers) Pop() Snowflake { return nil } -func (f FakePeers) Melted() <-chan struct{} { return nil } - func TestSnowflakeClient(t *testing.T) { Convey("Peers", t, func() { Convey("Can construct", func() { - p := NewPeers(1) - So(p.capacity, ShouldEqual, 1) + d := &FakeDialer{max: 1} + p, _ := NewPeers(d) + So(p.Tongue.GetMax(), ShouldEqual, 1) So(p.snowflakeChan, ShouldNotBeNil) So(cap(p.snowflakeChan), ShouldEqual, 1) }) Convey("Collecting a Snowflake requires a Tongue.", func() { - p := NewPeers(1) - _, err := p.Collect() + p, err := NewPeers(nil) So(err, ShouldNotBeNil) - So(p.Count(), ShouldEqual, 0) // Set the dialer so that collection is possible. - p.Tongue = FakeDialer{} + d := &FakeDialer{max: 1} + p, err = NewPeers(d) _, err = p.Collect() So(err, ShouldBeNil) So(p.Count(), ShouldEqual, 1) @@ -97,8 +60,7 @@ func TestSnowflakeClient(t *testing.T) { Convey("Collection continues until capacity.", func() { c := 5 - p := NewPeers(c) - p.Tongue = FakeDialer{} + p, _ := NewPeers(FakeDialer{max: c}) // Fill up to capacity. for i := 0; i < c; i++ { fmt.Println("Adding snowflake ", i) @@ -112,7 +74,7 @@ func TestSnowflakeClient(t *testing.T) { So(err, ShouldNotBeNil) So(p.Count(), ShouldEqual, c) - // But popping and closing allows it to continue. + // But popping allows it to continue. 
s := p.Pop() s.Close() So(s, ShouldNotBeNil) @@ -124,8 +86,7 @@ func TestSnowflakeClient(t *testing.T) { }) Convey("Count correctly purges peers marked for deletion.", func() { - p := NewPeers(4) - p.Tongue = FakeDialer{} + p, _ := NewPeers(FakeDialer{max: 5}) p.Collect() p.Collect() p.Collect() @@ -141,9 +102,9 @@ func TestSnowflakeClient(t *testing.T) { Convey("End Closes all peers.", func() { cnt := 5 - p := NewPeers(cnt) + p, _ := NewPeers(FakeDialer{max: cnt}) for i := 0; i < cnt; i++ { - p.activePeers.PushBack(&WebRTCPeer{}) + p.activePeers.PushBack(&WebRTCPeer{closed: make(chan struct{})}) } So(p.Count(), ShouldEqual, cnt) p.End() @@ -152,8 +113,7 @@ func TestSnowflakeClient(t *testing.T) { }) Convey("Pop skips over closed peers.", func() { - p := NewPeers(4) - p.Tongue = FakeDialer{} + p, _ := NewPeers(FakeDialer{max: 4}) wc1, _ := p.Collect() wc2, _ := p.Collect() wc3, _ := p.Collect() @@ -171,163 +131,91 @@ func TestSnowflakeClient(t *testing.T) { So(r, ShouldEqual, wc4) }) - }) + Convey("Terminate Connect() loop", func() { + p, _ := NewPeers(FakeDialer{max: 4}) + go func() { + for { + p.Collect() + select { + case <-p.Melted(): + return + default: + } + } + }() + <-time.After(10 * time.Second) - Convey("Snowflake", t, func() { - - SkipConvey("Handler Grants correctly", func() { - socks := &FakeSocksConn{} - snowflakes := &FakePeers{} - - So(socks.rejected, ShouldEqual, false) - snowflakes.toRelease = nil - Handler(socks, snowflakes) - So(socks.rejected, ShouldEqual, true) + p.End() + <-p.Melted() + So(p.Count(), ShouldEqual, 0) }) - Convey("WebRTC Connection", func() { - c := NewWebRTCPeer(nil, nil) - So(c.buffer.Bytes(), ShouldEqual, nil) - - Convey("Can construct a WebRTCConn", func() { - s := NewWebRTCPeer(nil, nil) - So(s, ShouldNotBeNil) - So(s.offerChannel, ShouldNotBeNil) - So(s.answerChannel, ShouldNotBeNil) - s.Close() - }) - - Convey("Write buffers when datachannel is nil", func() { - c.Write([]byte("test")) - c.transport = nil - So(c.buffer.Bytes(), ShouldResemble, []byte("test")) - }) - - Convey("Write sends to datachannel when not nil", func() { - mock := new(MockDataChannel) - c.transport = mock - mock.done = make(chan bool, 1) - c.Write([]byte("test")) - <-mock.done - So(c.buffer.Bytes(), ShouldEqual, nil) - So(mock.destination.Bytes(), ShouldResemble, []byte("test")) - }) - - Convey("Exchange SDP sets remote description", func() { - c.offerChannel = make(chan *webrtc.SessionDescription, 1) - c.answerChannel = make(chan *webrtc.SessionDescription, 1) - - c.config = webrtc.NewConfiguration() - c.preparePeerConnection() - - c.offerChannel <- nil - answer := webrtc.DeserializeSessionDescription( - `{"type":"answer","sdp":""}`) - c.answerChannel <- answer - c.exchangeSDP() - }) - - SkipConvey("Exchange SDP fails on nil answer", func() { - c.reset = make(chan struct{}) - c.offerChannel = make(chan *webrtc.SessionDescription, 1) - c.answerChannel = make(chan *webrtc.SessionDescription, 1) - c.offerChannel <- nil - c.answerChannel <- nil - c.exchangeSDP() - <-c.reset - }) - - }) }) Convey("Dialers", t, func() { Convey("Can construct WebRTCDialer.", func() { - broker := &BrokerChannel{Host: "test"} - d := NewWebRTCDialer(broker, nil) + broker := &BrokerChannel{} + d := NewWebRTCDialer(broker, nil, 1) So(d, ShouldNotBeNil) So(d.BrokerChannel, ShouldNotBeNil) - So(d.BrokerChannel.Host, ShouldEqual, "test") - }) - Convey("WebRTCDialer cannot Catch a snowflake with nil broker.", func() { - d := NewWebRTCDialer(nil, nil) - conn, err := d.Catch() - So(conn, ShouldBeNil) - 
So(err, ShouldNotBeNil) }) SkipConvey("WebRTCDialer can Catch a snowflake.", func() { - broker := &BrokerChannel{Host: "test"} - d := NewWebRTCDialer(broker, nil) + broker := &BrokerChannel{} + d := NewWebRTCDialer(broker, nil, 1) conn, err := d.Catch() So(conn, ShouldBeNil) So(err, ShouldNotBeNil) }) }) - Convey("Rendezvous", t, func() { - webrtc.SetLoggingVerbosity(0) - transport := &MockTransport{ - http.StatusOK, - []byte(`{"type":"answer","sdp":"fake"}`), - } - fakeOffer := webrtc.DeserializeSessionDescription("test") +} - Convey("Construct BrokerChannel with no front domain", func() { - b := NewBrokerChannel("test.broker", "", transport) - So(b.url, ShouldNotBeNil) - So(b.url.Path, ShouldResemble, "test.broker") - So(b.transport, ShouldNotBeNil) - }) - - Convey("Construct BrokerChannel *with* front domain", func() { - b := NewBrokerChannel("test.broker", "front", transport) - So(b.url, ShouldNotBeNil) - So(b.url.Path, ShouldResemble, "test.broker") - So(b.url.Host, ShouldResemble, "front") - So(b.transport, ShouldNotBeNil) - }) - - Convey("BrokerChannel.Negotiate responds with answer", func() { - b := NewBrokerChannel("test.broker", "", transport) - answer, err := b.Negotiate(fakeOffer) - So(err, ShouldBeNil) - So(answer, ShouldNotBeNil) - So(answer.Sdp, ShouldResemble, "fake") - }) - - Convey("BrokerChannel.Negotiate fails with 503", func() { - b := NewBrokerChannel("test.broker", "", - &MockTransport{http.StatusServiceUnavailable, []byte("\n")}) - answer, err := b.Negotiate(fakeOffer) - So(err, ShouldNotBeNil) - So(answer, ShouldBeNil) - So(err.Error(), ShouldResemble, BrokerError503) - }) - - Convey("BrokerChannel.Negotiate fails with 400", func() { - b := NewBrokerChannel("test.broker", "", - &MockTransport{http.StatusBadRequest, []byte("\n")}) - answer, err := b.Negotiate(fakeOffer) - So(err, ShouldNotBeNil) - So(answer, ShouldBeNil) - So(err.Error(), ShouldResemble, BrokerError400) - }) - - Convey("BrokerChannel.Negotiate fails with large read", func() { - b := NewBrokerChannel("test.broker", "", - &MockTransport{http.StatusOK, make([]byte, 100001, 100001)}) - answer, err := b.Negotiate(fakeOffer) - So(err, ShouldNotBeNil) - So(answer, ShouldBeNil) - So(err.Error(), ShouldResemble, "unexpected EOF") - }) - - Convey("BrokerChannel.Negotiate fails with unexpected error", func() { - b := NewBrokerChannel("test.broker", "", - &MockTransport{123, []byte("")}) - answer, err := b.Negotiate(fakeOffer) - So(err, ShouldNotBeNil) - So(answer, ShouldBeNil) - So(err.Error(), ShouldResemble, BrokerErrorUnexpected) +func TestWebRTCPeer(t *testing.T) { + Convey("WebRTCPeer", t, func(c C) { + p := &WebRTCPeer{closed: make(chan struct{}), + eventsLogger: event.NewSnowflakeEventDispatcher()} + Convey("checks for staleness", func() { + go p.checkForStaleness(time.Second) + <-time.After(2 * time.Second) + So(p.Closed(), ShouldEqual, true) }) }) } + +func TestICEServerParser(t *testing.T) { + Convey("Test parsing of ICE servers", t, func() { + for _, test := range []struct { + input []string + urls [][]string + length int + }{ + { + []string{"stun:stun.l.google.com:19302", "stun:stun.ekiga.net"}, + [][]string{[]string{"stun:stun.l.google.com:19302"}, []string{"stun:stun.ekiga.net:3478"}}, + 2, + }, + { + []string{"stun:stun1.l.google.com:19302", "stun.ekiga.net", "stun:stun.example.com:1234/path?query", + "https://example.com", "turn:relay.metered.ca:80?transport=udp"}, + [][]string{[]string{"stun:stun1.l.google.com:19302"}}, + 1, + }, + } { + servers := parseIceServers(test.input) + + if test.urls 
== nil { + So(servers, ShouldBeNil) + } else { + So(servers, ShouldNotBeNil) + } + + So(len(servers), ShouldEqual, test.length) + + for _, server := range servers { + So(test.urls, ShouldContain, server.URLs) + } + + } + + }) +} diff --git a/client/lib/peers.go b/client/lib/peers.go index 21411ed..e828ce5 100644 --- a/client/lib/peers.go +++ b/client/lib/peers.go @@ -1,13 +1,14 @@ -package lib +package snowflake_client import ( "container/list" "errors" "fmt" "log" + "sync" ) -// Container which keeps track of multiple WebRTC remote peers. +// Peers is a container that keeps track of multiple WebRTC remote peers. // Implements |SnowflakeCollector|. // // Maintaining a set of pre-connected Peers with fresh but inactive datachannels @@ -20,38 +21,51 @@ import ( // version of Snowflake) type Peers struct { Tongue - BytesLogger + bytesLogger bytesLogger - snowflakeChan chan Snowflake + snowflakeChan chan *WebRTCPeer activePeers *list.List - capacity int melt chan struct{} + + collectLock sync.Mutex + closeOnce sync.Once } -// Construct a fresh container of remote peers. -func NewPeers(max int) *Peers { - p := &Peers{capacity: max} +// NewPeers constructs a fresh container of remote peers. +func NewPeers(tongue Tongue) (*Peers, error) { + p := &Peers{} // Use buffered go channel to pass snowflakes onwards to the SOCKS handler. - p.snowflakeChan = make(chan Snowflake, max) + if tongue == nil { + return nil, errors.New("missing Tongue to catch Snowflakes with") + } + p.snowflakeChan = make(chan *WebRTCPeer, tongue.GetMax()) p.activePeers = list.New() - p.melt = make(chan struct{}, 1) - return p + p.melt = make(chan struct{}) + p.Tongue = tongue + return p, nil } -// As part of |SnowflakeCollector| interface. -func (p *Peers) Collect() (Snowflake, error) { +// Collect connects to and adds a new remote peer as part of |SnowflakeCollector| interface. +func (p *Peers) Collect() (*WebRTCPeer, error) { + // Engage the Snowflake Catching interface, which must be available. + p.collectLock.Lock() + defer p.collectLock.Unlock() + select { + case <-p.melt: + return nil, fmt.Errorf("Snowflakes have melted") + default: + } + if nil == p.Tongue { + return nil, errors.New("missing Tongue to catch Snowflakes with") + } cnt := p.Count() - s := fmt.Sprintf("Currently at [%d/%d]", cnt, p.capacity) - if cnt >= p.capacity { - s := fmt.Sprintf("At capacity [%d/%d]", cnt, p.capacity) - return nil, errors.New(s) + capacity := p.Tongue.GetMax() + s := fmt.Sprintf("Currently at [%d/%d]", cnt, capacity) + if cnt >= capacity { + return nil, fmt.Errorf("At capacity [%d/%d]", cnt, capacity) } log.Println("WebRTC: Collecting a new Snowflake.", s) - // Engage the Snowflake Catching interface, which must be available. - if nil == p.Tongue { - return nil, errors.New("Missing Tongue to catch Snowflakes with.") - } // BUG: some broker conflict here. connection, err := p.Tongue.Catch() if nil != err { @@ -63,32 +77,30 @@ func (p *Peers) Collect() (Snowflake, error) { return connection, nil } -// As part of |SnowflakeCollector| interface. -func (p *Peers) Pop() Snowflake { - // Blocks until an available, valid snowflake appears. - var snowflake Snowflake - var ok bool - for nil == snowflake { - snowflake, ok = <-p.snowflakeChan - conn := snowflake.(*WebRTCPeer) +// Pop blocks until an available, valid snowflake appears. +// Pop will return nil after End has been called. 
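+//
+// A minimal consumer loop looks like this (a sketch; handleConn is a
+// hypothetical callback, not part of this package):
+//
+//	for {
+//		conn := p.Pop()
+//		if conn == nil {
+//			break // End was called; no more snowflakes.
+//		}
+//		go handleConn(conn)
+//	}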
+func (p *Peers) Pop() *WebRTCPeer { + for { + snowflake, ok := <-p.snowflakeChan if !ok { return nil } - if conn.closed { - snowflake = nil + if snowflake.Closed() { + continue } + // Set to use the same rate-limited traffic logger to keep consistency. + snowflake.bytesLogger = p.bytesLogger + return snowflake } - // Set to use the same rate-limited traffic logger to keep consistency. - snowflake.(*WebRTCPeer).BytesLogger = p.BytesLogger - return snowflake } -// As part of |SnowflakeCollector| interface. +// Melted returns a channel that will close when peers stop being collected. +// Melted is a necessary part of |SnowflakeCollector| interface. func (p *Peers) Melted() <-chan struct{} { return p.melt } -// Returns total available Snowflakes (including the active one) +// Count returns the total available Snowflakes (including the active ones) // The count only reduces when connections themselves close, rather than when // they are popped. func (p *Peers) Count() int { @@ -101,24 +113,29 @@ func (p *Peers) purgeClosedPeers() { next := e.Next() conn := e.Value.(*WebRTCPeer) // Purge those marked for deletion. - if conn.closed { + if conn.Closed() { p.activePeers.Remove(e) } e = next } } -// Close all Peers contained here. +// End closes all active connections to Peers contained here, and stops the +// collection of future Peers. func (p *Peers) End() { - close(p.snowflakeChan) - p.melt <- struct{}{} - cnt := p.Count() - for e := p.activePeers.Front(); e != nil; { - next := e.Next() - conn := e.Value.(*WebRTCPeer) - conn.Close() - p.activePeers.Remove(e) - e = next - } - log.Println("WebRTC: melted all", cnt, "snowflakes.") + p.closeOnce.Do(func() { + close(p.melt) + p.collectLock.Lock() + defer p.collectLock.Unlock() + close(p.snowflakeChan) + cnt := p.Count() + for e := p.activePeers.Front(); e != nil; { + next := e.Next() + conn := e.Value.(*WebRTCPeer) + conn.Close() + p.activePeers.Remove(e) + e = next + } + log.Printf("WebRTC: melted all %d snowflakes.", cnt) + }) } diff --git a/client/lib/rendezvous.go b/client/lib/rendezvous.go index 54ce459..76209ee 100644 --- a/client/lib/rendezvous.go +++ b/client/lib/rendezvous.go @@ -1,151 +1,323 @@ // WebRTC rendezvous requires the exchange of SessionDescriptions between // peers in order to establish a PeerConnection. -// -// This file contains the one method currently available to Snowflake: -// -// - Domain-fronted HTTP signaling. The Broker automatically exchange offers -// and answers between this client and some remote WebRTC proxy. -package lib +package snowflake_client import ( - "bytes" + "crypto/tls" "errors" - "io" - "io/ioutil" + "fmt" "log" "net/http" "net/url" + "sync" + "sync/atomic" + "time" - "github.com/keroserene/go-webrtc" + "github.com/pion/webrtc/v4" + utls "github.com/refraction-networking/utls" + + utlsutil "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil/utls" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/certs" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/event" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/nat" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/util" ) const ( - BrokerError503 string = "No snowflake proxies currently available." - BrokerError400 string = "You sent an invalid offer in the request." 
- BrokerErrorUnexpected string = "Unexpected error, no answer." - readLimit = 100000 //Maximum number of bytes to be read from an HTTP response + brokerErrorUnexpected string = "Unexpected error, no answer." + rendezvousErrorMsg string = "One of SQS, AmpCache, or Domain Fronting rendezvous methods must be used." + + readLimit = 100000 //Maximum number of bytes to be read from an HTTP response ) -// Signalling Channel to the Broker. +// RendezvousMethod represents a way of communicating with the broker: sending +// an encoded client poll request (SDP offer) and receiving an encoded client +// poll response (SDP answer) in return. RendezvousMethod is used by +// BrokerChannel, which is in charge of encoding and decoding, and all other +// tasks that are independent of the rendezvous method. +type RendezvousMethod interface { + Exchange([]byte) ([]byte, error) +} + +// BrokerChannel uses a RendezvousMethod to communicate with the Snowflake broker. +// The BrokerChannel is responsible for encoding and decoding SDP offers and answers; +// RendezvousMethod is responsible for the exchange of encoded information. type BrokerChannel struct { - // The Host header to put in the HTTP request (optional and may be - // different from the host name in URL). - Host string - url *url.URL - transport http.RoundTripper // Used to make all requests. + Rendezvous RendezvousMethod + keepLocalAddresses bool + natType string + lock sync.Mutex + BridgeFingerprint string } // We make a copy of DefaultTransport because we want the default Dial // and TLSHandshakeTimeout settings. But we want to disable the default // ProxyFromEnvironment setting. -func CreateBrokerTransport() http.RoundTripper { - transport := http.DefaultTransport.(*http.Transport) +func createBrokerTransport(proxy *url.URL) http.RoundTripper { + tlsConfig := &tls.Config{ + RootCAs: certs.GetRootCAs(), + } + transport := &http.Transport{TLSClientConfig: tlsConfig} transport.Proxy = nil + if proxy != nil { + transport.Proxy = http.ProxyURL(proxy) + } + transport.ResponseHeaderTimeout = 15 * time.Second return transport } -// Construct a new BrokerChannel, where: -// |broker| is the full URL of the facilitating program which assigns proxies -// to clients, and |front| is the option fronting domain. -func NewBrokerChannel(broker string, front string, transport http.RoundTripper) *BrokerChannel { - targetURL, err := url.Parse(broker) - if nil != err { - return nil - } - log.Println("Rendezvous using Broker at:", broker) - bc := new(BrokerChannel) - bc.url = targetURL - if "" != front { // Optional front domain. - log.Println("Domain fronting using:", front) - bc.Host = bc.url.Host - bc.url.Host = front +func newBrokerChannelFromConfig(config ClientConfig) (*BrokerChannel, error) { + log.Println("Rendezvous using Broker at:", config.BrokerURL) + + if len(config.FrontDomains) != 0 { + log.Printf("Domain fronting using a randomly selected domain from: %v", config.FrontDomains) } - bc.transport = transport - return bc -} + brokerTransport := createBrokerTransport(config.CommunicationProxy) -func limitedRead(r io.Reader, limit int64) ([]byte, error) { - p, err := ioutil.ReadAll(&io.LimitedReader{R: r, N: limit + 1}) - if err != nil { - return p, err - } else if int64(len(p)) == limit+1 { - return p[0:limit], io.ErrUnexpectedEOF - } - return p, err -} - -// Roundtrip HTTP POST using WebRTC SessionDescriptions. -// -// Send an SDP offer to the broker, which assigns a proxy and responds -// with an SDP answer from a designated remote WebRTC peer. 
-func (bc *BrokerChannel) Negotiate(offer *webrtc.SessionDescription) ( - *webrtc.SessionDescription, error) { - log.Println("Negotiating via BrokerChannel...\nTarget URL: ", - bc.Host, "\nFront URL: ", bc.url.Host) - data := bytes.NewReader([]byte(offer.Serialize())) - // Suffix with broker's client registration handler. - clientURL := bc.url.ResolveReference(&url.URL{Path: "client"}) - request, err := http.NewRequest("POST", clientURL.String(), data) - if nil != err { - return nil, err - } - if "" != bc.Host { // Set true host if necessary. - request.Host = bc.Host - } - resp, err := bc.transport.RoundTrip(request) - if nil != err { - return nil, err - } - defer resp.Body.Close() - log.Printf("BrokerChannel Response:\n%s\n\n", resp.Status) - - switch resp.StatusCode { - case http.StatusOK: - body, err := limitedRead(resp.Body, readLimit) - if nil != err { - return nil, err + if config.UTLSClientID != "" { + utlsClientHelloID, err := utlsutil.NameToUTLSID(config.UTLSClientID) + if err != nil { + return nil, fmt.Errorf("unable to create broker channel: %w", err) } - answer := webrtc.DeserializeSessionDescription(string(body)) - return answer, nil + utlsConfig := &utls.Config{ + RootCAs: certs.GetRootCAs(), + } + brokerTransport = utlsutil.NewUTLSHTTPRoundTripperWithProxy(utlsClientHelloID, utlsConfig, brokerTransport, + config.UTLSRemoveSNI, config.CommunicationProxy) + } - case http.StatusServiceUnavailable: - return nil, errors.New(BrokerError503) - case http.StatusBadRequest: - return nil, errors.New(BrokerError400) - default: - return nil, errors.New(BrokerErrorUnexpected) + var rendezvous RendezvousMethod + var err error + if config.SQSQueueURL != "" { + if config.AmpCacheURL != "" || config.BrokerURL != "" { + log.Fatalln("Multiple rendezvous methods specified. " + rendezvousErrorMsg) + } + if config.SQSCredsStr == "" { + log.Fatalln("sqscreds must be specified to use SQS rendezvous method.") + } + log.Println("Through SQS queue at:", config.SQSQueueURL) + rendezvous, err = newSQSRendezvous(config.SQSQueueURL, config.SQSCredsStr, brokerTransport) + } else if config.AmpCacheURL != "" && config.BrokerURL != "" { + log.Println("Through AMP cache at:", config.AmpCacheURL) + rendezvous, err = newAMPCacheRendezvous( + config.BrokerURL, config.AmpCacheURL, config.FrontDomains, + brokerTransport) + } else if config.BrokerURL != "" { + rendezvous, err = newHTTPRendezvous( + config.BrokerURL, config.FrontDomains, brokerTransport) + } else { + log.Fatalln("No rendezvous method was specified. " + rendezvousErrorMsg) + } + if err != nil { + return nil, err + } + + return &BrokerChannel{ + Rendezvous: rendezvous, + keepLocalAddresses: config.KeepLocalAddresses, + natType: nat.NATUnknown, + BridgeFingerprint: config.BridgeFingerprint, + }, nil +} + +// Negotiate uses a RendezvousMethod to send the client's WebRTC SDP offer +// and receive a snowflake proxy WebRTC SDP answer in return. +func (bc *BrokerChannel) Negotiate( + offer *webrtc.SessionDescription, + natTypeToSend string, +) ( + *webrtc.SessionDescription, error, +) { + encReq, err := preparePollRequest(offer, natTypeToSend, bc.BridgeFingerprint) + if err != nil { + return nil, err + } + + // Do the exchange using our RendezvousMethod. + encResp, err := bc.Rendezvous.Exchange(encReq) + if err != nil { + return nil, err + } + log.Printf("Received answer: %s", string(encResp)) + + // Decode the client poll response. 
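+	// It carries either a serialized SDP answer or an error string
+	// from the broker.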
+ resp, err := messages.DecodeClientPollResponse(encResp) + if err != nil { + return nil, err + } + if resp.Error != "" { + return nil, errors.New(resp.Error) + } + return util.DeserializeSessionDescription(resp.Answer) +} + +// Pure function +func preparePollRequest( + offer *webrtc.SessionDescription, + natType string, + bridgeFingerprint string, +) (encReq []byte, err error) { + offerSDP, err := util.SerializeSessionDescription(offer) + if err != nil { + return nil, err + } + req := &messages.ClientPollRequest{ + Offer: offerSDP, + NAT: natType, + Fingerprint: bridgeFingerprint, + } + encReq, err = req.EncodeClientPollRequest() + return +} + +// SetNATType sets the NAT type of the client so we can send it to the WebRTC broker. +func (bc *BrokerChannel) SetNATType(NATType string) { + bc.lock.Lock() + bc.natType = NATType + bc.lock.Unlock() + log.Printf("NAT Type: %s", NATType) +} + +func (bc *BrokerChannel) GetNATType() string { + bc.lock.Lock() + defer bc.lock.Unlock() + return bc.natType +} + +// All of the methods of the struct are thread-safe. +type NATPolicy struct { + assumedUnrestrictedNATAndFailedToConnect atomic.Bool +} + +// When our NAT type is unknown, we want to try to connect to a +// restricted / unknown proxy initially +// to offload the unrestricted ones. +// So, instead of always sending the actual NAT type, +// we should use this function to determine the NAT type to send. +// +// This is useful when our STUN servers are blocked or don't support +// the NAT discovery feature, or if they're just slow. +func (p *NATPolicy) NATTypeToSend(actualNatType string) string { + if !p.assumedUnrestrictedNATAndFailedToConnect.Load() && + actualNatType == nat.NATUnknown { + // If our NAT type is unknown, and we haven't failed to connect + // with a spoofed NAT type yet, then spoof a NATUnrestricted + // type. + return nat.NATUnrestricted + } else { + // In all other cases, do not spoof, and just return our actual + // NAT type (even if it is NATUnknown). + return actualNatType } } -// Implements the |Tongue| interface to catch snowflakes, using BrokerChannel. +// This function must be called whenever a connection with a proxy succeeds, +// because the connection outcome tells us about NAT compatibility +// between the proxy and us. +func (p *NATPolicy) Success(actualNATType, sentNATType string) { + // Yes, right now this does nothing but log. + if actualNATType != sentNATType { + log.Printf( + "Connected to a proxy by using a spoofed NAT type \"%v\"! "+ + "Our actual NAT type was \"%v\"", + sentNATType, + actualNATType, + ) + } +} + +// This function must be called whenever a connection with a proxy fails, +// because the connection outcome tells us about NAT compatibility +// between the proxy and us. +func (p *NATPolicy) Failure(actualNATType, sentNATType string) { + if actualNATType == nat.NATUnknown && sentNATType == nat.NATUnrestricted { + log.Printf( + "Tried to connect to a restricted proxy while our NAT type "+ + "is \"%v\", and failed. Let's not do that again.", + actualNATType, + ) + p.assumedUnrestrictedNATAndFailedToConnect.Store(true) + } +} + +// WebRTCDialer implements the |Tongue| interface to catch snowflakes, using BrokerChannel. type WebRTCDialer struct { *BrokerChannel + // Can be `nil`, in which case we won't apply special logic, + // and simply always send the current NAT type instead. 
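+	//
+	// The intended call pattern is roughly the following (a sketch;
+	// connectToProxy is a hypothetical helper, not part of this package):
+	//
+	//	toSend := natPolicy.NATTypeToSend(actualNATType)
+	//	if err := connectToProxy(toSend); err != nil {
+	//		natPolicy.Failure(actualNATType, toSend)
+	//	} else {
+	//		natPolicy.Success(actualNATType, toSend)
+	//	}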
+ natPolicy *NATPolicy webrtcConfig *webrtc.Configuration + max int + + eventLogger event.SnowflakeEventReceiver + proxy *url.URL } -func NewWebRTCDialer( - broker *BrokerChannel, iceServers IceServerList) *WebRTCDialer { - config := webrtc.NewConfiguration(iceServers...) - if nil == config { - log.Println("Unable to prepare WebRTC configuration.") - return nil +// Deprecated: Use NewWebRTCDialerWithNatPolicyAndEventsAndProxy instead +func NewWebRTCDialer(broker *BrokerChannel, iceServers []webrtc.ICEServer, max int) *WebRTCDialer { + return NewWebRTCDialerWithNatPolicyAndEventsAndProxy( + broker, nil, iceServers, max, nil, nil, + ) +} + +// Deprecated: Use NewWebRTCDialerWithNatPolicyAndEventsAndProxy instead +func NewWebRTCDialerWithEvents(broker *BrokerChannel, iceServers []webrtc.ICEServer, max int, eventLogger event.SnowflakeEventReceiver) *WebRTCDialer { + return NewWebRTCDialerWithNatPolicyAndEventsAndProxy( + broker, nil, iceServers, max, eventLogger, nil, + ) +} + +// Deprecated: Use NewWebRTCDialerWithNatPolicyAndEventsAndProxy instead +func NewWebRTCDialerWithEventsAndProxy(broker *BrokerChannel, iceServers []webrtc.ICEServer, max int, + eventLogger event.SnowflakeEventReceiver, proxy *url.URL, +) *WebRTCDialer { + return NewWebRTCDialerWithNatPolicyAndEventsAndProxy( + broker, + nil, + iceServers, + max, + eventLogger, + proxy, + ) +} + +// NewWebRTCDialerWithNatPolicyAndEventsAndProxy constructs a new WebRTCDialer. +func NewWebRTCDialerWithNatPolicyAndEventsAndProxy( + broker *BrokerChannel, + natPolicy *NATPolicy, + iceServers []webrtc.ICEServer, + max int, + eventLogger event.SnowflakeEventReceiver, + proxy *url.URL, +) *WebRTCDialer { + config := webrtc.Configuration{ + ICEServers: iceServers, } + return &WebRTCDialer{ BrokerChannel: broker, - webrtcConfig: config, + natPolicy: natPolicy, + webrtcConfig: &config, + max: max, + + eventLogger: eventLogger, + proxy: proxy, } } -// Initialize a WebRTC Connection by signaling through the broker. -func (w WebRTCDialer) Catch() (Snowflake, error) { - if nil == w.BrokerChannel { - return nil, errors.New("Cannot Dial WebRTC without a BrokerChannel.") - } - // TODO: [#3] Fetch ICE server information from Broker. - // TODO: [#18] Consider TURN servers here too. - connection := NewWebRTCPeer(w.webrtcConfig, w.BrokerChannel) - err := connection.Connect() - return connection, err +// Catch initializes a WebRTC Connection by signaling through the BrokerChannel. +func (w WebRTCDialer) Catch() (*WebRTCPeer, error) { + // TODO: [#25591] Fetch ICE server information from Broker. + // TODO: [#25596] Consider TURN servers here too. + return NewWebRTCPeerWithNatPolicyAndEventsAndProxy( + w.webrtcConfig, w.BrokerChannel, w.natPolicy, w.eventLogger, w.proxy, + ) +} + +// GetMax returns the maximum number of snowflakes to collect. +func (w WebRTCDialer) GetMax() int { + return w.max } diff --git a/client/lib/rendezvous_ampcache.go b/client/lib/rendezvous_ampcache.go new file mode 100644 index 0000000..f6e1d0c --- /dev/null +++ b/client/lib/rendezvous_ampcache.go @@ -0,0 +1,127 @@ +package snowflake_client + +import ( + "errors" + "io" + "log" + "math/rand" + "net/http" + "net/url" + "time" + + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/amp" +) + +// ampCacheRendezvous is a RendezvousMethod that communicates with the +// .../amp/client route of the broker, optionally over an AMP cache proxy, and +// with optional domain fronting. 
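+//
+// When a cache URL is configured, the GET request is rewritten into the
+// cache's namespace, so the fetched URL looks roughly like this (hosts are
+// illustrative only):
+//
+//	https://amp-cache.example/c/s/broker.example/amp/client/<encoded poll request>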
+type ampCacheRendezvous struct {
+	brokerURL *url.URL
+	cacheURL  *url.URL          // Optional AMP cache URL.
+	fronts    []string          // Optional front domains to replace url.Host in requests.
+	transport http.RoundTripper // Used to make all requests.
+}
+
+// newAMPCacheRendezvous creates a new ampCacheRendezvous that contacts the
+// broker at the given URL, optionally proxying through an AMP cache, and with
+// an optional front domain. transport is the http.RoundTripper used to make all
+// requests.
+func newAMPCacheRendezvous(broker, cache string, fronts []string, transport http.RoundTripper) (*ampCacheRendezvous, error) {
+	brokerURL, err := url.Parse(broker)
+	if err != nil {
+		return nil, err
+	}
+	var cacheURL *url.URL
+	if cache != "" {
+		var err error
+		cacheURL, err = url.Parse(cache)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return &ampCacheRendezvous{
+		brokerURL: brokerURL,
+		cacheURL:  cacheURL,
+		fronts:    fronts,
+		transport: transport,
+	}, nil
+}
+
+func (r *ampCacheRendezvous) Exchange(encPollReq []byte) ([]byte, error) {
+	log.Println("Negotiating via AMP cache rendezvous...")
+	log.Println("Broker URL:", r.brokerURL)
+	log.Println("AMP cache URL:", r.cacheURL)
+
+	// We cannot POST a body through an AMP cache, so instead we GET and
+	// encode the client poll request message into the URL.
+	reqURL := r.brokerURL.ResolveReference(&url.URL{
+		Path: "amp/client/" + amp.EncodePath(encPollReq),
+	})
+
+	if r.cacheURL != nil {
+		// Rewrite reqURL to its AMP cache version.
+		var err error
+		reqURL, err = amp.CacheURL(reqURL, r.cacheURL, "c")
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	req, err := http.NewRequest("GET", reqURL.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(r.fronts) != 0 {
+		// Do domain fronting. Replace the domain in the URL with a randomly
+		// selected front, and store the original domain in the HTTP Host header.
+		rand.Seed(time.Now().UnixNano())
+		front := r.fronts[rand.Intn(len(r.fronts))]
+		log.Println("Front domain:", front)
+		req.Host = req.URL.Host
+		req.URL.Host = front
+	}
+
+	resp, err := r.transport.RoundTrip(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	log.Printf("AMP cache rendezvous response: %s", resp.Status)
+	if resp.StatusCode != http.StatusOK {
+		// A non-200 status indicates an error:
+		// * If the broker returns a page with invalid AMP, then the AMP
+		//   cache returns a redirect that would bypass the cache.
+		// * If the broker returns a 5xx status, the AMP cache
+		//   translates it to a 404.
+		// https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/#redirect-%26-error-handling
+		return nil, errors.New(brokerErrorUnexpected)
+	}
+	if _, err := resp.Location(); err == nil {
+		// The Google AMP Cache may return a "silent redirect" with
+		// status 200, a Location header set, and a JavaScript redirect
+		// in the body. The redirect points directly at the origin
+		// server for the request (bypassing the AMP cache). We do not
+		// follow redirects nor execute JavaScript, but in any case we
+		// cannot extract information from this response and can only
+		// treat it as an error.
+		return nil, errors.New(brokerErrorUnexpected)
+	}
+
+	lr := io.LimitReader(resp.Body, readLimit+1)
+	dec, err := amp.NewArmorDecoder(lr)
+	if err != nil {
+		return nil, err
+	}
+	encPollResp, err := io.ReadAll(dec)
+	if err != nil {
+		return nil, err
+	}
+	if lr.(*io.LimitedReader).N == 0 {
+		// We hit readLimit while decoding AMP armor, that's an error. 
+		return nil, io.ErrUnexpectedEOF
+	}
+
+	return encPollResp, err
+}
diff --git a/client/lib/rendezvous_http.go b/client/lib/rendezvous_http.go
new file mode 100644
index 0000000..2f7dd9e
--- /dev/null
+++ b/client/lib/rendezvous_http.go
@@ -0,0 +1,80 @@
+package snowflake_client
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"log"
+	"math/rand"
+	"net/http"
+	"net/url"
+	"time"
+)
+
+// httpRendezvous is a RendezvousMethod that communicates with the .../client
+// route of the broker over HTTP or HTTPS, with optional domain fronting.
+type httpRendezvous struct {
+	brokerURL *url.URL
+	fronts    []string          // Optional front domains to replace url.Host in requests.
+	transport http.RoundTripper // Used to make all requests.
+}
+
+// newHTTPRendezvous creates a new httpRendezvous that contacts the broker at
+// the given URL, with an optional front domain. transport is the
+// http.RoundTripper used to make all requests.
+func newHTTPRendezvous(broker string, fronts []string, transport http.RoundTripper) (*httpRendezvous, error) {
+	brokerURL, err := url.Parse(broker)
+	if err != nil {
+		return nil, err
+	}
+	return &httpRendezvous{
+		brokerURL: brokerURL,
+		fronts:    fronts,
+		transport: transport,
+	}, nil
+}
+
+func (r *httpRendezvous) Exchange(encPollReq []byte) ([]byte, error) {
+	log.Println("Negotiating via HTTP rendezvous...")
+	log.Println("Target URL: ", r.brokerURL.Host)
+
+	// Suffix the path with the broker's client registration handler.
+	reqURL := r.brokerURL.ResolveReference(&url.URL{Path: "client"})
+	req, err := http.NewRequest("POST", reqURL.String(), bytes.NewReader(encPollReq))
+	if err != nil {
+		return nil, err
+	}
+
+	if len(r.fronts) != 0 {
+		// Do domain fronting. Replace the domain in the URL with a randomly
+		// selected front, and store the original domain in the HTTP Host header. 
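+		// (The explicit Seed call below matters only before Go 1.20;
+		// newer runtimes seed the global math/rand source automatically.)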
+ rand.Seed(time.Now().UnixNano()) + front := r.fronts[rand.Intn(len(r.fronts))] + log.Println("Front URL: ", front) + req.Host = req.URL.Host + req.URL.Host = front + } + + resp, err := r.transport.RoundTrip(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + log.Printf("HTTP rendezvous response: %s", resp.Status) + if resp.StatusCode != http.StatusOK { + return nil, errors.New(brokerErrorUnexpected) + } + + return limitedRead(resp.Body, readLimit) +} + +func limitedRead(r io.Reader, limit int64) ([]byte, error) { + p, err := io.ReadAll(&io.LimitedReader{R: r, N: limit + 1}) + if err != nil { + return p, err + } else if int64(len(p)) == limit+1 { + return p[0:limit], io.ErrUnexpectedEOF + } + return p, err +} diff --git a/client/lib/rendezvous_sqs.go b/client/lib/rendezvous_sqs.go new file mode 100644 index 0000000..6b1c073 --- /dev/null +++ b/client/lib/rendezvous_sqs.go @@ -0,0 +1,143 @@ +package snowflake_client + +import ( + "context" + "crypto/rand" + "encoding/hex" + "log" + "net/http" + "net/url" + "regexp" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/sqs" + "github.com/aws/aws-sdk-go-v2/service/sqs/types" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/sqsclient" + sqscreds "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/sqscreds/lib" +) + +type sqsRendezvous struct { + transport http.RoundTripper + sqsClient sqsclient.SQSClient + sqsURL *url.URL + timeout time.Duration + numRetries int +} + +func newSQSRendezvous(sqsQueue string, sqsCredsStr string, transport http.RoundTripper) (*sqsRendezvous, error) { + sqsURL, err := url.Parse(sqsQueue) + if err != nil { + return nil, err + } + + sqsCreds, err := sqscreds.AwsCredsFromBase64(sqsCredsStr) + if err != nil { + return nil, err + } + + queueURL := sqsURL.String() + hostName := sqsURL.Hostname() + + regionRegex, _ := regexp.Compile(`^sqs\.([\w-]+)\.amazonaws\.com$`) + res := regionRegex.FindStringSubmatch(hostName) + if len(res) < 2 { + log.Fatal("Could not extract AWS region from SQS URL. 
Ensure that the SQS Queue URL provided is valid.")
+	}
+	region := res[1]
+	cfg, err := config.LoadDefaultConfig(context.TODO(),
+		config.WithCredentialsProvider(
+			credentials.NewStaticCredentialsProvider(sqsCreds.AwsAccessKeyId, sqsCreds.AwsSecretKey, ""),
+		),
+		config.WithRegion(region),
+	)
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := sqs.NewFromConfig(cfg)
+
+	log.Println("Queue URL: ", queueURL)
+
+	return &sqsRendezvous{
+		transport:  transport,
+		sqsClient:  client,
+		sqsURL:     sqsURL,
+		timeout:    time.Second,
+		numRetries: 5,
+	}, nil
+}
+
+func (r *sqsRendezvous) Exchange(encPollReq []byte) ([]byte, error) {
+	log.Println("Negotiating via SQS Queue rendezvous...")
+
+	var id [8]byte
+	_, err := rand.Read(id[:])
+	if err != nil {
+		return nil, err
+	}
+	sqsClientID := hex.EncodeToString(id[:])
+	log.Println("SQS Client ID for rendezvous: " + sqsClientID)
+
+	_, err = r.sqsClient.SendMessage(context.TODO(), &sqs.SendMessageInput{
+		MessageAttributes: map[string]types.MessageAttributeValue{
+			"ClientID": {
+				DataType:    aws.String("String"),
+				StringValue: aws.String(sqsClientID),
+			},
+		},
+		MessageBody: aws.String(string(encPollReq)),
+		QueueUrl:    aws.String(r.sqsURL.String()),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	time.Sleep(r.timeout) // wait for client queue to be created by the broker
+
+	var responseQueueURL *string
+	for i := 0; i < r.numRetries; i++ {
+		// The SQS queue corresponding to the client where the SDP Answer will be placed
+		// may not be created yet. We will retry up to 5 times before we error out.
+		var res *sqs.GetQueueUrlOutput
+		res, err = r.sqsClient.GetQueueUrl(context.TODO(), &sqs.GetQueueUrlInput{
+			QueueName: aws.String("snowflake-client-" + sqsClientID),
+		})
+		if err != nil {
+			log.Println(err)
+			log.Printf("Attempt %d of %d to retrieve URL of response SQS queue failed.\n", i+1, r.numRetries)
+			time.Sleep(r.timeout)
+		} else {
+			responseQueueURL = res.QueueUrl
+			break
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	var answer string
+	for i := 0; i < r.numRetries; i++ {
+		// Waiting for SDP Answer from proxy to be placed in SQS queue.
+		// We will retry up to 5 times before we error out.
+		res, err := r.sqsClient.ReceiveMessage(context.TODO(), &sqs.ReceiveMessageInput{
+			QueueUrl:            responseQueueURL,
+			MaxNumberOfMessages: 1,
+			WaitTimeSeconds:     20,
+		})
+		if err != nil {
+			return nil, err
+		}
+		if len(res.Messages) == 0 {
+			log.Printf("Attempt %d of %d to receive message from response SQS queue failed. No message found in queue.\n", i+1, r.numRetries)
+			delay := float64(i)/2.0 + 1
+			time.Sleep(time.Duration(delay*1000) * (r.timeout / 1000))
+		} else {
+			answer = *res.Messages[0].Body
+			break
+		}
+	}
+
+	return []byte(answer), nil
+}
diff --git a/client/lib/rendezvous_test.go b/client/lib/rendezvous_test.go
new file mode 100644
index 0000000..16a0cb3
--- /dev/null
+++ b/client/lib/rendezvous_test.go
@@ -0,0 +1,442 @@
+package snowflake_client
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"testing"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/sqs"
+	"github.com/aws/aws-sdk-go-v2/service/sqs/types"
+	"github.com/golang/mock/gomock"
+	"github.com/pion/webrtc/v4"
+	. 
"github.com/smartystreets/goconvey/convey" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/amp" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/nat" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/sqsclient" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/util" +) + +// mockTransport's RoundTrip method returns a response with a fake status and +// body. +type mockTransport struct { + statusCode int + body []byte +} + +func (t *mockTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return &http.Response{ + Status: fmt.Sprintf("%d %s", t.statusCode, http.StatusText(t.statusCode)), + StatusCode: t.statusCode, + Body: io.NopCloser(bytes.NewReader(t.body)), + }, nil +} + +// errorTransport's RoundTrip method returns an error. +type errorTransport struct { + err error +} + +func (t errorTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return nil, t.err +} + +// makeEncPollReq returns an encoded client poll request containing a given +// offer. +func makeEncPollReq(offer string) []byte { + encPollReq, err := (&messages.ClientPollRequest{ + Offer: offer, + NAT: nat.NATUnknown, + }).EncodeClientPollRequest() + if err != nil { + panic(err) + } + return encPollReq +} + +// makeEncPollResp returns an encoded client poll response with given answer and +// error strings. +func makeEncPollResp(answer, errorStr string) []byte { + encPollResp, err := (&messages.ClientPollResponse{ + Answer: answer, + Error: errorStr, + }).EncodePollResponse() + if err != nil { + panic(err) + } + return encPollResp +} + +var fakeEncPollReq = makeEncPollReq(`{"type":"offer","sdp":"test"}`) + +func TestHTTPRendezvous(t *testing.T) { + Convey("HTTP rendezvous", t, func() { + Convey("Construct httpRendezvous with no front domain", func() { + transport := &mockTransport{http.StatusOK, []byte{}} + rend, err := newHTTPRendezvous("http://test.broker", []string{}, transport) + So(err, ShouldBeNil) + So(rend.brokerURL, ShouldNotBeNil) + So(rend.brokerURL.Host, ShouldResemble, "test.broker") + So(rend.fronts, ShouldEqual, []string{}) + So(rend.transport, ShouldEqual, transport) + }) + + Convey("Construct httpRendezvous *with* front domain", func() { + transport := &mockTransport{http.StatusOK, []byte{}} + rend, err := newHTTPRendezvous("http://test.broker", []string{"front"}, transport) + So(err, ShouldBeNil) + So(rend.brokerURL, ShouldNotBeNil) + So(rend.brokerURL.Host, ShouldResemble, "test.broker") + So(rend.fronts, ShouldContain, "front") + So(rend.transport, ShouldEqual, transport) + }) + + Convey("httpRendezvous.Exchange responds with answer", func() { + fakeEncPollResp := makeEncPollResp( + `{"answer": "{\"type\":\"answer\",\"sdp\":\"fake\"}" }`, + "", + ) + rend, err := newHTTPRendezvous("http://test.broker", []string{}, + &mockTransport{http.StatusOK, fakeEncPollResp}) + So(err, ShouldBeNil) + answer, err := rend.Exchange(fakeEncPollReq) + So(err, ShouldBeNil) + So(answer, ShouldResemble, fakeEncPollResp) + }) + + Convey("httpRendezvous.Exchange responds with no answer", func() { + fakeEncPollResp := makeEncPollResp( + "", + `{"error": "no snowflake proxies currently available"}`, + ) + rend, err := newHTTPRendezvous("http://test.broker", []string{}, + &mockTransport{http.StatusOK, fakeEncPollResp}) + So(err, ShouldBeNil) + answer, err := 
rend.Exchange(fakeEncPollReq) + So(err, ShouldBeNil) + So(answer, ShouldResemble, fakeEncPollResp) + }) + + Convey("httpRendezvous.Exchange fails with unexpected HTTP status code", func() { + rend, err := newHTTPRendezvous("http://test.broker", []string{}, + &mockTransport{http.StatusInternalServerError, []byte{}}) + So(err, ShouldBeNil) + answer, err := rend.Exchange(fakeEncPollReq) + So(err, ShouldNotBeNil) + So(answer, ShouldBeNil) + So(err.Error(), ShouldResemble, brokerErrorUnexpected) + }) + + Convey("httpRendezvous.Exchange fails with error", func() { + transportErr := errors.New("error") + rend, err := newHTTPRendezvous("http://test.broker", []string{}, + &errorTransport{err: transportErr}) + So(err, ShouldBeNil) + answer, err := rend.Exchange(fakeEncPollReq) + So(err, ShouldEqual, transportErr) + So(answer, ShouldBeNil) + }) + + Convey("httpRendezvous.Exchange fails with large read", func() { + rend, err := newHTTPRendezvous("http://test.broker", []string{}, + &mockTransport{http.StatusOK, make([]byte, readLimit+1)}) + So(err, ShouldBeNil) + _, err = rend.Exchange(fakeEncPollReq) + So(err, ShouldEqual, io.ErrUnexpectedEOF) + }) + }) +} + +func ampArmorEncode(p []byte) []byte { + var buf bytes.Buffer + enc, err := amp.NewArmorEncoder(&buf) + if err != nil { + panic(err) + } + _, err = enc.Write(p) + if err != nil { + panic(err) + } + err = enc.Close() + if err != nil { + panic(err) + } + return buf.Bytes() +} + +func TestAMPCacheRendezvous(t *testing.T) { + Convey("AMP cache rendezvous", t, func() { + Convey("Construct ampCacheRendezvous with no cache and no front domain", func() { + transport := &mockTransport{http.StatusOK, []byte{}} + rend, err := newAMPCacheRendezvous("http://test.broker", "", []string{}, transport) + So(err, ShouldBeNil) + So(rend.brokerURL, ShouldNotBeNil) + So(rend.brokerURL.String(), ShouldResemble, "http://test.broker") + So(rend.cacheURL, ShouldBeNil) + So(rend.fronts, ShouldResemble, []string{}) + So(rend.transport, ShouldEqual, transport) + }) + + Convey("Construct ampCacheRendezvous with cache and no front domain", func() { + transport := &mockTransport{http.StatusOK, []byte{}} + rend, err := newAMPCacheRendezvous("http://test.broker", "https://amp.cache/", []string{}, transport) + So(err, ShouldBeNil) + So(rend.brokerURL, ShouldNotBeNil) + So(rend.brokerURL.String(), ShouldResemble, "http://test.broker") + So(rend.cacheURL, ShouldNotBeNil) + So(rend.cacheURL.String(), ShouldResemble, "https://amp.cache/") + So(rend.fronts, ShouldResemble, []string{}) + So(rend.transport, ShouldEqual, transport) + }) + + Convey("Construct ampCacheRendezvous with no cache and front domain", func() { + transport := &mockTransport{http.StatusOK, []byte{}} + rend, err := newAMPCacheRendezvous("http://test.broker", "", []string{"front"}, transport) + So(err, ShouldBeNil) + So(rend.brokerURL, ShouldNotBeNil) + So(rend.brokerURL.String(), ShouldResemble, "http://test.broker") + So(rend.cacheURL, ShouldBeNil) + So(rend.fronts, ShouldContain, "front") + So(rend.transport, ShouldEqual, transport) + }) + + Convey("Construct ampCacheRendezvous with cache and front domain", func() { + transport := &mockTransport{http.StatusOK, []byte{}} + rend, err := newAMPCacheRendezvous("http://test.broker", "https://amp.cache/", []string{"front"}, transport) + So(err, ShouldBeNil) + So(rend.brokerURL, ShouldNotBeNil) + So(rend.brokerURL.String(), ShouldResemble, "http://test.broker") + So(rend.cacheURL, ShouldNotBeNil) + So(rend.cacheURL.String(), ShouldResemble, "https://amp.cache/") + 
So(rend.fronts, ShouldContain, "front") + So(rend.transport, ShouldEqual, transport) + }) + + Convey("ampCacheRendezvous.Exchange responds with answer", func() { + fakeEncPollResp := makeEncPollResp( + `{"answer": "{\"type\":\"answer\",\"sdp\":\"fake\"}" }`, + "", + ) + rend, err := newAMPCacheRendezvous("http://test.broker", "", []string{}, + &mockTransport{http.StatusOK, ampArmorEncode(fakeEncPollResp)}) + So(err, ShouldBeNil) + answer, err := rend.Exchange(fakeEncPollReq) + So(err, ShouldBeNil) + So(answer, ShouldResemble, fakeEncPollResp) + }) + + Convey("ampCacheRendezvous.Exchange responds with no answer", func() { + fakeEncPollResp := makeEncPollResp( + "", + `{"error": "no snowflake proxies currently available"}`, + ) + rend, err := newAMPCacheRendezvous("http://test.broker", "", []string{}, + &mockTransport{http.StatusOK, ampArmorEncode(fakeEncPollResp)}) + So(err, ShouldBeNil) + answer, err := rend.Exchange(fakeEncPollReq) + So(err, ShouldBeNil) + So(answer, ShouldResemble, fakeEncPollResp) + }) + + Convey("ampCacheRendezvous.Exchange fails with unexpected HTTP status code", func() { + rend, err := newAMPCacheRendezvous("http://test.broker", "", []string{}, + &mockTransport{http.StatusInternalServerError, []byte{}}) + So(err, ShouldBeNil) + answer, err := rend.Exchange(fakeEncPollReq) + So(err, ShouldNotBeNil) + So(answer, ShouldBeNil) + So(err.Error(), ShouldResemble, brokerErrorUnexpected) + }) + + Convey("ampCacheRendezvous.Exchange fails with error", func() { + transportErr := errors.New("error") + rend, err := newAMPCacheRendezvous("http://test.broker", "", []string{}, + &errorTransport{err: transportErr}) + So(err, ShouldBeNil) + answer, err := rend.Exchange(fakeEncPollReq) + So(err, ShouldEqual, transportErr) + So(answer, ShouldBeNil) + }) + + Convey("ampCacheRendezvous.Exchange fails with large read", func() { + // readLimit should apply to the raw HTTP body, not the + // encoded bytes. Encode readLimit bytes—the encoded + // size will be larger—and try to read the body. It + // should fail. + rend, err := newAMPCacheRendezvous("http://test.broker", "", []string{}, + &mockTransport{http.StatusOK, ampArmorEncode(make([]byte, readLimit))}) + So(err, ShouldBeNil) + _, err = rend.Exchange(fakeEncPollReq) + // We may get io.ErrUnexpectedEOF here, or something + // like "missing tag". 
+ So(err, ShouldNotBeNil) + }) + }) +} + +func TestSQSRendezvous(t *testing.T) { + Convey("SQS Rendezvous", t, func() { + var sendMessageInput *sqs.SendMessageInput + var getQueueUrlInput *sqs.GetQueueUrlInput + + Convey("Construct SQS queue rendezvous", func() { + transport := &mockTransport{http.StatusOK, []byte{}} + rend, err := newSQSRendezvous("https://sqs.us-east-1.amazonaws.com", "eyJhd3MtYWNjZXNzLWtleS1pZCI6InRlc3QtYWNjZXNzLWtleSIsImF3cy1zZWNyZXQta2V5IjoidGVzdC1zZWNyZXQta2V5In0=", transport) + + So(err, ShouldBeNil) + So(rend.sqsClient, ShouldNotBeNil) + So(rend.sqsURL, ShouldNotBeNil) + So(rend.sqsURL.String(), ShouldResemble, "https://sqs.us-east-1.amazonaws.com") + }) + + ctrl := gomock.NewController(t) + mockSqsClient := sqsclient.NewMockSQSClient(ctrl) + responseQueueURL := "https://sqs.us-east-1.amazonaws.com/testing" + sqsUrl, _ := url.Parse("https://sqs.us-east-1.amazonaws.com/broker") + fakeEncPollResp := makeEncPollResp( + `{"answer": "{\"type\":\"answer\",\"sdp\":\"fake\"}" }`, + "", + ) + sqsRendezvous := sqsRendezvous{ + transport: &mockTransport{http.StatusOK, []byte{}}, + sqsClient: mockSqsClient, + sqsURL: sqsUrl, + timeout: 0, + numRetries: 5, + } + + Convey("sqsRendezvous.Exchange responds with answer", func() { + sqsClientId := "" + mockSqsClient.EXPECT().SendMessage(gomock.Any(), gomock.AssignableToTypeOf(sendMessageInput)).Do(func(ctx interface{}, input *sqs.SendMessageInput, optFns ...interface{}) { + So(*input.MessageBody, ShouldEqual, string(fakeEncPollResp)) + So(*input.QueueUrl, ShouldEqual, sqsUrl.String()) + sqsClientId = *input.MessageAttributes["ClientID"].StringValue + }) + mockSqsClient.EXPECT().GetQueueUrl(gomock.Any(), gomock.AssignableToTypeOf(getQueueUrlInput)).DoAndReturn(func(ctx interface{}, input *sqs.GetQueueUrlInput, optFns ...interface{}) (*sqs.GetQueueUrlOutput, error) { + So(*input.QueueName, ShouldEqual, "snowflake-client-"+sqsClientId) + return &sqs.GetQueueUrlOutput{ + QueueUrl: aws.String(responseQueueURL), + }, nil + }) + mockSqsClient.EXPECT().ReceiveMessage(gomock.Any(), gomock.Eq(&sqs.ReceiveMessageInput{ + QueueUrl: &responseQueueURL, + MaxNumberOfMessages: 1, + WaitTimeSeconds: 20, + })).Return(&sqs.ReceiveMessageOutput{ + Messages: []types.Message{{Body: aws.String("answer")}}, + }, nil) + + answer, err := sqsRendezvous.Exchange(fakeEncPollResp) + + So(answer, ShouldEqual, []byte("answer")) + So(err, ShouldBeNil) + }) + + Convey("sqsRendezvous.Exchange cannot get queue url", func() { + sqsClientId := "" + mockSqsClient.EXPECT().SendMessage(gomock.Any(), gomock.AssignableToTypeOf(sendMessageInput)).Do(func(ctx interface{}, input *sqs.SendMessageInput, optFns ...interface{}) { + So(*input.MessageBody, ShouldEqual, string(fakeEncPollResp)) + So(*input.QueueUrl, ShouldEqual, sqsUrl.String()) + sqsClientId = *input.MessageAttributes["ClientID"].StringValue + }) + for i := 0; i < sqsRendezvous.numRetries; i++ { + mockSqsClient.EXPECT().GetQueueUrl(gomock.Any(), gomock.AssignableToTypeOf(getQueueUrlInput)).DoAndReturn(func(ctx interface{}, input *sqs.GetQueueUrlInput, optFns ...interface{}) (*sqs.GetQueueUrlOutput, error) { + So(*input.QueueName, ShouldEqual, "snowflake-client-"+sqsClientId) + return nil, errors.New("test error") + }) + } + + answer, err := sqsRendezvous.Exchange(fakeEncPollResp) + + So(answer, ShouldBeNil) + So(err, ShouldNotBeNil) + So(err, ShouldEqual, errors.New("test error")) + }) + + Convey("sqsRendezvous.Exchange does not receive answer", func() { + sqsClientId := "" + 
+			mockSqsClient.EXPECT().SendMessage(gomock.Any(), gomock.AssignableToTypeOf(sendMessageInput)).Do(func(ctx interface{}, input *sqs.SendMessageInput, optFns ...interface{}) {
+				So(*input.MessageBody, ShouldEqual, string(fakeEncPollResp))
+				So(*input.QueueUrl, ShouldEqual, sqsUrl.String())
+				sqsClientId = *input.MessageAttributes["ClientID"].StringValue
+			})
+			mockSqsClient.EXPECT().GetQueueUrl(gomock.Any(), gomock.AssignableToTypeOf(getQueueUrlInput)).DoAndReturn(func(ctx interface{}, input *sqs.GetQueueUrlInput, optFns ...interface{}) (*sqs.GetQueueUrlOutput, error) {
+				So(*input.QueueName, ShouldEqual, "snowflake-client-"+sqsClientId)
+				return &sqs.GetQueueUrlOutput{
+					QueueUrl: aws.String(responseQueueURL),
+				}, nil
+			})
+			for i := 0; i < sqsRendezvous.numRetries; i++ {
+				mockSqsClient.EXPECT().ReceiveMessage(gomock.Any(), gomock.Eq(&sqs.ReceiveMessageInput{
+					QueueUrl:            &responseQueueURL,
+					MaxNumberOfMessages: 1,
+					WaitTimeSeconds:     20,
+				})).Return(&sqs.ReceiveMessageOutput{
+					Messages: []types.Message{},
+				}, nil)
+			}
+
+			answer, err := sqsRendezvous.Exchange(fakeEncPollResp)
+
+			So(answer, ShouldResemble, []byte{})
+			So(err, ShouldBeNil)
+		})
+	})
+}
+
+func TestBrokerChannel(t *testing.T) {
+	Convey("Requests a proxy and handles response", t, func() {
+		answerSdp := &webrtc.SessionDescription{
+			Type: webrtc.SDPTypeAnswer,
+			SDP:  "test",
+		}
+		answerSdpStr, _ := util.SerializeSessionDescription(answerSdp)
+		serverResponse, _ := (&messages.ClientPollResponse{
+			Answer: answerSdpStr,
+		}).EncodePollResponse()
+
+		offerSdp := &webrtc.SessionDescription{
+			Type: webrtc.SDPTypeOffer,
+			SDP:  "test",
+		}
+
+		requestBodyChan := make(chan []byte)
+		mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			body, _ := io.ReadAll(r.Body)
+			go func() {
+				requestBodyChan <- body
+			}()
+			w.Write(serverResponse)
+		}))
+		defer mockServer.Close()
+
+		brokerChannel, err := newBrokerChannelFromConfig(ClientConfig{
+			BrokerURL:         mockServer.URL,
+			BridgeFingerprint: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
+		})
+		So(err, ShouldBeNil)
+		brokerChannel.SetNATType(nat.NATRestricted)
+
+		answerSdpReturned, err := brokerChannel.Negotiate(
+			offerSdp,
+			brokerChannel.GetNATType(),
+		)
+		So(err, ShouldBeNil)
+		So(answerSdpReturned, ShouldResemble, answerSdp)
+
+		body := <-requestBodyChan
+		pollReq, err := messages.DecodeClientPollRequest(body)
+		So(err, ShouldBeNil)
+		So(pollReq.Fingerprint, ShouldEqual, "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
+		So(pollReq.NAT, ShouldEqual, nat.NATRestricted)
+		requestSdp, err := util.DeserializeSessionDescription(pollReq.Offer)
+		So(err, ShouldBeNil)
+		So(requestSdp, ShouldResemble, offerSdp)
+	})
+}
diff --git a/client/lib/snowflake.go b/client/lib/snowflake.go
index 900af88..f1a3bad 100644
--- a/client/lib/snowflake.go
+++ b/client/lib/snowflake.go
@@ -1,69 +1,411 @@
-package lib
+/*
+Package snowflake_client implements functionality necessary for a client to establish a connection
+to a server using Snowflake.
+
+Included in the package is a Transport type that implements the Pluggable Transports v2.1 Go API
+specification. To use Snowflake, you must first create a client from a configuration:
+
+	config := snowflake_client.ClientConfig{
+		BrokerURL: "https://snowflake-broker.example.com",
+		FrontDomain: "https://friendlyfrontdomain.net",
+		// ...
+ } + transport, err := snowflake_client.NewSnowflakeClient(config) + if err != nil { + // handle error + } + +The Dial function connects to a Snowflake server: + + conn, err := transport.Dial() + if err != nil { + // handle error + } + defer conn.Close() +*/ +package snowflake_client import ( + "context" "errors" - "io" "log" + "math/rand" "net" - "sync" + "net/url" + "strings" + "time" + + "github.com/pion/ice/v4" + "github.com/pion/webrtc/v4" + "github.com/xtaci/kcp-go/v5" + "github.com/xtaci/smux" + + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/event" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/nat" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/turbotunnel" ) const ( - ReconnectTimeout = 10 - SnowflakeTimeout = 30 + // ReconnectTimeout is the time a Snowflake client will wait before collecting + // more snowflakes. + ReconnectTimeout = 10 * time.Second + // SnowflakeTimeout is the time a Snowflake client will wait before determining that + // a remote snowflake has been disconnected. If no new messages are sent or received + // in this time period, the client will terminate the connection with the remote + // peer and collect a new snowflake. + SnowflakeTimeout = 20 * time.Second + // DataChannelTimeout is how long the client will wait for the OnOpen callback + // on a newly created DataChannel. + DataChannelTimeout = 10 * time.Second + + // WindowSize is the number of packets in the send and receive window of a KCP connection. + WindowSize = 65535 + // StreamSize controls the maximum amount of in flight data between a client and server. + StreamSize = 1048576 // 1MB ) -// When a connection handler starts, +1 is written to this channel; when it -// ends, -1 is written. -var HandlerChan = make(chan int) +type dummyAddr struct{} -// Given an accepted SOCKS connection, establish a WebRTC connection to the -// remote peer and exchange traffic. -func Handler(socks SocksConnector, snowflakes SnowflakeCollector) error { - HandlerChan <- 1 - defer func() { - HandlerChan <- -1 - }() - // Obtain an available WebRTC remote. May block. - snowflake := snowflakes.Pop() - if nil == snowflake { - socks.Reject() - return errors.New("handler: Received invalid Snowflake") +func (addr dummyAddr) Network() string { return "dummy" } +func (addr dummyAddr) String() string { return "dummy" } + +// Transport is a structure with methods that conform to the Go PT v2.1 API +// https://github.com/Pluggable-Transports/Pluggable-Transports-spec/blob/master/releases/PTSpecV2.1/Pluggable%20Transport%20Specification%20v2.1%20-%20Go%20Transport%20API.pdf +type Transport struct { + dialer *WebRTCDialer + + // EventDispatcher is the event bus for snowflake events. + // When an important event happens, it will be distributed here. + eventDispatcher event.SnowflakeEventDispatcher +} + +// ClientConfig defines how the SnowflakeClient will connect to the broker and Snowflake proxies. +type ClientConfig struct { + // BrokerURL is the full URL of the Snowflake broker that the client will connect to. + BrokerURL string + // AmpCacheURL is the full URL of a valid AMP cache. A nonzero value indicates + // that AMP cache will be used as the rendezvous method with the broker. + AmpCacheURL string + // SQSQueueURL is the full URL of an AWS SQS Queue. A nonzero value indicates + // that SQS queue will be used as the rendezvous method with the broker. 
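+	// An illustrative (hypothetical) value, following the usual AWS SQS URL
+	// shape: https://sqs.us-east-1.amazonaws.com/123456789012/snowflake-broker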
+	SQSQueueURL string
+	// SQSCredsStr is a base64-encoded string of the credentials (access key ID
+	// and secret key) used to access the AWS SQS queue.
+	SQSCredsStr string
+	// FrontDomain is the full URL of an optional front domain that can be used with either
+	// the AMP cache or HTTP domain fronting rendezvous method.
+	FrontDomain string
+	// FrontDomains is a slice of full URLs of optional front domains that can be
+	// used with either the AMP cache or HTTP domain fronting rendezvous methods.
+	FrontDomains []string
+	// ICEAddresses are a slice of ICE server URLs that will be used for NAT traversal and
+	// the creation of the client's WebRTC SDP offer.
+	ICEAddresses []string
+	// KeepLocalAddresses is an optional setting that will prevent the removal of local or
+	// invalid addresses from the client's SDP offer. This is useful for local deployments
+	// and testing.
+	KeepLocalAddresses bool
+	// Max is the maximum number of snowflake proxy peers that the client should attempt to
+	// connect to. Defaults to 1.
+	Max int
+	// UTLSClientID is the type of user application that snowflake should imitate.
+	// If an empty value is provided, Go's default TLS implementation is used.
+	UTLSClientID string
+	// UTLSRemoveSNI is the flag to control whether SNI should be removed from the Client Hello
+	// when uTLS is used.
+	UTLSRemoveSNI bool
+	// BridgeFingerprint is the fingerprint of the bridge that the client will eventually
+	// connect to, as specified in the Bridge line of the torrc.
+	BridgeFingerprint string
+	// CommunicationProxy is the proxy address for network communication.
+	CommunicationProxy *url.URL
+}
+
+// NewSnowflakeClient creates a new Snowflake transport client that can spawn multiple
+// Snowflake connections.
+//
+// The config selects the broker and rendezvous method (BrokerURL, AmpCacheURL,
+// SQSQueueURL, FrontDomains), the STUN/TURN servers used for WebRTC negotiation
+// (ICEAddresses), whether local network addresses are kept in the SDP offer for
+// testing (KeepLocalAddresses), and the maximum number of snowflakes gathered
+// for each SOCKS connection (Max).
+func NewSnowflakeClient(config ClientConfig) (*Transport, error) {
+	log.Println("\n\n\n --- Starting Snowflake Client ---")
+
+	iceServers := parseIceServers(config.ICEAddresses)
+	// Choose a random subset of the provided servers (half of them, when
+	// more than two are given).
+	rand.Seed(time.Now().UnixNano())
+	rand.Shuffle(len(iceServers), func(i, j int) {
+		iceServers[i], iceServers[j] = iceServers[j], iceServers[i]
+	})
+	if len(iceServers) > 2 {
+		iceServers = iceServers[:(len(iceServers)+1)/2]
 	}
-	defer socks.Close()
-	defer snowflake.Close()
-	log.Println("---- Handler: snowflake assigned ----")
-	err := socks.Grant(&net.TCPAddr{IP: net.IPv4zero, Port: 0})
+	log.Printf("Using ICE servers:")
+	for _, server := range iceServers {
+		log.Printf("url: %v", strings.Join(server.URLs, " "))
+	}
+
+	// Maintain backwards compatibility with the old FrontDomain field of ClientConfig
+	if (len(config.FrontDomains) == 0) && (config.FrontDomain != "") {
+		config.FrontDomains = []string{config.FrontDomain}
+	}
+
+	// Rendezvous with broker using the given parameters.
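+	// Per the ClientConfig field documentation above: a nonzero AmpCacheURL
+	// selects AMP cache rendezvous, a nonzero SQSQueueURL selects SQS
+	// rendezvous, and otherwise the client polls BrokerURL directly over
+	// HTTPS, optionally domain fronted through FrontDomains.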
+ broker, err := newBrokerChannelFromConfig(config) if err != nil { - return err + return nil, err + } + go updateNATType(iceServers, broker, config.CommunicationProxy) + + natPolicy := &NATPolicy{} + + max := 1 + if config.Max > max { + max = config.Max + } + eventsLogger := event.NewSnowflakeEventDispatcher() + transport := &Transport{dialer: NewWebRTCDialerWithNatPolicyAndEventsAndProxy(broker, natPolicy, iceServers, max, eventsLogger, config.CommunicationProxy), eventDispatcher: eventsLogger} + + return transport, nil +} + +// Dial creates a new Snowflake connection. +// Dial starts the collection of snowflakes and returns a SnowflakeConn that is a +// wrapper around a smux.Stream that will reliably deliver data to a Snowflake +// server through one or more snowflake proxies. +func (t *Transport) Dial() (net.Conn, error) { + // Cleanup functions to run before returning, in case of an error. + var cleanup []func() + defer func() { + // Run cleanup in reverse order, as defer does. + for i := len(cleanup) - 1; i >= 0; i-- { + cleanup[i]() + } + }() + + // Prepare to collect remote WebRTC peers. + snowflakes, err := NewPeers(t.dialer) + if err != nil { + return nil, err + } + cleanup = append(cleanup, func() { snowflakes.End() }) + + // Use a real logger to periodically output how much traffic is happening. + snowflakes.bytesLogger = newBytesSyncLogger() + + log.Printf("---- SnowflakeConn: begin collecting snowflakes ---") + go connectLoop(snowflakes) + + // Create a new smux session + log.Printf("---- SnowflakeConn: starting a new session ---") + pconn, sess, err := newSession(snowflakes) + if err != nil { + return nil, err + } + cleanup = append(cleanup, func() { + pconn.Close() + sess.Close() + }) + + // On the smux session we overlay a stream. + stream, err := sess.OpenStream() + if err != nil { + return nil, err + } + // Begin exchanging data. + log.Printf("---- SnowflakeConn: begin stream %v ---", stream.ID()) + cleanup = append(cleanup, func() { stream.Close() }) + + // All good, clear the cleanup list. + cleanup = nil + return &SnowflakeConn{Stream: stream, sess: sess, pconn: pconn, snowflakes: snowflakes}, nil +} + +func (t *Transport) AddSnowflakeEventListener(receiver event.SnowflakeEventReceiver) { + t.eventDispatcher.AddSnowflakeEventListener(receiver) +} + +func (t *Transport) RemoveSnowflakeEventListener(receiver event.SnowflakeEventReceiver) { + t.eventDispatcher.RemoveSnowflakeEventListener(receiver) +} + +// SetRendezvousMethod sets the rendezvous method to the Snowflake broker. +func (t *Transport) SetRendezvousMethod(r RendezvousMethod) { + t.dialer.Rendezvous = r +} + +// SnowflakeConn is a reliable connection to a snowflake server that implements net.Conn. +type SnowflakeConn struct { + *smux.Stream + sess *smux.Session + pconn net.PacketConn + snowflakes *Peers +} + +// Close closes the connection. +// +// The collection of snowflake proxies for this connection is stopped. 
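+//
+// Close also closes the underlying smux session and the packet connection
+// that carry this stream.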
+func (conn *SnowflakeConn) Close() error { + var err error + log.Printf("---- SnowflakeConn: closed stream %v ---", conn.ID()) + err = conn.Stream.Close() + log.Printf("---- SnowflakeConn: end collecting snowflakes ---") + conn.snowflakes.End() + if inerr := conn.pconn.Close(); err == nil { + err = inerr + } + log.Printf("---- SnowflakeConn: discarding finished session ---") + if inerr := conn.sess.Close(); err == nil { + err = inerr + } + return err +} + +// loop through all provided STUN servers until we exhaust the list or find +// one that is compatible with RFC 5780 +func updateNATType(servers []webrtc.ICEServer, broker *BrokerChannel, proxy *url.URL) { + var restrictedNAT bool + var err error + for _, server := range servers { + addr := strings.TrimPrefix(server.URLs[0], "stun:") + restrictedNAT, err = nat.CheckIfRestrictedNATWithProxy(addr, proxy) + + if err != nil { + log.Printf("Warning: NAT checking failed for server at %s: %s", addr, err) + } else { + if restrictedNAT { + broker.SetNATType(nat.NATRestricted) + } else { + broker.SetNATType(nat.NATUnrestricted) + } + break + } + } + if err != nil { + broker.SetNATType(nat.NATUnknown) + } +} + +// Returns a slice of webrtc.ICEServer given a slice of addresses +func parseIceServers(addresses []string) []webrtc.ICEServer { + var servers []webrtc.ICEServer + if len(addresses) == 0 { + return nil + } + for _, address := range addresses { + address = strings.TrimSpace(address) + + // ice.ParseURL recognizes many types of ICE servers, + // but we only support stun over UDP currently + u, err := url.Parse(address) + if err != nil { + log.Printf("Warning: Parsing ICE server %v resulted in error: %v, skipping", address, err) + continue + } + if u.Scheme != "stun" { + log.Printf("Warning: Only stun: (STUN over UDP) servers are supported currently, skipping %v", address) + continue + } + + // add default port, other sanity checks + parsedURL, err := ice.ParseURL(address) + if err != nil { + log.Printf("Warning: Parsing ICE server %v resulted in error: %v, skipping", address, err) + continue + } + + servers = append(servers, webrtc.ICEServer{ + URLs: []string{parsedURL.String()}, + }) + } + return servers +} + +// newSession returns a new smux.Session and the net.PacketConn it is running +// over. The net.PacketConn successively connects through Snowflake proxies +// pulled from snowflakes. +func newSession(snowflakes SnowflakeCollector) (net.PacketConn, *smux.Session, error) { + clientID := turbotunnel.NewClientID() + + // We build a persistent KCP session on a sequence of ephemeral WebRTC + // connections. This dialContext tells RedialPacketConn how to get a new + // WebRTC connection when the previous one dies. Inside each WebRTC + // connection, we use encapsulationPacketConn to encode packets into a + // stream. + dialContext := func(ctx context.Context) (net.PacketConn, error) { + log.Printf("redialing on same connection") + // Obtain an available WebRTC remote. May block. + conn := snowflakes.Pop() + if conn == nil { + return nil, errors.New("handler: Received invalid Snowflake") + } + log.Println("---- Handler: snowflake assigned ----") + // Send the magic Turbo Tunnel token. + _, err := conn.Write(turbotunnel.Token[:]) + if err != nil { + return nil, err + } + // Send ClientID prefix. 
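+		// A fresh WebRTC connection thus always begins with the fixed
+		// Turbo Tunnel token followed by this session's ClientID, which
+		// lets the server attach the new connection to the existing KCP
+		// session. Sketch of the wire layout (sizes are those defined in
+		// the turbotunnel package):
+		//
+		//	| Token | ClientID | encapsulated KCP packets ... |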
+ _, err = conn.Write(clientID[:]) + if err != nil { + return nil, err + } + return newEncapsulationPacketConn(dummyAddr{}, dummyAddr{}, conn), nil + } + pconn := turbotunnel.NewRedialPacketConn(dummyAddr{}, dummyAddr{}, dialContext) + + // conn is built on the underlying RedialPacketConn—when one WebRTC + // connection dies, another one will be found to take its place. The + // sequence of packets across multiple WebRTC connections drives the KCP + // engine. + conn, err := kcp.NewConn2(dummyAddr{}, nil, 0, 0, pconn) + if err != nil { + pconn.Close() + return nil, nil, err + } + // Permit coalescing the payloads of consecutive sends. + conn.SetStreamMode(true) + // Set the maximum send and receive window sizes to a high number + // Removes KCP bottlenecks: https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/issues/40026 + conn.SetWindowSize(WindowSize, WindowSize) + // Disable the dynamic congestion window (limit only by the + // maximum of local and remote static windows). + conn.SetNoDelay( + 0, // default nodelay + 0, // default interval + 0, // default resend + 1, // nc=1 => congestion window off + ) + // On the KCP connection we overlay an smux session and stream. + smuxConfig := smux.DefaultConfig() + smuxConfig.Version = 2 + smuxConfig.KeepAliveTimeout = 10 * time.Minute + smuxConfig.MaxStreamBuffer = StreamSize + + sess, err := smux.Client(conn, smuxConfig) + if err != nil { + conn.Close() + pconn.Close() + return nil, nil, err } - go func() { - // When WebRTC resets, close the SOCKS connection too. - snowflake.WaitForReset() - socks.Close() - }() - - // Begin exchanging data. Either WebRTC or localhost SOCKS will close first. - // In eithercase, this closes the handler and induces a new handler. - copyLoop(socks, snowflake) - log.Println("---- Handler: closed ---") - return nil + return pconn, sess, err } -// Exchanges bytes between two ReadWriters. -// (In this case, between a SOCKS and WebRTC connection.) -func copyLoop(a, b io.ReadWriter) { - var wg sync.WaitGroup - wg.Add(2) - go func() { - io.Copy(b, a) - wg.Done() - }() - go func() { - io.Copy(a, b) - wg.Done() - }() - wg.Wait() - log.Println("copy loop ended") +// Maintain |SnowflakeCapacity| number of available WebRTC connections, to +// transfer to the Tor SOCKS handler when needed. +func connectLoop(snowflakes SnowflakeCollector) { + for { + timer := time.After(ReconnectTimeout) + _, err := snowflakes.Collect() + if err != nil { + log.Printf("WebRTC: %v Retrying...", err) + } + select { + case <-timer: + continue + case <-snowflakes.Melted(): + log.Println("ConnectLoop: stopped.") + return + } + } } diff --git a/client/lib/turbotunnel.go b/client/lib/turbotunnel.go new file mode 100644 index 0000000..45646e6 --- /dev/null +++ b/client/lib/turbotunnel.go @@ -0,0 +1,69 @@ +package snowflake_client + +import ( + "bufio" + "errors" + "io" + "net" + "time" + + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/encapsulation" +) + +var errNotImplemented = errors.New("not implemented") + +// encapsulationPacketConn implements the net.PacketConn interface over an +// io.ReadWriteCloser stream, using the encapsulation package to represent +// packets in a stream. +type encapsulationPacketConn struct { + io.ReadWriteCloser + localAddr net.Addr + remoteAddr net.Addr + bw *bufio.Writer +} + +// newEncapsulationPacketConn makes an encapsulationPacketConn out of a given +// io.ReadWriteCloser and provided local and remote addresses. 
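+//
+// Each datagram is framed with a short length prefix so that packet
+// boundaries can be recovered from the byte stream (the exact framing
+// lives in the common/encapsulation package).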
+func newEncapsulationPacketConn(
+	localAddr, remoteAddr net.Addr,
+	conn io.ReadWriteCloser,
+) *encapsulationPacketConn {
+	return &encapsulationPacketConn{
+		ReadWriteCloser: conn,
+		localAddr:       localAddr,
+		remoteAddr:      remoteAddr,
+		bw:              bufio.NewWriter(conn),
+	}
+}
+
+// ReadFrom reads an encapsulated packet from the stream.
+func (c *encapsulationPacketConn) ReadFrom(p []byte) (int, net.Addr, error) {
+	n, err := encapsulation.ReadData(c.ReadWriteCloser, p)
+	if err == io.ErrShortBuffer {
+		err = nil
+	}
+	return n, c.remoteAddr, err
+}
+
+// WriteTo writes an encapsulated packet to the stream.
+func (c *encapsulationPacketConn) WriteTo(p []byte, addr net.Addr) (int, error) {
+	// addr is ignored.
+	_, err := encapsulation.WriteData(c.bw, p)
+	if err == nil {
+		err = c.bw.Flush()
+	}
+	if err != nil {
+		return 0, err
+	}
+	return len(p), nil
+}
+
+// LocalAddr returns the localAddr value that was passed to
+// newEncapsulationPacketConn.
+func (c *encapsulationPacketConn) LocalAddr() net.Addr {
+	return c.localAddr
+}
+
+func (c *encapsulationPacketConn) SetDeadline(t time.Time) error      { return errNotImplemented }
+func (c *encapsulationPacketConn) SetReadDeadline(t time.Time) error  { return errNotImplemented }
+func (c *encapsulationPacketConn) SetWriteDeadline(t time.Time) error { return errNotImplemented }
diff --git a/client/lib/util.go b/client/lib/util.go
index 028fb1c..536fa17 100644
--- a/client/lib/util.go
+++ b/client/lib/util.go
@@ -1,95 +1,71 @@
-package lib
+package snowflake_client
 
 import (
-	"fmt"
 	"log"
 	"time"
-
-	"github.com/keroserene/go-webrtc"
 )
 
 const (
-	LogTimeInterval = 5
+	LogTimeInterval = 5 * time.Second
 )
 
-type IceServerList []webrtc.ConfigurationOption
-
-func (i *IceServerList) String() string {
-	return fmt.Sprint(*i)
+type bytesLogger interface {
+	addOutbound(int64)
+	addInbound(int64)
 }
 
-type BytesLogger interface {
-	Log()
-	AddOutbound(int)
-	AddInbound(int)
-}
+// Default bytesLogger does nothing.
+type bytesNullLogger struct{}
 
-// Default BytesLogger does nothing.
-type BytesNullLogger struct{}
+func (b bytesNullLogger) addOutbound(amount int64) {}
+func (b bytesNullLogger) addInbound(amount int64)  {}
 
-func (b BytesNullLogger) Log()                   {}
-func (b BytesNullLogger) AddOutbound(amount int) {}
-func (b BytesNullLogger) AddInbound(amount int)  {}
-
-// BytesSyncLogger uses channels to safely log from multiple sources with output
+// bytesSyncLogger uses channels to safely log from multiple sources with output
 // occurring at reasonable intervals.
-type BytesSyncLogger struct {
-	OutboundChan chan int
-	InboundChan  chan int
-	Outbound     int
-	Inbound      int
-	OutEvents    int
-	InEvents     int
-	IsLogging    bool
+type bytesSyncLogger struct {
+	outboundChan chan int64
+	inboundChan  chan int64
 }
 
-func (b *BytesSyncLogger) Log() {
-	b.IsLogging = true
-	var amount int
-	output := func() {
-		log.Printf("Traffic Bytes (in|out): %d | %d -- (%d OnMessages, %d Sends)",
-			b.Inbound, b.Outbound, b.InEvents, b.OutEvents)
-		b.Outbound = 0
-		b.OutEvents = 0
-		b.Inbound = 0
-		b.InEvents = 0
+// newBytesSyncLogger returns a new bytesSyncLogger and starts its logging loop.
+func newBytesSyncLogger() *bytesSyncLogger { + b := &bytesSyncLogger{ + outboundChan: make(chan int64, 5), + inboundChan: make(chan int64, 5), } - last := time.Now() + go b.log() + return b +} + +func (b *bytesSyncLogger) log() { + var outbound, inbound int64 + var outEvents, inEvents int + ticker := time.NewTicker(LogTimeInterval) for { select { - case amount = <-b.OutboundChan: - b.Outbound += amount - b.OutEvents++ - last := time.Now() - if time.Since(last) > time.Second*LogTimeInterval { - last = time.Now() - output() - } - case amount = <-b.InboundChan: - b.Inbound += amount - b.InEvents++ - if time.Since(last) > time.Second*LogTimeInterval { - last = time.Now() - output() - } - case <-time.After(time.Second * LogTimeInterval): - if b.InEvents > 0 || b.OutEvents > 0 { - output() + case <-ticker.C: + if outEvents > 0 || inEvents > 0 { + log.Printf("Traffic Bytes (in|out): %d | %d -- (%d OnMessages, %d Sends)", + inbound, outbound, inEvents, outEvents) } + outbound = 0 + outEvents = 0 + inbound = 0 + inEvents = 0 + case amount := <-b.outboundChan: + outbound += amount + outEvents++ + case amount := <-b.inboundChan: + inbound += amount + inEvents++ } } } -func (b *BytesSyncLogger) AddOutbound(amount int) { - if !b.IsLogging { - return - } - b.OutboundChan <- amount +func (b *bytesSyncLogger) addOutbound(amount int64) { + b.outboundChan <- amount } -func (b *BytesSyncLogger) AddInbound(amount int) { - if !b.IsLogging { - return - } - b.InboundChan <- amount +func (b *bytesSyncLogger) addInbound(amount int64) { + b.inboundChan <- amount } diff --git a/client/lib/webrtc.go b/client/lib/webrtc.go index 6406da5..397ba85 100644 --- a/client/lib/webrtc.go +++ b/client/lib/webrtc.go @@ -1,67 +1,118 @@ -package lib +package snowflake_client import ( - "bytes" + "crypto/rand" + "encoding/hex" "errors" "io" "log" + "net" + "net/url" "sync" "time" - "github.com/dchest/uniuri" - "github.com/keroserene/go-webrtc" + "github.com/pion/ice/v4" + "github.com/pion/transport/v3" + "github.com/pion/transport/v3/stdnet" + "github.com/pion/webrtc/v4" + + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/event" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/proxy" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/util" ) -// Remote WebRTC peer. -// Implements the |Snowflake| interface, which includes -// |io.ReadWriter|, |Resetter|, and |Connector|. +// WebRTCPeer represents a WebRTC connection to a remote snowflake proxy. // -// Handles preparation of go-webrtc PeerConnection. Only ever has -// one DataChannel. +// Each WebRTCPeer only ever has one DataChannel that is used as the peer's transport. type WebRTCPeer struct { id string - config *webrtc.Configuration pc *webrtc.PeerConnection - transport SnowflakeDataChannel // Holds the WebRTC DataChannel. 
-	broker *BrokerChannel
+	transport *webrtc.DataChannel
 
-	offerChannel  chan *webrtc.SessionDescription
-	answerChannel chan *webrtc.SessionDescription
-	errorChannel  chan error
-	recvPipe      *io.PipeReader
-	writePipe     *io.PipeWriter
-	lastReceive   time.Time
-	buffer        bytes.Buffer
-	reset         chan struct{}
+	recvPipe  *io.PipeReader
+	writePipe *io.PipeWriter
 
-	closed bool
+	mu          sync.Mutex // protects the following:
+	lastReceive time.Time
 
-	lock sync.Mutex // Synchronization for DataChannel destruction
-	once sync.Once  // Synchronization for PeerConnection destruction
+	open   chan struct{} // Channel to notify when datachannel opens
+	closed chan struct{}
 
+	once sync.Once // Synchronization for PeerConnection destruction
+
+	bytesLogger  bytesLogger
+	eventsLogger event.SnowflakeEventReceiver
+	proxy        *url.URL
 }
 
-// Construct a WebRTC PeerConnection.
-func NewWebRTCPeer(config *webrtc.Configuration,
-	broker *BrokerChannel) *WebRTCPeer {
+// Deprecated: Use NewWebRTCPeerWithNatPolicyAndEventsAndProxy instead.
+func NewWebRTCPeer(
+	config *webrtc.Configuration, broker *BrokerChannel,
+) (*WebRTCPeer, error) {
+	return NewWebRTCPeerWithNatPolicyAndEventsAndProxy(
+		config, broker, nil, nil, nil,
+	)
+}
+
+// Deprecated: Use NewWebRTCPeerWithNatPolicyAndEventsAndProxy instead.
+func NewWebRTCPeerWithEvents(
+	config *webrtc.Configuration, broker *BrokerChannel,
+	eventsLogger event.SnowflakeEventReceiver,
+) (*WebRTCPeer, error) {
+	return NewWebRTCPeerWithNatPolicyAndEventsAndProxy(
+		config, broker, nil, eventsLogger, nil,
+	)
+}
+
+// Deprecated: Use NewWebRTCPeerWithNatPolicyAndEventsAndProxy instead.
+func NewWebRTCPeerWithEventsAndProxy(
+	config *webrtc.Configuration, broker *BrokerChannel,
+	eventsLogger event.SnowflakeEventReceiver, proxy *url.URL,
+) (*WebRTCPeer, error) {
+	return NewWebRTCPeerWithNatPolicyAndEventsAndProxy(
+		config, broker, nil, eventsLogger, proxy,
+	)
+}
+
+// NewWebRTCPeerWithNatPolicyAndEventsAndProxy constructs
+// a WebRTC PeerConnection to a snowflake proxy.
+//
+// The creation of the peer handles the signaling to the Snowflake broker, including
+// the exchange of SDP information, the creation of a PeerConnection, and the establishment
+// of a DataChannel to the Snowflake proxy.
+func NewWebRTCPeerWithNatPolicyAndEventsAndProxy(
+	config *webrtc.Configuration, broker *BrokerChannel, natPolicy *NATPolicy,
+	eventsLogger event.SnowflakeEventReceiver, proxy *url.URL,
+) (*WebRTCPeer, error) {
+	if eventsLogger == nil {
+		eventsLogger = event.NewSnowflakeEventDispatcher()
+	}
+
 	connection := new(WebRTCPeer)
-	connection.id = "snowflake-" + uniuri.New()
-	connection.config = config
-	connection.broker = broker
-	connection.offerChannel = make(chan *webrtc.SessionDescription, 1)
-	connection.answerChannel = make(chan *webrtc.SessionDescription, 1)
-	// Error channel is mostly for reporting during the initial SDP offer
-	// creation & local description setting, which happens asynchronously.
-	connection.errorChannel = make(chan error, 1)
-	connection.reset = make(chan struct{}, 1)
+	{
+		var buf [8]byte
+		if _, err := rand.Read(buf[:]); err != nil {
+			panic(err)
+		}
+		connection.id = "snowflake-" + hex.EncodeToString(buf[:])
+	}
+	connection.closed = make(chan struct{})
 	// Override with something that's not NullLogger to have real logging.
-	connection.BytesLogger = &BytesNullLogger{}
+	connection.bytesLogger = &bytesNullLogger{}
 	// Pipes remain the same even when DataChannel gets switched.
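+	// Read serves data out of recvPipe; the DataChannel's OnMessage
+	// callback writes incoming messages into writePipe at the other end.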
 	connection.recvPipe, connection.writePipe = io.Pipe()
-	return connection
+
+	connection.eventsLogger = eventsLogger
+	connection.proxy = proxy
+
+	err := connection.connect(config, broker, natPolicy)
+	if err != nil {
+		connection.Close()
+		return nil, err
+	}
+	return connection, nil
 }
 
 // Read bytes from local SOCKS.
@@ -73,285 +124,262 @@ func (c *WebRTCPeer) Read(b []byte) (int, error) {
 // Writes bytes out to remote WebRTC.
 // As part of |io.ReadWriter|
 func (c *WebRTCPeer) Write(b []byte) (int, error) {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-	c.BytesLogger.AddOutbound(len(b))
-	// TODO: Buffering could be improved / separated out of WebRTCPeer.
-	if nil == c.transport {
-		log.Printf("Buffered %d bytes --> WebRTC", len(b))
-		c.buffer.Write(b)
-	} else {
-		c.transport.Send(b)
+	err := c.transport.Send(b)
+	if err != nil {
+		return 0, err
 	}
+	c.bytesLogger.addOutbound(int64(len(b)))
 	return len(b), nil
 }
 
-// As part of |Snowflake|
+// Closed returns a boolean indicating whether the peer is closed.
+func (c *WebRTCPeer) Closed() bool {
+	select {
+	case <-c.closed:
+		return true
+	default:
+	}
+	return false
+}
+
+// Close closes the connection to the snowflake proxy.
 func (c *WebRTCPeer) Close() error {
 	c.once.Do(func() {
-		c.closed = true
+		close(c.closed)
 		c.cleanup()
-		c.Reset()
 		log.Printf("WebRTC: Closing")
 	})
 	return nil
 }
 
-// As part of |Resetter|
-func (c *WebRTCPeer) Reset() {
-	if nil == c.reset {
-		return
-	}
-	c.reset <- struct{}{}
-}
-
-// As part of |Resetter|
-func (c *WebRTCPeer) WaitForReset() { <-c.reset }
-
 // Prevent long-lived broken remotes.
 // Should also update the DataChannel in the underlying WebRTC library to make
 // Closes more immediate / responsive.
-func (c *WebRTCPeer) checkForStaleness() {
+func (c *WebRTCPeer) checkForStaleness(timeout time.Duration) {
+	c.mu.Lock()
 	c.lastReceive = time.Now()
+	c.mu.Unlock()
 	for {
-		if c.closed {
-			return
-		}
-		if time.Since(c.lastReceive).Seconds() > SnowflakeTimeout {
-			log.Println("WebRTC: No messages received for", SnowflakeTimeout,
-				"seconds -- closing stale connection.")
+		c.mu.Lock()
+		lastReceive := c.lastReceive
+		c.mu.Unlock()
+		if time.Since(lastReceive) > timeout {
+			log.Printf("WebRTC: No messages received for %v -- closing stale connection.",
+				timeout)
+			err := errors.New("no messages received, closing stale connection")
+			c.eventsLogger.OnNewSnowflakeEvent(event.EventOnSnowflakeConnectionFailed{Error: err})
 			c.Close()
 			return
 		}
-		<-time.After(time.Second)
-	}
-}
-
-// As part of |Connector| interface.
-func (c *WebRTCPeer) Connect() error {
-	log.Println(c.id, " connecting...")
-	// TODO: When go-webrtc is more stable, it's possible that a new
-	// PeerConnection won't need to be re-prepared each time.
-	err := c.preparePeerConnection()
-	if err != nil {
-		return err
-	}
-	err = c.establishDataChannel()
-	if err != nil {
-		return errors.New("WebRTC: Could not establish DataChannel.")
-	}
-	err = c.exchangeSDP()
-	if err != nil {
-		return err
-	}
-	go c.checkForStaleness()
-	return nil
-}
-
-// Create and prepare callbacks on a new WebRTC PeerConnection.
-func (c *WebRTCPeer) preparePeerConnection() error {
-	if nil != c.pc {
-		c.pc.Destroy()
-		c.pc = nil
-	}
-	pc, err := webrtc.NewPeerConnection(c.config)
-	if err != nil {
-		log.Printf("NewPeerConnection ERROR: %s", err)
-		return err
-	}
-	// Prepare PeerConnection callbacks.
-	pc.OnNegotiationNeeded = func() {
-		log.Println("WebRTC: OnNegotiationNeeded")
-		go func() {
-			offer, err := pc.CreateOffer()
-			// TODO: Potentially timeout and retry if ICE isn't working.
- if err != nil { - c.errorChannel <- err - return - } - err = pc.SetLocalDescription(offer) - if err != nil { - c.errorChannel <- err - return - } - }() - } - // Allow candidates to accumulate until IceGatheringStateComplete. - pc.OnIceCandidate = func(candidate webrtc.IceCandidate) { - log.Printf(candidate.Candidate) - } - pc.OnIceGatheringStateChange = func(state webrtc.IceGatheringState) { - if state == webrtc.IceGatheringStateComplete { - log.Printf("WebRTC: IceGatheringStateComplete") - c.offerChannel <- pc.LocalDescription() - } - } - // This callback is not expected, as the Client initiates the creation - // of the data channel, not the remote peer. - pc.OnDataChannel = func(channel *webrtc.DataChannel) { - log.Println("OnDataChannel") - panic("Unexpected OnDataChannel!") - } - c.pc = pc - log.Println("WebRTC: PeerConnection created.") - return nil -} - -// Create a WebRTC DataChannel locally. -func (c *WebRTCPeer) establishDataChannel() error { - c.lock.Lock() - defer c.lock.Unlock() - if c.transport != nil { - panic("Unexpected datachannel already exists!") - } - dc, err := c.pc.CreateDataChannel(c.id) - // Triggers "OnNegotiationNeeded" on the PeerConnection, which will prepare - // an SDP offer while other goroutines operating on this struct handle the - // signaling. Eventually fires "OnOpen". - if err != nil { - log.Printf("CreateDataChannel ERROR: %s", err) - return err - } - dc.OnOpen = func() { - c.lock.Lock() - defer c.lock.Unlock() - log.Println("WebRTC: DataChannel.OnOpen") - if nil != c.transport { - panic("WebRTC: transport already exists.") - } - // Flush buffered outgoing SOCKS data if necessary. - if c.buffer.Len() > 0 { - dc.Send(c.buffer.Bytes()) - log.Println("Flushed", c.buffer.Len(), "bytes.") - c.buffer.Reset() - } - // Then enable the datachannel. - c.transport = dc - } - dc.OnClose = func() { - c.lock.Lock() - // Future writes will go to the buffer until a new DataChannel is available. - if nil == c.transport { - // Closed locally, as part of a reset. - log.Println("WebRTC: DataChannel.OnClose [locally]") - c.lock.Unlock() + select { + case <-c.closed: return + case <-time.After(time.Second): } - // Closed remotely, need to reset everything. - // Disable the DataChannel as a write destination. - log.Println("WebRTC: DataChannel.OnClose [remotely]") - c.transport = nil - c.pc.DeleteDataChannel(dc) - // Unlock before Close'ing, since it calls cleanup and asks for the - // lock to check if the transport needs to be be deleted. - c.lock.Unlock() - c.Close() } - dc.OnMessage = func(msg []byte) { - if len(msg) <= 0 { - log.Println("0 length message---") - } - c.BytesLogger.AddInbound(len(msg)) - n, err := c.writePipe.Write(msg) - if err != nil { - // TODO: Maybe shouldn't actually close. - log.Println("Error writing to SOCKS pipe") - c.writePipe.CloseWithError(err) - } - if n != len(msg) { - log.Println("Error: short write") - panic("short write") - } - c.lastReceive = time.Now() - } - log.Println("WebRTC: DataChannel created.") - return nil } -func (c *WebRTCPeer) sendOfferToBroker() { - if nil == c.broker { - return - } - offer := c.pc.LocalDescription() - answer, err := c.broker.Negotiate(offer) - if nil != err || nil == answer { - log.Printf("BrokerChannel Error: %s", err) - answer = nil - } - c.answerChannel <- answer -} +// connect does the bulk of the work: gather ICE candidates, send the SDP offer to broker, +// receive an answer from broker, and wait for data channel to open. 
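+// The sequence is: create a DataChannel and SDP offer, gather ICE candidates,
+// send the offer to the broker, apply the broker's answer with
+// SetRemoteDescription, then wait for DataChannel.OnOpen (or time out).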
+//
+// `natPolicy` can be nil, in which case we'll always send our actual
+// NAT type to the broker.
+func (c *WebRTCPeer) connect(
+	config *webrtc.Configuration,
+	broker *BrokerChannel,
+	natPolicy *NATPolicy,
+) error {
+	log.Println(c.id, " connecting...")
 
-// Block until an SDP offer is available, send it to either
-// the Broker or signal pipe, then await for the SDP answer.
-func (c *WebRTCPeer) exchangeSDP() error {
-	select {
-	case <-c.offerChannel:
-	case err := <-c.errorChannel:
-		log.Println("Failed to prepare offer", err)
-		c.Close()
+	err := c.preparePeerConnection(config, broker.keepLocalAddresses)
+	// c.pc is only guaranteed to be non-nil when preparePeerConnection
+	// succeeded, so do not dereference it before checking err.
+	var localDescription *webrtc.SessionDescription
+	if err == nil {
+		localDescription = c.pc.LocalDescription()
+	}
+	c.eventsLogger.OnNewSnowflakeEvent(event.EventOnOfferCreated{
+		WebRTCLocalDescription: localDescription,
+		Error:                  err,
+	})
+	if err != nil {
 		return err
 	}
-	// Keep trying the same offer until a valid answer arrives.
-	var ok bool
-	var answer *webrtc.SessionDescription = nil
-	for nil == answer {
-		go c.sendOfferToBroker()
-		answer, ok = <-c.answerChannel // Blocks...
-		if !ok || nil == answer {
-			log.Printf("Failed to retrieve answer. Retrying in %d seconds", ReconnectTimeout)
-			<-time.After(time.Second * ReconnectTimeout)
-			answer = nil
-		}
+
+	actualNatType := broker.GetNATType()
+	var natTypeToSend string
+	if natPolicy != nil {
+		natTypeToSend = natPolicy.NATTypeToSend(actualNatType)
+	} else {
+		natTypeToSend = actualNatType
+	}
+	if natTypeToSend != actualNatType {
+		log.Printf(
+			"Our NAT type is \"%v\", but let's tell the broker it's \"%v\".",
+			actualNatType,
+			natTypeToSend,
+		)
+	} else {
+		log.Printf("natTypeToSend: \"%v\" (same as actualNatType)", natTypeToSend)
+	}
+
+	answer, err := broker.Negotiate(localDescription, natTypeToSend)
+	c.eventsLogger.OnNewSnowflakeEvent(event.EventOnBrokerRendezvous{
+		WebRTCRemoteDescription: answer,
+		Error:                   err,
+	})
+	if err != nil {
+		return err
 	}
 	log.Printf("Received Answer.\n")
-	err := c.pc.SetRemoteDescription(answer)
+	err = c.pc.SetRemoteDescription(*answer)
 	if nil != err {
 		log.Println("WebRTC: Unable to SetRemoteDescription:", err)
 		return err
 	}
+
+	// Wait for the datachannel to open or time out
+	select {
+	case <-c.open:
+		if natPolicy != nil {
+			natPolicy.Success(actualNatType, natTypeToSend)
+		}
+	case <-time.After(DataChannelTimeout):
+		c.transport.Close()
+		err := errors.New("timeout waiting for DataChannel.OnOpen")
+		if natPolicy != nil {
+			natPolicy.Failure(actualNatType, natTypeToSend)
+		}
+		c.eventsLogger.OnNewSnowflakeEvent(event.EventOnSnowflakeConnectionFailed{Error: err})
+		return err
+	}
+
+	go c.checkForStaleness(SnowflakeTimeout)
 	return nil
 }
 
-// Close all channels and transports
+// preparePeerConnection creates a new WebRTC PeerConnection on c and returns
+// once non-trickle ICE candidate gathering is complete.
+func (c *WebRTCPeer) preparePeerConnection(
+	config *webrtc.Configuration,
+	keepLocalAddresses bool,
+) error {
+	s := webrtc.SettingEngine{}
+
+	if !keepLocalAddresses {
+		s.SetIPFilter(func(ip net.IP) (keep bool) {
+			// `IsLoopback()` and `IsUnspecified` are likely not needed here,
+			// but let's keep them just in case.
+			// FYI there is similar code in other files in this project.
+ keep = !util.IsLocal(ip) && !ip.IsLoopback() && !ip.IsUnspecified() + return + }) + s.SetICEMulticastDNSMode(ice.MulticastDNSModeDisabled) + } + s.SetIncludeLoopbackCandidate(keepLocalAddresses) + + // Use the SetNet setting https://pkg.go.dev/github.com/pion/webrtc/v3#SettingEngine.SetNet + // to get snowflake working in shadow (where the AF_NETLINK family is not implemented). + // These two lines of code functionally revert a new change in pion by silently ignoring + // when net.Interfaces() fails, rather than throwing an error + var vnet transport.Net + vnet, _ = stdnet.NewNet() + + if c.proxy != nil { + if err := proxy.CheckProxyProtocolSupport(c.proxy); err != nil { + return err + } + socksClient := proxy.NewSocks5UDPClient(c.proxy) + vnet = proxy.NewTransportWrapper(&socksClient, vnet) + } + + s.SetNet(vnet) + api := webrtc.NewAPI(webrtc.WithSettingEngine(s)) + var err error + c.pc, err = api.NewPeerConnection(*config) + if err != nil { + log.Printf("NewPeerConnection ERROR: %s", err) + return err + } + ordered := true + dataChannelOptions := &webrtc.DataChannelInit{ + Ordered: &ordered, + } + // We must create the data channel before creating an offer + // https://github.com/pion/webrtc/wiki/Release-WebRTC@v3.0.0#a-data-channel-is-no-longer-implicitly-created-with-a-peerconnection + dc, err := c.pc.CreateDataChannel(c.id, dataChannelOptions) + if err != nil { + log.Printf("CreateDataChannel ERROR: %s", err) + return err + } + dc.OnOpen(func() { + c.eventsLogger.OnNewSnowflakeEvent(event.EventOnSnowflakeConnected{}) + log.Println("WebRTC: DataChannel.OnOpen") + close(c.open) + }) + dc.OnClose(func() { + log.Println("WebRTC: DataChannel.OnClose") + c.Close() + }) + dc.OnError(func(err error) { + c.eventsLogger.OnNewSnowflakeEvent(event.EventOnSnowflakeConnectionFailed{Error: err}) + }) + dc.OnMessage(func(msg webrtc.DataChannelMessage) { + if len(msg.Data) <= 0 { + log.Println("0 length message---") + } + n, err := c.writePipe.Write(msg.Data) + c.bytesLogger.addInbound(int64(n)) + if err != nil { + // TODO: Maybe shouldn't actually close. + log.Println("Error writing to SOCKS pipe") + if inerr := c.writePipe.CloseWithError(err); inerr != nil { + log.Printf("c.writePipe.CloseWithError returned error: %v", inerr) + } + } + c.mu.Lock() + c.lastReceive = time.Now() + c.mu.Unlock() + }) + c.transport = dc + c.open = make(chan struct{}) + log.Println("WebRTC: DataChannel created") + + offer, err := c.pc.CreateOffer(nil) + // TODO: Potentially timeout and retry if ICE isn't working. + if err != nil { + log.Println("Failed to prepare offer", err) + c.pc.Close() + return err + } + log.Println("WebRTC: Created offer") + + // Allow candidates to accumulate until ICEGatheringStateComplete. + done := webrtc.GatheringCompletePromise(c.pc) + // Start gathering candidates + err = c.pc.SetLocalDescription(offer) + if err != nil { + log.Println("Failed to apply offer", err) + c.pc.Close() + return err + } + log.Println("WebRTC: Set local description") + + <-done // Wait for ICE candidate gathering to complete. + + return nil +} + +// cleanup closes all channels and transports func (c *WebRTCPeer) cleanup() { - if nil != c.offerChannel { - close(c.offerChannel) - } - if nil != c.answerChannel { - close(c.answerChannel) - } - if nil != c.errorChannel { - close(c.errorChannel) - } // Close this side of the SOCKS pipe. - if nil != c.writePipe { + if c.writePipe != nil { // c.writePipe can be nil in tests. 
c.writePipe.Close() - c.writePipe = nil } - c.lock.Lock() if nil != c.transport { log.Printf("WebRTC: closing DataChannel") - dataChannel := c.transport - // Setting transport to nil *before* dc Close indicates to OnClose that - // this was locally triggered. - c.transport = nil - // Release the lock before calling DeleteDataChannel (which in turn - // calls Close on the dataChannel), but after nil'ing out the transport, - // since otherwise we'll end up in the onClose handler in a deadlock. - c.lock.Unlock() - if c.pc == nil { - panic("DataChannel w/o PeerConnection, not good.") - } - c.pc.DeleteDataChannel(dataChannel.(*webrtc.DataChannel)) - } else { - c.lock.Unlock() + c.transport.Close() } if nil != c.pc { log.Printf("WebRTC: closing PeerConnection") - err := c.pc.Destroy() + err := c.pc.Close() if nil != err { log.Printf("Error closing peerconnection...") } - c.pc = nil } } diff --git a/client/snowflake.go b/client/snowflake.go index 9098de7..d7cecbb 100644 --- a/client/snowflake.go +++ b/client/snowflake.go @@ -3,65 +3,161 @@ package main import ( "flag" + "fmt" "io" - "io/ioutil" "log" "net" "os" "os/signal" "path/filepath" + "strconv" "strings" + "sync" "syscall" - "time" - "git.torproject.org/pluggable-transports/goptlib.git" - sf "git.torproject.org/pluggable-transports/snowflake.git/client/lib" - "git.torproject.org/pluggable-transports/snowflake.git/common/safelog" - "github.com/keroserene/go-webrtc" + pt "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/goptlib" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil/safelog" + + sf "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/client/lib" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/event" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/proxy" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/version" ) const ( DefaultSnowflakeCapacity = 1 ) -// Maintain |SnowflakeCapacity| number of available WebRTC connections, to -// transfer to the Tor SOCKS handler when needed. -func ConnectLoop(snowflakes sf.SnowflakeCollector) { - for { - // Check if ending is necessary. - _, err := snowflakes.Collect() - if nil != err { - log.Println("WebRTC:", err, - " Retrying in", sf.ReconnectTimeout, "seconds...") - } - select { - case <-time.After(time.Second * sf.ReconnectTimeout): - continue - case <-snowflakes.Melted(): - log.Println("ConnectLoop: stopped.") - return - } - } +type ptEventLogger struct { } -// Accept local SOCKS connections and pass them to the handler. -func socksAcceptLoop(ln *pt.SocksListener, snowflakes sf.SnowflakeCollector) error { +func NewPTEventLogger() event.SnowflakeEventReceiver { + return &ptEventLogger{} +} + +func (p ptEventLogger) OnNewSnowflakeEvent(e event.SnowflakeEvent) { + pt.Log(pt.LogSeverityNotice, e.String()) +} + +// Exchanges bytes between two ReadWriters. 
+// (In this case, between a SOCKS connection and a snowflake transport conn)
+func copyLoop(socks, sfconn io.ReadWriter) {
+	done := make(chan struct{}, 2)
+	go func() {
+		if _, err := io.Copy(socks, sfconn); err != nil {
+			log.Printf("copying Snowflake to SOCKS resulted in error: %v", err)
+		}
+		done <- struct{}{}
+	}()
+	go func() {
+		if _, err := io.Copy(sfconn, socks); err != nil {
+			log.Printf("copying SOCKS to Snowflake resulted in error: %v", err)
+		}
+		done <- struct{}{}
+	}()
+	<-done
+	log.Println("copy loop ended")
+}
+
+// socksAcceptLoop accepts local SOCKS connections and connects each one to a
+// Snowflake connection.
+func socksAcceptLoop(ln *pt.SocksListener, baseConfig sf.ClientConfig,
+	shutdown chan struct{}, wg *sync.WaitGroup) {
 	defer ln.Close()
-	log.Println("Started SOCKS listener.")
 	for {
-		log.Println("SOCKS listening...")
 		conn, err := ln.AcceptSocks()
 		if err != nil {
-			if e, ok := err.(net.Error); ok && e.Temporary() {
+			if err, ok := err.(net.Error); ok && err.Temporary() {
 				continue
 			}
-			return err
-		}
-		log.Println("SOCKS accepted: ", conn.Req)
-		err = sf.Handler(conn, snowflakes)
-		if err != nil {
-			log.Printf("handler error: %s", err)
+			log.Printf("SOCKS accept error: %s", err)
+			break
 		}
+		log.Printf("SOCKS accepted: %v", conn.Req)
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			defer conn.Close()
+
+			config := baseConfig
+			// Check to see if our command line options are overridden by SOCKS options
+			if arg, ok := conn.Req.Args.Get("ampcache"); ok {
+				config.AmpCacheURL = arg
+			}
+			if arg, ok := conn.Req.Args.Get("sqsqueue"); ok {
+				config.SQSQueueURL = arg
+			}
+			if arg, ok := conn.Req.Args.Get("sqscreds"); ok {
+				config.SQSCredsStr = arg
+			}
+			if arg, ok := conn.Req.Args.Get("fronts"); ok {
+				if arg != "" {
+					config.FrontDomains = strings.Split(strings.TrimSpace(arg), ",")
+				}
+			} else if arg, ok := conn.Req.Args.Get("front"); ok {
+				config.FrontDomains = strings.Split(strings.TrimSpace(arg), ",")
+			}
+			if arg, ok := conn.Req.Args.Get("ice"); ok {
+				config.ICEAddresses = strings.Split(strings.TrimSpace(arg), ",")
+			}
+			if arg, ok := conn.Req.Args.Get("max"); ok {
+				max, err := strconv.Atoi(arg)
+				if err != nil {
+					conn.Reject()
+					log.Println("Invalid SOCKS arg: max=", arg)
+					return
+				}
+				config.Max = max
+			}
+			if arg, ok := conn.Req.Args.Get("url"); ok {
+				config.BrokerURL = arg
+			}
+			if arg, ok := conn.Req.Args.Get("utls-nosni"); ok {
+				switch strings.ToLower(arg) {
+				case "true":
+					fallthrough
+				case "yes":
+					config.UTLSRemoveSNI = true
+				}
+			}
+			if arg, ok := conn.Req.Args.Get("utls-imitate"); ok {
+				config.UTLSClientID = arg
+			}
+			if arg, ok := conn.Req.Args.Get("fingerprint"); ok {
+				config.BridgeFingerprint = arg
+			}
+			transport, err := sf.NewSnowflakeClient(config)
+			if err != nil {
+				conn.Reject()
+				log.Println("Failed to start snowflake transport: ", err)
+				return
+			}
+			transport.AddSnowflakeEventListener(NewPTEventLogger())
+			err = conn.Grant(&net.TCPAddr{IP: net.IPv4zero, Port: 0})
+			if err != nil {
+				log.Printf("conn.Grant error: %s", err)
+				return
+			}
+
+			handler := make(chan struct{})
+			go func() {
+				defer close(handler)
+				sconn, err := transport.Dial()
+				if err != nil {
+					log.Printf("dial error: %s", err)
+					return
+				}
+				defer sconn.Close()
+				// copy between the created Snowflake conn and the SOCKS conn
+				copyLoop(conn, sconn)
+			}()
+			select {
+			case <-shutdown:
+				log.Println("Received shutdown signal")
+			case <-handler:
+				log.Println("Handler ended")
+			}
+			return
+		}()
 	}
 }

@@ -69,23 +165,39 @@ func main() {
 	iceServersCommas := flag.String("ice", "", "comma-separated list of
ICE servers") brokerURL := flag.String("url", "", "URL of signaling broker") frontDomain := flag.String("front", "", "front domain") + frontDomainsCommas := flag.String("fronts", "", "comma-separated list of front domains") + ampCacheURL := flag.String("ampcache", "", "URL of AMP cache to use as a proxy for signaling") + sqsQueueURL := flag.String("sqsqueue", "", "URL of SQS Queue to use as a proxy for signaling") + sqsCredsStr := flag.String("sqscreds", "", "credentials to access SQS Queue") logFilename := flag.String("log", "", "name of log file") - logToStateDir := flag.Bool("logToStateDir", false, "resolve the log file relative to tor's pt state dir") + logToStateDir := flag.Bool("log-to-state-dir", false, "resolve the log file relative to tor's pt state dir") + keepLocalAddresses := flag.Bool("keep-local-addresses", false, "keep local LAN address ICE candidates.\nThis is usually pointless because Snowflake proxies don't usually reside on the same local network as the client.") + unsafeLogging := flag.Bool("unsafe-logging", false, "keep IP addresses and other sensitive info in the logs") max := flag.Int("max", DefaultSnowflakeCapacity, "capacity for number of multiplexed WebRTC peers") + versionFlag := flag.Bool("version", false, "display version info to stderr and quit") + + // Deprecated + oldLogToStateDir := flag.Bool("logToStateDir", false, "use -log-to-state-dir instead") + oldKeepLocalAddresses := flag.Bool("keepLocalAddresses", false, "use -keep-local-addresses instead") + flag.Parse() - webrtc.SetLoggingVerbosity(1) + if *versionFlag { + fmt.Fprintf(os.Stderr, "snowflake-client %s", version.ConstructResult()) + os.Exit(0) + } + log.SetFlags(log.LstdFlags | log.LUTC) - // Don't write to stderr; versions of tor earlier than about - // 0.3.5.6 do not read from the pipe, and eventually we will - // deadlock because the buffer is full. + // Don't write to stderr; versions of tor earlier than about 0.3.5.6 do + // not read from the pipe, and eventually we will deadlock because the + // buffer is full. // https://bugs.torproject.org/26360 // https://bugs.torproject.org/25600#comment:14 - var logOutput io.Writer = ioutil.Discard + var logOutput = io.Discard if *logFilename != "" { - if *logToStateDir { + if *logToStateDir || *oldLogToStateDir { stateDir, err := pt.MakeStateDir() if err != nil { log.Fatal(err) @@ -100,40 +212,37 @@ func main() { defer logFile.Close() logOutput = logFile } - //We want to send the log output through our scrubber first - log.SetOutput(&safelog.LogScrubber{Output: logOutput}) - - log.Println("\n\n\n --- Starting Snowflake Client ---") - - var iceServers sf.IceServerList - if len(strings.TrimSpace(*iceServersCommas)) > 0 { - option := webrtc.OptionIceServer(*iceServersCommas) - iceServers = append(iceServers, option) + if *unsafeLogging { + log.SetOutput(logOutput) + } else { + // We want to send the log output through our scrubber first + log.SetOutput(&safelog.LogScrubber{Output: logOutput}) } - // Prepare to collect remote WebRTC peers. - snowflakes := sf.NewPeers(*max) + log.Printf("snowflake-client %s\n", version.GetVersion()) - // Use potentially domain-fronting broker to rendezvous. 
-	broker := sf.NewBrokerChannel(*brokerURL, *frontDomain, sf.CreateBrokerTransport())
-	snowflakes.Tongue = sf.NewWebRTCDialer(broker, iceServers)
 
-	if nil == snowflakes.Tongue {
-		log.Fatal("Unable to prepare rendezvous method.")
-		return
+	iceAddresses := strings.Split(strings.TrimSpace(*iceServersCommas), ",")
 
+	var frontDomains []string
+	if *frontDomainsCommas != "" {
+		frontDomains = strings.Split(strings.TrimSpace(*frontDomainsCommas), ",")
 	}
 
-	// Use a real logger to periodically output how much traffic is happening.
-	snowflakes.BytesLogger = &sf.BytesSyncLogger{
-		InboundChan:  make(chan int, 5),
-		OutboundChan: make(chan int, 5),
-		Inbound:      0,
-		Outbound:     0,
-		InEvents:     0,
-		OutEvents:    0,
-	}
-	go snowflakes.BytesLogger.Log()
-	go ConnectLoop(snowflakes)
+	// Maintain backwards compatibility with the legacy command line option
+	if (len(frontDomains) == 0) && (*frontDomain != "") {
+		frontDomains = []string{*frontDomain}
+	}
+
+	config := sf.ClientConfig{
+		BrokerURL:          *brokerURL,
+		AmpCacheURL:        *ampCacheURL,
+		SQSQueueURL:        *sqsQueueURL,
+		SQSCredsStr:        *sqsCredsStr,
+		FrontDomains:       frontDomains,
+		ICEAddresses:       iceAddresses,
+		KeepLocalAddresses: *keepLocalAddresses || *oldKeepLocalAddresses,
+		Max:                *max,
+	}
 
 	// Begin goptlib client process.
 	ptInfo, err := pt.ClientSetup(nil)
@@ -141,10 +250,25 @@
 		log.Fatal(err)
 	}
 	if ptInfo.ProxyURL != nil {
-		pt.ProxyError("proxy is not supported")
-		os.Exit(1)
+		if err := proxy.CheckProxyProtocolSupport(ptInfo.ProxyURL); err != nil {
+			pt.ProxyError("proxy is not supported: " + err.Error())
+			os.Exit(1)
+		} else {
+			config.CommunicationProxy = ptInfo.ProxyURL
+			client := proxy.NewSocks5UDPClient(config.CommunicationProxy)
+			conn, err := client.ListenPacket("udp", nil)
+			if err != nil {
+				pt.ProxyError("proxy test failure: " + err.Error())
+				os.Exit(1)
+			}
+			conn.Close()
+			pt.ProxyDone()
+		}
 	}
+	pt.ReportVersion("snowflake-client", version.GetVersion())
 	listeners := make([]net.Listener, 0)
+	shutdown := make(chan struct{})
+	var wg sync.WaitGroup
 	for _, methodName := range ptInfo.MethodNames {
 		switch methodName {
 		case "snowflake":
@@ -154,7 +278,8 @@
 				pt.CmethodError(methodName, err.Error())
 				break
 			}
-			go socksAcceptLoop(ln, snowflakes)
+			log.Printf("Started SOCKS listener at %v.", ln.Addr())
+			go socksAcceptLoop(ln, config, shutdown, &wg)
 			pt.Cmethod(methodName, ln.Version(), ln.Addr())
 			listeners = append(listeners, ln)
 		default:
@@ -163,8 +288,6 @@
 	}
 	pt.CmethodsDone()
 
-	var numHandlers int = 0
-	var sig os.Signal
 	sigChan := make(chan os.Signal, 1)
 	signal.Notify(sigChan, syscall.SIGTERM)
 
@@ -172,29 +295,23 @@
 	// This environment variable means we should treat EOF on stdin
 	// just like SIGTERM: https://bugs.torproject.org/15435.
 	go func() {
-		io.Copy(ioutil.Discard, os.Stdin)
+		if _, err := io.Copy(io.Discard, os.Stdin); err != nil {
+			log.Printf("calling io.Copy(io.Discard, os.Stdin) returned error: %v", err)
+		}
 		log.Printf("synthesizing SIGTERM because of stdin close")
 		sigChan <- syscall.SIGTERM
 	}()
 	}
 
-	// keep track of handlers and wait for a signal
-	sig = nil
-	for sig == nil {
-		select {
-		case n := <-sf.HandlerChan:
-			numHandlers += n
-		case sig = <-sigChan:
-		}
-	}
+	// Wait for a signal.
+	<-sigChan
+	log.Println("stopping snowflake")
 
-	// signal received, shut down
+	// Signal received, shut down.
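+	// Close the listeners first so no new SOCKS connections are accepted,
+	// then tell in-flight handlers to stop via the shutdown channel and
+	// wait for them to finish before exiting.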
for _, ln := range listeners { ln.Close() } - snowflakes.End() - for numHandlers > 0 { - numHandlers += <-sf.HandlerChan - } + close(shutdown) + wg.Wait() log.Println("snowflake is done.") } diff --git a/client/torrc b/client/torrc index 6acf1c4..3a506a2 100644 --- a/client/torrc +++ b/client/torrc @@ -1,10 +1,9 @@ UseBridges 1 DataDirectory datadir -ClientTransportPlugin snowflake exec ./client \ --url https://snowflake-broker.azureedge.net/ \ --front ajax.aspnetcdn.com \ --ice stun:stun.l.google.com:19302 \ --max 3 +ClientTransportPlugin snowflake exec ./client -log snowflake.log -Bridge snowflake 0.0.3.0:1 +Bridge snowflake 192.0.2.3:80 2B280B23E1107BB62ABFC40DDCC8824814F80A72 fingerprint=2B280B23E1107BB62ABFC40DDCC8824814F80A72 url=https://1098762253.rsc.cdn77.org/ fronts=www.cdn77.com,www.phpmyadmin.net ice=stun:stun.antisip.com:3478,stun:stun.epygi.com:3478,stun:stun.uls.co.za:3478,stun:stun.voipgate.com:3478,stun:stun.mixvoip.com:3478,stun:stun.nextcloud.com:3478,stun:stun.bethesda.net:3478,stun:stun.nextcloud.com:443 utls-imitate=hellorandomizedalpn +Bridge snowflake 192.0.2.4:80 8838024498816A039FCBBAB14E6F40A0843051FA fingerprint=8838024498816A039FCBBAB14E6F40A0843051FA url=https://1098762253.rsc.cdn77.org/ fronts=www.cdn77.com,www.phpmyadmin.net ice=stun:stun.antisip.com:3478,stun:stun.epygi.com:3478,stun:stun.uls.co.za:3478,stun:stun.voipgate.com:3478,stun:stun.mixvoip.com:3478,stun:stun.nextcloud.com:3478,stun:stun.bethesda.net:3478,stun:stun.nextcloud.com:443 utls-imitate=hellorandomizedalpn + +SocksPort auto diff --git a/client/torrc-localhost b/client/torrc-localhost deleted file mode 100644 index 7d539fb..0000000 --- a/client/torrc-localhost +++ /dev/null @@ -1,7 +0,0 @@ -UseBridges 1 -DataDirectory datadir - -ClientTransportPlugin snowflake exec ./client \ --url http://localhost:8080/ \ - -Bridge snowflake 0.0.3.0:1 diff --git a/client/torrc.localhost b/client/torrc.localhost new file mode 100644 index 0000000..e09f94c --- /dev/null +++ b/client/torrc.localhost @@ -0,0 +1,6 @@ +UseBridges 1 +DataDirectory datadir + +ClientTransportPlugin snowflake exec ./client -keep-local-addresses + +Bridge snowflake 192.0.2.3:1 url=http://localhost:8080/ diff --git a/common/amp/armor_decoder.go b/common/amp/armor_decoder.go new file mode 100644 index 0000000..fed44a6 --- /dev/null +++ b/common/amp/armor_decoder.go @@ -0,0 +1,136 @@ +package amp + +import ( + "bufio" + "bytes" + "encoding/base64" + "fmt" + "io" + + "golang.org/x/net/html" +) + +// ErrUnknownVersion is the error returned when the first character inside the +// element encoding (but outside the base64 encoding) is not '0'. +type ErrUnknownVersion byte + +func (err ErrUnknownVersion) Error() string { + return fmt.Sprintf("unknown armor version indicator %+q", byte(err)) +} + +func isASCIIWhitespace(b byte) bool { + switch b { + // https://infra.spec.whatwg.org/#ascii-whitespace + case '\x09', '\x0a', '\x0c', '\x0d', '\x20': + return true + default: + return false + } +} + +func splitASCIIWhitespace(data []byte, atEOF bool) (advance int, token []byte, err error) { + var i, j int + // Skip initial whitespace. + for i = 0; i < len(data); i++ { + if !isASCIIWhitespace(data[i]) { + break + } + } + // Look for next whitespace. + for j = i; j < len(data); j++ { + if isASCIIWhitespace(data[j]) { + return j + 1, data[i:j], nil + } + } + // We reached the end of data without finding more whitespace. Only + // consider it a token if we are at EOF. + if atEOF && i < j { + return j, data[i:j], nil + } + // Otherwise, request more data. 
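+	// (Per the bufio.SplitFunc contract, returning a nil token with a nil
+	// error asks the Scanner for more input; advance=i still consumes the
+	// leading whitespace we skipped.)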
+ return i, nil, nil +} + +func decodeToWriter(w io.Writer, r io.Reader) (int64, error) { + tokenizer := html.NewTokenizer(r) + // Set a memory limit on token sizes, otherwise the tokenizer will + // buffer text indefinitely if it is not broken up by other token types. + tokenizer.SetMaxBuf(elementSizeLimit) + active := false + total := int64(0) + for { + tt := tokenizer.Next() + switch tt { + case html.ErrorToken: + err := tokenizer.Err() + if err == io.EOF { + err = nil + } + if err == nil && active { + return total, fmt.Errorf("missing tag") + } + return total, err + case html.TextToken: + if active { + // Re-join the separate chunks of text and + // feed them to the decoder. + scanner := bufio.NewScanner(bytes.NewReader(tokenizer.Text())) + scanner.Split(splitASCIIWhitespace) + for scanner.Scan() { + n, err := w.Write(scanner.Bytes()) + total += int64(n) + if err != nil { + return total, err + } + } + if err := scanner.Err(); err != nil { + return total, err + } + } + case html.StartTagToken: + tn, _ := tokenizer.TagName() + if string(tn) == "pre" { + if active { + // nesting not allowed + return total, fmt.Errorf("unexpected %s", tokenizer.Token()) + } + active = true + } + case html.EndTagToken: + tn, _ := tokenizer.TagName() + if string(tn) == "pre" { + if !active { + // stray end tag + return total, fmt.Errorf("unexpected %s", tokenizer.Token()) + } + active = false + } + } + } +} + +// NewArmorDecoder returns a new AMP armor decoder. +func NewArmorDecoder(r io.Reader) (io.Reader, error) { + pr, pw := io.Pipe() + go func() { + _, err := decodeToWriter(pw, r) + pw.CloseWithError(err) + }() + + // The first byte inside the element encoding is a server–client + // protocol version indicator. + var version [1]byte + _, err := pr.Read(version[:]) + if err != nil { + pr.CloseWithError(err) + return nil, err + } + switch version[0] { + case '0': + return base64.NewDecoder(base64.StdEncoding, pr), nil + default: + err := ErrUnknownVersion(version[0]) + pr.CloseWithError(err) + return nil, err + } +} diff --git a/common/amp/armor_encoder.go b/common/amp/armor_encoder.go new file mode 100644 index 0000000..7fea372 --- /dev/null +++ b/common/amp/armor_encoder.go @@ -0,0 +1,176 @@ +package amp + +import ( + "encoding/base64" + "io" +) + +// https://amp.dev/boilerplate/ +// https://amp.dev/documentation/guides-and-tutorials/learn/spec/amp-boilerplate/?format=websites +// https://amp.dev/documentation/guides-and-tutorials/learn/spec/amphtml/?format=websites#the-amp-html-format +const ( + boilerplateStart = ` + + + + + + + + + +` + boilerplateEnd = ` +` +) + +const ( + // We restrict the amount of text may go inside an HTML element, in + // order to limit the amount a decoder may have to buffer. + elementSizeLimit = 32 * 1024 + + // The payload is conceptually a long base64-encoded string, but we + // break the string into short chunks separated by whitespace. This is + // to protect against modification by AMP caches, which reportedly may + // truncate long words in text: + // https://bugs.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/25985#note_2592348 + bytesPerChunk = 32 + + // We set the number of chunks per element so as to stay under + // elementSizeLimit. Here, we assume that there is 1 byte of whitespace + // after each chunk (with an additional whitespace byte at the beginning + // of the element). 
+ chunksPerElement = (elementSizeLimit - 1) / (bytesPerChunk + 1) +) + +// The AMP armor encoder is a chain of a base64 encoder (base64.NewEncoder) and +// an HTML element encoder (elementEncoder). A top-level encoder (armorEncoder) +// coordinates these two, and handles prepending and appending the AMP +// boilerplate. armorEncoder's Write method writes data into the base64 encoder, +// where it makes its way through the chain. + +// NewArmorEncoder returns a new AMP armor encoder. Anything written to the +// returned io.WriteCloser will be encoded and written to w. The caller must +// call Close to flush any partially written data and output the AMP boilerplate +// trailer. +func NewArmorEncoder(w io.Writer) (io.WriteCloser, error) { + // Immediately write the AMP boilerplate header. + _, err := w.Write([]byte(boilerplateStart)) + if err != nil { + return nil, err + } + + element := &elementEncoder{w: w} + // Write a server–client protocol version indicator, outside the base64 + // layer. + _, err = element.Write([]byte{'0'}) + if err != nil { + return nil, err + } + + base64 := base64.NewEncoder(base64.StdEncoding, element) + return &armorEncoder{ + w: w, + element: element, + base64: base64, + }, nil +} + +type armorEncoder struct { + base64 io.WriteCloser + element *elementEncoder + w io.Writer +} + +func (enc *armorEncoder) Write(p []byte) (int, error) { + // Write into the chain base64 | element | w. + return enc.base64.Write(p) +} + +func (enc *armorEncoder) Close() error { + // Close the base64 encoder first, to flush out any buffered data and + // the final padding. + err := enc.base64.Close() + if err != nil { + return err + } + + // Next, close the element encoder, to close any open elements. + err = enc.element.Close() + if err != nil { + return err + } + + // Finally, output the AMP boilerplate trailer. + _, err = enc.w.Write([]byte(boilerplateEnd)) + if err != nil { + return err + } + + return nil +} + +// elementEncoder arranges written data into pre elements, with the text within +// separated into chunks. It does no HTML encoding, so data written must not +// contain any bytes that are meaningful in HTML. +type elementEncoder struct { + w io.Writer + chunkCounter int + elementCounter int +} + +func (enc *elementEncoder) Write(p []byte) (n int, err error) { + total := 0 + for len(p) > 0 { + if enc.elementCounter == 0 && enc.chunkCounter == 0 { + _, err := enc.w.Write([]byte("
\n"))
+			if err != nil {
+				return total, err
+			}
+		}
+
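+		// Fill the current chunk with as many bytes of p as fit.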
+		n := bytesPerChunk - enc.chunkCounter
+		if n > len(p) {
+			n = len(p)
+		}
+		nn, err := enc.w.Write(p[:n])
+		if err != nil {
+			return total, err
+		}
+		total += nn
+		p = p[n:]
+
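+		// Account for what was just written; at each chunk boundary,
+		// emit a newline so the base64 text stays broken up by whitespace.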
+		enc.chunkCounter += n
+		if enc.chunkCounter >= bytesPerChunk {
+			enc.chunkCounter = 0
+			enc.elementCounter += 1
+			nn, err := enc.w.Write([]byte("\n"))
+			if err != nil {
+				return total, err
+			}
+			total += nn
+		}
+
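+		// Once an element holds chunksPerElement chunks, close it; the
+		// next loop iteration opens a fresh <pre>.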
+		if enc.elementCounter >= chunksPerElement {
+			enc.elementCounter = 0
+			nn, err := enc.w.Write([]byte("</pre>\n"))
+			if err != nil {
+				return total, err
+			}
+			total += nn
+		}
+	}
+	return total, nil
+}
+
+func (enc *elementEncoder) Close() error {
+	var err error
+	if !(enc.elementCounter == 0 && enc.chunkCounter == 0) {
+		if enc.chunkCounter == 0 {
+			_, err = enc.w.Write([]byte("</pre>\n"))
+		} else {
+			_, err = enc.w.Write([]byte("\n</pre>\n"))
+		}
+	}
+	return err
+}
diff --git a/common/amp/armor_test.go b/common/amp/armor_test.go
new file mode 100644
index 0000000..c65c5f1
--- /dev/null
+++ b/common/amp/armor_test.go
@@ -0,0 +1,226 @@
+package amp
+
+import (
+	"io"
+	"math/rand"
+	"strings"
+	"testing"
+)
+
+func armorDecodeToString(src string) (string, error) {
+	dec, err := NewArmorDecoder(strings.NewReader(src))
+	if err != nil {
+		return "", err
+	}
+	p, err := io.ReadAll(dec)
+	return string(p), err
+}
+
+func TestArmorDecoder(t *testing.T) {
+	for _, test := range []struct {
+		input          string
+		expectedOutput string
+		expectedErr    bool
+	}{
+		{`
+<pre>
+0
+</pre>
+`,
+			"",
+			false,
+		},
+		{`
+<pre>
+0aGVsbG8gd29ybGQK
+</pre>
+`,
+			"hello world\n",
+			false,
+		},
+		// bad version indicator
+		{`
+<pre>
+1aGVsbG8gd29ybGQK
+</pre>
+`,
+			"",
+			true,
+		},
+		// text outside <pre> elements
+		{`
+0aGVsbG8gd29ybGQK
+blah blah blah
+<pre>
+0aGVsbG8gd29ybGQK
+</pre>
+0aGVsbG8gd29ybGQK
+blah blah blah
+`,
+			"hello world\n",
+			false,
+		},
+		{`
+<pre>
+0QUJDREV
+GR0hJSkt
+MTU5PUFF
+SU1RVVld
+</pre>
+junk
+<pre>
+YWVowMTI
+zNDU2Nzg
+5Cg
+=
+</pre>
+<pre>
+=
+</pre>
+`,
+			"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\n",
+			false,
+		},
+		// no <pre> elements, hence no version indicator
+		{`
+aGVsbG8gd29ybGQK
+blah blah blah
+aGVsbG8gd29ybGQK
+aGVsbG8gd29ybGQK
+blah blah blah
+`,
+			"",
+			true,
+		},
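+		// (With no <pre> elements at all, the decoder hits EOF before it
+		// can read a version byte, which is an error.)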
+		// empty <pre> elements, hence no version indicator
+		{`
+aGVsbG8gd29ybGQK
+blah blah blah
+<pre>   </pre>
+aGVsbG8gd29ybGQK
+aGVsbG8gd29ybGQK
+<pre></pre>
+blah blah blah
+`,
+			"",
+			true,
+		},
+		// other elements inside <pre>
+		{
+			"blah <pre>0aGVsb<p></p>G8gd29<p></p>ybGQK</pre>",
+			"hello world\n",
+			false,
+		},
+		// HTML comment
+		{
+			"blah <pre><!-- 0aGVsbG8gd29ybGQK --></pre>",
+			"",
+			true,
+		},
+		// all kinds of ASCII whitespace
+		{
+			"blah <pre>\x200\x09aG\x0aV\x0csb\x0dG8\x20gd29ybGQK</pre>",
+			"hello world\n",
+			false,
+		},
+
+		// bad padding
+		{`
+<pre>
+0QUJDREV
+GR0hJSkt
+MTU5PUFF
+SU1RVVld
+</pre>
+junk
+<pre>
+YWVowMTI
+zNDU2Nzg
+5Cg
+=
+</pre>
+`,
+			"",
+			true,
+		},
+		/*
+			// per-chunk base64
+			// test disabled because Go stdlib handles this incorrectly:
+			// https://github.com/golang/go/issues/31626
+			{
+				"<pre>QQ==</pre><pre>Qg==</pre>",
+				"",
+				true,
+			},
+		*/
+		// missing </pre>
+		{
+			"blah <pre>0aGVsbG8gd29ybGQK",
+			"",
+			true,
+		},
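+		// (EOF while a <pre> element is still open is an error.)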
+		// nested <pre>
+		{
+			"blah <pre>0aGVsb<pre>G8gd29</pre>ybGQK</pre>
", + "", + true, + }, + } { + output, err := armorDecodeToString(test.input) + if test.expectedErr && err == nil { + t.Errorf("%+q → (%+q, %v), expected error", test.input, output, err) + continue + } + if !test.expectedErr && err != nil { + t.Errorf("%+q → (%+q, %v), expected no error", test.input, output, err) + continue + } + if !test.expectedErr && output != test.expectedOutput { + t.Errorf("%+q → (%+q, %v), expected (%+q, %v)", + test.input, output, err, test.expectedOutput, nil) + continue + } + } +} + +func armorRoundTrip(s string) (string, error) { + var encoded strings.Builder + enc, err := NewArmorEncoder(&encoded) + if err != nil { + return "", err + } + _, err = io.Copy(enc, strings.NewReader(s)) + if err != nil { + return "", err + } + err = enc.Close() + if err != nil { + return "", err + } + return armorDecodeToString(encoded.String()) +} + +func TestArmorRoundTrip(t *testing.T) { + lengths := make([]int, 0) + // Test short strings and lengths around elementSizeLimit thresholds. + for i := 0; i < bytesPerChunk*2; i++ { + lengths = append(lengths, i) + } + for i := -10; i < +10; i++ { + lengths = append(lengths, elementSizeLimit+i) + lengths = append(lengths, 2*elementSizeLimit+i) + } + for _, n := range lengths { + buf := make([]byte, n) + rand.Read(buf) + input := string(buf) + output, err := armorRoundTrip(input) + if err != nil { + t.Errorf("length %d → error %v", n, err) + continue + } + if output != input { + t.Errorf("length %d → %+q", n, output) + continue + } + } +} diff --git a/common/amp/cache.go b/common/amp/cache.go new file mode 100644 index 0000000..102993f --- /dev/null +++ b/common/amp/cache.go @@ -0,0 +1,178 @@ +package amp + +import ( + "crypto/sha256" + "encoding/base32" + "fmt" + "net" + "net/url" + "path" + "strings" + + "golang.org/x/net/idna" +) + +// domainPrefixBasic does the basic domain prefix conversion. Does not do any +// IDNA mapping, such as https://www.unicode.org/reports/tr46/. +// +// https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/#basic-algorithm +func domainPrefixBasic(domain string) (string, error) { + // 1. Punycode Decode the publisher domain. + prefix, err := idna.ToUnicode(domain) + if err != nil { + return "", err + } + + // 2. Replace any "-" (hyphen) character in the output of step 1 with + // "--" (two hyphens). + prefix = strings.Replace(prefix, "-", "--", -1) + + // 3. Replace any "." (dot) character in the output of step 2 with "-" + // (hyphen). + prefix = strings.Replace(prefix, ".", "-", -1) + + // 4. If the output of step 3 has a "-" (hyphen) at both positions 3 and + // 4, then to the output of step 3, add a prefix of "0-" and add a + // suffix of "-0". + if len(prefix) >= 4 && prefix[2] == '-' && prefix[3] == '-' { + prefix = "0-" + prefix + "-0" + } + + // 5. Punycode Encode the output of step 3. + return idna.ToASCII(prefix) +} + +// Lower-case base32 without padding. +var fallbackBase32Encoding = base32.NewEncoding("abcdefghijklmnopqrstuvwxyz234567").WithPadding(base32.NoPadding) + +// domainPrefixFallback does the fallback domain prefix conversion. The returned +// base32 domain uses lower-case letters. +// +// https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/#fallback-algorithm +func domainPrefixFallback(domain string) string { + // The algorithm specification does not say what, exactly, we are to + // take the SHA-256 of. domain is notionally an abstract Unicode + // string, not a byte sequence. 
While + // https://github.com/ampproject/amp-toolbox/blob/84cb3057e5f6c54d64369ddd285db1cb36237ee8/packages/cache-url/lib/AmpCurlUrlGenerator.js#L62 + // says "Take the SHA256 of the punycode view of the domain," in reality + // it hashes the UTF-8 encoding of the domain, without Punycode: + // https://github.com/ampproject/amp-toolbox/blob/84cb3057e5f6c54d64369ddd285db1cb36237ee8/packages/cache-url/lib/AmpCurlUrlGenerator.js#L141 + // https://github.com/ampproject/amp-toolbox/blob/84cb3057e5f6c54d64369ddd285db1cb36237ee8/packages/cache-url/lib/browser/Sha256.js#L24 + // We do the same here, hashing the raw bytes of domain, presumed to be + // UTF-8. + + // 1. Hash the publisher's domain using SHA256. + h := sha256.Sum256([]byte(domain)) + + // 2. Base32 Escape the output of step 1. + // 3. Remove the last 4 characters from the output of step 2, which are + // always "=" (equals) characters. + return fallbackBase32Encoding.EncodeToString(h[:]) +} + +// domainPrefix computes the domain prefix of an AMP cache URL. +// +// https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/#domain-name-prefix +func domainPrefix(domain string) string { + // https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/#combined-algorithm + // 1. Run the Basic Algorithm. If the output is a valid DNS label, + // [append the Cache domain suffix and] return. Otherwise continue to + // step 2. + prefix, err := domainPrefixBasic(domain) + // "A domain prefix is not a valid DNS label if it is longer than 63 + // characters" + if err == nil && len(prefix) <= 63 { + return prefix + } + // 2. Run the Fallback Algorithm. [Append the Cache domain suffix and] + // return. + return domainPrefixFallback(domain) +} + +// CacheURL computes the AMP cache URL for the publisher URL pubURL, using the +// AMP cache at cacheURL. contentType is a string such as "c" or "i" that +// indicates what type of serving the AMP cache is to perform. The Scheme of +// pubURL must be "http" or "https". The Port of pubURL, if any, must match the +// default for the scheme. cacheURL may not have RawQuery, Fragment, or +// RawFragment set, because the resulting URL's query and fragment are taken +// from the publisher URL. +// +// https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/ +func CacheURL(pubURL, cacheURL *url.URL, contentType string) (*url.URL, error) { + // The cache URL subdomain, including the domain prefix corresponding to + // the publisher URL's domain. + resultHost := domainPrefix(pubURL.Hostname()) + "." + cacheURL.Hostname() + if cacheURL.Port() != "" { + resultHost = net.JoinHostPort(resultHost, cacheURL.Port()) + } + + // https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/#url-path + // The first part of the path is the cache URL's own path, if any. + pathComponents := []string{cacheURL.EscapedPath()} + // The next path component is the content type. We cannot encode an + // empty content type, because it would result in consecutive path + // separators, which would semantically combine into a single separator. + if contentType == "" { + return nil, fmt.Errorf("invalid content type %+q", contentType) + } + pathComponents = append(pathComponents, url.PathEscape(contentType)) + // Then, we add an "s" path component, if the publisher URL scheme is + // "https". + switch pubURL.Scheme { + case "http": + // Do nothing. 
+ case "https": + pathComponents = append(pathComponents, "s") + default: + return nil, fmt.Errorf("invalid scheme %+q in publisher URL", pubURL.Scheme) + } + // The next path component is the publisher URL's host. The AMP cache + // URL format specification is not clear about whether other + // subcomponents of the authority (namely userinfo and port) may appear + // here. We adopt a policy of forbidding userinfo, and requiring that + // the port be the default for the scheme (and then we omit the port + // entirely from the returned URL). + if pubURL.User != nil { + return nil, fmt.Errorf("publisher URL may not contain userinfo") + } + if port := pubURL.Port(); port != "" { + if !((pubURL.Scheme == "http" && port == "80") || (pubURL.Scheme == "https" && port == "443")) { + return nil, fmt.Errorf("publisher URL port %+q is not the default for scheme %+q", port, pubURL.Scheme) + } + } + // As with the content type, we cannot encode an empty host, because + // that would result in an empty path component. + if pubURL.Hostname() == "" { + return nil, fmt.Errorf("invalid host %+q in publisher URL", pubURL.Hostname()) + } + pathComponents = append(pathComponents, url.PathEscape(pubURL.Hostname())) + // Finally, we append the remainder of the original escaped path from + // the publisher URL. + pathComponents = append(pathComponents, pubURL.EscapedPath()) + + resultRawPath := path.Join(pathComponents...) + resultPath, err := url.PathUnescape(resultRawPath) + if err != nil { + return nil, err + } + + // The query and fragment of the returned URL always come from pubURL. + // Any query or fragment of cacheURL would be ignored. Return an error + // if either is set. + if cacheURL.RawQuery != "" { + return nil, fmt.Errorf("cache URL may not contain a query") + } + if cacheURL.Fragment != "" { + return nil, fmt.Errorf("cache URL may not contain a fragment") + } + + return &url.URL{ + Scheme: cacheURL.Scheme, + User: cacheURL.User, + Host: resultHost, + Path: resultPath, + RawPath: resultRawPath, + RawQuery: pubURL.RawQuery, + Fragment: pubURL.Fragment, + }, nil +} diff --git a/common/amp/cache_test.go b/common/amp/cache_test.go new file mode 100644 index 0000000..45950fd --- /dev/null +++ b/common/amp/cache_test.go @@ -0,0 +1,320 @@ +package amp + +import ( + "bytes" + "net/url" + "testing" + + "golang.org/x/net/idna" +) + +func TestDomainPrefixBasic(t *testing.T) { + // Tests expecting no error. + for _, test := range []struct { + domain, expected string + }{ + {"", ""}, + {"xn--", ""}, + {"...", "---"}, + + // Should not apply mappings such as case folding and + // normalization. + {"b\u00fccher.de", "xn--bcher-de-65a"}, + {"B\u00fccher.de", "xn--Bcher-de-65a"}, + {"bu\u0308cher.de", "xn--bucher-de-hkf"}, + + // Check some that differ between IDNA 2003 and IDNA 2008. + // https://unicode.org/reports/tr46/#Deviations + // https://util.unicode.org/UnicodeJsps/idna.jsp + {"faß.de", "xn--fa-de-mqa"}, + {"βόλοσ.com", "xn---com-4ld8c2a6a8e"}, + + // Lengths of 63 and 64. 64 is too long for a DNS label, but + // domainPrefixBasic is not expected to check for that. 
+ {"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, + {"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, + + // https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/#basic-algorithm + {"example.com", "example-com"}, + {"foo.example.com", "foo-example-com"}, + {"foo-example.com", "foo--example-com"}, + {"xn--57hw060o.com", "xn---com-p33b41770a"}, + {"\u26a1\U0001f60a.com", "xn---com-p33b41770a"}, + {"en-us.example.com", "0-en--us-example-com-0"}, + } { + output, err := domainPrefixBasic(test.domain) + if err != nil || output != test.expected { + t.Errorf("%+q → (%+q, %v), expected (%+q, %v)", + test.domain, output, err, test.expected, nil) + } + } + + // Tests expecting an error. + for _, domain := range []string{ + "xn---", + } { + output, err := domainPrefixBasic(domain) + if err == nil || output != "" { + t.Errorf("%+q → (%+q, %v), expected (%+q, non-nil)", + domain, output, err, "") + } + } +} + +func TestDomainPrefixFallback(t *testing.T) { + for _, test := range []struct { + domain, expected string + }{ + { + "", + "4oymiquy7qobjgx36tejs35zeqt24qpemsnzgtfeswmrw6csxbkq", + }, + { + "example.com", + "un42n5xov642kxrxrqiyanhcoupgql5lt4wtbkyt2ijflbwodfdq", + }, + + // These checked against the output of + // https://github.com/ampproject/amp-toolbox/tree/84cb3057e5f6c54d64369ddd285db1cb36237ee8/packages/cache-url, + // using the widget at + // https://amp.dev/documentation/guides-and-tutorials/learn/amp-caches-and-cors/amp-cache-urls/#url-format. + { + "000000000000000000000000000000000000000000000000000000000000.com", + "stejanx4hsijaoj4secyecy4nvqodk56kw72whwcmvdbtucibf5a", + }, + { + "00000000000000000000000000000000000000000000000000000000000a.com", + "jdcvbsorpnc3hcjrhst56nfm6ymdpovlawdbm2efyxpvlt4cpbya", + }, + { + "00000000000000000000000000000000000000000000000000000000000\u03bb.com", + "qhzqeumjkfpcpuic3vqruyjswcr7y7gcm3crqyhhywvn3xrhchfa", + }, + } { + output := domainPrefixFallback(test.domain) + if output != test.expected { + t.Errorf("%+q → %+q, expected %+q", + test.domain, output, test.expected) + } + } +} + +// Checks that domainPrefix chooses domainPrefixBasic or domainPrefixFallback as +// appropriate; i.e., always returns string that is a valid DNS label and is +// IDNA-decodable. +func TestDomainPrefix(t *testing.T) { + // A validating IDNA profile, which checks label length and that the + // label contains only certain ASCII characters. It does not do the + // ValidateLabels check, because that depends on the input having + // certain properties. + profile := idna.New( + idna.VerifyDNSLength(true), + idna.StrictDomainName(true), + ) + for _, domain := range []string{ + "example.com", + "\u0314example.com", + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // 63 bytes + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // 64 bytes + "xn--57hw060o.com", + "a b c", + } { + output := domainPrefix(domain) + if bytes.IndexByte([]byte(output), '.') != -1 { + t.Errorf("%+q → %+q contains a dot", domain, output) + } + _, err := profile.ToUnicode(output) + if err != nil { + t.Errorf("%+q → error %v", domain, err) + } + } +} + +func mustParseURL(rawurl string) *url.URL { + u, err := url.Parse(rawurl) + if err != nil { + panic(err) + } + return u +} + +func TestCacheURL(t *testing.T) { + // Tests expecting no error. 
+ for _, test := range []struct { + pub string + cache string + contentType string + expected string + }{ + // With or without trailing slash on pubURL. + { + "http://example.com/", + "https://amp.cache/", + "c", + "https://example-com.amp.cache/c/example.com", + }, + { + "http://example.com", + "https://amp.cache/", + "c", + "https://example-com.amp.cache/c/example.com", + }, + // https pubURL. + { + "https://example.com/", + "https://amp.cache/", + "c", + "https://example-com.amp.cache/c/s/example.com", + }, + // The content type should be escaped if necessary. + { + "http://example.com/", + "https://amp.cache/", + "/", + "https://example-com.amp.cache/%2F/example.com", + }, + // Retain pubURL path, query, and fragment, including escaping. + { + "http://example.com/my%2Fpath/index.html?a=1#fragment", + "https://amp.cache/", + "c", + "https://example-com.amp.cache/c/example.com/my%2Fpath/index.html?a=1#fragment", + }, + // Retain scheme, userinfo, port, and path of cacheURL, escaping + // whatever is necessary. + { + "http://example.com", + "http://cache%2Fuser:cache%40pass@amp.cache:123/with/../../path/..%2f../", + "c", + "http://cache%2Fuser:cache%40pass@example-com.amp.cache:123/path/..%2f../c/example.com", + }, + // Port numbers in pubURL are allowed, if they're the default + // for scheme. + { + "http://example.com:80/", + "https://amp.cache/", + "c", + "https://example-com.amp.cache/c/example.com", + }, + { + "https://example.com:443/", + "https://amp.cache/", + "c", + "https://example-com.amp.cache/c/s/example.com", + }, + // "?" at the end of cacheURL is okay, as long as the query is + // empty. + { + "http://example.com/", + "https://amp.cache/?", + "c", + "https://example-com.amp.cache/c/example.com", + }, + + // https://developers.google.com/amp/cache/overview#example-requesting-document-using-tls + { + "https://example.com/amp_document.html", + "https://cdn.ampproject.org/", + "c", + "https://example-com.cdn.ampproject.org/c/s/example.com/amp_document.html", + }, + // https://developers.google.com/amp/cache/overview#example-requesting-image-using-plain-http + { + "http://example.com/logo.png", + "https://cdn.ampproject.org/", + "i", + "https://example-com.cdn.ampproject.org/i/example.com/logo.png", + }, + // https://developers.google.com/amp/cache/overview#query-parameter-example + { + "https://example.com/g?value=Hello%20World", + "https://cdn.ampproject.org/", + "c", + "https://example-com.cdn.ampproject.org/c/s/example.com/g?value=Hello%20World", + }, + } { + pubURL := mustParseURL(test.pub) + cacheURL := mustParseURL(test.cache) + outputURL, err := CacheURL(pubURL, cacheURL, test.contentType) + if err != nil { + t.Errorf("%+q %+q %+q → error %v", + test.pub, test.cache, test.contentType, err) + continue + } + if outputURL.String() != test.expected { + t.Errorf("%+q %+q %+q → %+q, expected %+q", + test.pub, test.cache, test.contentType, outputURL, test.expected) + continue + } + } + + // Tests expecting an error. + for _, test := range []struct { + pub string + cache string + contentType string + }{ + // Empty content type. + { + "http://example.com/", + "https://amp.cache/", + "", + }, + // Empty host. + { + "http:///index.html", + "https://amp.cache/", + "c", + }, + // Empty scheme. + { + "//example.com/", + "https://amp.cache/", + "c", + }, + // Unrecognized scheme. + { + "ftp://example.com/", + "https://amp.cache/", + "c", + }, + // Wrong port number for scheme. + { + "http://example.com:443/", + "https://amp.cache/", + "c", + }, + // userinfo in pubURL. 
+ { + "http://user@example.com/", + "https://amp.cache/", + "c", + }, + { + "http://user:pass@example.com/", + "https://amp.cache/", + "c", + }, + // cacheURL may not contain a query. + { + "http://example.com/", + "https://amp.cache/?a=1", + "c", + }, + // cacheURL may not contain a fragment. + { + "http://example.com/", + "https://amp.cache/#fragment", + "c", + }, + } { + pubURL := mustParseURL(test.pub) + cacheURL := mustParseURL(test.cache) + outputURL, err := CacheURL(pubURL, cacheURL, test.contentType) + if err == nil { + t.Errorf("%+q %+q %+q → %+q, expected error", + test.pub, test.cache, test.contentType, outputURL) + continue + } + } +} diff --git a/common/amp/doc.go b/common/amp/doc.go new file mode 100644 index 0000000..82e641b --- /dev/null +++ b/common/amp/doc.go @@ -0,0 +1,91 @@ +/* +Package amp provides functions for working with the AMP (Accelerated Mobile +Pages) subset of HTML, and conveying binary data through an AMP cache. + +# AMP cache + +The CacheURL function takes a plain URL and converts it to be accessed through a +given AMP cache. + +The EncodePath and DecodePath functions provide a way to encode data into the +suffix of a URL path. AMP caches do not support HTTP POST, but encoding data +into a URL path with GET is an alternative means of sending data to the server. +The format of an encoded path is: + + 0<0 or more bytes, including slash>/ + +That is: +* "0", a format version number, which controls the interpretation of the rest of +the path. Only the first byte matters as a version indicator (not the whole +first path component). +* Any number of slash or non-slash bytes. These may be used as padding or to +prevent cache collisions in the AMP cache. +* A final slash. +* base64 encoding of the data, using the URL-safe alphabet (which does not +include slash). + +For example, an encoding of the string "This is path-encoded data." is the +following. The "lgWHcwhXFjUm" following the format version number is random +padding that will be ignored on decoding. + + 0lgWHcwhXFjUm/VGhpcyBpcyBwYXRoLWVuY29kZWQgZGF0YS4 + +It is the caller's responsibility to add or remove any directory path prefix +before calling EncodePath or DecodePath. + +# AMP armor + +AMP armor is a data encoding scheme that that satisfies the requirements of the +AMP (Accelerated Mobile Pages) subset of HTML, and survives modification by an +AMP cache. For the requirements of AMP HTML, see +https://amp.dev/documentation/guides-and-tutorials/learn/spec/amphtml/. +For modifications that may be made by an AMP cache, see +https://github.com/ampproject/amphtml/blob/main/docs/spec/amp-cache-modifications.md. + +The encoding is based on ones created by Ivan Markin. See codec/amp/ in +https://github.com/nogoegst/amper and discussion at +https://bugs.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/25985. + +The encoding algorithm works as follows. Base64-encode the input. Prepend the +input with the byte '0'; this is a protocol version indicator that the decoder +can use to determine how to interpret the bytes that follow. Split the base64 +into fixed-size chunks separated by whitespace. Take up to 1024 chunks at a +time, and wrap them in a pre element. Then, situate the markup so far within the +body of the AMP HTML boilerplate. The decoding algorithm is to scan the HTML for +pre elements, split their text contents on whitespace and concatenate, then +base64 decode. The base64 encoding uses the standard alphabet, with normal "=" +padding (https://tools.ietf.org/html/rfc4648#section-4). 
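+
+As a small illustration (not normative), armoring the 3-byte string "abc"
+produces, between the boilerplate header and trailer, one short element:
+
+	<pre>
+	0YWJj
+	</pre>
+
+"YWJj" is the base64 encoding of "abc", and the leading "0" is the version
+indicator, which sits outside the base64 layer.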
+ +The reason for splitting the base64 into chunks is that AMP caches reportedly +truncate long strings that are not broken by whitespace: +https://bugs.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/25985#note_2592348. +The characters that may separate the chunks are the ASCII whitespace characters +(https://infra.spec.whatwg.org/#ascii-whitespace) "\x09", "\x0a", "\x0c", +"\x0d", and "\x20". The reason for separating the chunks into pre elements is to +limit the amount of text a decoder may have to buffer while parsing the HTML. +Each pre element may contain at most 64 KB of text. pre elements may not be +nested. + +# Example + +The following is the result of encoding the string +"This was encoded with AMP armor.": + + + + + + + + + + + +
+	0VGhpcyB3YXMgZW5jb2RlZCB3aXRoIEF
+	NUCBhcm1vci4=
+	</pre>
+ + +*/ +package amp diff --git a/common/amp/path.go b/common/amp/path.go new file mode 100644 index 0000000..5903694 --- /dev/null +++ b/common/amp/path.go @@ -0,0 +1,44 @@ +package amp + +import ( + "crypto/rand" + "encoding/base64" + "fmt" + "strings" +) + +// EncodePath encodes data in a way that is suitable for the suffix of an AMP +// cache URL. +func EncodePath(data []byte) string { + var cacheBreaker [9]byte + _, err := rand.Read(cacheBreaker[:]) + if err != nil { + panic(err) + } + b64 := base64.RawURLEncoding.EncodeToString + return "0" + b64(cacheBreaker[:]) + "/" + b64(data) +} + +// DecodePath decodes data from a path suffix as encoded by EncodePath. The path +// must have already been trimmed of any directory prefix (as might be present +// in, e.g., an HTTP request). That is, the first character of path should be +// the "0" message format indicator. +func DecodePath(path string) ([]byte, error) { + if len(path) < 1 { + return nil, fmt.Errorf("missing format indicator") + } + version := path[0] + rest := path[1:] + switch version { + case '0': + // Ignore everything else up to and including the final slash + // (there must be at least one slash). + i := strings.LastIndexByte(rest, '/') + if i == -1 { + return nil, fmt.Errorf("missing data") + } + return base64.RawURLEncoding.DecodeString(rest[i+1:]) + default: + return nil, fmt.Errorf("unknown format indicator %q", version) + } +} diff --git a/common/amp/path_test.go b/common/amp/path_test.go new file mode 100644 index 0000000..20e4ccf --- /dev/null +++ b/common/amp/path_test.go @@ -0,0 +1,54 @@ +package amp + +import ( + "testing" +) + +func TestDecodePath(t *testing.T) { + for _, test := range []struct { + path string + expectedData string + expectedErrStr string + }{ + {"", "", "missing format indicator"}, + {"0", "", "missing data"}, + {"0foobar", "", "missing data"}, + {"/0/YWJj", "", "unknown format indicator '/'"}, + + {"0/", "", ""}, + {"0foobar/", "", ""}, + {"0/YWJj", "abc", ""}, + {"0///YWJj", "abc", ""}, + {"0foobar/YWJj", "abc", ""}, + {"0/foobar/YWJj", "abc", ""}, + } { + data, err := DecodePath(test.path) + if test.expectedErrStr != "" { + if err == nil || err.Error() != test.expectedErrStr { + t.Errorf("%+q expected error %+q, got %+q", + test.path, test.expectedErrStr, err) + } + } else if err != nil { + t.Errorf("%+q expected no error, got %+q", test.path, err) + } else if string(data) != test.expectedData { + t.Errorf("%+q expected data %+q, got %+q", + test.path, test.expectedData, data) + } + } +} + +func TestPathRoundTrip(t *testing.T) { + for _, data := range []string{ + "", + "\x00", + "/", + "hello world", + } { + decoded, err := DecodePath(EncodePath([]byte(data))) + if err != nil { + t.Errorf("%+q roundtripped with error %v", data, err) + } else if string(decoded) != data { + t.Errorf("%+q roundtripped to %+q", data, decoded) + } + } +} diff --git a/common/bridgefingerprint/fingerprint.go b/common/bridgefingerprint/fingerprint.go new file mode 100644 index 0000000..1a89773 --- /dev/null +++ b/common/bridgefingerprint/fingerprint.go @@ -0,0 +1,30 @@ +package bridgefingerprint + +import ( + "encoding/hex" + "errors" +) + +type Fingerprint string + +var ErrBridgeFingerprintInvalid = errors.New("bridge fingerprint invalid") + +func FingerprintFromBytes(bytes []byte) (Fingerprint, error) { + n := len(bytes) + if n != 20 && n != 32 { + return Fingerprint(""), ErrBridgeFingerprintInvalid + } + return Fingerprint(bytes), nil +} + +func FingerprintFromHexString(hexString string) (Fingerprint, error) { + 
decoded, err := hex.DecodeString(hexString) + if err != nil { + return "", err + } + return FingerprintFromBytes(decoded) +} + +func (f Fingerprint) ToBytes() []byte { + return []byte(f) +} diff --git a/common/certs/certs.go b/common/certs/certs.go new file mode 100644 index 0000000..341f415 --- /dev/null +++ b/common/certs/certs.go @@ -0,0 +1,54 @@ +package certs + +import ( + "crypto/x509" + "log" +) + +// https://crt.sh/?id=9314791 +const LetsEncryptRootCert = `-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh +cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 +WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu +ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc +h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ +0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U +A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW +T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH +B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC +B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv +KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn +OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn +jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw +qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI +rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq +hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL +ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ +3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK +NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 +ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur +TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC +jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc +oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq +4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA +mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d +emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= +-----END CERTIFICATE-----` + +// GetRootCAs is a workaround for older versions of Android that do not trust +// Let's Encrypt's ISRG Root X1. This manually adds the ISRG root to the device's +// existing cert pool. +func GetRootCAs() *x509.CertPool { + rootCerts, err := x509.SystemCertPool() + if err != nil { + rootCerts = x509.NewCertPool() + } + if ok := rootCerts.AppendCertsFromPEM([]byte(LetsEncryptRootCert)); !ok { + log.Println("Error appending Let's Encrypt root certificate to cert poool") + return nil + } + return rootCerts +} diff --git a/common/constants/constants.go b/common/constants/constants.go new file mode 100644 index 0000000..ded1659 --- /dev/null +++ b/common/constants/constants.go @@ -0,0 +1,10 @@ +package constants + +const ( + // If the broker does not receive the proxy answer within this many seconds + // after the broker received the client offer, + // the broker will respond with an error to the client. 
+ // + // this is calibrated to match the timeout of the CDNs we use for rendezvous + BrokerClientTimeout = 5 +) diff --git a/common/encapsulation/encapsulation.go b/common/encapsulation/encapsulation.go new file mode 100644 index 0000000..058f2fd --- /dev/null +++ b/common/encapsulation/encapsulation.go @@ -0,0 +1,209 @@ +// Package encapsulation implements a way of encoding variable-size chunks of +// data and padding into a byte stream. +// +// Each chunk of data or padding starts with a variable-size length prefix. One +// bit ("d") in the first byte of the prefix indicates whether the chunk +// represents data or padding (1=data, 0=padding). Another bit ("c" for +// "continuation") is the indicates whether there are more bytes in the length +// prefix. The remaining 6 bits ("x") encode part of the length value. +// +// dcxxxxxx +// +// If the continuation bit is set, then the next byte is also part of the length +// prefix. It lacks the "d" bit, has its own "c" bit, and 7 value-carrying bits +// ("y"). +// +// cyyyyyyy +// +// The length is decoded by concatenating value-carrying bits, from left to +// right, of all value-carrying bits, up to and including the first byte whose +// "c" bit is 0. Although in principle this encoding would allow for length +// prefixes of any size, length prefixes are arbitrarily limited to 3 bytes and +// any attempt to read or write a longer one is an error. These are therefore +// the only valid formats: +// +// 00xxxxxx xxxxxx₂ bytes of padding +// 10xxxxxx xxxxxx₂ bytes of data +// 01xxxxxx 0yyyyyyy xxxxxxyyyyyyy₂ bytes of padding +// 11xxxxxx 0yyyyyyy xxxxxxyyyyyyy₂ bytes of data +// 01xxxxxx 1yyyyyyy 0zzzzzzz xxxxxxyyyyyyyzzzzzzz₂ bytes of padding +// 11xxxxxx 1yyyyyyy 0zzzzzzz xxxxxxyyyyyyyzzzzzzz₂ bytes of data +// +// The maximum encodable length is 11111111111111111111₂ = 0xfffff = 1048575. +// There is no requirement to use a length prefix of minimum size; i.e. 00000100 +// and 01000000 00000100 are both valid encodings of the value 4. +// +// After the length prefix follow that many bytes of padding or data. There are +// no restrictions on the value of bytes comprising padding. +// +// The idea for this encapsulation is sketched here: +// https://github.com/net4people/bbs/issues/9#issuecomment-524095186 +package encapsulation + +import ( + "errors" + "io" +) + +// ErrTooLong is the error returned when an encoded length prefix is longer than +// 3 bytes, or when ReadData receives an input whose length is too large to +// encode in a 3-byte length prefix. +var ErrTooLong = errors.New("length prefix is too long") + +// ReadData the next available data chunk, skipping over any padding chunks that +// may come first, and copies the data into p. If p is shorter than the length +// of the data chunk, only the first len(p) bytes are copied into p, and the +// error return is io.ErrShortBuffer. The returned error value is nil if and +// only if a data chunk was present and was read in its entirety. The returned +// error is io.EOF only if r ended before the first byte of a length prefix. If +// r ended in the middle of a length prefix or data/padding, the returned error +// is io.ErrUnexpectedEOF. +func ReadData(r io.Reader, p []byte) (int, error) { + for { + var b [1]byte + _, err := r.Read(b[:]) + if err != nil { + // This is the only place we may return a real io.EOF. 
+ return 0, err + } + isData := (b[0] & 0x80) != 0 + moreLength := (b[0] & 0x40) != 0 + n := int(b[0] & 0x3f) + for i := 0; moreLength; i++ { + if i >= 2 { + return 0, ErrTooLong + } + _, err := r.Read(b[:]) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + if err != nil { + return 0, err + } + moreLength = (b[0] & 0x80) != 0 + n = (n << 7) | int(b[0]&0x7f) + } + if isData { + if len(p) > n { + p = p[:n] + } + numData, err := io.ReadFull(r, p) + if err == nil && numData < n { + // If the caller's buffer was too short, discard + // the rest of the data and return + // io.ErrShortBuffer. + _, err = io.CopyN(io.Discard, r, int64(n-numData)) + if err == nil { + err = io.ErrShortBuffer + } + } + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return numData, err + } else if n > 0 { + _, err := io.CopyN(io.Discard, r, int64(n)) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + if err != nil { + return 0, err + } + } + } +} + +// dataPrefixForLength returns a length prefix for the given length, with the +// "d" bit set to 1. +func dataPrefixForLength(n int) ([]byte, error) { + switch { + case (n>>0)&0x3f == (n >> 0): + return []byte{0x80 | byte((n>>0)&0x3f)}, nil + case (n>>7)&0x3f == (n >> 7): + return []byte{0xc0 | byte((n>>7)&0x3f), byte((n >> 0) & 0x7f)}, nil + case (n>>14)&0x3f == (n >> 14): + return []byte{0xc0 | byte((n>>14)&0x3f), 0x80 | byte((n>>7)&0x7f), byte((n >> 0) & 0x7f)}, nil + default: + return nil, ErrTooLong + } +} + +// WriteData encodes a data chunk into w. It returns the total number of bytes +// written; i.e., including the length prefix. The error is ErrTooLong if the +// length of data cannot fit into a length prefix. +func WriteData(w io.Writer, data []byte) (int, error) { + prefix, err := dataPrefixForLength(len(data)) + if err != nil { + return 0, err + } + total := 0 + n, err := w.Write(prefix) + total += n + if err != nil { + return total, err + } + n, err = w.Write(data) + total += n + return total, err +} + +var paddingBuffer [1024]byte + +// WritePadding encodes padding chunks, whose total size (including their own +// length prefixes) is n. Returns the total number of bytes written to w, which +// will be exactly n unless there was an error. The error cannot be ErrTooLong +// because this function will write multiple padding chunks if necessary to +// reach the requested size. Panics if n is negative. +func WritePadding(w io.Writer, n int) (int, error) { + if n < 0 { + panic("negative length") + } + total := 0 + for n > 0 { + p := len(paddingBuffer) + if p > n { + p = n + } + n -= p + var prefix []byte + switch { + case ((p-1)>>0)&0x3f == ((p - 1) >> 0): + p = p - 1 + prefix = []byte{byte((p >> 0) & 0x3f)} + case ((p-2)>>7)&0x3f == ((p - 2) >> 7): + p = p - 2 + prefix = []byte{0x40 | byte((p>>7)&0x3f), byte((p >> 0) & 0x7f)} + case ((p-3)>>14)&0x3f == ((p - 3) >> 14): + p = p - 3 + prefix = []byte{0x40 | byte((p>>14)&0x3f), 0x80 | byte((p>>7)&0x3f), byte((p >> 0) & 0x7f)} + } + nn, err := w.Write(prefix) + total += nn + if err != nil { + return total, err + } + nn, err = w.Write(paddingBuffer[:p]) + total += nn + if err != nil { + return total, err + } + } + return total, nil +} + +// MaxDataForSize returns the length of the longest slice that can pe passed to +// WriteData, whose total encoded size (including length prefix) is no larger +// than n. Call this to find out if a chunk of data will fit into a length +// budget. Panics if n == 0. 
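+// For example, MaxDataForSize(63) returns 62 (one byte of length prefix)
+// and MaxDataForSize(100) returns 98 (two bytes of length prefix).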
+func MaxDataForSize(n int) int { + if n == 0 { + panic("zero length") + } + prefix, err := dataPrefixForLength(n) + if err == ErrTooLong { + return (1 << (6 + 7 + 7)) - 1 - 3 + } else if err != nil { + panic(err) + } + return n - len(prefix) +} diff --git a/common/encapsulation/encapsulation_test.go b/common/encapsulation/encapsulation_test.go new file mode 100644 index 0000000..f499bfb --- /dev/null +++ b/common/encapsulation/encapsulation_test.go @@ -0,0 +1,408 @@ +package encapsulation + +import ( + "bytes" + "io" + "math/rand" + "testing" +) + +// Return a byte slice with non-trivial contents. +func pseudorandomBuffer(n int) []byte { + source := rand.NewSource(0) + p := make([]byte, n) + for i := 0; i < len(p); i++ { + p[i] = byte(source.Int63() & 0xff) + } + return p +} + +func mustWriteData(w io.Writer, p []byte) int { + n, err := WriteData(w, p) + if err != nil { + panic(err) + } + return n +} + +func mustWritePadding(w io.Writer, n int) int { + n, err := WritePadding(w, n) + if err != nil { + panic(err) + } + return n +} + +// Test that ReadData(WriteData()) recovers the original data. +func TestRoundtrip(t *testing.T) { + // Test above and below interesting thresholds. + for _, i := range []int{ + 0x00, 0x01, + 0x3e, 0x3f, 0x40, 0x41, + 0xfe, 0xff, 0x100, 0x101, + 0x1ffe, 0x1fff, 0x2000, 0x2001, + 0xfffe, 0xffff, 0x10000, 0x10001, + 0xffffe, 0xfffff, + } { + original := pseudorandomBuffer(i) + var enc bytes.Buffer + n, err := WriteData(&enc, original) + if err != nil { + t.Fatalf("size %d, WriteData returned error %v", i, err) + } + if enc.Len() != n { + t.Fatalf("size %d, returned length was %d, written length was %d", + i, n, enc.Len()) + } + inverse := make([]byte, i) + n, err = ReadData(&enc, inverse) + if err != nil { + t.Fatalf("size %d, ReadData returned error %v", i, err) + } + if !bytes.Equal(inverse[:n], original) { + t.Fatalf("size %d, got <%x>, expected <%x>", i, inverse[:n], original) + } + } +} + +// Test that WritePadding writes exactly as much as requested. +func TestPaddingLength(t *testing.T) { + // Test above and below interesting thresholds. WritePadding also gets + // values above 0xfffff, the maximum value of a single length prefix. + for _, i := range []int{ + 0x00, 0x01, + 0x3f, 0x40, 0x41, 0x42, + 0xff, 0x100, 0x101, 0x102, + 0x2000, 0x2001, 0x2002, 0x2003, + 0x10000, 0x10001, 0x10002, 0x10003, + 0x100001, 0x100002, 0x100003, 0x100004, + } { + var enc bytes.Buffer + n, err := WritePadding(&enc, i) + if err != nil { + t.Fatalf("size %d, WritePadding returned error %v", i, err) + } + if n != i { + t.Fatalf("requested %d bytes, returned %d", i, n) + } + if enc.Len() != n { + t.Fatalf("requested %d bytes, wrote %d bytes", i, enc.Len()) + } + } +} + +// Test that ReadData skips over padding. 
+func TestSkipPadding(t *testing.T) { + var data = [][]byte{{}, {}, []byte("hello"), {}, []byte("world")} + var enc bytes.Buffer + mustWritePadding(&enc, 10) + mustWritePadding(&enc, 100) + mustWriteData(&enc, data[0]) + mustWriteData(&enc, data[1]) + mustWritePadding(&enc, 10) + mustWriteData(&enc, data[2]) + mustWriteData(&enc, data[3]) + mustWritePadding(&enc, 10) + mustWriteData(&enc, data[4]) + mustWritePadding(&enc, 10) + mustWritePadding(&enc, 10) + for i, expected := range data { + var actual [10]byte + n, err := ReadData(&enc, actual[:]) + if err != nil { + t.Fatalf("slice %d, got error %v, expected %v", i, err, nil) + } + if !bytes.Equal(actual[:n], expected) { + t.Fatalf("slice %d, got <%x>, expected <%x>", i, actual[:n], expected) + } + } + n, err := ReadData(&enc, nil) + if n != 0 || err != io.EOF { + t.Fatalf("got (%v, %v), expected (%v, %v)", n, err, 0, io.EOF) + } +} + +// Test that EOF before a length prefix returns io.EOF. +func TestEOF(t *testing.T) { + n, err := ReadData(bytes.NewReader(nil), nil) + if n != 0 || err != io.EOF { + t.Fatalf("got (%v, %v), expected (%v, %v)", n, err, 0, io.EOF) + } +} + +// Test that an EOF while reading a length prefix, or while reading the +// subsequent data/padding, returns io.ErrUnexpectedEOF. +func TestUnexpectedEOF(t *testing.T) { + for _, test := range [][]byte{ + {0x40}, // expecting a second length byte + {0xc0}, // expecting a second length byte + {0x41, 0x80}, // expecting a third length byte + {0xc1, 0x80}, // expecting a third length byte + {0x02}, // expecting 2 bytes of padding + {0x82}, // expecting 2 bytes of data + {0x02, 'X'}, // expecting 1 byte of padding + {0x82, 'X'}, // expecting 1 byte of data + {0x41, 0x00}, // expecting 128 bytes of padding + {0xc1, 0x00}, // expecting 128 bytes of data + {0x41, 0x00, 'X'}, // expecting 127 bytes of padding + {0xc1, 0x00, 'X'}, // expecting 127 bytes of data + {0x41, 0x80, 0x00}, // expecting 32768 bytes of padding + {0xc1, 0x80, 0x00}, // expecting 32768 bytes of data + {0x41, 0x80, 0x00, 'X'}, // expecting 32767 bytes of padding + {0xc1, 0x80, 0x00, 'X'}, // expecting 32767 bytes of data + } { + n, err := ReadData(bytes.NewReader(test), nil) + if n != 0 || err != io.ErrUnexpectedEOF { + t.Fatalf("<%x> got (%v, %v), expected (%v, %v)", test, n, err, 0, io.ErrUnexpectedEOF) + } + } +} + +// Test that length encodings that are longer than they could be are still +// interpreted. +func TestNonMinimalLengthEncoding(t *testing.T) { + for _, test := range []struct { + enc []byte + expected []byte + }{ + {[]byte{0x81, 'X'}, []byte("X")}, + {[]byte{0xc0, 0x01, 'X'}, []byte("X")}, + {[]byte{0xc0, 0x80, 0x01, 'X'}, []byte("X")}, + } { + var p [10]byte + n, err := ReadData(bytes.NewReader(test.enc), p[:]) + if err != nil { + t.Fatalf("<%x> got error %v, expected %v", test.enc, err, nil) + } + if !bytes.Equal(p[:n], test.expected) { + t.Fatalf("<%x> got <%x>, expected <%x>", test.enc, p[:n], test.expected) + } + } +} + +// Test that ReadData only reads up to 3 bytes of length prefix. +func TestReadLimits(t *testing.T) { + // Test the maximum length that's possible with 3 bytes of length + // prefix. 
+ maxLength := (0x3f << 14) | (0x7f << 7) | 0x7f + data := bytes.Repeat([]byte{'X'}, maxLength) + prefix := []byte{0xff, 0xff, 0x7f} // encodes 0xfffff + var p [0xfffff]byte + n, err := ReadData(bytes.NewReader(append(prefix, data...)), p[:]) + if err != nil { + t.Fatalf("got error %v, expected %v", err, nil) + } + if !bytes.Equal(p[:n], data) { + t.Fatalf("got %d bytes unequal to %d bytes", len(p), len(data)) + } + // Test a 4-byte prefix. + prefix = []byte{0xc0, 0xc0, 0x80, 0x80} // encodes 0x100000 + data = bytes.Repeat([]byte{'X'}, maxLength+1) + n, err = ReadData(bytes.NewReader(append(prefix, data...)), nil) + if n != 0 || err != ErrTooLong { + t.Fatalf("got (%v, %v), expected (%v, %v)", n, err, 0, ErrTooLong) + } + // Test that 4 bytes don't work, even when they encode an integer that + // would fix in 3 bytes. + prefix = []byte{0xc0, 0x80, 0x80, 0x80} // encodes 0x0 + data = []byte{} + n, err = ReadData(bytes.NewReader(append(prefix, data...)), nil) + if n != 0 || err != ErrTooLong { + t.Fatalf("got (%v, %v), expected (%v, %v)", n, err, 0, ErrTooLong) + } + + // Do the same tests with padding lengths. + data = []byte("hello") + prefix = []byte{0x7f, 0xff, 0x7f} // encodes 0xfffff + padding := bytes.Repeat([]byte{'X'}, maxLength) + enc := bytes.NewBuffer(append(prefix, padding...)) + mustWriteData(enc, data) + n, err = ReadData(enc, p[:]) + if err != nil { + t.Fatalf("got error %v, expected %v", err, nil) + } + if !bytes.Equal(p[:n], data) { + t.Fatalf("got <%x>, expected <%x>", p[:n], data) + } + prefix = []byte{0x40, 0xc0, 0x80, 0x80} // encodes 0x100000 + padding = bytes.Repeat([]byte{'X'}, maxLength+1) + enc = bytes.NewBuffer(append(prefix, padding...)) + mustWriteData(enc, data) + n, err = ReadData(enc, nil) + if n != 0 || err != ErrTooLong { + t.Fatalf("got (%v, %v), expected (%v, %v)", n, err, 0, ErrTooLong) + } + prefix = []byte{0x40, 0x80, 0x80, 0x80} // encodes 0x0 + padding = []byte{} + enc = bytes.NewBuffer(append(prefix, padding...)) + mustWriteData(enc, data) + n, err = ReadData(enc, nil) + if n != 0 || err != ErrTooLong { + t.Fatalf("got (%v, %v), expected (%v, %v)", n, err, 0, ErrTooLong) + } +} + +// Test that WriteData and WritePadding only accept lengths that can be encoded +// in up to 3 bytes of length prefix. +func TestWriteLimits(t *testing.T) { + maxLength := (0x3f << 14) | (0x7f << 7) | 0x7f + var enc bytes.Buffer + n, err := WriteData(&enc, bytes.Repeat([]byte{'X'}, maxLength)) + if n != maxLength+3 || err != nil { + t.Fatalf("got (%d, %v), expected (%d, %v)", n, err, maxLength, nil) + } + enc.Reset() + n, err = WriteData(&enc, bytes.Repeat([]byte{'X'}, maxLength+1)) + if n != 0 || err != ErrTooLong { + t.Fatalf("got (%d, %v), expected (%d, %v)", n, err, 0, ErrTooLong) + } + + // Padding gets an extra 3 bytes because the prefix is counted as part + // of the length. + enc.Reset() + n, err = WritePadding(&enc, maxLength+3) + if n != maxLength+3 || err != nil { + t.Fatalf("got (%d, %v), expected (%d, %v)", n, err, maxLength+3, nil) + } + // Writing a too-long padding is okay because WritePadding will break it + // into smaller chunks. + enc.Reset() + n, err = WritePadding(&enc, maxLength+4) + if n != maxLength+4 || err != nil { + t.Fatalf("got (%d, %v), expected (%d, %v)", n, err, maxLength+4, nil) + } +} + +// Test that WritePadding panics when given a negative length. 
+func TestNegativeLength(t *testing.T) { + for _, n := range []int{-1, ^0} { + var enc bytes.Buffer + panicked, nn, err := testNegativeLengthSub(t, &enc, n) + if !panicked { + t.Fatalf("WritePadding(%d) returned (%d, %v) instead of panicking", n, nn, err) + } + } +} + +// Calls WritePadding(w, n) and augments the return value with a flag indicating +// whether the call panicked. +func testNegativeLengthSub(t *testing.T, w io.Writer, n int) (panicked bool, nn int, err error) { + defer func() { + if r := recover(); r != nil { + panicked = true + } + }() + t.Helper() + nn, err = WritePadding(w, n) + return false, n, err +} + +// Test that MaxDataForSize panics when given a 0 length. +func TestMaxDataForSizeZero(t *testing.T) { + defer func() { + if r := recover(); r == nil { + t.Fatal("didn't panic") + } + }() + MaxDataForSize(0) +} + +// Test thresholds of available sizes for MaxDataForSize. +func TestMaxDataForSize(t *testing.T) { + for _, test := range []struct { + size int + expected int + }{ + {0x01, 0x00}, + {0x02, 0x01}, + {0x3f, 0x3e}, + {0x40, 0x3e}, + {0x41, 0x3f}, + {0x1fff, 0x1ffd}, + {0x2000, 0x1ffd}, + {0x2001, 0x1ffe}, + {0xfffff, 0xffffc}, + {0x100000, 0xffffc}, + {0x100001, 0xffffc}, + {0x7fffffff, 0xffffc}, + } { + max := MaxDataForSize(test.size) + if max != test.expected { + t.Fatalf("size %d, got %d, expected %d", test.size, max, test.expected) + } + } +} + +// Test that ReadData truncates the data when the destination slice is too +// short. +func TestReadDataTruncate(t *testing.T) { + var enc bytes.Buffer + mustWriteData(&enc, []byte("12345678")) + mustWriteData(&enc, []byte("abcdefgh")) + var p [4]byte + // First ReadData should return truncated "1234". + n, err := ReadData(&enc, p[:]) + if err != io.ErrShortBuffer { + t.Fatalf("got error %v, expected %v", err, io.ErrShortBuffer) + } + if !bytes.Equal(p[:n], []byte("1234")) { + t.Fatalf("got <%x>, expected <%x>", p[:n], []byte("1234")) + } + // Second ReadData should return truncated "abcd", not the rest of + // "12345678". + n, err = ReadData(&enc, p[:]) + if err != io.ErrShortBuffer { + t.Fatalf("got error %v, expected %v", err, io.ErrShortBuffer) + } + if !bytes.Equal(p[:n], []byte("abcd")) { + t.Fatalf("got <%x>, expected <%x>", p[:n], []byte("abcd")) + } + // Last ReadData should give io.EOF. + n, err = ReadData(&enc, p[:]) + if err != io.EOF { + t.Fatalf("got error %v, expected %v", err, io.EOF) + } +} + +// Test that even when the result is truncated, ReadData fills the provided +// buffer as much as possible (and not stop at the boundary of an internal Read, +// say). +func TestReadDataTruncateFull(t *testing.T) { + pr, pw := io.Pipe() + go func() { + // Send one data chunk that will be delivered across two Read + // calls. + pw.Write([]byte{0x8a, 'h', 'e', 'l', 'l', 'o'}) + pw.Write([]byte{'w', 'o', 'r', 'l', 'd'}) + }() + var p [8]byte + n, err := ReadData(pr, p[:]) + if err != io.ErrShortBuffer { + t.Fatalf("got error %v, expected %v", err, io.ErrShortBuffer) + } + // Should not stop after "hello". + if !bytes.Equal(p[:n], []byte("hellowor")) { + t.Fatalf("got <%x>, expected <%x>", p[:n], []byte("hellowor")) + } +} + +// Benchmark the ReadData function when reading from a stream of data packets of +// different sizes. 
+func BenchmarkReadData(b *testing.B) { + pr, pw := io.Pipe() + go func() { + for { + for length := 0; length < 128; length++ { + WriteData(pw, paddingBuffer[:length]) + } + } + }() + + var p [128]byte + for i := 0; i < b.N; i++ { + _, err := ReadData(pr, p[:]) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/common/event/bus.go b/common/event/bus.go new file mode 100644 index 0000000..7e45779 --- /dev/null +++ b/common/event/bus.go @@ -0,0 +1,39 @@ +package event + +import "sync" + +func NewSnowflakeEventDispatcher() SnowflakeEventDispatcher { + return &eventBus{lock: &sync.Mutex{}} +} + +type eventBus struct { + lock *sync.Mutex + listeners []SnowflakeEventReceiver +} + +func (e *eventBus) OnNewSnowflakeEvent(event SnowflakeEvent) { + e.lock.Lock() + defer e.lock.Unlock() + for _, v := range e.listeners { + v.OnNewSnowflakeEvent(event) + } +} + +func (e *eventBus) AddSnowflakeEventListener(receiver SnowflakeEventReceiver) { + e.lock.Lock() + defer e.lock.Unlock() + e.listeners = append(e.listeners, receiver) +} + +func (e *eventBus) RemoveSnowflakeEventListener(receiver SnowflakeEventReceiver) { + e.lock.Lock() + defer e.lock.Unlock() + var newListeners []SnowflakeEventReceiver + for _, v := range e.listeners { + if v != receiver { + newListeners = append(newListeners, v) + } + } + e.listeners = newListeners + return +} diff --git a/common/event/bus_test.go b/common/event/bus_test.go new file mode 100644 index 0000000..df40d3d --- /dev/null +++ b/common/event/bus_test.go @@ -0,0 +1,32 @@ +package event + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +type stubReceiver struct { + counter int +} + +func (s *stubReceiver) OnNewSnowflakeEvent(event SnowflakeEvent) { + s.counter++ +} + +func TestBusDispatch(t *testing.T) { + EventBus := NewSnowflakeEventDispatcher() + StubReceiverA := &stubReceiver{} + StubReceiverB := &stubReceiver{} + EventBus.AddSnowflakeEventListener(StubReceiverA) + EventBus.AddSnowflakeEventListener(StubReceiverB) + assert.Equal(t, 0, StubReceiverA.counter) + assert.Equal(t, 0, StubReceiverB.counter) + EventBus.OnNewSnowflakeEvent(EventOnSnowflakeConnected{}) + assert.Equal(t, 1, StubReceiverA.counter) + assert.Equal(t, 1, StubReceiverB.counter) + EventBus.RemoveSnowflakeEventListener(StubReceiverB) + EventBus.OnNewSnowflakeEvent(EventOnSnowflakeConnected{}) + assert.Equal(t, 2, StubReceiverA.counter) + assert.Equal(t, 1, StubReceiverB.counter) + +} diff --git a/common/event/interface.go b/common/event/interface.go new file mode 100644 index 0000000..dfb3f97 --- /dev/null +++ b/common/event/interface.go @@ -0,0 +1,141 @@ +package event + +import ( + "fmt" + "time" + + "github.com/pion/webrtc/v4" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil/safelog" +) + +type SnowflakeEvent interface { + IsSnowflakeEvent() + String() string +} + +type EventOnOfferCreated struct { + SnowflakeEvent + WebRTCLocalDescription *webrtc.SessionDescription + Error error +} + +func (e EventOnOfferCreated) String() string { + if e.Error != nil { + scrubbed := safelog.Scrub([]byte(e.Error.Error())) + return fmt.Sprintf("offer creation failure %s", scrubbed) + } + return "offer created" +} + +type EventOnBrokerRendezvous struct { + SnowflakeEvent + WebRTCRemoteDescription *webrtc.SessionDescription + Error error +} + +func (e EventOnBrokerRendezvous) String() string { + if e.Error != nil { + scrubbed := safelog.Scrub([]byte(e.Error.Error())) + return fmt.Sprintf("broker failure %s", scrubbed) + } + return "broker rendezvous peer received" +} + 
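+// EventOnSnowflakeConnected is dispatched when a connection to a Snowflake
+// proxy has been established.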
+type EventOnSnowflakeConnected struct {
+	SnowflakeEvent
+}
+
+func (e EventOnSnowflakeConnected) String() string {
+	return "connected"
+}
+
+type EventOnSnowflakeConnectionFailed struct {
+	SnowflakeEvent
+	Error error
+}
+
+func (e EventOnSnowflakeConnectionFailed) String() string {
+	scrubbed := safelog.Scrub([]byte(e.Error.Error()))
+	return fmt.Sprintf("trying a new proxy: %s", scrubbed)
+}
+
+type EventOnProxyStarting struct {
+	SnowflakeEvent
+}
+
+func (e EventOnProxyStarting) String() string {
+	return "Proxy starting"
+}
+
+type EventOnProxyClientConnected struct {
+	SnowflakeEvent
+}
+
+func (e EventOnProxyClientConnected) String() string {
+	return "client connected"
+}
+
+// The connection with the client has now been closed,
+// after having been successfully established.
+type EventOnProxyConnectionOver struct {
+	SnowflakeEvent
+	Country string
+}
+
+func (e EventOnProxyConnectionOver) String() string {
+	return "Proxy connection closed"
+}
+
+// Rendezvous with a client succeeded,
+// but a data channel has not been created.
+type EventOnProxyConnectionFailed struct {
+	SnowflakeEvent
+}
+
+func (e EventOnProxyConnectionFailed) String() string {
+	return "Failed to connect to the client"
+}
+
+type EventOnProxyStats struct {
+	SnowflakeEvent
+	// Completed successful connections.
+	ConnectionCount int
+	// Connections that failed to establish.
+	FailedConnectionCount       uint
+	InboundBytes, OutboundBytes int64
+	InboundUnit, OutboundUnit   string
+	SummaryInterval             time.Duration
+}
+
+func (e EventOnProxyStats) String() string {
+	statString := fmt.Sprintf("In the last %v, there were %v completed successful connections. Traffic Relayed ↓ %v %v (%.2f %v%s), ↑ %v %v (%.2f %v%s).",
+		e.SummaryInterval.String(), e.ConnectionCount,
+		e.InboundBytes, e.InboundUnit, float64(e.InboundBytes)/e.SummaryInterval.Seconds(), e.InboundUnit, "/s",
+		e.OutboundBytes, e.OutboundUnit, float64(e.OutboundBytes)/e.SummaryInterval.Seconds(), e.OutboundUnit, "/s")
+	return statString
+}
+
+type EventOnCurrentNATTypeDetermined struct {
+	SnowflakeEvent
+	CurNATType string
+}
+
+func (e EventOnCurrentNATTypeDetermined) String() string {
+	return fmt.Sprintf("NAT type: %v", e.CurNATType)
+}
+
+type SnowflakeEventReceiver interface {
+	// OnNewSnowflakeEvent notifies the receiver of a new event.
+	// This method MUST NOT block.
+	OnNewSnowflakeEvent(event SnowflakeEvent)
+}
+
+type SnowflakeEventDispatcher interface {
+	SnowflakeEventReceiver
+	// AddSnowflakeEventListener allows a receiver to be notified
+	// when OnNewSnowflakeEvent is called on the dispatcher.
+	// Every listener added will be called for each event the dispatcher receives.
+	// The order in which listeners are called is undefined.
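+	//
+	// Listeners must not block inside OnNewSnowflakeEvent. As a sketch
+	// (assuming a hypothetical chanReceiver type), slow work can be handed
+	// off to a buffered channel that is drained on another goroutine:
+	//
+	//	type chanReceiver struct{ ch chan SnowflakeEvent }
+	//
+	//	func (r chanReceiver) OnNewSnowflakeEvent(e SnowflakeEvent) {
+	//		select {
+	//		case r.ch <- e:
+	//		default: // drop rather than block the dispatcher
+	//		}
+	//	}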
+	AddSnowflakeEventListener(receiver SnowflakeEventReceiver)
+	RemoveSnowflakeEventListener(receiver SnowflakeEventReceiver)
+}
diff --git a/common/messages/client.go b/common/messages/client.go
new file mode 100644
index 0000000..da6359e
--- /dev/null
+++ b/common/messages/client.go
@@ -0,0 +1,151 @@
+//Package for communication with the snowflake broker
+
+// import "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages"
+package messages
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/bridgefingerprint"
+
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/nat"
+)
+
+const ClientVersion = "1.0"
+
+/* Client--Broker protocol v1.x specification:
+
+All messages contain the version number
+followed by a new line and then the message body
+<message> := <version>\n<body>
+<version> := <digit>.<digit>
+<body> := <poll request>|<poll response>
+
+There are two different types of body messages,
+each encoded in JSON format
+
+== ClientPollRequest ==
+<poll request> :=
+{
+	offer: <sdp offer>
+	[nat: (unknown|restricted|unrestricted)]
+	[fingerprint: <fingerprint string>]
+}
+
+The NAT field is optional, and if it is missing a
+value of "unknown" will be assumed. The fingerprint
+is also optional and, if absent, will be assigned the
+fingerprint of the default bridge.
+
+== ClientPollResponse ==
+<poll response> :=
+{
+	[answer: <sdp answer>]
+	[error: <error string>]
+}
+
+If the broker succeeded in matching the client with a proxy,
+the answer field MUST contain a valid SDP answer, and the
+error field MUST be empty. If the answer field is empty, the
+error field MUST contain a string explaining the reason
+for the error.
+
+*/
+
+// The bridge fingerprint to assume, for client poll requests that do not
+// specify a fingerprint. Before #28651, there was only one bridge with one
+// fingerprint, which all clients expected to be connected to implicitly.
+// If a client is old enough that it does not specify a fingerprint, this is
+// the fingerprint it expects. Clients that do set a fingerprint in the
+// SOCKS params will also be assumed to want to connect to the default bridge.
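+//
+// For illustration, an encoded poll message as produced by
+// EncodeClientPollRequest below (the offer value is made up) looks like:
+//
+//	1.0
+//	{"offer":"v=0...","nat":"restricted","fingerprint":"2B280B23E1107BB62ABFC40DDCC8824814F80A72"}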
+const defaultBridgeFingerprint = "2B280B23E1107BB62ABFC40DDCC8824814F80A72" + +type ClientPollRequest struct { + Offer string `json:"offer"` + NAT string `json:"nat"` + Fingerprint string `json:"fingerprint"` +} + +// Encodes a poll message from a snowflake client +func (req *ClientPollRequest) EncodeClientPollRequest() ([]byte, error) { + if req.Fingerprint == "" { + req.Fingerprint = defaultBridgeFingerprint + } + body, err := json.Marshal(req) + if err != nil { + return nil, err + } + return append([]byte(ClientVersion+"\n"), body...), nil +} + +// Decodes a poll message from a snowflake client +func DecodeClientPollRequest(data []byte) (*ClientPollRequest, error) { + parts := bytes.SplitN(data, []byte("\n"), 2) + + if len(parts) < 2 { + // no version number found + return nil, fmt.Errorf("unsupported message version") + } + + var message ClientPollRequest + + if string(parts[0]) != ClientVersion { + return nil, fmt.Errorf("unsupported message version") + } + + err := json.Unmarshal(parts[1], &message) + if err != nil { + return nil, err + } + + if message.Offer == "" { + return nil, fmt.Errorf("no supplied offer") + } + + if message.Fingerprint == "" { + message.Fingerprint = defaultBridgeFingerprint + } + + if _, err := bridgefingerprint.FingerprintFromHexString(message.Fingerprint); err != nil { + return nil, fmt.Errorf("cannot decode fingerprint") + } + + switch message.NAT { + case "": + message.NAT = nat.NATUnknown + case nat.NATUnknown: + case nat.NATRestricted: + case nat.NATUnrestricted: + default: + return nil, fmt.Errorf("invalid NAT type") + } + + return &message, nil +} + +type ClientPollResponse struct { + Answer string `json:"answer,omitempty"` + Error string `json:"error,omitempty"` +} + +// Encodes a poll response for a snowflake client +func (resp *ClientPollResponse) EncodePollResponse() ([]byte, error) { + return json.Marshal(resp) +} + +// Decodes a poll response for a snowflake client +// If the Error field is empty, the Answer should be non-empty +func DecodeClientPollResponse(data []byte) (*ClientPollResponse, error) { + var message ClientPollResponse + + err := json.Unmarshal(data, &message) + if err != nil { + return nil, err + } + if message.Error == "" && message.Answer == "" { + return nil, fmt.Errorf("received empty broker response") + } + + return &message, nil +} diff --git a/common/messages/ipc.go b/common/messages/ipc.go new file mode 100644 index 0000000..91eccdb --- /dev/null +++ b/common/messages/ipc.go @@ -0,0 +1,30 @@ +package messages + +import ( + "context" + "errors" +) + +type RendezvousMethod string + +const ( + RendezvousHttp RendezvousMethod = "http" + RendezvousAmpCache RendezvousMethod = "ampcache" + RendezvousSqs RendezvousMethod = "sqs" +) + +type Arg struct { + Body []byte + RemoteAddr string + RendezvousMethod RendezvousMethod + Context context.Context +} + +var ( + ErrBadRequest = errors.New("bad request") + ErrInternal = errors.New("internal error") + ErrExtraInfo = errors.New("client sent extra info") + + StrTimedOut = "timed out waiting for answer!" + StrNoProxies = "no snowflake proxies currently available" +) diff --git a/common/messages/messages_test.go b/common/messages/messages_test.go new file mode 100644 index 0000000..c70e6f3 --- /dev/null +++ b/common/messages/messages_test.go @@ -0,0 +1,472 @@ +package messages + +import ( + "encoding/json" + "fmt" + "testing" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestDecodeProxyPollRequest(t *testing.T) { + Convey("Context", t, func() { + for _, test := range []struct { + sid string + proxyType string + natType string + clients int + data string + err error + + acceptedRelayPattern string + }{ + { + //Version 1.0 proxy message + sid: "ymbcCMto7KHNGYlp", + proxyType: "unknown", + natType: "unknown", + clients: 0, + data: `{"Sid":"ymbcCMto7KHNGYlp","Version":"1.0"}`, + err: nil, + }, + { + //Version 1.1 proxy message + sid: "ymbcCMto7KHNGYlp", + proxyType: "standalone", + natType: "unknown", + clients: 0, + data: `{"Sid":"ymbcCMto7KHNGYlp","Version":"1.1","Type":"standalone"}`, + err: nil, + }, + { + //Version 1.2 proxy message + sid: "ymbcCMto7KHNGYlp", + proxyType: "standalone", + natType: "restricted", + clients: 0, + data: `{"Sid":"ymbcCMto7KHNGYlp","Version":"1.2","Type":"standalone", "NAT":"restricted"}`, + err: nil, + }, + { + //Version 1.2 proxy message with clients + sid: "ymbcCMto7KHNGYlp", + proxyType: "standalone", + natType: "restricted", + clients: 24, + data: `{"Sid":"ymbcCMto7KHNGYlp","Version":"1.2","Type":"standalone", "NAT":"restricted","Clients":24}`, + err: nil, + }, + { + //Version 1.3 proxy message with clients and proxyURL + sid: "ymbcCMto7KHNGYlp", + proxyType: "standalone", + natType: "restricted", + clients: 24, + acceptedRelayPattern: "snowfalke.torproject.org", + data: `{"Sid":"ymbcCMto7KHNGYlp","Version":"1.2","Type":"standalone", "NAT":"restricted","Clients":24, "AcceptedRelayPattern":"snowfalke.torproject.org"}`, + err: nil, + }, + { + //Version 0.X proxy message: + sid: "", + proxyType: "", + natType: "", + clients: 0, + data: "", + err: &json.SyntaxError{}, + }, + { + sid: "", + proxyType: "", + natType: "", + clients: 0, + data: `{"Sid":"ymbcCMto7KHNGYlp"}`, + err: fmt.Errorf(""), + }, + { + sid: "", + proxyType: "", + natType: "", + clients: 0, + data: "{}", + err: fmt.Errorf(""), + }, + { + sid: "", + proxyType: "", + natType: "", + clients: 0, + data: `{"Version":"1.0"}`, + err: fmt.Errorf(""), + }, + { + sid: "", + proxyType: "", + natType: "", + clients: 0, + data: `{"Version":"2.0"}`, + err: fmt.Errorf(""), + }, + } { + sid, proxyType, natType, clients, relayPattern, _, err := DecodeProxyPollRequestWithRelayPrefix([]byte(test.data)) + So(sid, ShouldResemble, test.sid) + So(proxyType, ShouldResemble, test.proxyType) + So(natType, ShouldResemble, test.natType) + So(clients, ShouldEqual, test.clients) + So(relayPattern, ShouldResemble, test.acceptedRelayPattern) + So(err, ShouldHaveSameTypeAs, test.err) + } + + }) +} + +func TestEncodeProxyPollRequests(t *testing.T) { + Convey("Context", t, func() { + b, err := EncodeProxyPollRequest("ymbcCMto7KHNGYlp", "standalone", "unknown", 16) + So(err, ShouldBeNil) + sid, proxyType, natType, clients, err := DecodeProxyPollRequest(b) + So(sid, ShouldEqual, "ymbcCMto7KHNGYlp") + So(proxyType, ShouldEqual, "standalone") + So(natType, ShouldEqual, "unknown") + So(clients, ShouldEqual, 16) + So(err, ShouldBeNil) + }) +} + +func TestDecodeProxyPollResponse(t *testing.T) { + Convey("Context", t, func() { + for _, test := range []struct { + offer string + data string + relayURL string + err error + }{ + { + offer: "fake offer", + data: `{"Status":"client match","Offer":"fake offer","NAT":"unknown"}`, + err: nil, + }, + { + offer: "fake offer", + data: `{"Status":"client match","Offer":"fake offer","NAT":"unknown", "RelayURL":"wss://snowflake.torproject.org/proxy"}`, + relayURL: "wss://snowflake.torproject.org/proxy", + err: nil, 
+ }, + { + offer: "", + data: `{"Status":"no match"}`, + err: nil, + }, + { + offer: "", + data: `{"Status":"client match"}`, + err: fmt.Errorf("no supplied offer"), + }, + { + offer: "", + data: `{"Test":"test"}`, + err: fmt.Errorf(""), + }, + } { + offer, _, relayURL, err := DecodePollResponseWithRelayURL([]byte(test.data)) + So(err, ShouldHaveSameTypeAs, test.err) + So(offer, ShouldResemble, test.offer) + So(relayURL, ShouldResemble, test.relayURL) + } + + }) +} + +func TestEncodeProxyPollResponse(t *testing.T) { + Convey("Context", t, func() { + b, err := EncodePollResponse("fake offer", true, "restricted") + So(err, ShouldBeNil) + offer, natType, err := DecodePollResponse(b) + So(offer, ShouldEqual, "fake offer") + So(natType, ShouldEqual, "restricted") + So(err, ShouldBeNil) + + b, err = EncodePollResponse("", false, "unknown") + So(err, ShouldBeNil) + offer, natType, err = DecodePollResponse(b) + So(offer, ShouldEqual, "") + So(natType, ShouldEqual, "unknown") + So(err, ShouldBeNil) + }) +} + +func TestEncodeProxyPollResponseWithProxyURL(t *testing.T) { + Convey("Context", t, func() { + b, err := EncodePollResponseWithRelayURL("fake offer", true, "restricted", "wss://test/", "") + So(err, ShouldBeNil) + offer, natType, err := DecodePollResponse(b) + So(err, ShouldNotBeNil) + + offer, natType, relay, err := DecodePollResponseWithRelayURL(b) + So(offer, ShouldEqual, "fake offer") + So(natType, ShouldEqual, "restricted") + So(relay, ShouldEqual, "wss://test/") + So(err, ShouldBeNil) + + b, err = EncodePollResponse("", false, "unknown") + So(err, ShouldBeNil) + offer, natType, relay, err = DecodePollResponseWithRelayURL(b) + So(offer, ShouldEqual, "") + So(natType, ShouldEqual, "unknown") + So(err, ShouldBeNil) + + b, err = EncodePollResponseWithRelayURL("fake offer", false, "restricted", "wss://test/", "test error reason") + So(err, ShouldBeNil) + offer, natType, relay, err = DecodePollResponseWithRelayURL(b) + So(err, ShouldNotBeNil) + So(err.Error(), ShouldContainSubstring, "test error reason") + }) +} +func TestDecodeProxyAnswerRequest(t *testing.T) { + Convey("Context", t, func() { + for _, test := range []struct { + answer string + sid string + data string + err error + }{ + { + "test", + "test", + `{"Version":"1.0","Sid":"test","Answer":"test"}`, + nil, + }, + { + "", + "", + `{"type":"offer","sdp":"v=0\r\no=- 4358805017720277108 2 IN IP4 [scrubbed]\r\ns=-\r\nt=0 0\r\na=group:BUNDLE data\r\na=msid-semantic: WMS\r\nm=application 56688 DTLS/SCTP 5000\r\nc=IN IP4 [scrubbed]\r\na=candidate:3769337065 1 udp 2122260223 [scrubbed] 56688 typ host generation 0 network-id 1 network-cost 50\r\na=candidate:2921887769 1 tcp 1518280447 [scrubbed] 35441 typ host tcptype passive generation 0 network-id 1 network-cost 50\r\na=ice-ufrag:aMAZ\r\na=ice-pwd:jcHb08Jjgrazp2dzjdrvPPvV\r\na=ice-options:trickle\r\na=fingerprint:sha-256 C8:88:EE:B9:E7:02:2E:21:37:ED:7A:D1:EB:2B:A3:15:A2:3B:5B:1C:3D:D4:D5:1F:06:CF:52:40:03:F8:DD:66\r\na=setup:actpass\r\na=mid:data\r\na=sctpmap:5000 webrtc-datachannel 1024\r\n"}`, + fmt.Errorf(""), + }, + { + "", + "", + `{"Version":"1.0","Answer":"test"}`, + fmt.Errorf(""), + }, + { + "", + "", + `{"Version":"1.0","Sid":"test"}`, + fmt.Errorf(""), + }, + } { + answer, sid, err := DecodeAnswerRequest([]byte(test.data)) + So(answer, ShouldResemble, test.answer) + So(sid, ShouldResemble, test.sid) + So(err, ShouldHaveSameTypeAs, test.err) + } + + }) +} + +func TestEncodeProxyAnswerRequest(t *testing.T) { + Convey("Context", t, func() { + b, err := EncodeAnswerRequest("test 
answer", "test sid") + So(err, ShouldBeNil) + answer, sid, err := DecodeAnswerRequest(b) + So(answer, ShouldEqual, "test answer") + So(sid, ShouldEqual, "test sid") + So(err, ShouldBeNil) + }) +} + +func TestDecodeProxyAnswerResponse(t *testing.T) { + Convey("Context", t, func() { + for _, test := range []struct { + success bool + data string + err error + }{ + { + true, + `{"Status":"success"}`, + nil, + }, + { + false, + `{"Status":"client gone"}`, + nil, + }, + { + false, + `{"Test":"test"}`, + fmt.Errorf(""), + }, + } { + success, err := DecodeAnswerResponse([]byte(test.data)) + So(success, ShouldResemble, test.success) + So(err, ShouldHaveSameTypeAs, test.err) + } + + }) +} + +func TestEncodeProxyAnswerResponse(t *testing.T) { + Convey("Context", t, func() { + b, err := EncodeAnswerResponse(true) + So(err, ShouldBeNil) + success, err := DecodeAnswerResponse(b) + So(success, ShouldEqual, true) + So(err, ShouldBeNil) + + b, err = EncodeAnswerResponse(false) + So(err, ShouldBeNil) + success, err = DecodeAnswerResponse(b) + So(success, ShouldEqual, false) + So(err, ShouldBeNil) + }) +} + +func TestDecodeClientPollRequest(t *testing.T) { + Convey("Context", t, func() { + for _, test := range []struct { + natType string + offer string + data string + err error + }{ + { + //version 1.0 client message + "unknown", + "fake", + `1.0 +{"nat":"unknown","offer":"fake"}`, + nil, + }, + { + //version 1.0 client message + "unknown", + "fake", + `1.0 +{"offer":"fake"}`, + nil, + }, + { + //unknown version + "", + "", + `{"version":"2.0"}`, + fmt.Errorf(""), + }, + { + //no offer + "", + "", + `1.0 +{"nat":"unknown"}`, + fmt.Errorf(""), + }, + } { + req, err := DecodeClientPollRequest([]byte(test.data)) + So(err, ShouldHaveSameTypeAs, test.err) + if test.err == nil { + So(req.NAT, ShouldResemble, test.natType) + So(req.Offer, ShouldResemble, test.offer) + } + } + + }) +} + +func TestEncodeClientPollRequests(t *testing.T) { + Convey("Context", t, func() { + for i, test := range []struct { + natType string + offer string + fingerprint string + err error + }{ + { + "unknown", + "fake", + "", + nil, + }, + { + "unknown", + "fake", + defaultBridgeFingerprint, + nil, + }, + { + "unknown", + "fake", + "123123", + fmt.Errorf(""), + }, + } { + req1 := &ClientPollRequest{ + NAT: test.natType, + Offer: test.offer, + Fingerprint: test.fingerprint, + } + b, err := req1.EncodeClientPollRequest() + So(err, ShouldBeNil) + req2, err := DecodeClientPollRequest(b) + So(err, ShouldHaveSameTypeAs, test.err) + if test.err == nil { + So(req2.Offer, ShouldEqual, req1.Offer) + So(req2.NAT, ShouldEqual, req1.NAT) + fingerprint := test.fingerprint + if i == 0 { + fingerprint = defaultBridgeFingerprint + } + So(req2.Fingerprint, ShouldEqual, fingerprint) + } + } + }) +} + +func TestDecodeClientPollResponse(t *testing.T) { + Convey("Context", t, func() { + for _, test := range []struct { + answer string + msg string + data string + }{ + { + "fake answer", + "", + `{"answer":"fake answer"}`, + }, + { + "", + "no snowflakes", + `{"error":"no snowflakes"}`, + }, + } { + resp, err := DecodeClientPollResponse([]byte(test.data)) + So(err, ShouldBeNil) + So(resp.Answer, ShouldResemble, test.answer) + So(resp.Error, ShouldResemble, test.msg) + } + + }) +} + +func TestEncodeClientPollResponse(t *testing.T) { + Convey("Context", t, func() { + resp1 := &ClientPollResponse{ + Answer: "fake answer", + } + b, err := resp1.EncodePollResponse() + So(err, ShouldBeNil) + resp2, err := DecodeClientPollResponse(b) + So(err, ShouldBeNil) + So(resp1, 
ShouldResemble, resp2) + + resp1 = &ClientPollResponse{ + Error: "failed", + } + b, err = resp1.EncodePollResponse() + So(err, ShouldBeNil) + resp2, err = DecodeClientPollResponse(b) + So(err, ShouldBeNil) + So(resp1, ShouldResemble, resp2) + }) +} diff --git a/common/messages/proxy.go b/common/messages/proxy.go new file mode 100644 index 0000000..6fe02be --- /dev/null +++ b/common/messages/proxy.go @@ -0,0 +1,315 @@ +//Package for communication with the snowflake broker + +// import "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages" +package messages + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/nat" +) + +const ( + version = "1.3" + ProxyUnknown = "unknown" +) + +var KnownProxyTypes = map[string]bool{ + "standalone": true, + "webext": true, + "badge": true, + "iptproxy": true, +} + +/* Version 1.3 specification: + +== ProxyPollRequest == +{ + Sid: [generated session id of proxy], + Version: 1.3, + Type: ["badge"|"webext"|"standalone"], + NAT: ["unknown"|"restricted"|"unrestricted"], + Clients: [number of current clients, rounded down to multiples of 8], + AcceptedRelayPattern: [a pattern representing accepted set of relay domains] +} + +== ProxyPollResponse == +1) If a client is matched: +HTTP 200 OK +{ + Status: "client match", + { + type: offer, + sdp: [WebRTC SDP] + }, + NAT: ["unknown"|"restricted"|"unrestricted"], + RelayURL: [the WebSocket URL proxy should connect to relay Snowflake traffic] +} + +2) If a client is not matched: +HTTP 200 OK + +{ + Status: "no match" +} + +3) If the request is malformed: +HTTP 400 BadRequest + +== ProxyAnswerRequest == +{ + Sid: [generated session id of proxy], + Version: 1.3, + Answer: + { + type: answer, + sdp: [WebRTC SDP] + } +} + +== ProxyAnswerResponse == +1) If the client retrieved the answer: +HTTP 200 OK + +{ + Status: "success" +} + +2) If the client left: +HTTP 200 OK + +{ + Status: "client gone" +} + +3) If the request is malformed: +HTTP 400 BadRequest + +*/ + +type ProxyPollRequest struct { + Sid string + Version string + Type string + NAT string + Clients int + + AcceptedRelayPattern *string +} + +func EncodeProxyPollRequest(sid string, proxyType string, natType string, clients int) ([]byte, error) { + return EncodeProxyPollRequestWithRelayPrefix(sid, proxyType, natType, clients, "") +} + +func EncodeProxyPollRequestWithRelayPrefix(sid string, proxyType string, natType string, clients int, relayPattern string) ([]byte, error) { + return json.Marshal(ProxyPollRequest{ + Sid: sid, + Version: version, + Type: proxyType, + NAT: natType, + Clients: clients, + AcceptedRelayPattern: &relayPattern, + }) +} + +func DecodeProxyPollRequest(data []byte) (sid string, proxyType string, natType string, clients int, err error) { + var relayPrefix string + sid, proxyType, natType, clients, relayPrefix, _, err = DecodeProxyPollRequestWithRelayPrefix(data) + if relayPrefix != "" { + return "", "", "", 0, ErrExtraInfo + } + return +} + +// Decodes a poll message from a snowflake proxy and returns the +// sid, proxy type, nat type and clients of the proxy on success +// and an error if it failed +func DecodeProxyPollRequestWithRelayPrefix(data []byte) ( + sid string, proxyType string, natType string, clients int, relayPrefix string, relayPrefixAware bool, err error) { + var message ProxyPollRequest + + err = json.Unmarshal(data, &message) + if err != nil { + return + } + + majorVersion := 
strings.Split(message.Version, ".")[0] + if majorVersion != "1" { + err = fmt.Errorf("using unknown version") + return + } + + // Version 1.x requires an Sid + if message.Sid == "" { + err = fmt.Errorf("no supplied session id") + return + } + + switch message.NAT { + case "": + message.NAT = nat.NATUnknown + case nat.NATUnknown: + case nat.NATRestricted: + case nat.NATUnrestricted: + default: + err = fmt.Errorf("invalid NAT type") + return + } + + // we don't reject polls with an unknown proxy type because we encourage + // projects that embed proxy code to include their own type + if !KnownProxyTypes[message.Type] { + message.Type = ProxyUnknown + } + var acceptedRelayPattern = "" + if message.AcceptedRelayPattern != nil { + acceptedRelayPattern = *message.AcceptedRelayPattern + } + return message.Sid, message.Type, message.NAT, message.Clients, + acceptedRelayPattern, message.AcceptedRelayPattern != nil, nil +} + +type ProxyPollResponse struct { + Status string + Offer string + NAT string + + RelayURL string +} + +func EncodePollResponse(offer string, success bool, natType string) ([]byte, error) { + return EncodePollResponseWithRelayURL(offer, success, natType, "", "no match") +} + +func EncodePollResponseWithRelayURL(offer string, success bool, natType, relayURL, failReason string) ([]byte, error) { + if success { + return json.Marshal(ProxyPollResponse{ + Status: "client match", + Offer: offer, + NAT: natType, + RelayURL: relayURL, + }) + + } + return json.Marshal(ProxyPollResponse{ + Status: failReason, + }) +} +func DecodePollResponse(data []byte) (offer string, natType string, err error) { + offer, natType, relayURL, err := DecodePollResponseWithRelayURL(data) + if relayURL != "" { + return "", "", ErrExtraInfo + } + return offer, natType, err +} + +// Decodes a poll response from the broker and returns an offer and the client's NAT type +// If there is a client match, the returned offer string will be non-empty +func DecodePollResponseWithRelayURL(data []byte) ( + offer string, + natType string, + relayURL string, + err_ error, +) { + var message ProxyPollResponse + + err := json.Unmarshal(data, &message) + if err != nil { + return "", "", "", err + } + if message.Status == "" { + return "", "", "", fmt.Errorf("received invalid data") + } + + err = nil + if message.Status == "client match" { + if message.Offer == "" { + return "", "", "", fmt.Errorf("no supplied offer") + } + } else { + message.Offer = "" + if message.Status != "no match" { + err = errors.New(message.Status) + } + } + + natType = message.NAT + if natType == "" { + natType = "unknown" + } + + return message.Offer, natType, message.RelayURL, err +} + +type ProxyAnswerRequest struct { + Version string + Sid string + Answer string +} + +func EncodeAnswerRequest(answer string, sid string) ([]byte, error) { + return json.Marshal(ProxyAnswerRequest{ + Version: version, + Sid: sid, + Answer: answer, + }) +} + +// Returns the sdp answer and proxy sid +func DecodeAnswerRequest(data []byte) (answer string, sid string, err error) { + var message ProxyAnswerRequest + + err = json.Unmarshal(data, &message) + if err != nil { + return "", "", err + } + + majorVersion := strings.Split(message.Version, ".")[0] + if majorVersion != "1" { + return "", "", fmt.Errorf("using unknown version") + } + + if message.Sid == "" || message.Answer == "" { + return "", "", fmt.Errorf("no supplied sid or answer") + } + + return message.Answer, message.Sid, nil +} + +type ProxyAnswerResponse struct { + Status string +} + +func 
EncodeAnswerResponse(success bool) ([]byte, error) { + if success { + return json.Marshal(ProxyAnswerResponse{ + Status: "success", + }) + + } + return json.Marshal(ProxyAnswerResponse{ + Status: "client gone", + }) +} + +func DecodeAnswerResponse(data []byte) (bool, error) { + var message ProxyAnswerResponse + var success bool + + err := json.Unmarshal(data, &message) + if err != nil { + return success, err + } + if message.Status == "" { + return success, fmt.Errorf("received invalid data") + } + + if message.Status == "success" { + success = true + } + + return success, nil +} diff --git a/common/namematcher/matcher.go b/common/namematcher/matcher.go new file mode 100644 index 0000000..afcdbff --- /dev/null +++ b/common/namematcher/matcher.go @@ -0,0 +1,31 @@ +package namematcher + +import "strings" + +func NewNameMatcher(rule string) NameMatcher { + rule = strings.TrimSuffix(rule, "$") + return NameMatcher{suffix: strings.TrimPrefix(rule, "^"), exact: strings.HasPrefix(rule, "^")} +} + +func IsValidRule(rule string) bool { + return strings.HasSuffix(rule, "$") +} + +type NameMatcher struct { + exact bool + suffix string +} + +func (m *NameMatcher) IsSupersetOf(matcher NameMatcher) bool { + if m.exact { + return matcher.exact && m.suffix == matcher.suffix + } + return strings.HasSuffix(matcher.suffix, m.suffix) +} + +func (m *NameMatcher) IsMember(s string) bool { + if m.exact { + return s == m.suffix + } + return strings.HasSuffix(s, m.suffix) +} diff --git a/common/namematcher/matcher_test.go b/common/namematcher/matcher_test.go new file mode 100644 index 0000000..08d089c --- /dev/null +++ b/common/namematcher/matcher_test.go @@ -0,0 +1,55 @@ +package namematcher + +import "testing" + +import . "github.com/smartystreets/goconvey/convey" + +func TestMatchMember(t *testing.T) { + testingVector := []struct { + matcher string + target string + expects bool + }{ + {matcher: "", target: "", expects: true}, + {matcher: "^snowflake.torproject.net$", target: "snowflake.torproject.net", expects: true}, + {matcher: "^snowflake.torproject.net$", target: "faketorproject.net", expects: false}, + {matcher: "snowflake.torproject.net$", target: "faketorproject.net", expects: false}, + {matcher: "snowflake.torproject.net$", target: "snowflake.torproject.net", expects: true}, + {matcher: "snowflake.torproject.net$", target: "imaginary-01-snowflake.torproject.net", expects: true}, + {matcher: "snowflake.torproject.net$", target: "imaginary-aaa-snowflake.torproject.net", expects: true}, + {matcher: "snowflake.torproject.net$", target: "imaginary-aaa-snowflake.faketorproject.net", expects: false}, + } + for _, v := range testingVector { + t.Run(v.matcher+"<>"+v.target, func(t *testing.T) { + Convey("test", t, func() { + matcher := NewNameMatcher(v.matcher) + So(matcher.IsMember(v.target), ShouldEqual, v.expects) + }) + }) + } +} + +func TestMatchSubset(t *testing.T) { + testingVector := []struct { + matcher string + target string + expects bool + }{ + {matcher: "", target: "", expects: true}, + {matcher: "^snowflake.torproject.net$", target: "^snowflake.torproject.net$", expects: true}, + {matcher: "snowflake.torproject.net$", target: "^snowflake.torproject.net$", expects: true}, + {matcher: "snowflake.torproject.net$", target: "snowflake.torproject.net$", expects: true}, + {matcher: "snowflake.torproject.net$", target: "testing-snowflake.torproject.net$", expects: true}, + {matcher: "snowflake.torproject.net$", target: "^testing-snowflake.torproject.net$", expects: true}, + {matcher: 
"snowflake.torproject.net$", target: "", expects: false}, + } + for _, v := range testingVector { + t.Run(v.matcher+"<>"+v.target, func(t *testing.T) { + Convey("test", t, func() { + matcher := NewNameMatcher(v.matcher) + target := NewNameMatcher(v.target) + So(matcher.IsSupersetOf(target), ShouldEqual, v.expects) + }) + }) + } +} diff --git a/common/nat/nat.go b/common/nat/nat.go new file mode 100644 index 0000000..7e6da00 --- /dev/null +++ b/common/nat/nat.go @@ -0,0 +1,256 @@ +/* +The majority of this code is taken from a utility I wrote for pion/stun +https://github.com/pion/stun/blob/master/cmd/stun-nat-behaviour/main.go + +Copyright 2018 Pion LLC + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ + +package nat + +import ( + "errors" + "fmt" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/proxy" + "log" + "net" + "net/url" + "time" + + "github.com/pion/stun/v3" +) + +var ErrTimedOut = errors.New("timed out waiting for response") + +const ( + NATUnknown = "unknown" + NATRestricted = "restricted" + NATUnrestricted = "unrestricted" +) + +// Deprecated: Use CheckIfRestrictedNATWithProxy Instead. +func CheckIfRestrictedNAT(server string) (bool, error) { + return CheckIfRestrictedNATWithProxy(server, nil) +} + +// CheckIfRestrictedNATWithProxy checks the NAT mapping and filtering +// behaviour and returns true if the NAT is restrictive +// (address-dependent mapping and/or port-dependent filtering) +// and false if the NAT is unrestrictive (meaning it +// will work with most other NATs), +func CheckIfRestrictedNATWithProxy(server string, proxy *url.URL) (bool, error) { + return isRestrictedMapping(server, proxy) +} + +// Performs two tests from RFC 5780 to determine whether the mapping type +// of the client's NAT is address-independent or address-dependent +// Returns true if the mapping is address-dependent and false otherwise +func isRestrictedMapping(addrStr string, proxy *url.URL) (bool, error) { + var xorAddr1 stun.XORMappedAddress + var xorAddr2 stun.XORMappedAddress + + mapTestConn, err := connect(addrStr, proxy) + if err != nil { + return false, fmt.Errorf("Error creating STUN connection: %w", err) + } + + defer mapTestConn.Close() + + // Test I: Regular binding request + message := stun.MustBuild(stun.TransactionID, stun.BindingRequest) + + resp, err := mapTestConn.RoundTrip(message, mapTestConn.PrimaryAddr) + if err != nil { + return false, fmt.Errorf("Error completing roundtrip map test: %w", err) + } + + // Decoding XOR-MAPPED-ADDRESS attribute from message. 
+	if err = xorAddr1.GetFrom(resp); err != nil {
+		return false, fmt.Errorf("Error retrieving XOR-MAPPED-ADDRESS response: %w", err)
+	}
+
+	// Decoding OTHER-ADDRESS attribute from message.
+	var otherAddr stun.OtherAddress
+	if err = otherAddr.GetFrom(resp); err != nil {
+		return false, fmt.Errorf("NAT discovery feature not supported: %w", err)
+	}
+
+	if err = mapTestConn.AddOtherAddr(otherAddr.String()); err != nil {
+		return false, fmt.Errorf("Error resolving address %s: %w", otherAddr.String(), err)
+	}
+
+	// Test II: Send binding request to other address
+	resp, err = mapTestConn.RoundTrip(message, mapTestConn.OtherAddr)
+	if err != nil {
+		return false, fmt.Errorf("Error retrieving server response: %w", err)
+	}
+
+	// Decoding XOR-MAPPED-ADDRESS attribute from message.
+	if err = xorAddr2.GetFrom(resp); err != nil {
+		return false, fmt.Errorf("Error retrieving XOR-MAPPED-ADDRESS response: %w", err)
+	}
+
+	return xorAddr1.String() != xorAddr2.String(), nil
+
+}
+
+// Performs two tests from RFC 5780 to determine whether the filtering type
+// of the client's NAT is port-dependent.
+// Returns true if the filtering is port-dependent and false otherwise.
+// Note: This function is no longer used because a client's NAT type is
+// determined only by their mapping type, but the functionality might
+// be useful in the future and remains here.
+func isRestrictedFiltering(addrStr string, proxy *url.URL) (bool, error) {
+	var xorAddr stun.XORMappedAddress
+
+	mapTestConn, err := connect(addrStr, proxy)
+	if err != nil {
+		log.Printf("Error creating STUN connection: %s", err.Error())
+		return false, err
+	}
+
+	defer mapTestConn.Close()
+
+	// Test I: Regular binding request
+	message := stun.MustBuild(stun.TransactionID, stun.BindingRequest)
+
+	resp, err := mapTestConn.RoundTrip(message, mapTestConn.PrimaryAddr)
+	if err == ErrTimedOut {
+		log.Printf("Error: no response from server")
+		return false, err
+	}
+	if err != nil {
+		log.Printf("Error: %s", err.Error())
+		return false, err
+	}
+
+	// Decoding XOR-MAPPED-ADDRESS attribute from message.
+	if err = xorAddr.GetFrom(resp); err != nil {
+		log.Printf("Error retrieving XOR-MAPPED-ADDRESS from response: %s", err.Error())
+		return false, err
+	}
+
+	// Test III: Request port change
+	message.Add(stun.AttrChangeRequest, []byte{0x00, 0x00, 0x00, 0x02})
+
+	_, err = mapTestConn.RoundTrip(message, mapTestConn.PrimaryAddr)
+	if err != ErrTimedOut && err != nil {
+		// something else went wrong
+		log.Printf("Error reading response from server: %s", err.Error())
+		return false, err
+	}
+
+	return err == ErrTimedOut, nil
+}
+
+// Given an address string, returns a StunServerConn
+func connect(addrStr string, proxyAddr *url.URL) (*StunServerConn, error) {
+	// Creating a "connection" to STUN server.
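+	// Two paths follow: a plain UDP socket when no proxy is configured, or
+	// a SOCKS5 UDP-associate connection through the proxy. Both satisfy
+	// net.PacketConn, so the rest of the code is agnostic to the transport.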
+ var conn net.PacketConn + + ResolveUDPAddr := net.ResolveUDPAddr + if proxyAddr != nil { + socksClient := proxy.NewSocks5UDPClient(proxyAddr) + ResolveUDPAddr = socksClient.ResolveUDPAddr + } + + addr, err := ResolveUDPAddr("udp4", addrStr) + if err != nil { + log.Printf("Error resolving address: %s\n", err.Error()) + return nil, err + } + + if proxyAddr == nil { + c, err := net.ListenUDP("udp4", nil) + if err != nil { + return nil, err + } + conn = c + } else { + socksClient := proxy.NewSocks5UDPClient(proxyAddr) + c, err := socksClient.ListenPacket("udp", nil) + if err != nil { + return nil, err + } + conn = c + } + + mChan := listen(conn) + + return &StunServerConn{ + conn: conn, + PrimaryAddr: addr, + messageChan: mChan, + }, nil +} + +type StunServerConn struct { + conn net.PacketConn + PrimaryAddr *net.UDPAddr + OtherAddr *net.UDPAddr + messageChan chan *stun.Message +} + +func (c *StunServerConn) Close() { + c.conn.Close() +} + +func (c *StunServerConn) RoundTrip(msg *stun.Message, addr net.Addr) (*stun.Message, error) { + _, err := c.conn.WriteTo(msg.Raw, addr) + if err != nil { + return nil, err + } + + // Wait for response or timeout + select { + case m, ok := <-c.messageChan: + if !ok { + return nil, fmt.Errorf("error reading from messageChan") + } + return m, nil + case <-time.After(10 * time.Second): + return nil, ErrTimedOut + } +} + +func (c *StunServerConn) AddOtherAddr(addrStr string) error { + addr2, err := net.ResolveUDPAddr("udp4", addrStr) + if err != nil { + return err + } + c.OtherAddr = addr2 + return nil +} + +// taken from https://github.com/pion/stun/blob/master/cmd/stun-traversal/main.go +func listen(conn net.PacketConn) chan *stun.Message { + messages := make(chan *stun.Message) + go func() { + for { + buf := make([]byte, 1024) + + n, _, err := conn.ReadFrom(buf) + if err != nil { + close(messages) + return + } + buf = buf[:n] + + m := new(stun.Message) + m.Raw = buf + err = m.Decode() + if err != nil { + close(messages) + return + } + + messages <- m + } + }() + return messages +} diff --git a/common/proxy/check.go b/common/proxy/check.go new file mode 100644 index 0000000..797b3eb --- /dev/null +++ b/common/proxy/check.go @@ -0,0 +1,18 @@ +package proxy + +import ( + "errors" + "net/url" + "strings" +) + +var errUnsupportedProxyType = errors.New("unsupported proxy type") + +func CheckProxyProtocolSupport(proxy *url.URL) error { + switch strings.ToLower(proxy.Scheme) { + case "socks5": + return nil + default: + return errUnsupportedProxyType + } +} diff --git a/common/proxy/client.go b/common/proxy/client.go new file mode 100644 index 0000000..c111c71 --- /dev/null +++ b/common/proxy/client.go @@ -0,0 +1,274 @@ +package proxy + +import ( + "context" + "errors" + "log" + "net" + "net/url" + "strconv" + "time" + + "github.com/miekg/dns" + "github.com/pion/transport/v3" + "github.com/txthinking/socks5" +) + +func NewSocks5UDPClient(addr *url.URL) SocksClient { + return SocksClient{addr: addr} +} + +type SocksClient struct { + addr *url.URL +} + +type SocksConn struct { + net.Conn + socks5Client *socks5.Client +} + +func (s SocksConn) SetReadBuffer(bytes int) error { + return nil +} + +func (s SocksConn) SetWriteBuffer(bytes int) error { + return nil +} + +func (s SocksConn) ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) { + var buf [2000]byte + n, err = s.Conn.Read(buf[:]) + if err != nil { + return 0, nil, err + } + Datagram, err := socks5.NewDatagramFromBytes(buf[:n]) + if err != nil { + return 0, nil, err + } + addr, err = 
net.ResolveUDPAddr("udp", Datagram.Address()) + if err != nil { + return 0, nil, err + } + n = copy(b, Datagram.Data) + if n < len(Datagram.Data) { + return 0, nil, errors.New("short buffer") + } + return len(Datagram.Data), addr, nil +} + +func (s SocksConn) ReadMsgUDP(b, oob []byte) (n, oobn, flags int, addr *net.UDPAddr, err error) { + panic("unimplemented") +} + +func (s SocksConn) WriteToUDP(b []byte, addr *net.UDPAddr) (int, error) { + + a, addrb, portb, err := socks5.ParseAddress(addr.String()) + if err != nil { + return 0, err + } + packet := socks5.NewDatagram(a, addrb, portb, b) + _, err = s.Conn.Write(packet.Bytes()) + if err != nil { + return 0, err + } + return len(b), nil +} + +func (s SocksConn) WriteMsgUDP(b, oob []byte, addr *net.UDPAddr) (n, oobn int, err error) { + panic("unimplemented") +} + +func (sc *SocksClient) ListenPacket(network string, locAddr *net.UDPAddr) (transport.UDPConn, error) { + conn, err := sc.listenPacket() + if err != nil { + log.Println("[SOCKS5 Client Error] cannot listen packet", err) + } + return conn, err +} + +func (sc *SocksClient) listenPacket() (transport.UDPConn, error) { + var username, password string + if sc.addr.User != nil { + username = sc.addr.User.Username() + password, _ = sc.addr.User.Password() + } + client, err := socks5.NewClient( + sc.addr.Host, + username, password, 300, 300) + if err != nil { + return nil, err + } + + err = client.Negotiate(nil) + if err != nil { + return nil, err + } + + udpRequest := socks5.NewRequest(socks5.CmdUDP, socks5.ATYPIPv4, []byte{0x00, 0x00, 0x00, 0x00}, []byte{0x00, 0x00}) + + reply, err := client.Request(udpRequest) + if err != nil { + return nil, err + } + + udpServerAddr := socks5.ToAddress(reply.Atyp, reply.BndAddr, reply.BndPort) + + conn, err := net.Dial("udp", udpServerAddr) + if err != nil { + return nil, err + } + + return &SocksConn{conn, client}, nil +} + +func (s SocksConn) WriteTo(p []byte, addr net.Addr) (n int, err error) { + return s.WriteToUDP(p, addr.(*net.UDPAddr)) +} + +func (s SocksConn) ReadFrom(p []byte) (n int, addr net.Addr, err error) { + return s.ReadFromUDP(p) +} + +func (s SocksConn) Read(b []byte) (int, error) { + panic("implement me") +} + +func (s SocksConn) RemoteAddr() net.Addr { + panic("implement me") +} + +func (s SocksConn) Write(b []byte) (int, error) { + panic("implement me") +} + +func (sc *SocksClient) ResolveUDPAddr(network string, address string) (*net.UDPAddr, error) { + dnsServer, err := net.ResolveUDPAddr("udp", "1.1.1.1:53") + if err != nil { + return nil, err + } + proxiedResolver := newDnsResolver(sc, dnsServer) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + host, port, err := net.SplitHostPort(address) + if err != nil { + return nil, err + } + ip, err := proxiedResolver.lookupIPAddr(ctx, host, network == "udp6") + if err != nil { + return nil, err + } + if len(ip) <= 0 { + return nil, errors.New("cannot resolve hostname: NXDOMAIN") + } + switch network { + case "udp4": + var v4IPAddr []net.IPAddr + for _, v := range ip { + if v.IP.To4() != nil { + v4IPAddr = append(v4IPAddr, v) + } + } + ip = v4IPAddr + case "udp6": + var v6IPAddr []net.IPAddr + for _, v := range ip { + if v.IP.To4() == nil { + v6IPAddr = append(v6IPAddr, v) + } + } + ip = v6IPAddr + case "udp": + default: + return nil, errors.New("unknown network") + } + + if len(ip) <= 0 { + return nil, errors.New("cannot resolve hostname: so suitable address") + } + + portInInt, err := strconv.ParseInt(port, 10, 32) + return &net.UDPAddr{ + IP: 
ip[0].IP, + Port: int(portInInt), + Zone: "", + }, nil +} + +func newDnsResolver(sc *SocksClient, + serverAddress net.Addr) *dnsResolver { + return &dnsResolver{sc: sc, serverAddress: serverAddress} +} + +type dnsResolver struct { + sc *SocksClient + serverAddress net.Addr +} + +func (r *dnsResolver) lookupIPAddr(ctx context.Context, host string, ipv6 bool) ([]net.IPAddr, error) { + packetConn, err := r.sc.listenPacket() + if err != nil { + return nil, err + } + msg := new(dns.Msg) + if !ipv6 { + msg.SetQuestion(dns.Fqdn(host), dns.TypeA) + } else { + msg.SetQuestion(dns.Fqdn(host), dns.TypeAAAA) + } + encodedMsg, err := msg.Pack() + if err != nil { + log.Println(err.Error()) + } + for i := 2; i >= 0; i-- { + _, err := packetConn.WriteTo(encodedMsg, r.serverAddress) + if err != nil { + log.Println(err.Error()) + } + } + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + go func() { + <-ctx.Done() + packetConn.Close() + }() + var dataBuf [1600]byte + n, _, err := packetConn.ReadFrom(dataBuf[:]) + if err != nil { + return nil, err + } + err = msg.Unpack(dataBuf[:n]) + if err != nil { + return nil, err + } + var returnedIPs []net.IPAddr + for _, resp := range msg.Answer { + switch respTyped := resp.(type) { + case *dns.A: + returnedIPs = append(returnedIPs, net.IPAddr{IP: respTyped.A}) + case *dns.AAAA: + returnedIPs = append(returnedIPs, net.IPAddr{IP: respTyped.AAAA}) + } + } + return returnedIPs, nil +} + +func NewTransportWrapper(sc *SocksClient, innerNet transport.Net) transport.Net { + return &transportWrapper{sc: sc, Net: innerNet} +} + +type transportWrapper struct { + transport.Net + sc *SocksClient +} + +func (t *transportWrapper) ListenUDP(network string, locAddr *net.UDPAddr) (transport.UDPConn, error) { + return t.sc.ListenPacket(network, nil) +} + +func (t *transportWrapper) ListenPacket(network string, address string) (net.PacketConn, error) { + return t.sc.ListenPacket(network, nil) +} + +func (t *transportWrapper) ResolveUDPAddr(network string, address string) (*net.UDPAddr, error) { + return t.sc.ResolveUDPAddr(network, address) +} diff --git a/common/safelog/log.go b/common/safelog/log.go deleted file mode 100644 index 1241676..0000000 --- a/common/safelog/log.go +++ /dev/null @@ -1,62 +0,0 @@ -//Package for a safer logging wrapper around the standard logging package - -//import "git.torproject.org/pluggable-transports/snowflake.git/common/safelog" -package safelog - -import ( - "bytes" - "io" - "regexp" -) - -const ipv4Address = `\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}` -const ipv6Address = `([0-9a-fA-F]{0,4}:){5,7}([0-9a-fA-F]{0,4})?` -const ipv6Compressed = `([0-9a-fA-F]{0,4}:){0,5}([0-9a-fA-F]{0,4})?(::)([0-9a-fA-F]{0,4}:){0,5}([0-9a-fA-F]{0,4})?` -const ipv6Full = `(` + ipv6Address + `(` + ipv4Address + `))` + - `|(` + ipv6Compressed + `(` + ipv4Address + `))` + - `|(` + ipv6Address + `)` + `|(` + ipv6Compressed + `)` -const optionalPort = `(:\d{1,5})?` -const addressPattern = `((` + ipv4Address + `)|(\[(` + ipv6Full + `)\])|(` + ipv6Full + `))` + optionalPort -const fullAddrPattern = `(^|\s|[^\w:])` + addressPattern + `(\s|(:\s)|[^\w:]|$)` - -var scrubberPatterns = []*regexp.Regexp{ - regexp.MustCompile(fullAddrPattern), -} - -var addressRegexp = regexp.MustCompile(addressPattern) - -// An io.Writer that can be used as the output for a logger that first -// sanitizes logs and then writes to the provided io.Writer -type LogScrubber struct { - Output io.Writer - buffer []byte -} - -func scrub(b []byte) []byte { - scrubbedBytes := b - for _, pattern := 
range scrubberPatterns { - // this is a workaround since go does not yet support look ahead or look - // behind for regular expressions. - scrubbedBytes = pattern.ReplaceAllFunc(scrubbedBytes, func(b []byte) []byte { - return addressRegexp.ReplaceAll(b, []byte("[scrubbed]")) - }) - } - return scrubbedBytes -} - -func (ls *LogScrubber) Write(b []byte) (n int, err error) { - n = len(b) - ls.buffer = append(ls.buffer, b...) - for { - i := bytes.LastIndexByte(ls.buffer, '\n') - if i == -1 { - return - } - fullLines := ls.buffer[:i+1] - _, err = ls.Output.Write(scrub(fullLines)) - if err != nil { - return - } - ls.buffer = ls.buffer[i+1:] - } -} diff --git a/common/safelog/log_test.go b/common/safelog/log_test.go deleted file mode 100644 index 16edbc9..0000000 --- a/common/safelog/log_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package safelog - -import ( - "bytes" - "log" - "testing" -) - -//Check to make sure that addresses split across calls to write are still scrubbed -func TestLogScrubberSplit(t *testing.T) { - input := []byte("test\nhttp2: panic serving [2620:101:f000:780:9097:75b1:519f:dbb8]:58344: interface conversion: *http2.responseWriter is not http.Hijacker: missing method Hijack\n") - - expected := "test\nhttp2: panic serving [scrubbed]: interface conversion: *http2.responseWriter is not http.Hijacker: missing method Hijack\n" - - var buff bytes.Buffer - scrubber := &LogScrubber{Output: &buff} - n, err := scrubber.Write(input[:12]) //test\nhttp2: - if n != 12 { - t.Errorf("wrong number of bytes %d", n) - } - if err != nil { - t.Errorf("%q", err) - } - if buff.String() != "test\n" { - t.Errorf("Got %q, expected %q", buff.String(), "test\n") - } - - n, err = scrubber.Write(input[12:30]) //panic serving [2620:101:f - if n != 18 { - t.Errorf("wrong number of bytes %d", n) - } - if err != nil { - t.Errorf("%q", err) - } - if buff.String() != "test\n" { - t.Errorf("Got %q, expected %q", buff.String(), "test\n") - } - - n, err = scrubber.Write(input[30:]) //000:780:9097:75b1:519f:dbb8]:58344: interface conversion: *http2.responseWriter is not http.Hijacker: missing method Hijack\n - if n != (len(input) - 30) { - t.Errorf("wrong number of bytes %d", n) - } - if err != nil { - t.Errorf("%q", err) - } - if buff.String() != expected { - t.Errorf("Got %q, expected %q", buff.String(), expected) - } - -} - -//Test the log scrubber on known problematic log messages -func TestLogScrubberMessages(t *testing.T) { - for _, test := range []struct { - input, expected string - }{ - { - "http: TLS handshake error from 129.97.208.23:38310: ", - "http: TLS handshake error from [scrubbed]: \n", - }, - { - "http2: panic serving [2620:101:f000:780:9097:75b1:519f:dbb8]:58344: interface conversion: *http2.responseWriter is not http.Hijacker: missing method Hijack", - "http2: panic serving [scrubbed]: interface conversion: *http2.responseWriter is not http.Hijacker: missing method Hijack\n", - }, - { - //Make sure it doesn't scrub fingerprint - "a=fingerprint:sha-256 33:B6:FA:F6:94:CA:74:61:45:4A:D2:1F:2C:2F:75:8A:D9:EB:23:34:B2:30:E9:1B:2A:A6:A9:E0:44:72:CC:74", - "a=fingerprint:sha-256 33:B6:FA:F6:94:CA:74:61:45:4A:D2:1F:2C:2F:75:8A:D9:EB:23:34:B2:30:E9:1B:2A:A6:A9:E0:44:72:CC:74\n", - }, - { - //try with enclosing parens - "(1:2:3:4:c:d:e:f) {1:2:3:4:c:d:e:f}", - "([scrubbed]) {[scrubbed]}\n", - }, - { - //Make sure it doesn't scrub timestamps - "2019/05/08 15:37:31 starting", - "2019/05/08 15:37:31 starting\n", - }, - } { - var buff bytes.Buffer - log.SetFlags(0) //remove all extra log output for test 
comparisons - log.SetOutput(&LogScrubber{Output: &buff}) - log.Print(test.input) - if buff.String() != test.expected { - t.Errorf("%q: got %q, expected %q", test.input, buff.String(), test.expected) - } - } - -} - -func TestLogScrubberGoodFormats(t *testing.T) { - for _, addr := range []string{ - // IPv4 - "1.2.3.4", - "255.255.255.255", - // IPv4 with port - "1.2.3.4:55", - "255.255.255.255:65535", - // IPv6 - "1:2:3:4:c:d:e:f", - "1111:2222:3333:4444:CCCC:DDDD:EEEE:FFFF", - // IPv6 with brackets - "[1:2:3:4:c:d:e:f]", - "[1111:2222:3333:4444:CCCC:DDDD:EEEE:FFFF]", - // IPv6 with brackets and port - "[1:2:3:4:c:d:e:f]:55", - "[1111:2222:3333:4444:CCCC:DDDD:EEEE:FFFF]:65535", - // compressed IPv6 - "::f", - "::d:e:f", - "1:2:3::", - "1:2:3::d:e:f", - "1:2:3:d:e:f::", - "::1:2:3:d:e:f", - "1111:2222:3333::DDDD:EEEE:FFFF", - // compressed IPv6 with brackets - "[::d:e:f]", - "[1:2:3::]", - "[1:2:3::d:e:f]", - "[1111:2222:3333::DDDD:EEEE:FFFF]", - "[1:2:3:4:5:6::8]", - "[1::7:8]", - // compressed IPv6 with brackets and port - "[1::]:58344", - "[::d:e:f]:55", - "[1:2:3::]:55", - "[1:2:3::d:e:f]:55", - "[1111:2222:3333::DDDD:EEEE:FFFF]:65535", - // IPv4-compatible and IPv4-mapped - "::255.255.255.255", - "::ffff:255.255.255.255", - "[::255.255.255.255]", - "[::ffff:255.255.255.255]", - "[::255.255.255.255]:65535", - "[::ffff:255.255.255.255]:65535", - "[::ffff:0:255.255.255.255]", - "[2001:db8:3:4::192.0.2.33]", - } { - var buff bytes.Buffer - log.SetFlags(0) //remove all extra log output for test comparisons - log.SetOutput(&LogScrubber{Output: &buff}) - log.Print(addr) - if buff.String() != "[scrubbed]\n" { - t.Errorf("%q: Got %q, expected %q", addr, buff.String(), "[scrubbed]\n") - } - } -} diff --git a/common/sqsclient/sqsclient.go b/common/sqsclient/sqsclient.go new file mode 100644 index 0000000..a00c132 --- /dev/null +++ b/common/sqsclient/sqsclient.go @@ -0,0 +1,18 @@ +package sqsclient + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/sqs" +) + +type SQSClient interface { + ReceiveMessage(ctx context.Context, input *sqs.ReceiveMessageInput, optFns ...func(*sqs.Options)) (*sqs.ReceiveMessageOutput, error) + ListQueues(ctx context.Context, input *sqs.ListQueuesInput, optFns ...func(*sqs.Options)) (*sqs.ListQueuesOutput, error) + GetQueueAttributes(ctx context.Context, input *sqs.GetQueueAttributesInput, optFns ...func(*sqs.Options)) (*sqs.GetQueueAttributesOutput, error) + DeleteQueue(ctx context.Context, input *sqs.DeleteQueueInput, optFns ...func(*sqs.Options)) (*sqs.DeleteQueueOutput, error) + CreateQueue(ctx context.Context, input *sqs.CreateQueueInput, optFns ...func(*sqs.Options)) (*sqs.CreateQueueOutput, error) + SendMessage(ctx context.Context, input *sqs.SendMessageInput, optFns ...func(*sqs.Options)) (*sqs.SendMessageOutput, error) + DeleteMessage(ctx context.Context, input *sqs.DeleteMessageInput, optFns ...func(*sqs.Options)) (*sqs.DeleteMessageOutput, error) + GetQueueUrl(ctx context.Context, input *sqs.GetQueueUrlInput, optFns ...func(*sqs.Options)) (*sqs.GetQueueUrlOutput, error) +} diff --git a/common/sqsclient/sqsclient_mock.go b/common/sqsclient/sqsclient_mock.go new file mode 100644 index 0000000..7224730 --- /dev/null +++ b/common/sqsclient/sqsclient_mock.go @@ -0,0 +1,196 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: common/sqsclient/sqsclient.go + +// Package mock_sqsclient is a generated GoMock package. 
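+//
+// Typical use in a test looks roughly like this (a sketch of the standard
+// gomock workflow, not code from this change):
+//
+//	ctrl := gomock.NewController(t)
+//	client := NewMockSQSClient(ctrl)
+//	client.EXPECT().
+//		SendMessage(gomock.Any(), gomock.Any()).
+//		Return(&sqs.SendMessageOutput{}, nil)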
+package sqsclient + +import ( + context "context" + reflect "reflect" + + sqs "github.com/aws/aws-sdk-go-v2/service/sqs" + gomock "github.com/golang/mock/gomock" +) + +// MockSQSClient is a mock of SQSClient interface. +type MockSQSClient struct { + ctrl *gomock.Controller + recorder *MockSQSClientMockRecorder +} + +// MockSQSClientMockRecorder is the mock recorder for MockSQSClient. +type MockSQSClientMockRecorder struct { + mock *MockSQSClient +} + +// NewMockSQSClient creates a new mock instance. +func NewMockSQSClient(ctrl *gomock.Controller) *MockSQSClient { + mock := &MockSQSClient{ctrl: ctrl} + mock.recorder = &MockSQSClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSQSClient) EXPECT() *MockSQSClientMockRecorder { + return m.recorder +} + +// CreateQueue mocks base method. +func (m *MockSQSClient) CreateQueue(ctx context.Context, input *sqs.CreateQueueInput, optFns ...func(*sqs.Options)) (*sqs.CreateQueueOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, input} + for _, a := range optFns { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateQueue", varargs...) + ret0, _ := ret[0].(*sqs.CreateQueueOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateQueue indicates an expected call of CreateQueue. +func (mr *MockSQSClientMockRecorder) CreateQueue(ctx, input interface{}, optFns ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, input}, optFns...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateQueue", reflect.TypeOf((*MockSQSClient)(nil).CreateQueue), varargs...) +} + +// DeleteMessage mocks base method. +func (m *MockSQSClient) DeleteMessage(ctx context.Context, input *sqs.DeleteMessageInput, optFns ...func(*sqs.Options)) (*sqs.DeleteMessageOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, input} + for _, a := range optFns { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteMessage", varargs...) + ret0, _ := ret[0].(*sqs.DeleteMessageOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteMessage indicates an expected call of DeleteMessage. +func (mr *MockSQSClientMockRecorder) DeleteMessage(ctx, input interface{}, optFns ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, input}, optFns...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteMessage", reflect.TypeOf((*MockSQSClient)(nil).DeleteMessage), varargs...) +} + +// DeleteQueue mocks base method. +func (m *MockSQSClient) DeleteQueue(ctx context.Context, input *sqs.DeleteQueueInput, optFns ...func(*sqs.Options)) (*sqs.DeleteQueueOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, input} + for _, a := range optFns { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteQueue", varargs...) + ret0, _ := ret[0].(*sqs.DeleteQueueOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteQueue indicates an expected call of DeleteQueue. +func (mr *MockSQSClientMockRecorder) DeleteQueue(ctx, input interface{}, optFns ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, input}, optFns...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteQueue", reflect.TypeOf((*MockSQSClient)(nil).DeleteQueue), varargs...) +} + +// GetQueueAttributes mocks base method. 
+func (m *MockSQSClient) GetQueueAttributes(ctx context.Context, input *sqs.GetQueueAttributesInput, optFns ...func(*sqs.Options)) (*sqs.GetQueueAttributesOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, input} + for _, a := range optFns { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetQueueAttributes", varargs...) + ret0, _ := ret[0].(*sqs.GetQueueAttributesOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetQueueAttributes indicates an expected call of GetQueueAttributes. +func (mr *MockSQSClientMockRecorder) GetQueueAttributes(ctx, input interface{}, optFns ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, input}, optFns...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetQueueAttributes", reflect.TypeOf((*MockSQSClient)(nil).GetQueueAttributes), varargs...) +} + +// GetQueueUrl mocks base method. +func (m *MockSQSClient) GetQueueUrl(ctx context.Context, input *sqs.GetQueueUrlInput, optFns ...func(*sqs.Options)) (*sqs.GetQueueUrlOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, input} + for _, a := range optFns { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetQueueUrl", varargs...) + ret0, _ := ret[0].(*sqs.GetQueueUrlOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetQueueUrl indicates an expected call of GetQueueUrl. +func (mr *MockSQSClientMockRecorder) GetQueueUrl(ctx, input interface{}, optFns ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, input}, optFns...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetQueueUrl", reflect.TypeOf((*MockSQSClient)(nil).GetQueueUrl), varargs...) +} + +// ListQueues mocks base method. +func (m *MockSQSClient) ListQueues(ctx context.Context, input *sqs.ListQueuesInput, optFns ...func(*sqs.Options)) (*sqs.ListQueuesOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, input} + for _, a := range optFns { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListQueues", varargs...) + ret0, _ := ret[0].(*sqs.ListQueuesOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListQueues indicates an expected call of ListQueues. +func (mr *MockSQSClientMockRecorder) ListQueues(ctx, input interface{}, optFns ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, input}, optFns...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListQueues", reflect.TypeOf((*MockSQSClient)(nil).ListQueues), varargs...) +} + +// ReceiveMessage mocks base method. +func (m *MockSQSClient) ReceiveMessage(ctx context.Context, input *sqs.ReceiveMessageInput, optFns ...func(*sqs.Options)) (*sqs.ReceiveMessageOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, input} + for _, a := range optFns { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ReceiveMessage", varargs...) + ret0, _ := ret[0].(*sqs.ReceiveMessageOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReceiveMessage indicates an expected call of ReceiveMessage. +func (mr *MockSQSClientMockRecorder) ReceiveMessage(ctx, input interface{}, optFns ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, input}, optFns...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReceiveMessage", reflect.TypeOf((*MockSQSClient)(nil).ReceiveMessage), varargs...) +} + +// SendMessage mocks base method. 
+func (m *MockSQSClient) SendMessage(ctx context.Context, input *sqs.SendMessageInput, optFns ...func(*sqs.Options)) (*sqs.SendMessageOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, input} + for _, a := range optFns { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "SendMessage", varargs...) + ret0, _ := ret[0].(*sqs.SendMessageOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SendMessage indicates an expected call of SendMessage. +func (mr *MockSQSClientMockRecorder) SendMessage(ctx, input interface{}, optFns ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, input}, optFns...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMessage", reflect.TypeOf((*MockSQSClient)(nil).SendMessage), varargs...) +} diff --git a/common/sqscreds/generate_creds.go b/common/sqscreds/generate_creds.go new file mode 100644 index 0000000..0f89225 --- /dev/null +++ b/common/sqscreds/generate_creds.go @@ -0,0 +1,36 @@ +package main + +import ( + "fmt" + + sqscreds "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/sqscreds/lib" +) + +// This script can be run to generate the encoded SQS credentials to pass as a CLI param or SOCKS option to the client +func main() { + var accessKey, secretKey string + + fmt.Print("Enter Access Key: ") + _, err := fmt.Scanln(&accessKey) + if err != nil { + fmt.Println("Error reading access key:", err) + return + } + + fmt.Print("Enter Secret Key: ") + _, err = fmt.Scanln(&secretKey) + if err != nil { + fmt.Println("Error reading secret key:", err) + return + } + + awsCreds := sqscreds.AwsCreds{AwsAccessKeyId: accessKey, AwsSecretKey: secretKey} + fmt.Println() + fmt.Println("Encoded Credentials:") + res, err := awsCreds.Base64() + if err != nil { + fmt.Println("Error encoding credentials:", err) + return + } + fmt.Println(res) +} diff --git a/common/sqscreds/lib/sqs_creds.go b/common/sqscreds/lib/sqs_creds.go new file mode 100644 index 0000000..dba1828 --- /dev/null +++ b/common/sqscreds/lib/sqs_creds.go @@ -0,0 +1,35 @@ +package sqscreds + +import ( + "encoding/base64" + "encoding/json" +) + +type AwsCreds struct { + AwsAccessKeyId string `json:"aws-access-key-id"` + AwsSecretKey string `json:"aws-secret-key"` +} + +func (awsCreds AwsCreds) Base64() (string, error) { + jsonData, err := json.Marshal(awsCreds) + if err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(jsonData), nil +} + +func AwsCredsFromBase64(base64Str string) (AwsCreds, error) { + var awsCreds AwsCreds + + jsonData, err := base64.StdEncoding.DecodeString(base64Str) + if err != nil { + return awsCreds, err + } + + err = json.Unmarshal(jsonData, &awsCreds) + if err != nil { + return awsCreds, err + } + + return awsCreds, nil +} diff --git a/common/task/periodic.go b/common/task/periodic.go new file mode 100644 index 0000000..2e8ab22 --- /dev/null +++ b/common/task/periodic.go @@ -0,0 +1,123 @@ +// Package task +// Reused from https://github.com/v2fly/v2ray-core/blob/784775f68922f07d40c9eead63015b2026af2ade/common/task/periodic.go +/* +The MIT License (MIT) + +Copyright (c) 2015-2021 V2Ray & V2Fly Community + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom
the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ +package task + +import ( + "sync" + "time" +) + +// Periodic is a task that runs periodically. +type Periodic struct { + // Interval of the task being run + Interval time.Duration + // Execute is the task function + Execute func() error + // OnError handles the error of the task + OnError func(error) + + access sync.Mutex + timer *time.Timer + running bool +} + +func (t *Periodic) hasClosed() bool { + t.access.Lock() + defer t.access.Unlock() + + return !t.running +} + +func (t *Periodic) checkedExecute() error { + if t.hasClosed() { + return nil + } + + if err := t.Execute(); err != nil { + if t.OnError != nil { + t.OnError(err) + } else { + // default error handling is to shut down the task + t.access.Lock() + t.running = false + t.access.Unlock() + return err + } + } + + t.access.Lock() + defer t.access.Unlock() + + if !t.running { + return nil + } + + t.timer = time.AfterFunc(t.Interval, func() { + t.checkedExecute() + }) + + return nil +} + +// Start implements common.Runnable. +func (t *Periodic) Start() error { + t.access.Lock() + if t.running { + t.access.Unlock() + return nil + } + t.running = true + t.access.Unlock() + + if err := t.checkedExecute(); err != nil { + t.access.Lock() + t.running = false + t.access.Unlock() + return err + } + + return nil +} + +func (t *Periodic) WaitThenStart() { + time.AfterFunc(t.Interval, func() { + t.Start() + }) +} + +// Close implements common.Closable. +func (t *Periodic) Close() error { + t.access.Lock() + defer t.access.Unlock() + + t.running = false + if t.timer != nil { + t.timer.Stop() + t.timer = nil + } + + return nil +} diff --git a/common/turbotunnel/clientid.go b/common/turbotunnel/clientid.go new file mode 100644 index 0000000..17257e1 --- /dev/null +++ b/common/turbotunnel/clientid.go @@ -0,0 +1,28 @@ +package turbotunnel + +import ( + "crypto/rand" + "encoding/hex" +) + +// ClientID is an abstract identifier that binds together all the communications +// belonging to a single client session, even though those communications may +// arrive from multiple IP addresses or over multiple lower-level connections. +// It plays the same role that an (IP address, port number) tuple plays in a +// net.UDPConn: it's the return address pertaining to a long-lived abstract +// client session. The client attaches its ClientID to each of its +// communications, enabling the server to disambiguate requests among its many +// clients. ClientID implements the net.Addr interface. 
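+// +// A minimal illustration (not code from this package): because ClientID is a +// comparable value type, a server can key per-session state by ClientID rather +// than by volatile network address: +// +//	queues := make(map[ClientID]chan []byte) +//	id := NewClientID() +//	queues[id] = make(chan []byte, 64)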
+type ClientID [8]byte + +func NewClientID() ClientID { + var id ClientID + _, err := rand.Read(id[:]) + if err != nil { + panic(err) + } + return id +} + +func (id ClientID) Network() string { return "clientid" } +func (id ClientID) String() string { return hex.EncodeToString(id[:]) } diff --git a/common/turbotunnel/clientmap.go b/common/turbotunnel/clientmap.go new file mode 100644 index 0000000..94e8a99 --- /dev/null +++ b/common/turbotunnel/clientmap.go @@ -0,0 +1,146 @@ +package turbotunnel + +import ( + "container/heap" + "net" + "sync" + "time" +) + +// clientRecord is a record of a recently seen client, with the time it was last +// seen and a send queue. +type clientRecord struct { + Addr net.Addr + LastSeen time.Time + SendQueue chan []byte +} + +// ClientMap manages a mapping of live clients (keyed by address, which will be +// a ClientID) to their respective send queues. ClientMap's functions are safe +// to call from multiple goroutines. +type ClientMap struct { + // We use an inner structure to avoid exposing public heap.Interface + // functions to users of ClientMap. + inner clientMapInner + // Synchronizes access to inner. + lock sync.Mutex +} + +// NewClientMap creates a ClientMap that expires clients after a timeout. +// +// The timeout does not have to be kept in sync with smux's internal idle +// timeout. If a client is removed from the client map while the smux session is +// still live, the worst that can happen is a loss of whatever packets were in +// the send queue at the time. If smux later decides to send more packets to the +// same client, we'll instantiate a new send queue, and if the client ever +// connects again with the proper client ID, we'll deliver them. +func NewClientMap(timeout time.Duration) *ClientMap { + m := &ClientMap{ + inner: clientMapInner{ + byAge: make([]*clientRecord, 0), + byAddr: make(map[net.Addr]int), + }, + } + go func() { + for { + time.Sleep(timeout / 2) + now := time.Now() + m.lock.Lock() + m.inner.removeExpired(now, timeout) + m.lock.Unlock() + } + }() + return m +} + +// SendQueue returns the send queue corresponding to addr, creating it if +// necessary. +func (m *ClientMap) SendQueue(addr net.Addr) chan []byte { + m.lock.Lock() + queue := m.inner.SendQueue(addr, time.Now()) + m.lock.Unlock() + return queue +} + +// clientMapInner is the inner type of ClientMap, implementing heap.Interface. +// byAge is the backing store, a heap ordered by LastSeen time, to facilitate +// expiring old client records. byAddr is a map from addresses (i.e., ClientIDs) +// to heap indices, to allow looking up by address. Unlike ClientMap, +// clientMapInner requires external synchronization. +type clientMapInner struct { + byAge []*clientRecord + byAddr map[net.Addr]int +} + +// removeExpired removes all client records whose LastSeen timestamp is more +// than timeout in the past. +func (inner *clientMapInner) removeExpired(now time.Time, timeout time.Duration) { + for len(inner.byAge) > 0 && now.Sub(inner.byAge[0].LastSeen) >= timeout { + heap.Pop(inner) + } +} + +// SendQueue finds the existing client record corresponding to addr, or creates +// a new one if none exists yet. It updates the client record's LastSeen time +// and returns its SendQueue. +func (inner *clientMapInner) SendQueue(addr net.Addr, now time.Time) chan []byte { + var record *clientRecord + i, ok := inner.byAddr[addr] + if ok { + // Found one, update its LastSeen. + record = inner.byAge[i] + record.LastSeen = now + heap.Fix(inner, i) + } else { + // Not found, create a new one.
+ record = &clientRecord{ + Addr: addr, + LastSeen: now, + SendQueue: make(chan []byte, queueSize), + } + heap.Push(inner, record) + } + return record.SendQueue +} + +// heap.Interface for clientMapInner. + +func (inner *clientMapInner) Len() int { + if len(inner.byAge) != len(inner.byAddr) { + panic("inconsistent clientMap") + } + return len(inner.byAge) +} + +func (inner *clientMapInner) Less(i, j int) bool { + return inner.byAge[i].LastSeen.Before(inner.byAge[j].LastSeen) +} + +func (inner *clientMapInner) Swap(i, j int) { + inner.byAge[i], inner.byAge[j] = inner.byAge[j], inner.byAge[i] + inner.byAddr[inner.byAge[i].Addr] = i + inner.byAddr[inner.byAge[j].Addr] = j +} + +func (inner *clientMapInner) Push(x interface{}) { + record := x.(*clientRecord) + if _, ok := inner.byAddr[record.Addr]; ok { + panic("duplicate address in clientMap") + } + // Insert into byAddr map. + inner.byAddr[record.Addr] = len(inner.byAge) + // Insert into byAge slice. + inner.byAge = append(inner.byAge, record) +} + +func (inner *clientMapInner) Pop() interface{} { + n := len(inner.byAddr) + // Remove from byAge slice. + record := inner.byAge[n-1] + inner.byAge[n-1] = nil + inner.byAge = inner.byAge[:n-1] + // Remove from byAddr map. + delete(inner.byAddr, record.Addr) + close(record.SendQueue) + return record +} diff --git a/common/turbotunnel/clientmap_test.go b/common/turbotunnel/clientmap_test.go new file mode 100644 index 0000000..57d794a --- /dev/null +++ b/common/turbotunnel/clientmap_test.go @@ -0,0 +1,18 @@ +package turbotunnel + +import ( + "testing" + "time" +) + +// Benchmark the ClientMap.SendQueue function. This is mainly measuring the cost +// of the mutex operations around the call to clientMapInner.SendQueue. +func BenchmarkSendQueue(b *testing.B) { + m := NewClientMap(1 * time.Hour) + id := NewClientID() + m.SendQueue(id) // populate the entry for id + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.SendQueue(id) + } +} diff --git a/common/turbotunnel/consts.go b/common/turbotunnel/consts.go new file mode 100644 index 0000000..d9bf324 --- /dev/null +++ b/common/turbotunnel/consts.go @@ -0,0 +1,17 @@ +// Package turbotunnel provides support for overlaying a virtual net.PacketConn +// on some other network carrier. +// +// https://github.com/net4people/bbs/issues/9 +package turbotunnel + +import "errors" + +// This magic prefix is how a client opts into turbo tunnel mode. It is just a +// randomly generated byte string. +var Token = [8]byte{0x12, 0x93, 0x60, 0x5d, 0x27, 0x81, 0x75, 0xf5} + +// The size of receive and send queues. +const queueSize = 512 + +var errClosedPacketConn = errors.New("operation on closed connection") +var errNotImplemented = errors.New("not implemented") diff --git a/common/turbotunnel/queuepacketconn.go b/common/turbotunnel/queuepacketconn.go new file mode 100644 index 0000000..6fcc3bf --- /dev/null +++ b/common/turbotunnel/queuepacketconn.go @@ -0,0 +1,168 @@ +package turbotunnel + +import ( + "net" + "sync" + "sync/atomic" + "time" +) + +// taggedPacket is a combination of a []byte and a net.Addr, encapsulating the +// return type of PacketConn.ReadFrom. +type taggedPacket struct { + P []byte + Addr net.Addr +} + +// QueuePacketConn implements net.PacketConn by storing queues of packets. There +// is one incoming queue (where packets are additionally tagged by the source +// address of the client that sent them). There are many outgoing queues, one +// for each client address that has been recently seen. 
The QueueIncoming method +// inserts a packet into the incoming queue, to eventually be returned by +// ReadFrom. WriteTo inserts a packet into an address-specific outgoing queue, +// which can later be accessed through the OutgoingQueue method. +type QueuePacketConn struct { + clients *ClientMap + localAddr net.Addr + recvQueue chan taggedPacket + closeOnce sync.Once + closed chan struct{} + mtu int + // Pool of reusable mtu-sized buffers. + bufPool sync.Pool + // What error to return when the QueuePacketConn is closed. + err atomic.Value +} + +// NewQueuePacketConn makes a new QueuePacketConn, set to track recent clients +// for at least a duration of timeout. The maximum packet size is mtu. +func NewQueuePacketConn(localAddr net.Addr, timeout time.Duration, mtu int) *QueuePacketConn { + return &QueuePacketConn{ + clients: NewClientMap(timeout), + localAddr: localAddr, + recvQueue: make(chan taggedPacket, queueSize), + closed: make(chan struct{}), + mtu: mtu, + bufPool: sync.Pool{New: func() interface{} { return make([]byte, mtu) }}, + } +} + +// QueueIncoming queues an incoming packet and its source address, to be +// returned in a future call to ReadFrom. If p is longer than the MTU, only its +// first MTU bytes will be used. +func (c *QueuePacketConn) QueueIncoming(p []byte, addr net.Addr) { + select { + case <-c.closed: + // If we're closed, silently drop it. + return + default: + } + // Copy the slice so that the caller may reuse it. + buf := c.bufPool.Get().([]byte) + if len(p) < cap(buf) { + buf = buf[:len(p)] + } else { + buf = buf[:cap(buf)] + } + copy(buf, p) + select { + case c.recvQueue <- taggedPacket{buf, addr}: + default: + // Drop the incoming packet if the receive queue is full. + c.Restore(buf) + } +} + +// OutgoingQueue returns the queue of outgoing packets corresponding to addr, +// creating it if necessary. The contents of the queue will be packets that are +// written to the address in question using WriteTo. +func (c *QueuePacketConn) OutgoingQueue(addr net.Addr) <-chan []byte { + return c.clients.SendQueue(addr) +} + +// Restore adds a slice to the internal pool of packet buffers. Typically you +// will call this with a slice from the OutgoingQueue channel once you are done +// using it. (It is not an error to fail to do so, it will just result in more +// allocations.) +func (c *QueuePacketConn) Restore(p []byte) { + if cap(p) >= c.mtu { + c.bufPool.Put(p) + } +} + +// ReadFrom returns a packet and address previously stored by QueueIncoming. +func (c *QueuePacketConn) ReadFrom(p []byte) (int, net.Addr, error) { + select { + case <-c.closed: + return 0, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Addr: c.LocalAddr(), Err: c.err.Load().(error)} + default: + } + select { + case <-c.closed: + return 0, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Addr: c.LocalAddr(), Err: c.err.Load().(error)} + case packet := <-c.recvQueue: + n := copy(p, packet.P) + c.Restore(packet.P) + return n, packet.Addr, nil + } +} + +// WriteTo queues an outgoing packet for the given address. The queue can later +// be retrieved using the OutgoingQueue method. If p is longer than the MTU, +// only its first MTU bytes will be used. +func (c *QueuePacketConn) WriteTo(p []byte, addr net.Addr) (int, error) { + select { + case <-c.closed: + return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Addr: c.LocalAddr(), Err: c.err.Load().(error)} + default: + } + // Copy the slice so that the caller may reuse it.
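+	// Buffers from bufPool always have capacity mtu; re-slicing to len(p), or + // to the full capacity when p is oversized, is what implements the + // truncate-to-MTU behavior documented above.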
+ buf := c.bufPool.Get().([]byte) + if len(p) < cap(buf) { + buf = buf[:len(p)] + } else { + buf = buf[:cap(buf)] + } + copy(buf, p) + select { + case c.clients.SendQueue(addr) <- buf: + return len(buf), nil + default: + // Drop the outgoing packet if the send queue is full. + c.Restore(buf) + return len(p), nil + } +} + +// closeWithError unblocks pending operations and makes future operations fail +// with the given error. If err is nil, it becomes errClosedPacketConn. +func (c *QueuePacketConn) closeWithError(err error) error { + var newlyClosed bool + c.closeOnce.Do(func() { + newlyClosed = true + // Store the error to be returned by future PacketConn + // operations. + if err == nil { + err = errClosedPacketConn + } + c.err.Store(err) + close(c.closed) + }) + if !newlyClosed { + return &net.OpError{Op: "close", Net: c.LocalAddr().Network(), Addr: c.LocalAddr(), Err: c.err.Load().(error)} + } + return nil +} + +// Close unblocks pending operations and makes future operations fail with a +// "closed connection" error. +func (c *QueuePacketConn) Close() error { + return c.closeWithError(nil) +} + +// LocalAddr returns the localAddr value that was passed to NewQueuePacketConn. +func (c *QueuePacketConn) LocalAddr() net.Addr { return c.localAddr } + +func (c *QueuePacketConn) SetDeadline(t time.Time) error { return errNotImplemented } +func (c *QueuePacketConn) SetReadDeadline(t time.Time) error { return errNotImplemented } +func (c *QueuePacketConn) SetWriteDeadline(t time.Time) error { return errNotImplemented } diff --git a/common/turbotunnel/queuepacketconn_test.go b/common/turbotunnel/queuepacketconn_test.go new file mode 100644 index 0000000..0ff19b2 --- /dev/null +++ b/common/turbotunnel/queuepacketconn_test.go @@ -0,0 +1,242 @@ +package turbotunnel + +import ( + "bytes" + "fmt" + "net" + "sync" + "testing" + "time" + + "github.com/xtaci/kcp-go/v5" +) + +type emptyAddr struct{} + +func (_ emptyAddr) Network() string { return "empty" } +func (_ emptyAddr) String() string { return "empty" } + +type intAddr int + +func (i intAddr) Network() string { return "int" } +func (i intAddr) String() string { return fmt.Sprintf("%d", i) } + +// Run with -benchmem to see memory allocations. +func BenchmarkQueueIncoming(b *testing.B) { + conn := NewQueuePacketConn(emptyAddr{}, 1*time.Hour, 500) + defer conn.Close() + + b.ResetTimer() + var p [500]byte + for i := 0; i < b.N; i++ { + conn.QueueIncoming(p[:], emptyAddr{}) + } + b.StopTimer() +} + +// BenchmarkWriteTo benchmarks the QueuePacketConn.WriteTo function. +func BenchmarkWriteTo(b *testing.B) { + conn := NewQueuePacketConn(emptyAddr{}, 1*time.Hour, 500) + defer conn.Close() + + b.ResetTimer() + var p [500]byte + for i := 0; i < b.N; i++ { + conn.WriteTo(p[:], emptyAddr{}) + } + b.StopTimer() +} + +// TestQueueIncomingOversize tests that QueueIncoming truncates packets that are +// larger than the MTU. +func TestQueueIncomingOversize(t *testing.T) { + const payload = "abcdefghijklmnopqrstuvwxyz" + conn := NewQueuePacketConn(emptyAddr{}, 1*time.Hour, len(payload)-1) + defer conn.Close() + conn.QueueIncoming([]byte(payload), emptyAddr{}) + var p [500]byte + n, _, err := conn.ReadFrom(p[:]) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(p[:n], []byte(payload[:len(payload)-1])) { + t.Fatalf("payload was %+q, expected %+q", p[:n], payload[:len(payload)-1]) + } +} + +// TestWriteToOversize tests that WriteTo truncates packets that are larger than +// the MTU. 
+func TestWriteToOversize(t *testing.T) { + const payload = "abcdefghijklmnopqrstuvwxyz" + conn := NewQueuePacketConn(emptyAddr{}, 1*time.Hour, len(payload)-1) + defer conn.Close() + conn.WriteTo([]byte(payload), emptyAddr{}) + p := <-conn.OutgoingQueue(emptyAddr{}) + if !bytes.Equal(p, []byte(payload[:len(payload)-1])) { + t.Fatalf("payload was %+q, expected %+q", p, payload[:len(payload)-1]) + } +} + +// TestRestoreMTU tests that Restore ignores any inputs that are not at least +// MTU-sized. +func TestRestoreMTU(t *testing.T) { + const mtu = 500 + const payload = "hello" + conn := NewQueuePacketConn(emptyAddr{}, 1*time.Hour, mtu) + defer conn.Close() + conn.Restore(make([]byte, mtu-1)) + // This WriteTo may use the short slice we just gave to Restore. + conn.WriteTo([]byte(payload), emptyAddr{}) + // Read the queued slice and ensure its capacity is the full MTU. + p := <-conn.OutgoingQueue(emptyAddr{}) + if cap(p) != mtu { + t.Fatalf("cap was %v, expected %v", cap(p), mtu) + } + // Check the payload while we're at it. + if !bytes.Equal(p, []byte(payload)) { + t.Fatalf("payload was %+q, expected %+q", p, payload) + } +} + +// TestRestoreCap tests that Restore can use slices whose cap is at least the +// MTU, even if the len is shorter. +func TestRestoreCap(t *testing.T) { + const mtu = 500 + const payload = "hello" + conn := NewQueuePacketConn(emptyAddr{}, 1*time.Hour, mtu) + defer conn.Close() + conn.Restore(make([]byte, 0, mtu)) + conn.WriteTo([]byte(payload), emptyAddr{}) + p := <-conn.OutgoingQueue(emptyAddr{}) + if !bytes.Equal(p, []byte(payload)) { + t.Fatalf("payload was %+q, expected %+q", p, payload) + } +} + +// DiscardPacketConn is a net.PacketConn whose ReadFrom method blocks forever and +// whose WriteTo method discards whatever it is called with. +type DiscardPacketConn struct{} + +func (_ DiscardPacketConn) ReadFrom(_ []byte) (int, net.Addr, error) { select {} } // block forever +func (_ DiscardPacketConn) WriteTo(p []byte, _ net.Addr) (int, error) { return len(p), nil } +func (_ DiscardPacketConn) Close() error { return nil } +func (_ DiscardPacketConn) LocalAddr() net.Addr { return emptyAddr{} } +func (_ DiscardPacketConn) SetDeadline(t time.Time) error { return nil } +func (_ DiscardPacketConn) SetReadDeadline(t time.Time) error { return nil } +func (_ DiscardPacketConn) SetWriteDeadline(t time.Time) error { return nil } + +// TranscriptPacketConn keeps a log of the []byte argument to every call to +// WriteTo. +type TranscriptPacketConn struct { + Transcript [][]byte + lock sync.Mutex + net.PacketConn +} + +func NewTranscriptPacketConn(inner net.PacketConn) *TranscriptPacketConn { + return &TranscriptPacketConn{ + PacketConn: inner, + } +} + +func (c *TranscriptPacketConn) WriteTo(p []byte, addr net.Addr) (int, error) { + c.lock.Lock() + defer c.lock.Unlock() + + p2 := make([]byte, len(p)) + copy(p2, p) + c.Transcript = append(c.Transcript, p2) + + return c.PacketConn.WriteTo(p, addr) +} + +func (c *TranscriptPacketConn) Length() int { + c.lock.Lock() + defer c.lock.Unlock() + + return len(c.Transcript) +} + +// Tests that QueuePacketConn.WriteTo is compatible with the way kcp-go uses +// PacketConn, allocating source buffers in a sync.Pool. +// +// https://bugs.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/40260 +func TestQueuePacketConnWriteToKCP(t *testing.T) { + // Start a goroutine to constantly exercise kcp UDPSession.tx, writing + // packets with payload "XXXX".
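+	// kcp-go allocates its transmit buffers from a package-global sync.Pool, + // so this goroutine continually recycles buffers through that pool. If + // QueuePacketConn.WriteTo did not copy its input, the packets compared + // below could be overwritten after being queued.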
+ done := make(chan struct{}, 0) + defer close(done) + ready := make(chan struct{}, 0) + go func() { + var readyClose sync.Once + defer readyClose.Do(func() { close(ready) }) + pconn := DiscardPacketConn{} + defer pconn.Close() + loop: + for { + select { + case <-done: + break loop + default: + } + // Create a new UDPSession, send once, then discard the + // UDPSession. + conn, err := kcp.NewConn2(intAddr(2), nil, 0, 0, pconn) + if err != nil { + panic(err) + } + _, err = conn.Write([]byte("XXXX")) + if err != nil { + panic(err) + } + conn.Close() + // Signal the main test to start once we have done one + // iteration of this noisy loop. + readyClose.Do(func() { close(ready) }) + } + }() + + pconn := NewQueuePacketConn(emptyAddr{}, 1*time.Hour, 500) + defer pconn.Close() + addr1 := intAddr(1) + outgoing := pconn.OutgoingQueue(addr1) + + // Once the "XXXX" goroutine is started, repeatedly send a packet, wait, + // then retrieve it and check whether it has changed since being sent. + <-ready + for i := 0; i < 10; i++ { + transcript := NewTranscriptPacketConn(pconn) + conn, err := kcp.NewConn2(addr1, nil, 0, 0, transcript) + if err != nil { + panic(err) + } + _, err = conn.Write([]byte("hello world")) + if err != nil { + panic(err) + } + + // A sleep after the Write makes buffer reuse more likely, + // and allows the connection to flush before closing. + time.Sleep(500 * time.Millisecond) + + err = conn.Close() + if err != nil { + panic(err) + } + + if transcript.Length() == 0 { + panic("empty transcript") + } + + for j, tr := range transcript.Transcript { + p := <-outgoing + // This test is meant to detect unsynchronized memory + // changes, so freeze the slice we just read. + p2 := make([]byte, len(p)) + copy(p2, p) + if !bytes.Equal(p2, tr) { + t.Fatalf("%d %d packet changed between send and recv\nsend: %+q\nrecv: %+q", i, j, tr, p2) + } + } + } +} diff --git a/common/turbotunnel/redialpacketconn.go b/common/turbotunnel/redialpacketconn.go new file mode 100644 index 0000000..9b71440 --- /dev/null +++ b/common/turbotunnel/redialpacketconn.go @@ -0,0 +1,204 @@ +package turbotunnel + +import ( + "context" + "net" + "sync" + "sync/atomic" + "time" +) + +// RedialPacketConn implements a long-lived net.PacketConn atop a sequence of +// other, transient net.PacketConns. RedialPacketConn creates a new +// net.PacketConn by calling a provided dialContext function. Whenever the +// net.PacketConn experiences a ReadFrom or WriteTo error, RedialPacketConn +// calls the dialContext function again and starts sending and receiving packets +// on the new net.PacketConn. RedialPacketConn's own ReadFrom and WriteTo +// methods return an error only when the dialContext function returns an error. +// +// RedialPacketConn uses static local and remote addresses that are independent +// of those of any dialed net.PacketConn. +type RedialPacketConn struct { + localAddr net.Addr + remoteAddr net.Addr + dialContext func(context.Context) (net.PacketConn, error) + recvQueue chan []byte + sendQueue chan []byte + closed chan struct{} + closeOnce sync.Once + // The first dial error, which causes the RedialPacketConn to be + // closed and is returned from future read/write operations. Compare to + // the rerr and werr in io.Pipe. + err atomic.Value +} + +// NewRedialPacketConn makes a new RedialPacketConn, with the given static local +// and remote addresses, and dialContext function.
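+// +// A minimal usage sketch (dialTransient here is a hypothetical rendezvous +// function, not part of this package): +// +//	pconn := NewRedialPacketConn(localAddr, remoteAddr, +//		func(ctx context.Context) (net.PacketConn, error) { +//			return dialTransient(ctx) +//		}) +//	// pconn survives failures of the underlying transient connections.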
+func NewRedialPacketConn( + localAddr, remoteAddr net.Addr, + dialContext func(context.Context) (net.PacketConn, error), +) *RedialPacketConn { + c := &RedialPacketConn{ + localAddr: localAddr, + remoteAddr: remoteAddr, + dialContext: dialContext, + recvQueue: make(chan []byte, queueSize), + sendQueue: make(chan []byte, queueSize), + closed: make(chan struct{}), + err: atomic.Value{}, + } + go c.dialLoop() + return c +} + +// dialLoop repeatedly calls c.dialContext and passes the resulting +// net.PacketConn to c.exchange. It returns only when c is closed or dialContext +// returns an error. +func (c *RedialPacketConn) dialLoop() { + ctx, cancel := context.WithCancel(context.Background()) + for { + select { + case <-c.closed: + cancel() + return + default: + } + conn, err := c.dialContext(ctx) + if err != nil { + c.closeWithError(err) + cancel() + return + } + c.exchange(conn) + conn.Close() + } +} + +// exchange calls ReadFrom on the given net.PacketConn and places the resulting +// packets in the receive queue, and takes packets from the send queue and calls +// WriteTo on them, making the current net.PacketConn active. +func (c *RedialPacketConn) exchange(conn net.PacketConn) { + readErrCh := make(chan error) + writeErrCh := make(chan error) + + go func() { + defer close(readErrCh) + for { + select { + case <-c.closed: + return + case <-writeErrCh: + return + default: + } + + var buf [1500]byte + n, _, err := conn.ReadFrom(buf[:]) + if err != nil { + readErrCh <- err + return + } + p := make([]byte, n) + copy(p, buf[:]) + select { + case c.recvQueue <- p: + default: // OK to drop packets. + } + } + }() + + go func() { + defer close(writeErrCh) + for { + select { + case <-c.closed: + return + case <-readErrCh: + return + case p := <-c.sendQueue: + _, err := conn.WriteTo(p, c.remoteAddr) + if err != nil { + writeErrCh <- err + return + } + } + } + }() + + select { + case <-readErrCh: + case <-writeErrCh: + } +} + +// ReadFrom reads a packet from the currently active net.PacketConn. The +// packet's original remote address is replaced with the RedialPacketConn's own +// remote address. +func (c *RedialPacketConn) ReadFrom(p []byte) (int, net.Addr, error) { + select { + case <-c.closed: + return 0, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: c.remoteAddr, Err: c.err.Load().(error)} + default: + } + select { + case <-c.closed: + return 0, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: c.remoteAddr, Err: c.err.Load().(error)} + case buf := <-c.recvQueue: + return copy(p, buf), c.remoteAddr, nil + } +} + +// WriteTo writes a packet to the currently active net.PacketConn. The addr +// argument is ignored and instead replaced with the RedialPacketConn's own +// remote address. +func (c *RedialPacketConn) WriteTo(p []byte, addr net.Addr) (int, error) { + // addr is ignored. + select { + case <-c.closed: + return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: c.remoteAddr, Err: c.err.Load().(error)} + default: + } + buf := make([]byte, len(p)) + copy(buf, p) + select { + case c.sendQueue <- buf: + return len(buf), nil + default: + // Drop the outgoing packet if the send queue is full. + return len(buf), nil + } +} + +// closeWithError unblocks pending operations and makes future operations fail +// with the given error. If err is nil, it becomes errClosedPacketConn. 
+func (c *RedialPacketConn) closeWithError(err error) error { + var once bool + c.closeOnce.Do(func() { + // Store the error to be returned by future read/write + // operations. + if err == nil { + err = errClosedPacketConn + } + c.err.Store(err) + close(c.closed) + once = true + }) + if !once { + return &net.OpError{Op: "close", Net: c.LocalAddr().Network(), Addr: c.LocalAddr(), Err: c.err.Load().(error)} + } + return nil +} + +// Close unblocks pending operations and makes future operations fail with a +// "closed connection" error. +func (c *RedialPacketConn) Close() error { + return c.closeWithError(nil) +} + +// LocalAddr returns the localAddr value that was passed to NewRedialPacketConn. +func (c *RedialPacketConn) LocalAddr() net.Addr { return c.localAddr } + +func (c *RedialPacketConn) SetDeadline(t time.Time) error { return errNotImplemented } +func (c *RedialPacketConn) SetReadDeadline(t time.Time) error { return errNotImplemented } +func (c *RedialPacketConn) SetWriteDeadline(t time.Time) error { return errNotImplemented } diff --git a/common/util/util.go b/common/util/util.go new file mode 100644 index 0000000..f66c69f --- /dev/null +++ b/common/util/util.go @@ -0,0 +1,173 @@ +package util + +import ( + "encoding/json" + "errors" + "log" + "net" + "net/http" + "slices" + "sort" + + "github.com/pion/ice/v4" + "github.com/pion/sdp/v3" + "github.com/pion/webrtc/v4" + "github.com/realclientip/realclientip-go" +) + +func SerializeSessionDescription(desc *webrtc.SessionDescription) (string, error) { + bytes, err := json.Marshal(*desc) + if err != nil { + return "", err + } + return string(bytes), nil +} + +func DeserializeSessionDescription(msg string) (*webrtc.SessionDescription, error) { + var parsed map[string]interface{} + err := json.Unmarshal([]byte(msg), &parsed) + if err != nil { + return nil, err + } + if _, ok := parsed["type"]; !ok { + return nil, errors.New("cannot deserialize SessionDescription without type field") + } + if _, ok := parsed["sdp"]; !ok { + return nil, errors.New("cannot deserialize SessionDescription without sdp field") + } + + var stype webrtc.SDPType + switch parsed["type"].(string) { + default: + return nil, errors.New("Unknown SDP type") + case "offer": + stype = webrtc.SDPTypeOffer + case "pranswer": + stype = webrtc.SDPTypePranswer + case "answer": + stype = webrtc.SDPTypeAnswer + case "rollback": + stype = webrtc.SDPTypeRollback + } + + return &webrtc.SessionDescription{ + Type: stype, + SDP: parsed["sdp"].(string), + }, nil +} + +func IsLocal(ip net.IP) bool { + if ip.IsPrivate() { + return true + } + // Dynamic Configuration as per https://tools.ietf.org/html/rfc3927 + if ip.IsLinkLocalUnicast() { + return true + } + if ip4 := ip.To4(); ip4 != nil { + // Carrier-Grade NAT as per https://tools.ietf.org/html/rfc6598 + if ip4[0] == 100 && ip4[1]&0xc0 == 64 { + return true + } + } + return false +} + +// Removes local LAN address ICE candidates +// +// This is unused after https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/merge_requests/442, +// but may come in handy later for https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/issues/40322 +// Also this is exported, so let's not remove it at least until +// the next major release.
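+// +// For example, a host candidate with a private address, such as +// +//	a=candidate:3769337065 1 udp 2122260223 192.168.0.100 56688 typ host +// +// is removed, while candidates with public addresses are kept.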
+func StripLocalAddresses(str string) string { + var desc sdp.SessionDescription + err := desc.Unmarshal([]byte(str)) + if err != nil { + return str + } + for _, m := range desc.MediaDescriptions { + attrs := make([]sdp.Attribute, 0) + for _, a := range m.Attributes { + if a.IsICECandidate() { + c, err := ice.UnmarshalCandidate(a.Value) + if err == nil && c.Type() == ice.CandidateTypeHost { + ip := net.ParseIP(c.Address()) + if ip != nil && (IsLocal(ip) || ip.IsUnspecified() || ip.IsLoopback()) { + /* no append in this case */ + continue + } + } + } + attrs = append(attrs, a) + } + m.Attributes = attrs + } + bts, err := desc.Marshal() + if err != nil { + return str + } + return string(bts) +} + +// Attempts to retrieve the client IP from which the HTTP request originated. +// There is no standard way to do this since the original client IP can be included in a number of different headers, +// depending on the proxies and load balancers between the client and the server. We attempt to check as many of these +// headers as possible to determine a "best guess" of the client IP. +// Using this as a reference: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Forwarded +func GetClientIp(req *http.Request) string { + // We check the "Forwarded" header first, followed by the "X-Forwarded-For" header, and then use the "RemoteAddr" as + // a last resort. We use the leftmost address since it is the closest one to the client. + strat := realclientip.NewChainStrategy( + realclientip.Must(realclientip.NewLeftmostNonPrivateStrategy("Forwarded")), + realclientip.Must(realclientip.NewLeftmostNonPrivateStrategy("X-Forwarded-For")), + realclientip.RemoteAddrStrategy{}, + ) + clientIp := strat.ClientIP(req.Header, req.RemoteAddr) + return clientIp +} + +// Returns a list of IP addresses of ICE candidates, roughly in descending order of accuracy for geolocation +func GetCandidateAddrs(sdpStr string) []net.IP { + var desc sdp.SessionDescription + err := desc.Unmarshal([]byte(sdpStr)) + if err != nil { + log.Printf("GetCandidateAddrs: failed to unmarshal SDP: %v\n", err) + return []net.IP{} + } + + iceCandidates := make([]ice.Candidate, 0) + + for _, m := range desc.MediaDescriptions { + for _, a := range m.Attributes { + if a.IsICECandidate() { + c, err := ice.UnmarshalCandidate(a.Value) + if err == nil { + iceCandidates = append(iceCandidates, c) + } + } + } + } + + // ICE candidates are first sorted in ascending order of priority, to match the convention of providing a custom Less + // function to sort.Slice + sort.Slice(iceCandidates, func(i, j int) bool { + if iceCandidates[i].Type() != iceCandidates[j].Type() { + // Sort by candidate type first, in the order specified in https://datatracker.ietf.org/doc/html/rfc8445#section-5.1.2.2 + // Higher priority candidate types are more efficient, which likely means they are closer to the client + // itself, providing a more accurate result for geolocation + return ice.CandidateType(iceCandidates[i].Type().Preference()) < ice.CandidateType(iceCandidates[j].Type().Preference()) + } + // Break ties with the ICE candidate's priority property + return iceCandidates[i].Priority() < iceCandidates[j].Priority() + }) + slices.Reverse(iceCandidates) + + sortedIpAddr := make([]net.IP, 0) + for _, c := range iceCandidates { + ip := net.ParseIP(c.Address()) + if ip != nil { + sortedIpAddr = append(sortedIpAddr, ip) + } + } + return sortedIpAddr +} diff --git a/common/util/util_test.go b/common/util/util_test.go new file mode 100644 index 0000000..701a4d6 --- /dev/null +++
b/common/util/util_test.go @@ -0,0 +1,75 @@ +package util + +import ( + "net" + "net/http" + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func TestUtil(t *testing.T) { + Convey("Strip", t, func() { + const offerStart = "v=0\r\no=- 4358805017720277108 2 IN IP4 8.8.8.8\r\ns=-\r\nt=0 0\r\na=group:BUNDLE data\r\na=msid-semantic: WMS\r\nm=application 56688 DTLS/SCTP 5000\r\nc=IN IP4 8.8.8.8\r\n" + const goodCandidate = "a=candidate:3769337065 1 udp 2122260223 8.8.8.8 56688 typ host generation 0 network-id 1 network-cost 50\r\n" + const offerEnd = "a=ice-ufrag:aMAZ\r\na=ice-pwd:jcHb08Jjgrazp2dzjdrvPPvV\r\na=ice-options:trickle\r\na=fingerprint:sha-256 C8:88:EE:B9:E7:02:2E:21:37:ED:7A:D1:EB:2B:A3:15:A2:3B:5B:1C:3D:D4:D5:1F:06:CF:52:40:03:F8:DD:66\r\na=setup:actpass\r\na=mid:data\r\na=sctpmap:5000 webrtc-datachannel 1024\r\n" + + offer := offerStart + goodCandidate + + "a=candidate:3769337065 1 udp 2122260223 192.168.0.100 56688 typ host generation 0 network-id 1 network-cost 50\r\n" + // IsLocal IPv4 + "a=candidate:3769337065 1 udp 2122260223 100.127.50.5 56688 typ host generation 0 network-id 1 network-cost 50\r\n" + // IsLocal IPv4 + "a=candidate:3769337065 1 udp 2122260223 169.254.250.88 56688 typ host generation 0 network-id 1 network-cost 50\r\n" + // IsLocal IPv4 + "a=candidate:3769337065 1 udp 2122260223 fdf8:f53b:82e4::53 56688 typ host generation 0 network-id 1 network-cost 50\r\n" + // IsLocal IPv6 + "a=candidate:3769337065 1 udp 2122260223 0.0.0.0 56688 typ host generation 0 network-id 1 network-cost 50\r\n" + // IsUnspecified IPv4 + "a=candidate:3769337065 1 udp 2122260223 :: 56688 typ host generation 0 network-id 1 network-cost 50\r\n" + // IsUnspecified IPv6 + "a=candidate:3769337065 1 udp 2122260223 127.0.0.1 56688 typ host generation 0 network-id 1 network-cost 50\r\n" + // IsLoopback IPv4 + "a=candidate:3769337065 1 udp 2122260223 ::1 56688 typ host generation 0 network-id 1 network-cost 50\r\n" + // IsLoopback IPv6 + offerEnd + + So(StripLocalAddresses(offer), ShouldEqual, offerStart+goodCandidate+offerEnd) + }) + + Convey("GetClientIp", t, func() { + // Should use Forwarded header + req1, _ := http.NewRequest("GET", "https://example.com", nil) + req1.Header.Add("X-Forwarded-For", "1.1.1.1, 2001:db8:cafe::99%eth0, 3.3.3.3, 192.168.1.1") + req1.Header.Add("Forwarded", `For=fe80::abcd;By=fe80::1234, Proto=https;For=::ffff:188.0.2.128, For="[2001:db8:cafe::17]:4848", For=fc00::1`) + req1.RemoteAddr = "192.168.1.2:8888" + So(GetClientIp(req1), ShouldEqual, "188.0.2.128") + + // Should use X-Forwarded-For header + req2, _ := http.NewRequest("GET", "https://example.com", nil) + req2.Header.Add("X-Forwarded-For", "1.1.1.1, 2001:db8:cafe::99%eth0, 3.3.3.3, 192.168.1.1") + req2.RemoteAddr = "192.168.1.2:8888" + So(GetClientIp(req2), ShouldEqual, "1.1.1.1") + + // Should use RemoteAddr + req3, _ := http.NewRequest("GET", "https://example.com", nil) + req3.RemoteAddr = "192.168.1.2:8888" + So(GetClientIp(req3), ShouldEqual, "192.168.1.2") + + // Should return empty client IP + req4, _ := http.NewRequest("GET", "https://example.com", nil) + So(GetClientIp(req4), ShouldEqual, "") + }) + + Convey("GetCandidateAddrs", t, func() { + // Should prioritize type in the following order: https://datatracker.ietf.org/doc/html/rfc8445#section-5.1.2.2 + // Break ties using priority value + const offerStart = "v=0\r\no=- 4358805017720277108 2 IN IP4 8.8.8.8\r\ns=-\r\nt=0 0\r\na=group:BUNDLE data\r\na=msid-semantic: WMS\r\nm=application 56688 DTLS/SCTP 5000\r\nc=IN IP4 8.8.8.8\r\n" + 
const offerEnd = "a=ice-ufrag:aMAZ\r\na=ice-pwd:jcHb08Jjgrazp2dzjdrvPPvV\r\na=ice-options:trickle\r\na=fingerprint:sha-256 C8:88:EE:B9:E7:02:2E:21:37:ED:7A:D1:EB:2B:A3:15:A2:3B:5B:1C:3D:D4:D5:1F:06:CF:52:40:03:F8:DD:66\r\na=setup:actpass\r\na=mid:data\r\na=sctpmap:5000 webrtc-datachannel 1024\r\n" + + const sdp = offerStart + "a=candidate:3769337065 1 udp 2122260223 8.8.8.8 56688 typ prflx\r\n" + + "a=candidate:3769337065 1 udp 2122260223 129.97.124.13 56688 typ relay\r\n" + + "a=candidate:3769337065 1 udp 2122260223 129.97.124.14 56688 typ srflx\r\n" + + "a=candidate:3769337065 1 udp 2122260223 129.97.124.15 56688 typ host\r\n" + + "a=candidate:3769337065 1 udp 2122260224 129.97.124.16 56688 typ host\r\n" + offerEnd + + So(GetCandidateAddrs(sdp), ShouldEqual, []net.IP{ + net.ParseIP("129.97.124.16"), + net.ParseIP("129.97.124.15"), + net.ParseIP("8.8.8.8"), + net.ParseIP("129.97.124.14"), + net.ParseIP("129.97.124.13"), + }) + }) +} diff --git a/common/version/combined.go b/common/version/combined.go new file mode 100644 index 0000000..9de74db --- /dev/null +++ b/common/version/combined.go @@ -0,0 +1,5 @@ +package version + +func ConstructResult() string { + return GetVersion() + "\n" + GetVersionDetail() +} diff --git a/common/version/detail.go b/common/version/detail.go new file mode 100644 index 0000000..6965630 --- /dev/null +++ b/common/version/detail.go @@ -0,0 +1,13 @@ +package version + +import "strings" + +var detailBuilder strings.Builder + +func AddVersionDetail(detail string) { + detailBuilder.WriteString(detail) +} + +func GetVersionDetail() string { + return detailBuilder.String() +} diff --git a/common/version/version.go b/common/version/version.go new file mode 100644 index 0000000..27d345d --- /dev/null +++ b/common/version/version.go @@ -0,0 +1,32 @@ +package version + +import ( + "fmt" + "runtime/debug" +) + +var version = func() string { + ver := "2.11.0" + if info, ok := debug.ReadBuildInfo(); ok { + var revision string + var modified string + for _, setting := range info.Settings { + switch setting.Key { + case "vcs.revision": + revision = setting.Value[:8] + case "vcs.modified": + if setting.Value == "true" { + modified = "*" + } + } + } + if revision != "" { + return fmt.Sprintf("%v (%v%v)", ver, revision, modified) + } + } + return ver +}() + +func GetVersion() string { + return version +} diff --git a/common/websocketconn/websocketconn.go b/common/websocketconn/websocketconn.go new file mode 100644 index 0000000..e5256df --- /dev/null +++ b/common/websocketconn/websocketconn.go @@ -0,0 +1,112 @@ +package websocketconn + +import ( + "io" + "time" + + "github.com/gorilla/websocket" +) + +// An abstraction that makes an underlying WebSocket connection look like a +// net.Conn. +type Conn struct { + *websocket.Conn + Reader io.Reader + Writer io.Writer +} + +func (conn *Conn) Read(b []byte) (n int, err error) { + return conn.Reader.Read(b) +} + +func (conn *Conn) Write(b []byte) (n int, err error) { + return conn.Writer.Write(b) +} + +func (conn *Conn) Close() error { + conn.Reader.(*io.PipeReader).Close() + conn.Writer.(*io.PipeWriter).Close() + // Ignore any error in trying to write a Close frame. 
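+	// The peer may already be gone, in which case WriteControl fails; the + // underlying websocket.Conn is still closed unconditionally below.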
+ _ = conn.Conn.WriteControl(websocket.CloseMessage, []byte{}, time.Now().Add(time.Second)) + return conn.Conn.Close() +} + +func (conn *Conn) SetDeadline(t time.Time) error { + errRead := conn.Conn.SetReadDeadline(t) + errWrite := conn.Conn.SetWriteDeadline(t) + err := errRead + if err == nil { + err = errWrite + } + return err +} + +func readLoop(w io.Writer, ws *websocket.Conn) error { + var buf [2048]byte + for { + messageType, r, err := ws.NextReader() + if err != nil { + return err + } + if messageType != websocket.BinaryMessage && messageType != websocket.TextMessage { + continue + } + _, err = io.CopyBuffer(w, r, buf[:]) + if err != nil { + return err + } + } +} + +func writeLoop(ws *websocket.Conn, r io.Reader) error { + var buf [2048]byte + for { + n, err := r.Read(buf[:]) + if err != nil { + return err + } + err = ws.WriteMessage(websocket.BinaryMessage, buf[:n]) + if err != nil { + return err + } + } +} + +// websocket.Conn methods start returning websocket.CloseError after the +// connection has been closed. We want to instead interpret that as io.EOF, just +// as you would find with a normal net.Conn. This only converts +// websocket.CloseErrors with known codes; other codes like CloseProtocolError +// and CloseAbnormalClosure will still be reported as anomalous. +func closeErrorToEOF(err error) error { + if websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseNoStatusReceived) { + err = io.EOF + } + return err +} + +// Create a new Conn. +func New(ws *websocket.Conn) *Conn { + // Set up synchronous pipes to serialize reads and writes to the + // underlying websocket.Conn. + // + // https://godoc.org/github.com/gorilla/websocket#hdr-Concurrency + // "Connections support one concurrent reader and one concurrent writer. + // Applications are responsible for ensuring that no more than one + // goroutine calls the write methods (WriteMessage, etc.) concurrently + // and that no more than one goroutine calls the read methods + // (NextReader, etc.) concurrently. The Close and WriteControl methods + // can be called concurrently with all other methods." + pr1, pw1 := io.Pipe() + go func() { + pw1.CloseWithError(closeErrorToEOF(readLoop(pw1, ws))) + }() + pr2, pw2 := io.Pipe() + go func() { + pr2.CloseWithError(closeErrorToEOF(writeLoop(ws, pr2))) + }() + return &Conn{ + Conn: ws, + Reader: pr1, + Writer: pw2, + } +} diff --git a/common/websocketconn/websocketconn_test.go b/common/websocketconn/websocketconn_test.go new file mode 100644 index 0000000..e3191f3 --- /dev/null +++ b/common/websocketconn/websocketconn_test.go @@ -0,0 +1,372 @@ +package websocketconn + +import ( + "bytes" + "fmt" + "io" + "net" + "net/http" + "net/url" + "sync" + "testing" + "time" + + "github.com/gorilla/websocket" +) + +// Returns a (server, client) pair of websocketconn.Conns. +func connPair() (*Conn, *Conn, error) { + // Will be assigned inside server.Handler. + var serverConn *Conn + + // Start up a web server to receive the request. 
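+	// The handler upgrades the incoming request to a WebSocket, wraps it in + // a Conn via New, and signals completion (or failure) on errCh.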
+ ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, nil, err + } + defer ln.Close() + errCh := make(chan error) + server := http.Server{ + Handler: http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + upgrader := websocket.Upgrader{ + CheckOrigin: func(*http.Request) bool { return true }, + } + ws, err := upgrader.Upgrade(rw, req, nil) + if err != nil { + errCh <- err + return + } + serverConn = New(ws) + close(errCh) + }), + } + defer server.Close() + go func() { + err := server.Serve(ln) + if err != nil && err != http.ErrServerClosed { + errCh <- err + } + }() + + // Make a request to the web server. + urlStr := (&url.URL{Scheme: "ws", Host: ln.Addr().String()}).String() + ws, _, err := (&websocket.Dialer{}).Dial(urlStr, nil) + if err != nil { + return nil, nil, err + } + clientConn := New(ws) + + // The server is finished when errCh is written to or closed. + err = <-errCh + if err != nil { + return nil, nil, err + } + return serverConn, clientConn, nil +} + +// Test that you can write in chunks and read the result concatenated. +func TestWrite(t *testing.T) { + tests := [][][]byte{ + {}, + {[]byte("foo")}, + {[]byte("foo"), []byte("bar")}, + {{}, []byte("foo"), {}, {}, []byte("bar")}, + } + + for _, test := range tests { + s, c, err := connPair() + if err != nil { + t.Fatal(err) + } + + // This is a little awkward because we need to read to and write + // from both ends of the Conn, and we need to do it in separate + // goroutines because otherwise a Write may block waiting for + // someone to Read it. Here we set up a loop in a separate + // goroutine, reading from the Conn s and writing to the dataCh + // and errCh channels, whose ultimate effect in the select loop + // below is like + // data, err := io.ReadAll(s) + dataCh := make(chan []byte) + errCh := make(chan error) + go func() { + for { + var buf [1024]byte + n, err := s.Read(buf[:]) + if err != nil { + errCh <- err + return + } + p := make([]byte, n) + copy(p, buf[:]) + dataCh <- p + } + }() + + // Write the data to the client side of the Conn, one chunk at a + // time. + for i, chunk := range test { + n, err := c.Write(chunk) + if err != nil || n != len(chunk) { + t.Fatalf("%+q Write chunk %d: got (%d, %v), expected (%d, %v)", + test, i, n, err, len(chunk), nil) + } + } + // We cannot immediately c.Close here, because that closes the + // connection right away, without waiting for buffered data to + // be sent. + + // Pull data and err from the server goroutine above. + var data []byte + err = nil + loop: + for { + select { + case p := <-dataCh: + data = append(data, p...) + case err = <-errCh: + break loop + case <-time.After(100 * time.Millisecond): + break loop + } + } + s.Close() + c.Close() + + // Now data and err contain the result of reading everything + // from s. + expected := bytes.Join(test, []byte{}) + if err != nil || !bytes.Equal(data, expected) { + t.Fatalf("%+q ReadAll: got (%+q, %v), expected (%+q, %v)", + test, data, err, expected, nil) + } + } +} + +// Test that multiple goroutines may call Read on a Conn simultaneously. Run +// this with +// +// go test -race +func TestConcurrentRead(t *testing.T) { + s, c, err := connPair() + if err != nil { + t.Fatal(err) + } + defer s.Close() + + // Set up multiple threads reading from the same conn. 
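+	// The io.Pipe inside Conn explicitly permits parallel Read calls, so both + // goroutines can safely drain s; run with -race to verify.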
+ errCh := make(chan error, 2) + var wg sync.WaitGroup + wg.Add(2) + for i := 0; i < 2; i++ { + go func() { + defer wg.Done() + _, err := io.Copy(io.Discard, s) + if err != nil { + errCh <- err + } + }() + } + + // Write a bunch of data to the other end. + for i := 0; i < 2000; i++ { + _, err := fmt.Fprintf(c, "%d", i) + if err != nil { + c.Close() + t.Fatalf("Write: %v", err) + } + } + c.Close() + + wg.Wait() + close(errCh) + + err = <-errCh + if err != nil { + t.Fatalf("Read: %v", err) + } +} + +// Test that multiple goroutines may call Write on a Conn simultaneously. Run +// this with +// +// go test -race +func TestConcurrentWrite(t *testing.T) { + s, c, err := connPair() + if err != nil { + t.Fatal(err) + } + + // Set up multiple threads writing to the same conn. + errCh := make(chan error, 3) + var wg sync.WaitGroup + wg.Add(2) + for i := 0; i < 2; i++ { + go func() { + defer wg.Done() + for j := 0; j < 1000; j++ { + _, err := fmt.Fprintf(s, "%d", j) + if err != nil { + errCh <- err + break + } + } + }() + } + go func() { + wg.Wait() + err := s.Close() + if err != nil { + errCh <- err + } + close(errCh) + }() + + // Read from the other end. + _, err = io.Copy(io.Discard, c) + c.Close() + if err != nil { + t.Fatalf("Read: %v", err) + } + + err = <-errCh + if err != nil { + t.Fatalf("Write: %v", err) + } +} + +// Test that Read and Write methods return errors after Close. +func TestClose(t *testing.T) { + s, c, err := connPair() + if err != nil { + t.Fatal(err) + } + defer c.Close() + + err = s.Close() + if err != nil { + t.Fatal(err) + } + + var buf [10]byte + n, err := s.Read(buf[:]) + if n != 0 || err == nil { + t.Fatalf("Read after Close returned (%v, %v), expected (%v, non-nil)", n, err, 0) + } + + _, err = s.Write([]byte{1, 2, 3}) + // Here we break the abstraction a little and look for a specific error, + // io.ErrClosedPipe. This is because we know the Conn uses an io.Pipe + // internally. + if err != io.ErrClosedPipe { + t.Fatalf("Write after Close returned %v, expected %v", err, io.ErrClosedPipe) + } +} + +// Benchmark creating a server websocket.Conn (without the websocketconn.Conn +// wrapper) for different read/write buffer sizes. +func BenchmarkUpgradeBufferSize(b *testing.B) { + // Buffer size of 0 would mean the default of 4096: + // https://github.com/gorilla/websocket/blob/v1.5.0/conn.go#L37 + // But a size of zero also has the effect of causing reuse of the HTTP + // server's buffers. So we test 4096 separately from 0. + // https://github.com/gorilla/websocket/blob/v1.5.0/server.go#L32 + for _, bufSize := range []int{0, 128, 1024, 2048, 4096, 8192} { + upgrader := websocket.Upgrader{ + CheckOrigin: func(*http.Request) bool { return true }, + ReadBufferSize: bufSize, + WriteBufferSize: bufSize, + } + b.Run(fmt.Sprintf("%d", bufSize), func(b *testing.B) { + // Start up a web server to receive the request. + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + b.Fatal(err) + } + defer ln.Close() + wsCh := make(chan *websocket.Conn) + errCh := make(chan error) + server := http.Server{ + Handler: http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + ws, err := upgrader.Upgrade(rw, req, nil) + if err != nil { + errCh <- err + return + } + wsCh <- ws + }), + } + defer server.Close() + go func() { + err := server.Serve(ln) + if err != nil && err != http.ErrServerClosed { + errCh <- err + } + }() + + // Make a request to the web server. 
+ dialer := &websocket.Dialer{ + ReadBufferSize: bufSize, + WriteBufferSize: bufSize, + } + urlStr := (&url.URL{Scheme: "ws", Host: ln.Addr().String()}).String() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + ws, _, err := dialer.Dial(urlStr, nil) + if err != nil { + b.Fatal(err) + } + ws.Close() + + select { + case <-wsCh: + case err := <-errCh: + b.Fatal(err) + } + } + b.StopTimer() + }) + } +} + +// Benchmark read/write in the client←server and server←client directions, with +// messages of different sizes. Run with -benchmem to see memory allocations. +func BenchmarkReadWrite(b *testing.B) { + trial := func(b *testing.B, readConn, writeConn *Conn, msgSize int) { + go func() { + io.Copy(io.Discard, readConn) + }() + data := make([]byte, msgSize) + b.ResetTimer() + for i := 0; i < b.N; i++ { + n, err := writeConn.Write(data[:]) + b.SetBytes(int64(n)) + if err != nil { + b.Fatal(err) + } + } + } + for _, msgSize := range []int{150, 3000} { + s, c, err := connPair() + if err != nil { + b.Fatal(err) + } + + b.Run(fmt.Sprintf("c←s %d", msgSize), func(b *testing.B) { + trial(b, c, s, msgSize) + }) + b.Run(fmt.Sprintf("s←c %d", msgSize), func(b *testing.B) { + trial(b, s, c, msgSize) + }) + + err = s.Close() + if err != nil { + b.Fatal(err) + } + err = c.Close() + if err != nil { + b.Fatal(err) + } + } +} diff --git a/doc/broker-spec.txt b/doc/broker-spec.txt new file mode 100644 index 0000000..b502cb4 --- /dev/null +++ b/doc/broker-spec.txt @@ -0,0 +1,334 @@ + + + Snowflake broker protocol + +0. Scope and Preliminaries + +The Snowflake broker is used to hand out Snowflake proxies to clients using the Snowflake pluggable transport. There are some similarities to the function of the broker and how BridgeDB hands out Tor bridges. + +This document specifies how the Snowflake broker interacts with other parts of the Tor ecosystem, starting with the metrics CollecTor module and to be expanded upon later. + +1. Metrics Reporting (version 1.1) + +Metrics data from the Snowflake broker can be retrieved by sending an HTTP GET request to https://[Snowflake broker URL]/metrics and consists of the following items: + + "snowflake-stats-end" YYYY-MM-DD HH:MM:SS (NSEC s) NL + [At start, exactly once.] + + YYYY-MM-DD HH:MM:SS defines the end of the included measurement + interval of length NSEC seconds (86400 seconds by default). + + "snowflake-ips" [CC=NUM,CC=NUM,...,CC=NUM] NL + [At most once.] + + List of mappings from two-letter country codes to the number of + unique IP addresses of Snowflake proxies that have polled. Each + country code only appears once. + + "snowflake-ips-total" NUM NL + [At most once.] + + A count of the total number of unique IP addresses of Snowflake + proxies that have polled. + + "snowflake-ips-standalone" NUM NL + [At most once.] + + A count of the total number of unique IP addresses of snowflake + proxies of type "standalone" that have polled. + + "snowflake-ips-badge" NUM NL + [At most once.] + + A count of the total number of unique IP addresses of snowflake + proxies of type "badge" that have polled. + + "snowflake-ips-webext" NUM NL + [At most once.] + + A count of the total number of unique IP addresses of snowflake + proxies of type "webext" that have polled. + + "snowflake-idle-count" NUM NL + [At most once.] + + A count of the number of times a proxy has polled but received + no client offer, rounded up to the nearest multiple of 8. + + "client-denied-count" NUM NL + [At most once.] 
+ + A count of the number of times a client has requested a proxy + from the broker but no proxies were available, rounded up to + the nearest multiple of 8. + + "client-restricted-denied-count" NUM NL + [At most once.] + + A count of the number of times a client with a restricted or + unknown NAT type has requested a proxy from the broker but no + proxies were available, rounded up to the nearest multiple of 8. + + "client-unrestricted-denied-count" NUM NL + [At most once.] + + A count of the number of times a client with an unrestricted NAT + type has requested a proxy from the broker but no proxies were + available, rounded up to the nearest multiple of 8. + + "client-snowflake-match-count" NUM NL + [At most once.] + + A count of the number of times a client successfully received a + proxy from the broker, rounded up to the nearest multiple of 8. + + "client-snowflake-timeout-count" NUM NL + [At most once.] + + A count of the number of times a client was matched with a proxy + but timed out before receiving the proxy's WebRTC answer, + rounded up to the nearest multiple of 8. + + "client-http-count" NUM NL + [At most once.] + + A count of the number of times a client has requested a proxy using + the HTTP rendezvous method from the broker, rounded up to the nearest + multiple of 8. + + "client-http-ips" [CC=NUM,CC=NUM,...,CC=NUM] NL + [At most once.] + + List of mappings from two-letter country codes to the number of + times a client has requested a proxy using the HTTP rendezvous method, + rounded up to the nearest multiple of 8. Each country code only appears + once. + + Note that this descriptor field name is misleading. We use IP addresses + to partition by country, but this metric counts polls, not unique IPs. + + "client-ampcache-count" NUM NL + [At most once.] + + A count of the number of times a client has requested a proxy using + the ampcache rendezvous method from the broker, rounded up to the + nearest multiple of 8. + + "client-ampcache-ips" [CC=NUM,CC=NUM,...,CC=NUM] NL + [At most once.] + + List of mappings from two-letter country codes to the number of + times a client has requested a proxy using the ampcache rendezvous + method, rounded up to the nearest multiple of 8. Each country code only + appears once. + + Note that this descriptor field name is misleading. We use IP addresses + to partition by country, but this metric counts polls, not unique IPs. + + "client-sqs-count" NUM NL + [At most once.] + + A count of the number of times a client has requested a proxy using + the sqs rendezvous method from the broker, rounded up to the nearest + multiple of 8. + + "client-sqs-ips" [CC=NUM,CC=NUM,...,CC=NUM] NL + [At most once.] + + List of mappings from two-letter country codes to the number of + times a client has requested a proxy using the sqs rendezvous method, + rounded up to the nearest multiple of 8. Each country code only appears + once. + + Note that this descriptor field name is misleading. We use IP addresses + to partition by country, but this metric counts polls, not unique IPs. + + "snowflake-ips-nat-restricted" NUM NL + [At most once.] + + A count of the total number of unique IP addresses of snowflake + proxies that have a restricted NAT type. + + "snowflake-ips-nat-unrestricted" NUM NL + [At most once.] + + A count of the total number of unique IP addresses of snowflake + proxies that have an unrestricted NAT type. + + "snowflake-ips-nat-unknown" NUM NL + [At most once.] 
+ + A count of the total number of unique IP addresses of snowflake + proxies that have an unknown NAT type. + + "snowflake-proxy-poll-with-relay-url-count" NUM NL + [At most once.] + + A count of snowflake proxy polls with relay url extension present. + This means this proxy understands relay url, and is sending its + allowed prefix. + "snowflake-proxy-poll-without-relay-url-count" NUM NL + [At most once.] + + A count of snowflake proxy polls with relay url extension absent. + This means this proxy is not yet updated. + "snowflake-proxy-rejected-for-relay-url-count" NUM NL + [At most once.] + + A count of snowflake proxy polls with relay url extension rejected + based on broker's relay url extension policy. + This means an incompatible allowed relay pattern is included in the + proxy poll message. +2. Broker messaging specification and endpoints + +The broker facilitates the connection of snowflake clients and snowflake proxies +through the exchange of WebRTC SDP information with its endpoints. + +2.1. Client interactions with the broker + +The broker offers multiple ways for clients to exchange registration +messages. + +2.1.1. HTTPS POST + +Clients interact with the broker by making a POST request to `/client` with the +offer SDP in the request body: +``` +POST /client HTTP + +[offer SDP] +``` +If the broker is behind a domain-fronted connection, this request is accompanied +with the necessary HOST information. + +If the client is matched up with a proxy, they receive a 200 OK response with +the proxy's answer SDP in the request body: +``` +HTTP 200 OK + +[answer SDP] +``` + +If no proxies were available, they receive a 503 status code: +``` +HTTP 503 Service Unavailable +``` + +2.1.2. AMP + +The broker's /amp/client endpoint receives client poll messages encoded +into the URL path, and sends client poll responses encoded as HTML that +conforms to the requirements of AMP (Accelerated Mobile Pages). This +endpoint is intended to be accessed through an AMP cache, using the +-ampcache option of snowflake-client. + +The client encodes its poll message into a GET request as follows: +``` +GET /amp/client/0[0 or more bytes]/[base64 of client poll message] +``` +The components of the path are as follows: +* "/amp/client/", the root of the endpoint. +* "0", a format version number, which controls the interpretation of the + rest of the path. Only the first byte matters as a version indicator + (not the whole first path component). +* Any number of slash or non-slash bytes. These may be used as padding + or to prevent cache collisions in the AMP cache. +* A final slash. +* base64 encoding of the client poll message, using the URL-safe + alphabet (which does not include slash). + +The broker returns a client poll response message in the HTTP response. +The message is encoded using AMP armor, an AMP-compatible HTML encoding. +The data stream is notionally a "0" byte (a format version indicator) +followed by the base64 encoding of the message (using the standard +alphabet, with "=" padding). This stream is broken into +whitespace-separated chunks, which are then bundled into HTML
+<pre> elements. The <pre> elements are then surrounded by AMP
+boilerplate. To decode, search the HTML for <pre> elements, concatenate
+their contents and join on whitespace, discard the "0" prefix, and
+base64 decode.
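+
+As a non-normative illustration (not part of the spec), a client might
+construct the request path like this in Go, where pollMsg ([]byte) and
+padding (string) are hypothetical variables:
+```
+// The URL-safe base64 alphabet guarantees the encoded message
+// contains no slash; the leading "0" is the format version indicator.
+path := "/amp/client/0" + padding + "/" +
+	base64.URLEncoding.EncodeToString(pollMsg)
+```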
+
+2.2 Proxy interactions with the broker
+
+Proxies poll the broker with a proxy poll request to `/proxy`:
+
+```
+POST /proxy HTTP
+
+{
+  Sid: [generated session id of proxy],
+  Version: 1.3,
+  Type: ["badge"|"webext"|"standalone"|"mobile"],
+  NAT: ["unknown"|"restricted"|"unrestricted"],
+  Clients: [number of current clients, rounded down to the nearest multiple of 8],
+  AcceptedRelayPattern: [a pattern representing accepted set of relay domains]
+}
+```
+
+If the request is well-formed, they receive a 200 OK response.
+
+If a client is matched:
+```
+HTTP 200 OK
+
+{
+  Status: "client match",
+  Offer:
+  {
+    type: offer,
+    sdp: [WebRTC SDP]
+  },
+  RelayURL: [the WebSocket URL the proxy should connect to in order to relay Snowflake traffic]
+}
+```
+
+If a client is not matched:
+```
+HTTP 200 OK
+
+{
+    Status: "no match"
+}
+```
+
+If the request is malformed:
+```
+HTTP 400 BadRequest
+```
+
+If they are matched with a client, they provide their SDP answer with a POST
+request to `/answer`:
+```
+POST /answer HTTP
+
+{
+  Sid: [generated session id of proxy],
+  Version: 1.3,
+  Answer:
+  {
+    type: answer,
+    sdp: [WebRTC SDP]
+  }
+}
+```
+
+If the request is well-formed, they receive a 200 OK response.
+
+If the client retrieved the answer:
+```
+HTTP 200 OK
+
+{
+  Status: "success"
+}
+```
+
+If the client left:
+```
+HTTP 200 OK
+
+{
+  Status: "client gone"
+}
+```
+
+If the request is malformed:
+```
+HTTP 400 BadRequest
+```
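+
+As a non-normative illustration, a standalone proxy's poll might be
+issued like this in Go (the field values are examples only):
+```
+body := []byte(`{"Sid": "...", "Version": "1.3", "Type": "standalone",
+ "NAT": "unknown", "Clients": 0, "AcceptedRelayPattern": "snowflake.torproject.net$"}`)
+resp, err := http.Post(brokerURL+"/proxy", "application/json",
+	bytes.NewReader(body))
+```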
diff --git a/doc/rendezvous-with-sqs.md b/doc/rendezvous-with-sqs.md
new file mode 100644
index 0000000..4da07fb
--- /dev/null
+++ b/doc/rendezvous-with-sqs.md
@@ -0,0 +1,44 @@
+# Rendezvous with Amazon SQS
+This is a new experimental rendezvous method (in addition to the existing HTTPS and AMP cache methods).
+It leverages Amazon SQS (Simple Queue Service) to let a client communicate with the broker server.
+
+## Broker
+To run the broker with this rendezvous method, use the following CLI flags (they are both required):
+- `broker-sqs-name` - name of the broker SQS queue to listen for incoming messages
+- `broker-sqs-region` - name of AWS region of the SQS queue
+
+These two parameters determine the SQS queue URL that the client must be given (as a CLI flag) in order to communicate with the broker. For example, the following values can be used:
+
+`-broker-sqs-name snowflake-broker -broker-sqs-region us-east-1`
+
+The machine on which the broker runs must be equipped with the correct AWS configs and credentials to allow the broker program to create, read from, and write to the SQS queue. These are typically stored at `~/.aws/config` and `~/.aws/credentials`. However, environment variables may also be used, as described in the [AWS Docs](https://docs.aws.amazon.com/sdkref/latest/guide/creds-config-files.html).
+
+## Client
+To run the client with this rendezvous method, use the following CLI flags (they are all required):
+- `sqsqueue` - URL of the SQS queue to use as a proxy for signalling
+- `sqscreds` - Encoded credentials for accessing the SQS queue
+
+`sqsqueue` should correspond to the URL of the SQS queue that the broker is listening on. 
+For the example above, the following value can be used:
+
+`-sqsqueue https://sqs.us-east-1.amazonaws.com/893902434899/snowflake-broker -sqscreds some-encoded-sqs-creds`
+
+*Public access to SQS queues is not allowed, so some form of authentication is needed to access the queue. Limited-permission credentials for the corresponding SQS queue will be provided by the Snowflake team.*
+
+## Implementation Details
+```
+╭――――――――――――――――――╮     ╭――――――――――――――――――╮     ╭――――――――――――――――――╮     ╭―――――――――――――――――――╮
+│      Client      │ <=> │    Amazon SQS    │ <=> │      Broker      │ <=> │  Snowflake Proxy  │
+╰――――――――――――――――――╯     ╰――――――――――――――――――╯     ╰――――――――――――――――――╯     ╰―――――――――――――――――――╯
+```
+
+1. On startup, the **broker** ensures that an SQS queue with the name given by the `broker-sqs-name` parameter exists, creating it if necessary. Afterwards, it enters a loop of continuously:
+    - polling for new messages
+    - cleaning up client queues
+2. The **client** sends its SDP offer to the SQS queue at the URL given by the `sqsqueue` parameter, in a message carrying a unique client ID (clientID) along with the contents of the SDP offer. The client randomly generates a new clientID for each rendezvous attempt (see the sketch after this list).
+3. The **broker** will receive this message during its polling and process it.
+    - A client SQS queue with the name `"snowflake-client" + clientID` will be created for the broker to send messages to the client. This is needed because, if a single queue shared by all clients were used for outgoing messages from the broker, each client would have to pick off the top message, check whether it is addressed to them, and process it only if so, potentially checking many messages before finding its own.
+    - When the broker has a response for the client, it will send a message to the client queue with the details of the SDP answer.
+    - The SDP offer message from the client is then deleted from the broker queue.
+4. The **client** will continuously poll its client queue and eventually receive the message with the SDP answer from the broker.
+5. The broker server periodically cleans up the unique SQS queues it has created for clients once they are no longer needed (it deletes queues whose last modification is older than a set threshold).
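+
+To make steps 2 and 4 concrete, here is a minimal client-side sketch using the AWS SDK for Go v2. It is illustrative only: the message-attribute name and the retry behavior are assumptions, not the client's actual wire format.
+
+```
+package main
+
+import (
+	"context"
+	"crypto/rand"
+	"encoding/hex"
+	"log"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/config"
+	"github.com/aws/aws-sdk-go-v2/service/sqs"
+	"github.com/aws/aws-sdk-go-v2/service/sqs/types"
+)
+
+func main() {
+	ctx := context.Background()
+	// Broker queue URL from the example above.
+	brokerQueueURL := "https://sqs.us-east-1.amazonaws.com/893902434899/snowflake-broker"
+
+	cfg, err := config.LoadDefaultConfig(ctx)
+	if err != nil {
+		log.Fatal(err)
+	}
+	client := sqs.NewFromConfig(cfg)
+
+	// Step 2: generate a fresh clientID for this rendezvous attempt and
+	// send the SDP offer to the broker queue.
+	id := make([]byte, 8)
+	if _, err := rand.Read(id); err != nil {
+		log.Fatal(err)
+	}
+	clientID := hex.EncodeToString(id)
+	_, err = client.SendMessage(ctx, &sqs.SendMessageInput{
+		QueueUrl:    aws.String(brokerQueueURL),
+		MessageBody: aws.String("[SDP offer]"),
+		MessageAttributes: map[string]types.MessageAttributeValue{
+			// The attribute name here is illustrative.
+			"ClientID": {DataType: aws.String("String"), StringValue: aws.String(clientID)},
+		},
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Step 4: look up the per-client queue the broker creates and poll it
+	// for the SDP answer. A real client would retry until the broker has
+	// created this queue.
+	q, err := client.GetQueueUrl(ctx, &sqs.GetQueueUrlInput{
+		QueueName: aws.String("snowflake-client" + clientID),
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	out, err := client.ReceiveMessage(ctx, &sqs.ReceiveMessageInput{
+		QueueUrl:            q.QueueUrl,
+		MaxNumberOfMessages: 1,
+		WaitTimeSeconds:     20,
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	for _, m := range out.Messages {
+		log.Printf("SDP answer: %s", aws.ToString(m.Body))
+	}
+}
+```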
\ No newline at end of file
diff --git a/doc/snowflake-client.1 b/doc/snowflake-client.1
new file mode 100644
index 0000000..122ada0
--- /dev/null
+++ b/doc/snowflake-client.1
@@ -0,0 +1,50 @@
+.TH SNOWFLAKE-CLIENT "1" "July 2021" "snowflake-client" "User Commands"
+.SH NAME
+snowflake-client \- WebRTC pluggable transport client for Tor
+.SH DESCRIPTION
+Snowflake helps users circumvent censorship by making a WebRTC
+connection to volunteer proxies. These proxies relay Tor traffic to a
+Snowflake bridge and then through the Tor network.
+.SS "Usage of snowflake-client:"
+.HP
+\fB\-ampcache\fR string
+.IP
+URL of AMP cache to use as a proxy for signaling
+.HP
+\fB\-front\fR string
+.IP
+front domain
+.HP
+\fB\-ice\fR string
+.IP
+comma\-separated list of ICE servers
+.HP
+\fB\-keep\-local\-addresses\fR
+.IP
+keep local LAN address ICE candidates
+.HP
+\fB\-log\fR string
+.IP
+name of log file
+.HP
+\fB\-log\-to\-state\-dir\fR
+.IP
+resolve the log file relative to tor's pt state dir
+.HP
+\fB\-logToStateDir\fR
+.IP
+use \fB\-log\-to\-state\-dir\fR instead
+.HP
+\fB\-max\fR int
+.IP
+capacity for number of multiplexed WebRTC peers (default 1)
+.HP
+\fB\-unsafe\-logging\fR
+.IP
+prevent logs from being scrubbed
+.HP
+\fB\-url\fR string
+.IP
+URL of signaling broker
+.SH "SEE ALSO"
+https://snowflake.torproject.org
diff --git a/doc/snowflake-proxy.1 b/doc/snowflake-proxy.1
new file mode 100644
index 0000000..8a35d36
--- /dev/null
+++ b/doc/snowflake-proxy.1
@@ -0,0 +1,38 @@
+.TH SNOWFLAKE-PROXY "1" "June 2021" "snowflake-proxy" "User Commands"
+.SH NAME
+snowflake-proxy \- WebRTC pluggable transport proxy for Tor
+.SH DESCRIPTION
+Snowflake helps users circumvent censorship by making a WebRTC
+connection to volunteer proxies. These proxies relay Tor traffic to a
+Snowflake bridge and then through the Tor network.
+.SS "Usage of snowflake-proxy:"
+.HP
+\fB\-broker\fR string
+.IP
+broker URL (default "https://snowflake\-broker.torproject.net/")
+.HP
+\fB\-capacity\fR uint
+.IP
+maximum concurrent clients (default 10)
+.HP
+\fB\-keep\-local\-addresses\fR
+.IP
+keep local LAN address ICE candidates
+.HP
+\fB\-log\fR string
+.IP
+log filename
+.HP
+\fB\-relay\fR string
+.IP
+websocket relay URL (default "wss://snowflake.torproject.net/")
+.HP
+\fB\-stun\fR string
+.IP
+stun URL (default "stun:stun.l.google.com:19302")
+.HP
+\fB\-unsafe\-logging\fR
+.IP
+prevent logs from being scrubbed
+.SH "SEE ALSO"
+https://snowflake.torproject.org
diff --git a/doc/using-the-snowflake-library.md b/doc/using-the-snowflake-library.md
new file mode 100644
index 0000000..03e85c2
--- /dev/null
+++ b/doc/using-the-snowflake-library.md
@@ -0,0 +1,165 @@
+Snowflake is available as a general-purpose pluggable transports library and adheres to the [pluggable transports v2.1 Go API](https://github.com/Pluggable-Transports/Pluggable-Transports-spec/blob/master/releases/PTSpecV2.1/Pluggable%20Transport%20Specification%20v2.1%20-%20Go%20Transport%20API.pdf).
+
+### Client library
+
+The Snowflake client library contains functions for running a Snowflake client.
+
+Example usage:
+
+```Golang
+package main
+
+import (
+    "log"
+
+    sf "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/client/lib"
+)
+
+func main() {
+
+    config := sf.ClientConfig{
+        BrokerURL:   "https://snowflake-broker.example.com",
+        FrontDomain: "https://friendlyfrontdomain.net",
+        ICEAddresses: []string{
+            "stun:stun.voip.blackberry.com:3478",
+        },
+        Max: 1,
+    }
+    transport, err := sf.NewSnowflakeClient(config)
+    if err != nil {
+        log.Fatal("Failed to start snowflake transport: ", err)
+    }
+
+    // transport implements the ClientFactory interface and returns a net.Conn
+    conn, err := transport.Dial()
+    if err != nil {
+        log.Printf("dial error: %s", err)
+        return
+    }
+    defer conn.Close()
+
+    // ...
+
+}
+```
+
+#### Using your own rendezvous method
+
+You can define and use your own rendezvous method to communicate with a Snowflake broker by implementing the `RendezvousMethod` interface.
+
+```Golang
+
+package main
+
+import (
+    "log"
+
+    sf "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/client/lib"
+)
+
+type StubMethod struct {
+}
+
+func (m *StubMethod) Exchange(pollReq []byte) ([]byte, error) {
+    var brokerResponse []byte
+    var err error
+
+    //Implement the logic you need to communicate with the Snowflake broker here
+
+    return brokerResponse, err
+}
+
+func main() {
+    config := sf.ClientConfig{
+        ICEAddresses: []string{
+            "stun:stun.voip.blackberry.com:3478",
+        },
+    }
+    transport, err := sf.NewSnowflakeClient(config)
+    if err != nil {
+        log.Fatal("Failed to start snowflake transport: ", err)
+    }
+
+    // custom rendezvous methods can be set with `SetRendezvousMethod`
+    rendezvous := &StubMethod{}
+    transport.SetRendezvousMethod(rendezvous)
+
+    // transport implements the ClientFactory interface and returns a net.Conn
+    conn, err := transport.Dial()
+    if err != nil {
+        log.Printf("dial error: %s", err)
+        return
+    }
+    defer conn.Close()
+
+    // ...
+
+}
+```
+
+### Server library
+
+The Snowflake server library contains functions for running a Snowflake server.
+
+Example usage:
+```Golang
+
+package main
+
+import (
+    "log"
+    "net"
+
+    sf "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/server/lib"
+    "golang.org/x/crypto/acme/autocert"
+)
+
+func main() {
+
+    // The snowflake server runs a websocket server. To run this securely, you will
+    // need a valid certificate.
+    certManager := &autocert.Manager{
+        Prompt:     autocert.AcceptTOS,
+        HostPolicy: autocert.HostWhitelist("snowflake.yourdomain.com"),
+        Email:      "you@yourdomain.com",
+    }
+
+    transport := sf.NewSnowflakeServer(certManager.GetCertificate)
+
+    addr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:443")
+    if err != nil {
+        log.Printf("error resolving bind address: %s", err.Error())
+    }
+    numKCPInstances := 1
+    ln, err := transport.Listen(addr, numKCPInstances)
+    if err != nil {
+        log.Printf("error opening listener: %s", err.Error())
+    }
+    for {
+        conn, err := ln.Accept()
+        if err != nil {
+            if err, ok := err.(net.Error); ok && err.Temporary() {
+                continue
+            }
+            log.Printf("Snowflake accept error: %s", err)
+            break
+        }
+        go func() {
+            // ...
+
+            defer conn.Close()
+        }()
+    }
+
+    // ...
+
+}
+
+```
+### Running your own Snowflake infrastructure
+
+At the moment we do not have the ability to share Snowflake infrastructure between different types of applications. If you are planning to use Snowflake as a transport for your application, you will need to:
+
+- Run a Snowflake broker. See our [broker documentation](../broker/) and [installation guide](https://gitlab.torproject.org/tpo/anti-censorship/team/-/wikis/Survival-Guides/Snowflake-Broker-Installation-Guide) for more information.
+
+- Run Snowflake proxies. These can be run as [standalone Go proxies](../proxy/) or [browser-based proxies](https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake-webext).
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..e8d8724
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,15 @@
+services:
+    snowflake-proxy:
+        network_mode: host
+        image: containers.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake:latest
+        container_name: snowflake-proxy
+        restart: unless-stopped
+        # For a full list of Snowflake Proxy CLI parameters see
+        # https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/tree/main/proxy?ref_type=heads#running-a-standalone-snowflake-proxy
+        #command: [ "-ephemeral-ports-range", "30000:60000" ]
+    watchtower:
+        image: containrrr/watchtower
+        container_name: watchtower
+        volumes:
+          - /var/run/docker.sock:/var/run/docker.sock
+        command: snowflake-proxy
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..6fbbf38
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,86 @@
+module gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2
+
+go 1.23.0
+
+require (
+	github.com/aws/aws-sdk-go-v2 v1.39.0
+	github.com/aws/aws-sdk-go-v2/config v1.31.8
+	github.com/aws/aws-sdk-go-v2/credentials v1.18.12
+	github.com/aws/aws-sdk-go-v2/service/sqs v1.42.5
+	github.com/golang/mock v1.6.0
+	github.com/gorilla/websocket v1.5.3
+	github.com/miekg/dns v1.1.65
+	github.com/pion/ice/v4 v4.0.10
+	github.com/pion/sdp/v3 v3.0.16
+	github.com/pion/stun/v3 v3.0.0
+	github.com/pion/transport/v3 v3.0.7
+	github.com/pion/webrtc/v4 v4.1.4
+	github.com/prometheus/client_golang v1.22.0
+	github.com/realclientip/realclientip-go v1.0.0
+	github.com/refraction-networking/utls v1.6.7
+	github.com/smartystreets/goconvey v1.8.1
+	github.com/stretchr/testify v1.11.1
+	github.com/txthinking/socks5 v0.0.0-20230325130024-4230056ae301
+	github.com/xtaci/kcp-go/v5 v5.6.24
+	github.com/xtaci/smux v1.5.35
+	gitlab.torproject.org/tpo/anti-censorship/geoip v0.0.0-20210928150955-7ce4b3d98d01
+	gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/goptlib v1.6.0
+	gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil v0.0.0-20250815012447-418f76dcf315
+	golang.org/x/crypto v0.41.0
+	golang.org/x/net v0.42.0
+	golang.org/x/sys v0.35.0
+)
+
+require (
+	github.com/andybalholm/brotli v1.0.6 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.29.3 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.4 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.38.4 // indirect
+	github.com/aws/smithy-go v1.23.0 // indirect
+	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/cloudflare/circl v1.3.7 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/google/uuid v1.6.0 // indirect
+	github.com/gopherjs/gopherjs v1.17.2 // indirect
+	github.com/jtolds/gls v4.20.0+incompatible // indirect
+	github.com/klauspost/compress v1.18.0 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.6 // indirect
+	github.com/klauspost/reedsolomon v1.12.0 // indirect
+	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
+	github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
+	github.com/pion/datachannel v1.5.10 // indirect
+	github.com/pion/dtls/v3 v3.0.7 // indirect
+	github.com/pion/interceptor v0.1.40 // indirect
+	github.com/pion/logging v0.2.4 // indirect
+	github.com/pion/mdns/v2 v2.0.7 // indirect
+	github.com/pion/randutil v0.1.0 // indirect
+	github.com/pion/rtcp v1.2.15 // indirect
+	github.com/pion/rtp v1.8.21 // indirect
+	github.com/pion/sctp v1.8.39 // indirect
+	github.com/pion/srtp/v3 v3.0.7 // indirect
+	github.com/pion/turn/v4 v4.1.1 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/prometheus/client_model v0.6.1 // indirect
+	github.com/prometheus/common v0.62.0 // indirect
+	github.com/prometheus/procfs v0.15.1 // indirect
+	github.com/smarty/assertions v1.15.0 // indirect
+	github.com/tjfoc/gmsm v1.4.1 // indirect
+	github.com/txthinking/runnergroup v0.0.0-20210608031112-152c7c4432bf // indirect
+	github.com/wlynxg/anet v0.0.5 // indirect
+	golang.org/x/mod v0.26.0 // indirect
+	golang.org/x/sync v0.16.0 // indirect
+	golang.org/x/text v0.28.0 // indirect
+	golang.org/x/tools v0.35.0 // indirect
+	google.golang.org/protobuf v1.36.5 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+)
+
+replace github.com/refraction-networking/utls v1.6.7 => gitlab.torproject.org/shelikhoo/utls-temporary v0.0.0-20250428152032-7f32539913c8
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..2e54433
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,274 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI=
+github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/aws/aws-sdk-go-v2 v1.39.0 h1:xm5WV/2L4emMRmMjHFykqiA4M/ra0DJVSWUkDyBjbg4=
+github.com/aws/aws-sdk-go-v2 v1.39.0/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
+github.com/aws/aws-sdk-go-v2/config v1.31.8 h1:kQjtOLlTU4m4A64TsRcqwNChhGCwaPBt+zCQt/oWsHU=
+github.com/aws/aws-sdk-go-v2/config v1.31.8/go.mod h1:QPpc7IgljrKwH0+E6/KolCgr4WPLerURiU592AYzfSY=
+github.com/aws/aws-sdk-go-v2/credentials v1.18.12 h1:zmc9e1q90wMn8wQbjryy8IwA6Q4XlaL9Bx2zIqdNNbk=
+github.com/aws/aws-sdk-go-v2/credentials v1.18.12/go.mod h1:3VzdRDR5u3sSJRI4kYcOSIBbeYsgtVk7dG5R/U6qLWY=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7 h1:Is2tPmieqGS2edBnmOJIbdvOA6Op+rRpaYR60iBAwXM=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7/go.mod h1:F1i5V5421EGci570yABvpIXgRIBPb5JM+lSkHF6Dq5w=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7 h1:UCxq0X9O3xrlENdKf1r9eRJoKz/b0AfGkpp3a7FPlhg=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7/go.mod h1:rHRoJUNUASj5Z/0eqI4w32vKvC7atoWR0jC+IkmVH8k=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7 h1:Y6DTZUn7ZUC4th9FMBbo8LVE+1fyq3ofw+tRwkUd3PY=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7/go.mod h1:x3XE6vMnU9QvHN/Wrx2s44kwzV2o2g5x/siw4ZUJ9g8=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 h1:mLgc5QIgOy26qyh5bvW+nDoAppxgn3J2WV3m9ewq7+8=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7/go.mod h1:wXb/eQnqt8mDQIQTTmcw58B5mYGxzLGZGK8PWNFZ0BA=
+github.com/aws/aws-sdk-go-v2/service/sqs v1.42.5 h1:HbaHWaTkGec2pMa/UQa3+WNWtUaFFF1ZLfwCeVFtBns=
+github.com/aws/aws-sdk-go-v2/service/sqs v1.42.5/go.mod h1:wCAPjT7bNg5+4HSNefwNEC2hM3d+NSD5w5DU/8jrPrI=
+github.com/aws/aws-sdk-go-v2/service/sso v1.29.3 h1:7PKX3VYsZ8LUWceVRuv0+PU+E7OtQb1lgmi5vmUE9CM=
+github.com/aws/aws-sdk-go-v2/service/sso v1.29.3/go.mod h1:Ql6jE9kyyWI5JHn+61UT/Y5Z0oyVJGmgmJbZD5g4unY=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.4 h1:e0XBRn3AptQotkyBFrHAxFB8mDhAIOfsG+7KyJ0dg98=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.4/go.mod h1:XclEty74bsGBCr1s0VSaA11hQ4ZidK4viWK7rRfO88I=
+github.com/aws/aws-sdk-go-v2/service/sts v1.38.4 h1:PR00NXRYgY4FWHqOGx3fC3lhVKjsp1GdloDv2ynMSd8=
+github.com/aws/aws-sdk-go-v2/service/sts v1.38.4/go.mod h1:Z+Gd23v97pX9zK97+tX4ppAgqCt3Z2dIXB02CtBncK8=
+github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE=
+github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
+github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
+github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
+github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
+github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
+github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
+github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/reedsolomon v1.12.0 h1:I5FEp3xSwVCcEh3F5A7dofEfhXdF/bWhQWPH+XwBFno=
+github.com/klauspost/reedsolomon v1.12.0/go.mod h1:EPLZJeh4l27pUGC3aXOjheaoh1I9yut7xTURiW3LQ9Y=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/miekg/dns v1.1.51/go.mod h1:2Z9d3CP1LQWihRZUf29mQ19yDThaI4DAYzte2CaQW5c=
+github.com/miekg/dns v1.1.65 h1:0+tIPHzUW0GCge7IiK3guGP57VAw7hoPDfApjkMD1Fc=
+github.com/miekg/dns v1.1.65/go.mod h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
+github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
+github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
+github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
+github.com/pion/dtls/v3 v3.0.7 h1:bItXtTYYhZwkPFk4t1n3Kkf5TDrfj6+4wG+CZR8uI9Q=
+github.com/pion/dtls/v3 v3.0.7/go.mod h1:uDlH5VPrgOQIw59irKYkMudSFprY9IEFCqz/eTz16f8=
+github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4=
+github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
+github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4=
+github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic=
+github.com/pion/logging v0.2.4 h1:tTew+7cmQ+Mc1pTBLKH2puKsOvhm32dROumOZ655zB8=
+github.com/pion/logging v0.2.4/go.mod h1:DffhXTKYdNZU+KtJ5pyQDjvOAh/GsNSyv1lbkFbe3so=
+github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM=
+github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA=
+github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
+github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
+github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
+github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
+github.com/pion/rtp v1.8.21 h1:3yrOwmZFyUpcIosNcWRpQaU+UXIJ6yxLuJ8Bx0mw37Y=
+github.com/pion/rtp v1.8.21/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
+github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=
+github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
+github.com/pion/sdp/v3 v3.0.16 h1:0dKzYO6gTAvuLaAKQkC02eCPjMIi4NuAr/ibAwrGDCo=
+github.com/pion/sdp/v3 v3.0.16/go.mod h1:9tyKzznud3qiweZcD86kS0ff1pGYB3VX+Bcsmkx6IXo=
+github.com/pion/srtp/v3 v3.0.7 h1:QUElw0A/FUg3MP8/KNMZB3i0m8F9XeMnTum86F7S4bs=
+github.com/pion/srtp/v3 v3.0.7/go.mod h1:qvnHeqbhT7kDdB+OGB05KA/P067G3mm7XBfLaLiaNF0=
+github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
+github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU=
+github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
+github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
+github.com/pion/turn/v4 v4.1.1 h1:9UnY2HB99tpDyz3cVVZguSxcqkJ1DsTSZ+8TGruh4fc=
+github.com/pion/turn/v4 v4.1.1/go.mod h1:2123tHk1O++vmjI5VSD0awT50NywDAq5A2NNNU4Jjs8=
+github.com/pion/webrtc/v4 v4.1.4 h1:/gK1ACGHXQmtyVVbJFQDxNoODg4eSRiFLB7t9r9pg8M=
+github.com/pion/webrtc/v4 v4.1.4/go.mod h1:Oab9npu1iZtQRMic3K3toYq5zFPvToe/QBw7dMI2ok4=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
+github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/realclientip/realclientip-go v1.0.0 h1:+yPxeC0mEaJzq1BfCt2h4BxlyrvIIBzR6suDc3BEF1U=
+github.com/realclientip/realclientip-go v1.0.0/go.mod h1:CXnUdVwFRcXFJIRb/dTYqbT7ud48+Pi2pFm80bxDmcI=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY=
+github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY=
+github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
+github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho=
+github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE=
+github.com/txthinking/runnergroup v0.0.0-20210608031112-152c7c4432bf h1:7PflaKRtU4np/epFxRXlFhlzLXZzKFrH5/I4so5Ove0=
+github.com/txthinking/runnergroup v0.0.0-20210608031112-152c7c4432bf/go.mod h1:CLUSJbazqETbaR+i0YAhXBICV9TrKH93pziccMhmhpM=
+github.com/txthinking/socks5 v0.0.0-20230325130024-4230056ae301 h1:d/Wr/Vl/wiJHc3AHYbYs5I3PucJvRuw3SvbmlIRf+oM=
+github.com/txthinking/socks5 v0.0.0-20230325130024-4230056ae301/go.mod h1:ntmMHL/xPq1WLeKiw8p/eRATaae6PiVRNipHFJxI8PM=
+github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
+github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
+github.com/xtaci/kcp-go/v5 v5.6.24 h1:0tZL4NfpoESDrhaScrZfVDnYZ/3LhyVAbN/dQ2b4hbI=
+github.com/xtaci/kcp-go/v5 v5.6.24/go.mod h1:7cAxNX/qFGeRUmUSnnDMoOg53FbXDK9IWBXAUfh+aBA=
+github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae h1:J0GxkO96kL4WF+AIT3M4mfUVinOCPgf2uUWYFUzN0sM=
+github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae/go.mod h1:gXtu8J62kEgmN++bm9BVICuT/e8yiLI2KFobd/TRFsE=
+github.com/xtaci/smux v1.5.35 h1:RosihGJBeaS8gxOZ17HNxbhONwnqQwNwusHx4+SEGhk=
+github.com/xtaci/smux v1.5.35/go.mod h1:OMlQbT5vcgl2gb49mFkYo6SMf+zP3rcjcwQz7ZU7IGY=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+gitlab.torproject.org/shelikhoo/utls-temporary v0.0.0-20250428152032-7f32539913c8 h1:zZ1r9UjJ4qSPoLZG/vzITRsO0Qacpm20HlRAg7JVJ8Y=
+gitlab.torproject.org/shelikhoo/utls-temporary v0.0.0-20250428152032-7f32539913c8/go.mod h1:BC3O4vQzye5hqpmDTWUqi4P5DDhzJfkV1tdqtawQIH0=
+gitlab.torproject.org/tpo/anti-censorship/geoip v0.0.0-20210928150955-7ce4b3d98d01 h1:4949mHh9Vj2/okk48yG8nhP6TosFWOUfSfSr502sKGE=
+gitlab.torproject.org/tpo/anti-censorship/geoip v0.0.0-20210928150955-7ce4b3d98d01/go.mod h1:K3LOI4H8fa6j+7E10ViHeGEQV10304FG4j94ypmKLjY=
+gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/goptlib v1.6.0 h1:KD9m+mRBwtEdqe94Sv72uiedMWeRdIr4sXbrRyzRiIo=
+gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/goptlib v1.6.0/go.mod h1:70bhd4JKW/+1HLfm+TMrgHJsUHG4coelMWwiVEJ2gAg=
+gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil v0.0.0-20250815012447-418f76dcf315 h1:9lmXguW9aH5sdZR5h5jOrdInCt0tQ9NRa7+wFD4MQBk=
+gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil v0.0.0-20250815012447-418f76dcf315/go.mod h1:PK7EvweKeypdelDyh1m7N922aldSeCAG8n0lJ7RAXWQ=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201012173705-84dcc777aaee/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
+golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
+golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
+golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
+golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
+golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
+golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
+golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
+golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
+google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/probetest/Dockerfile b/probetest/Dockerfile
new file mode 100644
index 0000000..63fdd44
--- /dev/null
+++ b/probetest/Dockerfile
@@ -0,0 +1,28 @@
+FROM docker.io/library/golang:latest AS build
+
+
+ADD . /app
+
+WORKDIR /app/probetest
+RUN go get
+RUN CGO_ENABLED=0 go build -o probetest -ldflags '-extldflags "-static" -w -s'  .
+
+FROM containers.torproject.org/tpo/tpa/base-images/debian:bookworm AS debian-base
+
+RUN apt-get update && apt-get install -y \
+    curl \
+    gpg \
+    gpg-agent \
+    ca-certificates \
+    libcap2-bin \
+    --no-install-recommends
+    
+FROM scratch
+
+COPY --from=debian-base /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
+COPY --from=debian-base /usr/share/zoneinfo /usr/share/zoneinfo
+COPY --from=build /app/probetest/probetest /bin/probetest
+
+ENTRYPOINT [ "/bin/probetest" ]
+
+LABEL org.opencontainers.image.authors="anti-censorship-team@lists.torproject.org"
diff --git a/probetest/README.md b/probetest/README.md
new file mode 100644
index 0000000..41451a9
--- /dev/null
+++ b/probetest/README.md
@@ -0,0 +1,60 @@
+
+
+**Table of Contents**
+
+- [Overview](#overview)
+- [Running your own](#running-your-own)
+
+
+
+This is code for a remote probe test component of Snowflake.
+
+### Overview
+
+This is a probe test server that allows proxies to test their compatibility
+with Snowflake. Right now the only test implemented is a
+compatibility check for clients with symmetric NATs.
+
+### Running your own
+
+The server uses TLS by default.
+There is a `--disable-tls` option for testing purposes,
+but you should use TLS in production.
+
+To build the probe server, run
+```go build```
+
+Or alternatively:
+
+```
+cd .. # switch to the repo root directory or $(git rev-parse --show-toplevel)
+docker build -t snowflake-probetest -f probetest/Dockerfile .
+```
+
+To deploy the probe server, first set the necessary env variables with
+```
+export HOSTNAMES=${YOUR_HOSTNAMES}
+export EMAIL=${YOUR_EMAIL}
+```
+then run ```docker-compose up```
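+
+For example (hypothetical values):
+```
+export HOSTNAMES=probetest.example.com
+export EMAIL=admin@example.com
+docker-compose up
+```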
+
+Setting up a symmetric NAT configuration requires a few extra steps. After
+bringing up the Docker container, run
+```docker inspect snowflake-probetest```
+to find the subnet used by the probetest container. Then run
+```sudo iptables -t nat -L``` to find the POSTROUTING rules for the subnet.
+It should look something like this:
+```
+Chain POSTROUTING (policy ACCEPT)
+target     prot opt source               destination
+MASQUERADE  all  --  172.19.0.0/16        anywhere
+```
+To modify this rule, execute the command
+```sudo iptables -t nat -R POSTROUTING $RULE_NUM -s 172.19.0.0/16 -j MASQUERADE --random```
+where `$RULE_NUM` is the number of the rule that corresponds to your Docker container's subnet masquerade rule.
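+You can list the rules together with their numbers by running
+```sudo iptables -t nat -L POSTROUTING --line-numbers```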
+Afterwards, you should see the rule changed to be:
+```
+Chain POSTROUTING (policy ACCEPT)
+target     prot opt source               destination
+MASQUERADE  all  --  172.19.0.0/16        anywhere      random
+```
diff --git a/probetest/docker-compose.yml b/probetest/docker-compose.yml
new file mode 100644
index 0000000..9283383
--- /dev/null
+++ b/probetest/docker-compose.yml
@@ -0,0 +1,11 @@
+version: "3.8"
+
+services:
+  snowflake-probetest:
+    build: .
+    container_name: snowflake-probetest
+    ports:
+      - "8443:8443"
+    volumes:
+      - /home/snowflake-broker/acme-cert-cache:/go/bin/acme-cert-cache
+    entrypoint: [ "probetest", "-addr", ":8443", "-acme-hostnames", $HOSTNAMES, "-acme-email", $EMAIL, "-acme-cert-cache", "/go/bin/acme-cert-cache" ]
diff --git a/probetest/probetest.go b/probetest/probetest.go
new file mode 100644
index 0000000..fb59cb7
--- /dev/null
+++ b/probetest/probetest.go
@@ -0,0 +1,305 @@
+/*
+Probe test server to check the reachability of Snowflake proxies from
+clients with symmetric NATs.
+
+The probe server receives an offer from a proxy, returns an answer, and then
+attempts to establish a datachannel connection to that proxy. The proxy will
+self-determine whether the connection opened successfully.
+*/
+package main
+
+import (
+	"crypto/tls"
+	"flag"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"net/http"
+	"os"
+	"strings"
+	"time"
+
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil/safelog"
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages"
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/util"
+
+	"github.com/pion/transport/v3/stdnet"
+	"github.com/pion/webrtc/v4"
+	"golang.org/x/crypto/acme/autocert"
+)
+
+const (
+	// Maximum number of bytes to be read from an HTTP request
+	readLimit = 100000
+	// Time after which we assume proxy data channel will not open
+	dataChannelOpenTimeout = 20 * time.Second
+	// How long to wait after the data channel has been open before closing the peer connection.
+	dataChannelCloseTimeout = 5 * time.Second
+	// Default STUN URL
+	defaultStunUrls = "stun:stun.l.google.com:19302,stun:stun.voip.blackberry.com:3478"
+)
+
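+// ProbeHandler bundles an HTTP handler with the STUN URL that it will use
+// when creating peer connections.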
+type ProbeHandler struct {
+	stunURL string
+	handle  func(string, http.ResponseWriter, *http.Request)
+}
+
+func (h ProbeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	h.handle(h.stunURL, w, r)
+}
+
+// Create a PeerConnection from an SDP offer. Blocks until the gathering of ICE
+// candidates is complete and the answer is available in LocalDescription.
+func makePeerConnectionFromOffer(stunURL string, sdp *webrtc.SessionDescription,
+	dataChanOpen chan struct{}, dataChanClosed chan struct{}, iceGatheringTimeout time.Duration) (*webrtc.PeerConnection, error) {
+
+	settingsEngine := webrtc.SettingEngine{}
+
+	settingsEngine.SetIPFilter(func(ip net.IP) (keep bool) {
+		// `IsLoopback()` and `IsUnspecified()` are likely not needed here,
+		// but let's keep them just in case.
+		// FYI there is similar code in other files in this project.
+		keep = !util.IsLocal(ip) && !ip.IsLoopback() && !ip.IsUnspecified()
+		return
+	})
+	// FYI this is `false` by default anyway as of pion/webrtc@4
+	settingsEngine.SetIncludeLoopbackCandidate(false)
+
+	// Use the SetNet setting https://pkg.go.dev/github.com/pion/webrtc/v4#SettingEngine.SetNet
+	// to functionally revert a change in pion by silently ignoring
+	// when net.Interfaces() fails, rather than returning an error
+	vnet, _ := stdnet.NewNet()
+	settingsEngine.SetNet(vnet)
+	api := webrtc.NewAPI(webrtc.WithSettingEngine(settingsEngine))
+
+	config := webrtc.Configuration{
+		ICEServers: []webrtc.ICEServer{
+			{
+				URLs: strings.Split(stunURL, ","),
+			},
+		},
+	}
+	pc, err := api.NewPeerConnection(config)
+	if err != nil {
+		return nil, fmt.Errorf("accept: NewPeerConnection: %s", err)
+	}
+	pc.OnDataChannel(func(dc *webrtc.DataChannel) {
+		dc.OnOpen(func() {
+			close(dataChanOpen)
+		})
+		dc.OnClose(func() {
+			close(dataChanClosed)
+			dc.Close()
+		})
+	})
+	// As of v3.0.0, pion-webrtc uses trickle ICE by default.
+	// We have to wait for candidate gathering to complete
+	// before we send the offer
+	done := webrtc.GatheringCompletePromise(pc)
+	err = pc.SetRemoteDescription(*sdp)
+	if err != nil {
+		if inerr := pc.Close(); inerr != nil {
+			log.Printf("unable to call pc.Close after pc.SetRemoteDescription with error: %v", inerr)
+		}
+		return nil, fmt.Errorf("accept: SetRemoteDescription: %s", err)
+	}
+
+	answer, err := pc.CreateAnswer(nil)
+	if err != nil {
+		if inerr := pc.Close(); inerr != nil {
+			log.Printf("ICE gathering has generated an error when calling pc.Close: %v", inerr)
+		}
+		return nil, err
+	}
+
+	err = pc.SetLocalDescription(answer)
+	if err != nil {
+		if err = pc.Close(); err != nil {
+			log.Printf("pc.Close after setting local description returned: %v", err)
+		}
+		return nil, err
+	}
+
+	// Wait for ICE candidate gathering to complete,
+	// or for whatever we managed to gather before the client times out.
+	// See https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/issues/40230
+	select {
+	case <-done:
+	case <-time.After(iceGatheringTimeout):
+	}
+	return pc, nil
+}
+
+func probeHandler(stunURL string, w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+	resp, err := io.ReadAll(http.MaxBytesReader(w, r.Body, readLimit))
+	if err != nil {
+		log.Printf("Error reading request body: %v", err)
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	offer, _, err := messages.DecodePollResponse(resp)
+	if err != nil {
+		log.Printf("Error reading offer: %s", err.Error())
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+	if offer == "" {
+		log.Printf("Error: poll response contained an empty offer")
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+	sdp, err := util.DeserializeSessionDescription(offer)
+	if err != nil {
+		log.Printf("Error processing session description: %s", err.Error())
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	dataChanOpen := make(chan struct{})
+	dataChanClosed := make(chan struct{})
+	// TODO refactor: DRY this must be below `ResponseHeaderTimeout` in proxy
+	// https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/blob/e1d9b4ace69897521cc29585b5084c5f4d1ce874/proxy/lib/snowflake.go#L207
+	iceGatheringTimeout := 10 * time.Second
+	pc, err := makePeerConnectionFromOffer(stunURL, sdp, dataChanOpen, dataChanClosed, iceGatheringTimeout)
+	if err != nil {
+		log.Printf("Error making WebRTC connection: %s", err)
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+	// We'll set this to `false` if the signaling (this function) succeeds.
+	closePcOnReturn := true
+	defer func() {
+		if closePcOnReturn {
+			if err := pc.Close(); err != nil {
+				log.Printf("Error calling pc.Close: %v", err)
+			}
+		}
+		// Otherwise it must be closed below, wherever `closePcOnReturn` is set to `false`.
+	}()
+
+	answer, err := util.SerializeSessionDescription(pc.LocalDescription())
+	if err != nil {
+		log.Printf("Error making WebRTC connection: %s", err)
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+	body, err := messages.EncodeAnswerRequest(answer, "stub-sid")
+	if err != nil {
+		log.Printf("Error making WebRTC connection: %s", err)
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+
+	w.Write(body)
+	// Set a timeout on the peer connection. If the data channel has not
+	// opened within dataChannelOpenTimeout, close the peer connection.
+	closePcOnReturn = false
+	go func() {
+		timer := time.NewTimer(dataChannelOpenTimeout)
+		defer timer.Stop()
+
+		select {
+		case <-dataChanOpen:
+			// Let's not close the `PeerConnection` immediately now,
+			// instead let's wait for the peer (or timeout)
+			// to close the connection,
+			// in order to ensure that the DataChannel also gets opened
+			// on the proxy's side.
+			// Otherwise the proxy might receive the "close PeerConnection"
+			// "event" before they receive "dataChannel.OnOpen",
+			// which would wrongly result in a "restricted" NAT.
+			// See https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/issues/40387
+			select {
+			case <-dataChanClosed:
+			case <-time.After(dataChannelCloseTimeout):
+			}
+		case <-timer.C:
+		}
+
+		if err := pc.Close(); err != nil {
+			log.Printf("Error calling pc.Close: %v", err)
+		}
+	}()
+}
+
+func main() {
+	var acmeEmail string
+	var acmeHostnamesCommas string
+	var acmeCertCacheDir string
+	var addr string
+	var disableTLS bool
+	var certFilename, keyFilename string
+	var unsafeLogging bool
+	var stunURL string
+
+	flag.StringVar(&acmeEmail, "acme-email", "", "optional contact email for Let's Encrypt notifications")
+	flag.StringVar(&acmeHostnamesCommas, "acme-hostnames", "", "comma-separated hostnames for TLS certificate")
+	flag.StringVar(&acmeCertCacheDir, "acme-cert-cache", "acme-cert-cache", "directory in which certificates should be cached")
+	flag.StringVar(&certFilename, "cert", "", "TLS certificate file")
+	flag.StringVar(&keyFilename, "key", "", "TLS private key file")
+	flag.StringVar(&addr, "addr", ":8443", "address to listen on")
+	flag.BoolVar(&disableTLS, "disable-tls", false, "don't use HTTPS")
+	flag.BoolVar(&unsafeLogging, "unsafe-logging", false, "prevent logs from being scrubbed")
+	flag.StringVar(&stunURL, "stun", defaultStunUrls, "STUN servers to use for NAT traversal (comma-separated)")
+	flag.Parse()
+
+	var logOutput io.Writer = os.Stderr
+	if unsafeLogging {
+		log.SetOutput(logOutput)
+	} else {
+		// Scrub log output just in case an address ends up there
+		log.SetOutput(&safelog.LogScrubber{Output: logOutput})
+	}
+
+	log.SetFlags(log.LstdFlags | log.LUTC)
+
+	http.Handle("/probe", ProbeHandler{stunURL, probeHandler})
+
+	server := http.Server{
+		Addr: addr,
+	}
+
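+	// Three mutually exclusive listening modes: ACME via Let's Encrypt,
+	// static certificate and key files, or plain HTTP (for testing only).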
+	var err error
+	if acmeHostnamesCommas != "" {
+		acmeHostnames := strings.Split(acmeHostnamesCommas, ",")
+		log.Printf("ACME hostnames: %q", acmeHostnames)
+
+		var cache autocert.Cache
+		if err = os.MkdirAll(acmeCertCacheDir, 0700); err != nil {
+			log.Printf("Warning: Couldn't create cache directory %q (reason: %s) so we're *not* using our certificate cache.", acmeCertCacheDir, err)
+		} else {
+			cache = autocert.DirCache(acmeCertCacheDir)
+		}
+
+		certManager := autocert.Manager{
+			Cache:      cache,
+			Prompt:     autocert.AcceptTOS,
+			HostPolicy: autocert.HostWhitelist(acmeHostnames...),
+			Email:      acmeEmail,
+		}
+		// start certificate manager handler
+		go func() {
+			log.Printf("Starting HTTP-01 listener")
+			log.Fatal(http.ListenAndServe(":80", certManager.HTTPHandler(nil)))
+		}()
+
+		server.TLSConfig = &tls.Config{GetCertificate: certManager.GetCertificate}
+		err = server.ListenAndServeTLS("", "")
+	} else if certFilename != "" && keyFilename != "" {
+		err = server.ListenAndServeTLS(certFilename, keyFilename)
+	} else if disableTLS {
+		err = server.ListenAndServe()
+	} else {
+		log.Fatal("one of --acme-hostnames, --cert and --key, or --disable-tls is required")
+	}
+
+	if err != nil {
+		log.Println(err)
+	}
+}
diff --git a/proxy-go/README.md b/proxy-go/README.md
deleted file mode 100644
index 264fc4f..0000000
--- a/proxy-go/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-This is a standalone (not browser-based) version of the Snowflake proxy.
-
-Usage: ./proxy-go
diff --git a/proxy-go/snowflake.go b/proxy-go/snowflake.go
deleted file mode 100644
index b3c863a..0000000
--- a/proxy-go/snowflake.go
+++ /dev/null
@@ -1,436 +0,0 @@
-package main
-
-import (
-	"bytes"
-	"crypto/rand"
-	"encoding/base64"
-	"flag"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"log"
-	"net"
-	"net/http"
-	"net/url"
-	"os"
-	"regexp"
-	"strings"
-	"sync"
-	"time"
-
-	"git.torproject.org/pluggable-transports/snowflake.git/common/safelog"
-	"github.com/keroserene/go-webrtc"
-	"golang.org/x/net/websocket"
-)
-
-const defaultBrokerURL = "https://snowflake-broker.bamsoftware.com/"
-const defaultRelayURL = "wss://snowflake.bamsoftware.com/"
-const defaultSTUNURL = "stun:stun.l.google.com:19302"
-const pollInterval = 5 * time.Second
-
-//amount of time after sending an SDP answer before the proxy assumes the
-//client is not going to connect
-const dataChannelTimeout = 20 * time.Second
-
-const readLimit = 100000 //Maximum number of bytes to be read from an HTTP request
-
-var brokerURL *url.URL
-var relayURL string
-
-const (
-	sessionIDLength = 16
-)
-
-var (
-	tokens chan bool
-	config *webrtc.Configuration
-	client http.Client
-)
-
-var remoteIPPatterns = []*regexp.Regexp{
-	/* IPv4 */
-	regexp.MustCompile(`(?m)^c=IN IP4 ([\d.]+)(?:(?:\/\d+)?\/\d+)?(:? |\r?\n)`),
-	/* IPv6 */
-	regexp.MustCompile(`(?m)^c=IN IP6 ([0-9A-Fa-f:.]+)(?:\/\d+)?(:? |\r?\n)`),
-}
-
-// https://tools.ietf.org/html/rfc4566#section-5.7
-func remoteIPFromSDP(sdp string) net.IP {
-	for _, pattern := range remoteIPPatterns {
-		m := pattern.FindStringSubmatch(sdp)
-		if m != nil {
-			// Ignore parsing errors, ParseIP returns nil.
-			return net.ParseIP(m[1])
-		}
-	}
-	return nil
-}
-
-type webRTCConn struct {
-	dc *webrtc.DataChannel
-	pc *webrtc.PeerConnection
-	pr *io.PipeReader
-
-	lock sync.Mutex // Synchronization for DataChannel destruction
-	once sync.Once  // Synchronization for PeerConnection destruction
-}
-
-func (c *webRTCConn) Read(b []byte) (int, error) {
-	return c.pr.Read(b)
-}
-
-func (c *webRTCConn) Write(b []byte) (int, error) {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-	// log.Printf("webrtc Write %d %+q", len(b), string(b))
-	log.Printf("Write %d bytes --> WebRTC", len(b))
-	if c.dc != nil {
-		c.dc.Send(b)
-	}
-	return len(b), nil
-}
-
-func (c *webRTCConn) Close() (err error) {
-	c.once.Do(func() {
-		err = c.pc.Destroy()
-	})
-	return
-}
-
-func (c *webRTCConn) LocalAddr() net.Addr {
-	return nil
-}
-
-func (c *webRTCConn) RemoteAddr() net.Addr {
-	//Parse Remote SDP offer and extract client IP
-	clientIP := remoteIPFromSDP(c.pc.RemoteDescription().Sdp)
-	if clientIP == nil {
-		return nil
-	}
-	return &net.IPAddr{IP: clientIP, Zone: ""}
-}
-
-func (c *webRTCConn) SetDeadline(t time.Time) error {
-	return fmt.Errorf("SetDeadline not implemented")
-}
-
-func (c *webRTCConn) SetReadDeadline(t time.Time) error {
-	return fmt.Errorf("SetReadDeadline not implemented")
-}
-
-func (c *webRTCConn) SetWriteDeadline(t time.Time) error {
-	return fmt.Errorf("SetWriteDeadline not implemented")
-}
-
-func getToken() {
-	<-tokens
-}
-
-func retToken() {
-	tokens <- true
-}
-
-func genSessionID() string {
-	buf := make([]byte, sessionIDLength)
-	_, err := rand.Read(buf)
-	if err != nil {
-		panic(err.Error())
-	}
-	return strings.TrimRight(base64.StdEncoding.EncodeToString(buf), "=")
-}
-
-func limitedRead(r io.Reader, limit int64) ([]byte, error) {
-	p, err := ioutil.ReadAll(&io.LimitedReader{R: r, N: limit + 1})
-	if err != nil {
-		return p, err
-	} else if int64(len(p)) == limit+1 {
-		return p[0:limit], io.ErrUnexpectedEOF
-	}
-	return p, err
-}
-
-func pollOffer(sid string) *webrtc.SessionDescription {
-	broker := brokerURL.ResolveReference(&url.URL{Path: "proxy"})
-	timeOfNextPoll := time.Now()
-	for {
-		// Sleep until we're scheduled to poll again.
-		now := time.Now()
-		time.Sleep(timeOfNextPoll.Sub(now))
-		// Compute the next time to poll -- if it's in the past, that
-		// means that the POST took longer than pollInterval, so we're
-		// allowed to do another one immediately.
-		timeOfNextPoll = timeOfNextPoll.Add(pollInterval)
-		if timeOfNextPoll.Before(now) {
-			timeOfNextPoll = now
-		}
-
-		req, _ := http.NewRequest("POST", broker.String(), bytes.NewBuffer([]byte(sid)))
-		req.Header.Set("X-Session-ID", sid)
-		resp, err := client.Do(req)
-		if err != nil {
-			log.Printf("error polling broker: %s", err)
-		} else {
-			defer resp.Body.Close()
-			if resp.StatusCode != http.StatusOK {
-				log.Printf("broker returns: %d", resp.StatusCode)
-			} else {
-				body, err := limitedRead(resp.Body, readLimit)
-				if err != nil {
-					log.Printf("error reading broker response: %s", err)
-				} else {
-					return webrtc.DeserializeSessionDescription(string(body))
-				}
-			}
-		}
-	}
-}
-
-func sendAnswer(sid string, pc *webrtc.PeerConnection) error {
-	broker := brokerURL.ResolveReference(&url.URL{Path: "answer"})
-	body := bytes.NewBuffer([]byte(pc.LocalDescription().Serialize()))
-	req, _ := http.NewRequest("POST", broker.String(), body)
-	req.Header.Set("X-Session-ID", sid)
-	resp, err := client.Do(req)
-	if err != nil {
-		return err
-	}
-	if resp.StatusCode != http.StatusOK {
-		return fmt.Errorf("broker returned %d", resp.StatusCode)
-	}
-	return nil
-}
-
-type timeoutConn struct {
-	c net.Conn
-	t time.Duration
-}
-
-func (tc timeoutConn) Read(buf []byte) (int, error) {
-	tc.c.SetDeadline(time.Now().Add(tc.t))
-	return tc.c.Read(buf)
-}
-
-func (tc timeoutConn) Write(buf []byte) (int, error) {
-	tc.c.SetDeadline(time.Now().Add(tc.t))
-	return tc.c.Write(buf)
-}
-
-func (tc timeoutConn) Close() error {
-	return tc.c.Close()
-}
-
-func CopyLoopTimeout(c1 net.Conn, c2 net.Conn, timeout time.Duration) {
-	tc1 := timeoutConn{c: c1, t: timeout}
-	tc2 := timeoutConn{c: c2, t: timeout}
-	var wg sync.WaitGroup
-	copyer := func(dst io.ReadWriteCloser, src io.ReadWriteCloser) {
-		defer wg.Done()
-		io.Copy(dst, src)
-		dst.Close()
-		src.Close()
-	}
-	wg.Add(2)
-	go copyer(tc1, tc2)
-	go copyer(tc2, tc1)
-	wg.Wait()
-}
-
-// We pass conn.RemoteAddr() as an additional parameter, rather than calling
-// conn.RemoteAddr() inside this function, as a workaround for a hang that
-// otherwise occurs inside of conn.pc.RemoteDescription() (called by
-// RemoteAddr). https://bugs.torproject.org/18628#comment:8
-func datachannelHandler(conn *webRTCConn, remoteAddr net.Addr) {
-	defer conn.Close()
-	defer retToken()
-
-	u, err := url.Parse(relayURL)
-	if err != nil {
-		log.Fatalf("invalid relay url: %s", err)
-	}
-
-	// Retrieve client IP address
-	if remoteAddr != nil {
-		// Encode client IP address in relay URL
-		q := u.Query()
-		clientIP := remoteAddr.String()
-		q.Set("client_ip", clientIP)
-		u.RawQuery = q.Encode()
-	} else {
-		log.Printf("no remote address given in websocket")
-	}
-
-	wsConn, err := websocket.Dial(u.String(), "", relayURL)
-	if err != nil {
-		log.Printf("error dialing relay: %s", err)
-		return
-	}
-	log.Printf("connected to relay")
-	defer wsConn.Close()
-	wsConn.PayloadType = websocket.BinaryFrame
-	CopyLoopTimeout(conn, wsConn, time.Minute)
-	log.Printf("datachannelHandler ends")
-}
-
-// Create a PeerConnection from an SDP offer. Blocks until the gathering of ICE
-// candidates is complete and the answer is available in LocalDescription.
-// Installs an OnDataChannel callback that creates a webRTCConn and passes it to
-// datachannelHandler.
-func makePeerConnectionFromOffer(sdp *webrtc.SessionDescription, config *webrtc.Configuration, dataChan chan struct{}) (*webrtc.PeerConnection, error) {
-	pc, err := webrtc.NewPeerConnection(config)
-	if err != nil {
-		return nil, fmt.Errorf("accept: NewPeerConnection: %s", err)
-	}
-	pc.OnNegotiationNeeded = func() {
-		panic("OnNegotiationNeeded")
-	}
-	pc.OnDataChannel = func(dc *webrtc.DataChannel) {
-		log.Println("OnDataChannel")
-		close(dataChan)
-
-		pr, pw := io.Pipe()
-		conn := &webRTCConn{pc: pc, dc: dc, pr: pr}
-
-		dc.OnOpen = func() {
-			log.Println("OnOpen channel")
-		}
-		dc.OnClose = func() {
-			conn.lock.Lock()
-			defer conn.lock.Unlock()
-			log.Println("OnClose channel")
-			conn.dc = nil
-			pc.DeleteDataChannel(dc)
-			pw.Close()
-		}
-		dc.OnMessage = func(msg []byte) {
-			log.Printf("OnMessage <--- %d bytes", len(msg))
-			n, err := pw.Write(msg)
-			if err != nil {
-				pw.CloseWithError(err)
-			}
-			if n != len(msg) {
-				panic("short write")
-			}
-		}
-
-		go datachannelHandler(conn, conn.RemoteAddr())
-	}
-
-	err = pc.SetRemoteDescription(sdp)
-	if err != nil {
-		pc.Destroy()
-		return nil, fmt.Errorf("accept: SetRemoteDescription: %s", err)
-	}
-	log.Println("sdp offer successfully received.")
-
-	log.Println("Generating answer...")
-	answer, err := pc.CreateAnswer()
-	// blocks on ICE gathering. we need to add a timeout if needed
-	// not putting this in a separate go routine, because we need
-	// SetLocalDescription(answer) to be called before sendAnswer
-	if err != nil {
-		pc.Destroy()
-		return nil, err
-	}
-
-	if answer == nil {
-		pc.Destroy()
-		return nil, fmt.Errorf("Failed gathering ICE candidates.")
-	}
-
-	err = pc.SetLocalDescription(answer)
-	if err != nil {
-		pc.Destroy()
-		return nil, err
-	}
-
-	return pc, nil
-}
-
-func runSession(sid string) {
-	offer := pollOffer(sid)
-	if offer == nil {
-		log.Printf("bad offer from broker")
-		retToken()
-		return
-	}
-	dataChan := make(chan struct{})
-	pc, err := makePeerConnectionFromOffer(offer, config, dataChan)
-	if err != nil {
-		log.Printf("error making WebRTC connection: %s", err)
-		retToken()
-		return
-	}
-	err = sendAnswer(sid, pc)
-	if err != nil {
-		log.Printf("error sending answer to client through broker: %s", err)
-		pc.Destroy()
-		retToken()
-		return
-	}
-	// Set a timeout on peerconnection. If the connection state has not
-	// advanced to PeerConnectionStateConnected in this time,
-	// destroy the peer connection and return the token.
-	select {
-	case <-dataChan:
-		log.Println("Connection successful.")
-	case <-time.After(dataChannelTimeout):
-		log.Println("Timed out waiting for client to open data channel.")
-		pc.Destroy()
-		retToken()
-	}
-}
-
-func main() {
-	var capacity uint
-	var stunURL string
-	var logFilename string
-	var rawBrokerURL string
-
-	flag.UintVar(&capacity, "capacity", 10, "maximum concurrent clients")
-	flag.StringVar(&rawBrokerURL, "broker", defaultBrokerURL, "broker URL")
-	flag.StringVar(&relayURL, "relay", defaultRelayURL, "websocket relay URL")
-	flag.StringVar(&stunURL, "stun", defaultSTUNURL, "stun URL")
-	flag.StringVar(&logFilename, "log", "", "log filename")
-	flag.Parse()
-
-	var logOutput io.Writer = os.Stderr
-	log.SetFlags(log.LstdFlags | log.LUTC)
-	if logFilename != "" {
-		f, err := os.OpenFile(logFilename, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
-		if err != nil {
-			log.Fatal(err)
-		}
-		defer f.Close()
-		logOutput = io.MultiWriter(os.Stderr, f)
-	}
-	//We want to send the log output through our scrubber first
-	log.SetOutput(&safelog.LogScrubber{Output: logOutput})
-
-	log.Println("starting")
-
-	var err error
-	brokerURL, err = url.Parse(rawBrokerURL)
-	if err != nil {
-		log.Fatalf("invalid broker url: %s", err)
-	}
-	_, err = url.Parse(stunURL)
-	if err != nil {
-		log.Fatalf("invalid stun url: %s", err)
-	}
-	_, err = url.Parse(relayURL)
-	if err != nil {
-		log.Fatalf("invalid relay url: %s", err)
-	}
-
-	config = webrtc.NewConfiguration(webrtc.OptionIceServer(stunURL))
-	tokens = make(chan bool, capacity)
-	for i := uint(0); i < capacity; i++ {
-		tokens <- true
-	}
-
-	for {
-		getToken()
-		sessionID := genSessionID()
-		runSession(sessionID)
-	}
-}
diff --git a/proxy-go/webrtc_test.go b/proxy-go/webrtc_test.go
deleted file mode 100644
index 2413207..0000000
--- a/proxy-go/webrtc_test.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package main
-
-import (
-	"net"
-	"strings"
-	"testing"
-)
-
-func TestRemoteIPFromSDP(t *testing.T) {
-	tests := []struct {
-		sdp      string
-		expected net.IP
-	}{
-		// https://tools.ietf.org/html/rfc4566#section-5
-		{`v=0
-o=jdoe 2890844526 2890842807 IN IP4 10.47.16.5
-s=SDP Seminar
-i=A Seminar on the session description protocol
-u=http://www.example.com/seminars/sdp.pdf
-e=j.doe@example.com (Jane Doe)
-c=IN IP4 224.2.17.12/127
-t=2873397496 2873404696
-a=recvonly
-m=audio 49170 RTP/AVP 0
-m=video 51372 RTP/AVP 99
-a=rtpmap:99 h263-1998/90000
-`, net.ParseIP("224.2.17.12")},
-		// Missing c= line
-		{`v=0
-o=jdoe 2890844526 2890842807 IN IP4 10.47.16.5
-s=SDP Seminar
-i=A Seminar on the session description protocol
-u=http://www.example.com/seminars/sdp.pdf
-e=j.doe@example.com (Jane Doe)
-t=2873397496 2873404696
-a=recvonly
-m=audio 49170 RTP/AVP 0
-m=video 51372 RTP/AVP 99
-a=rtpmap:99 h263-1998/90000
-`, nil},
-		// Single line, IP address only
-		{`c=IN IP4 224.2.1.1
-`, net.ParseIP("224.2.1.1")},
-		// Same, with TTL
-		{`c=IN IP4 224.2.1.1/127
-`, net.ParseIP("224.2.1.1")},
-		// Same, with TTL and multicast addresses
-		{`c=IN IP4 224.2.1.1/127/3
-`, net.ParseIP("224.2.1.1")},
-		// IPv6, address only
-		{`c=IN IP6 FF15::101
-`, net.ParseIP("ff15::101")},
-		// Same, with multicast addresses
-		{`c=IN IP6 FF15::101/3
-`, net.ParseIP("ff15::101")},
-		// Multiple c= lines
-		{`c=IN IP4 1.2.3.4
-c=IN IP4 5.6.7.8
-`, net.ParseIP("1.2.3.4")},
-		// Modified from SDP sent by snowflake-client.
-		{`v=0
-o=- 7860378660295630295 2 IN IP4 127.0.0.1
-s=-
-t=0 0
-a=group:BUNDLE data
-a=msid-semantic: WMS
-m=application 54653 DTLS/SCTP 5000
-c=IN IP4 1.2.3.4
-a=candidate:3581707038 1 udp 2122260223 192.168.0.1 54653 typ host generation 0 network-id 1 network-cost 50
-a=candidate:2617212910 1 tcp 1518280447 192.168.0.1 59673 typ host tcptype passive generation 0 network-id 1 network-cost 50
-a=candidate:2082671819 1 udp 1686052607 1.2.3.4 54653 typ srflx raddr 192.168.0.1 rport 54653 generation 0 network-id 1 network-cost 50
-a=ice-ufrag:IBdf
-a=ice-pwd:G3lTrrC9gmhQx481AowtkhYz
-a=fingerprint:sha-256 53:F8:84:D9:3C:1F:A0:44:AA:D6:3C:65:80:D3:CB:6F:23:90:17:41:06:F9:9C:10:D8:48:4A:A8:B6:FA:14:A1
-a=setup:actpass
-a=mid:data
-a=sctpmap:5000 webrtc-datachannel 1024
-`, net.ParseIP("1.2.3.4")},
-		// Improper character within IPv4
-		{`c=IN IP4 224.2z.1.1
-`, nil},
-		// Improper character within IPv6
-		{`c=IN IP6 ff15:g::101
-`, nil},
-		// Bogus "IP7" addrtype
-		{`c=IN IP7 1.2.3.4
-`, nil},
-	}
-
-	for _, test := range tests {
-		// https://tools.ietf.org/html/rfc4566#section-5: "The sequence
-		// CRLF (0x0d0a) is used to end a record, although parsers
-		// SHOULD be tolerant and also accept records terminated with a
-		// single newline character." We represent the test cases with
-		// LF line endings for convenience, and test them both that way
-		// and with CRLF line endings.
-		lfSDP := test.sdp
-		crlfSDP := strings.Replace(lfSDP, "\n", "\r\n", -1)
-
-		ip := remoteIPFromSDP(lfSDP)
-		if !ip.Equal(test.expected) {
-			t.Errorf("expected %q, got %q from %q", test.expected, ip, lfSDP)
-		}
-		ip = remoteIPFromSDP(crlfSDP)
-		if !ip.Equal(test.expected) {
-			t.Errorf("expected %q, got %q from %q", test.expected, ip, crlfSDP)
-		}
-	}
-}
diff --git a/proxy/.eslintignore b/proxy/.eslintignore
deleted file mode 100644
index f580632..0000000
--- a/proxy/.eslintignore
+++ /dev/null
@@ -1,7 +0,0 @@
-build/
-test/
-webext/snowflake.js
-
-# FIXME: Whittle these away
-spec/
-shims.js
diff --git a/proxy/.eslintrc.json b/proxy/.eslintrc.json
deleted file mode 100644
index f94ba96..0000000
--- a/proxy/.eslintrc.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
-    "env": {
-        "browser": true,
-        "es6": true
-    },
-    "extends": "eslint:recommended"
-}
diff --git a/proxy/README.md b/proxy/README.md
index bd1f819..7299695 100644
--- a/proxy/README.md
+++ b/proxy/README.md
@@ -1,34 +1,90 @@
-This is the browser proxy component of Snowflake.
+
+
+**Table of Contents**
 
-### Embedding
+- [Dependencies](#dependencies)
+- [Building the standalone Snowflake proxy](#building-the-standalone-snowflake-proxy)
+- [Running a standalone Snowflake proxy](#running-a-standalone-snowflake-proxy)
 
-See https://snowflake.torproject.org/ for more info:
-```
-
-```
+
 
-### Building
+This is a standalone (not browser-based) version of the Snowflake proxy. For browser-based versions of the Snowflake proxy, see https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake-webext.
+
+### Dependencies
+
+- Go 1.15+
+- We use the [pion/webrtc](https://github.com/pion/webrtc) library for WebRTC communication with Snowflake clients. Note: running `go get` will fetch this dependency automatically during the build process.
+
+### Building the standalone Snowflake proxy
+
+To build the Snowflake proxy, make sure you are in the `proxy/` directory, and then run:
 
 ```
-npm run build
+go get
+go build
 ```
 
-### Testing
+### Running a standalone Snowflake proxy
+
+The Snowflake proxy can be run with the following options:
+
+
 
-Unit testing with Jasmine are available with:
 ```
-npm install
-npm test
+Usage of ./proxy:
+  -allow-non-tls-relay
+        allow this proxy to pass client's data to the relay in an unencrypted form.
+        This is only useful if the relay doesn't support encryption, e.g. for testing / development purposes.
+  -allow-proxying-to-private-addresses
+        allow forwarding client connections to private IP addresses.
+        Useful when a Snowflake server (relay) is hosted on the same private network as this proxy.
+  -allowed-relay-hostname-pattern string
+        this proxy will only be allowed to forward client connections to relays (servers) whose URL matches this pattern.
+        Note that a pattern "example.com$" will match "subdomain.example.com" as well as "other-domain-example.com".
+        In order to only match "example.com", prefix the pattern with "^": "^example.com$" (default "snowflake.torproject.net$")
+  -broker URL
+        The URL of the broker server that the proxy will be using to find clients (default "https://snowflake-broker.torproject.net/")
+  -capacity uint
+        maximum concurrent clients (default is to accept an unlimited number of clients)
+  -disable-stats-logger
+        disable the exposing mechanism for stats using logs
+  -ephemeral-ports-range range
+        Set the range of ports used for client connections (format: "<min>:<max>").
+        Useful in conjunction with port forwarding, in order to make the proxy NAT type "unrestricted".
+        If omitted, the ports will be chosen automatically from a wide range.
+        When specifying the range, make sure it's at least 2x as wide as the amount of clients that you are hoping to serve concurrently (see the "capacity" flag).
+  -keep-local-addresses
+        keep local LAN address ICE candidates.
+        This is usually pointless because Snowflake clients don't usually reside on the same local network as the proxy.
+  -log filename
+        log filename. If not specified, logs will be output to stderr (console).
+  -metrics
+        enable the exposing mechanism for stats using metrics
+  -metrics-address address
+        set listen address for metrics service (default "localhost")
+  -metrics-port int
+        set port for the metrics service (default 9999)
+  -nat-probe-server URL
+        The URL of the server that this proxy will use to check its network NAT type.
+        Determining NAT type helps to understand whether this proxy is compatible with certain clients' NAT (default "https://snowflake-broker.torproject.net:8443/probe")
+  -nat-retest-interval duration
+        the time interval between NAT type retests (see "nat-probe-server"). 0s disables retesting. Valid time units are "s", "m", "h". (default 24h0m0s)
+  -outbound-address address
+        prefer the given address as outbound address for client connections
+  -poll-interval duration
+        how often to ask the broker for a new client. Keep in mind that asking for a client will not always result in getting one. Minimum value is 2s. Valid time units are "ms", "s", "m", "h". (default 5s)
+  -relay URL
+        The default URL of the server (relay) that this proxy will forward client connections to, in case the broker itself did not specify the said URL (default "wss://snowflake.torproject.net/")
+  -stun URL
+        STUN server `URL` that this proxy will use to, among other things, determine its public IP address (default "stun:stun.l.google.com:19302")
+  -summary-interval duration
+        the time interval between summary log outputs, 0s disables summaries. Valid time units are "s", "m", "h". (default 1h0m0s)
+  -unsafe-logging
+        keep IP addresses and other sensitive info in the logs
+  -verbose
+        increase log verbosity
+  -version
+        display version info to stderr and quit
 ```
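+
+For example, a typical invocation (illustrative values only) might be:
+
+```
+./proxy -capacity 10 -summary-interval 1h -log snowflake.log
+```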
 
-To run locally, either:
-- Navigate to `proxy/build/embed.html`
-- For a more fully featured "debug" version,
-  start a webserver and navigate to `snowflake.html`.
-
-### Parameters
-
-With no parameters,
-snowflake uses the default relay `snowflake.bamsoftware.com:443` and
-uses automatic signaling with the default broker at
-`https://snowflake-broker.bamsoftware.com/`.
+For more information on how to run a Snowflake proxy in deployment, see our [community documentation](https://community.torproject.org/relay/setup/snowflake/standalone/).
diff --git a/proxy/broker.js b/proxy/broker.js
deleted file mode 100644
index 9806e76..0000000
--- a/proxy/broker.js
+++ /dev/null
@@ -1,122 +0,0 @@
-/* global log, dbg, snowflake */
-
-/*
-Communication with the snowflake broker.
-
-Browser snowflakes must register with the broker in order
-to get assigned to clients.
-*/
-
-// Represents a broker running remotely.
-class Broker {
-
-  // When interacting with the Broker, snowflake must generate a unique session
-  // ID so the Broker can keep track of each proxy's signalling channels.
-  // On construction, this Broker object does not do anything until
-  // |getClientOffer| is called.
-  constructor(url) {
-    // Promises some client SDP Offer.
-    // Registers this Snowflake with the broker using an HTTP POST request, and
-    // waits for a response containing some client offer that the Broker chooses
-    // for this proxy..
-    // TODO: Actually support multiple clients.
-    this.getClientOffer = this.getClientOffer.bind(this);
-    // urlSuffix for the broker is different depending on what action
-    // is desired.
-    this._postRequest = this._postRequest.bind(this);
-    this.url = url;
-    this.clients = 0;
-    if (0 === this.url.indexOf('localhost', 0)) {
-      // Ensure url has the right protocol + trailing slash.
-      this.url = 'http://' + this.url;
-    }
-    if (0 !== this.url.indexOf('http', 0)) {
-      this.url = 'https://' + this.url;
-    }
-    if ('/' !== this.url.substr(-1)) {
-      this.url += '/';
-    }
-  }
-
-  getClientOffer(id) {
-    return new Promise((fulfill, reject) => {
-      var xhr;
-      xhr = new XMLHttpRequest();
-      xhr.onreadystatechange = function() {
-        if (xhr.DONE !== xhr.readyState) {
-          return;
-        }
-        switch (xhr.status) {
-          case Broker.STATUS.OK:
-            return fulfill(xhr.responseText); // Should contain offer.
-          case Broker.STATUS.GATEWAY_TIMEOUT:
-            return reject(Broker.MESSAGE.TIMEOUT);
-          default:
-            log('Broker ERROR: Unexpected ' + xhr.status + ' - ' + xhr.statusText);
-            snowflake.ui.setStatus(' failure. Please refresh.');
-            return reject(Broker.MESSAGE.UNEXPECTED);
-        }
-      };
-      this._xhr = xhr; // Used by spec to fake async Broker interaction
-      return this._postRequest(id, xhr, 'proxy', id);
-    });
-  }
-
-  // Assumes getClientOffer happened, and a WebRTC SDP answer has been generated.
-  // Sends it back to the broker, which passes it to back to the original client.
-  sendAnswer(id, answer) {
-    var xhr;
-    dbg(id + ' - Sending answer back to broker...\n');
-    dbg(answer.sdp);
-    xhr = new XMLHttpRequest();
-    xhr.onreadystatechange = function() {
-      if (xhr.DONE !== xhr.readyState) {
-        return;
-      }
-      switch (xhr.status) {
-        case Broker.STATUS.OK:
-          dbg('Broker: Successfully replied with answer.');
-          return dbg(xhr.responseText);
-        case Broker.STATUS.GONE:
-          return dbg('Broker: No longer valid to reply with answer.');
-        default:
-          dbg('Broker ERROR: Unexpected ' + xhr.status + ' - ' + xhr.statusText);
-          return snowflake.ui.setStatus(' failure. Please refresh.');
-      }
-    };
-    return this._postRequest(id, xhr, 'answer', JSON.stringify(answer));
-  }
-
-  _postRequest(id, xhr, urlSuffix, payload) {
-    var err;
-    try {
-      xhr.open('POST', this.url + urlSuffix);
-      xhr.setRequestHeader('X-Session-ID', id);
-    } catch (error) {
-      err = error;
-      /*
-      An exception happens here when, for example, NoScript allows the domain
-      on which the proxy badge runs, but not the domain to which it's trying
-      to make the HTTP xhr. The exception message is like "Component
-      returned failure code: 0x805e0006 [nsIXMLHttpRequest.open]" on Firefox.
-      */
-      log('Broker: exception while connecting: ' + err.message);
-      return;
-    }
-    return xhr.send(payload);
-  }
-
-}
-
-Broker.STATUS = {
-  OK: 200,
-  GONE: 410,
-  GATEWAY_TIMEOUT: 504
-};
-
-Broker.MESSAGE = {
-  TIMEOUT: 'Timed out waiting for a client offer.',
-  UNEXPECTED: 'Unexpected status.'
-};
-
-Broker.prototype.clients = 0;
diff --git a/proxy/config.js b/proxy/config.js
deleted file mode 100644
index b49bb8d..0000000
--- a/proxy/config.js
+++ /dev/null
@@ -1,36 +0,0 @@
-
-class Config {}
-
-Config.prototype.brokerUrl = 'snowflake-broker.freehaven.net';
-
-Config.prototype.relayAddr = {
-  host: 'snowflake.freehaven.net',
-  port: '443'
-};
-
-// Original non-wss relay:
-// host: '192.81.135.242'
-// port: 9902
-Config.prototype.cookieName = "snowflake-allow";
-
-// Bytes per second. Set to undefined to disable limit.
-Config.prototype.rateLimitBytes = void 0;
-
-Config.prototype.minRateLimit = 10 * 1024;
-
-Config.prototype.rateLimitHistory = 5.0;
-
-Config.prototype.defaultBrokerPollInterval = 5.0 * 1000;
-
-Config.prototype.maxNumClients = 1;
-
-Config.prototype.connectionsPerClient = 1;
-
-// TODO: Different ICE servers.
-Config.prototype.pcConfig = {
-  iceServers: [
-    {
-      urls: ['stun:stun.l.google.com:19302']
-    }
-  ]
-};
diff --git a/proxy/init-badge.js b/proxy/init-badge.js
deleted file mode 100644
index 8646bc4..0000000
--- a/proxy/init-badge.js
+++ /dev/null
@@ -1,85 +0,0 @@
-/* global TESTING, Util, Params, Config, DebugUI, BadgeUI, UI, Broker, Snowflake */
-
-/*
-Entry point.
-*/
-
-var snowflake, query, debug, silenceNotifications, log, dbg, init;
-
-(function() {
-
-  if (((typeof TESTING === "undefined" || TESTING === null) || !TESTING) && !Util.featureDetect()) {
-    console.log('webrtc feature not detected. shutting down');
-    return;
-  }
-
-  snowflake = null;
-
-  query = new URLSearchParams(location.search);
-
-  debug = Params.getBool(query, 'debug', false);
-
-  silenceNotifications = Params.getBool(query, 'silent', false);
-
-  // Log to both console and UI if applicable.
-  // Requires that the snowflake and UI objects are hooked up in order to
-  // log to console.
-  log = function(msg) {
-    console.log('Snowflake: ' + msg);
-    return snowflake != null ? snowflake.ui.log(msg) : void 0;
-  };
-
-  dbg = function(msg) {
-    if (debug || ((snowflake != null ? snowflake.ui : void 0) instanceof DebugUI)) {
-      return log(msg);
-    }
-  };
-
-  init = function() {
-    var broker, config, ui;
-    config = new Config;
-    if ('off' !== query.get('ratelimit')) {
-      config.rateLimitBytes = Params.getByteCount(query, 'ratelimit', config.rateLimitBytes);
-    }
-    ui = null;
-    if (document.getElementById('badge') !== null) {
-      ui = new BadgeUI();
-    } else if (document.getElementById('status') !== null) {
-      ui = new DebugUI();
-    } else {
-      ui = new UI();
-    }
-    broker = new Broker(config.brokerUrl);
-    snowflake = new Snowflake(config, ui, broker);
-    log('== snowflake proxy ==');
-    if (Util.snowflakeIsDisabled(config.cookieName)) {
-      // Do not activate the proxy if any number of conditions are true.
-      log('Currently not active.');
-      return;
-    }
-    // Otherwise, begin setting up WebRTC and acting as a proxy.
-    dbg('Contacting Broker at ' + broker.url);
-    snowflake.setRelayAddr(config.relayAddr);
-    return snowflake.beginWebRTC();
-  };
-
-  // Notification of closing tab with active proxy.
-  window.onbeforeunload = function() {
-    if (
-      !silenceNotifications &&
-      snowflake !== null &&
-      Snowflake.MODE.WEBRTC_READY === snowflake.state
-    ) {
-      return Snowflake.MESSAGE.CONFIRMATION;
-    }
-    return null;
-  };
-
-  window.onunload = function() {
-    if (snowflake !== null) { snowflake.disable(); }
-    return null;
-  };
-
-  window.onload = init;
-
-}());
diff --git a/proxy/init-node.js b/proxy/init-node.js
deleted file mode 100644
index 789e6e3..0000000
--- a/proxy/init-node.js
+++ /dev/null
@@ -1,27 +0,0 @@
-/* global Config, UI, Broker, Snowflake */
-
-/*
-Entry point.
-*/
-
-var config = new Config;
-
-var ui = new UI();
-
-var broker = new Broker(config.brokerUrl);
-
-var snowflake = new Snowflake(config, ui, broker);
-
-var log = function(msg) {
-  return console.log('Snowflake: ' + msg);
-};
-
-var dbg = log;
-
-log('== snowflake proxy ==');
-
-dbg('Contacting Broker at ' + broker.url);
-
-snowflake.setRelayAddr(config.relayAddr);
-
-snowflake.beginWebRTC();
diff --git a/proxy/init-webext.js b/proxy/init-webext.js
deleted file mode 100644
index c641621..0000000
--- a/proxy/init-webext.js
+++ /dev/null
@@ -1,85 +0,0 @@
-/* global Util, chrome, Config, WebExtUI, Broker, Snowflake */
-/* eslint no-unused-vars: 0 */
-
-/*
-Entry point.
-*/
-
-var debug, snowflake, config, broker, ui, log, dbg, init, update, silenceNotifications;
-
-(function () {
-
-  silenceNotifications = false;
-  debug = false;
-  snowflake = null;
-  config = null;
-  broker = null;
-  ui = null;
-
-  // Log to both console and UI if applicable.
-  // Requires that the snowflake and UI objects are hooked up in order to
-  // log to console.
-  log = function(msg) {
-    console.log('Snowflake: ' + msg);
-    return snowflake != null ? snowflake.ui.log(msg) : void 0;
-  };
-
-  dbg = function(msg) {
-    if (debug) {
-      return log(msg);
-    }
-  };
-
-  if (!Util.featureDetect()) {
-    chrome.runtime.onConnect.addListener(function(port) {
-      return port.postMessage({
-        missingFeature: true
-      });
-    });
-    chrome.browserAction.setIcon({ path: { 32: "icons/status-off.png" } });
-    return;
-  }
-
-  init = function() {
-    config = new Config;
-    ui = new WebExtUI();
-    broker = new Broker(config.brokerUrl);
-    snowflake = new Snowflake(config, ui, broker);
-    log('== snowflake proxy ==');
-    return ui.initToggle();
-  };
-
-  update = function() {
-    if (!ui.enabled) {
-      // Do not activate the proxy if any number of conditions are true.
-      snowflake.disable();
-      log('Currently not active.');
-      return;
-    }
-    // Otherwise, begin setting up WebRTC and acting as a proxy.
-    dbg('Contacting Broker at ' + broker.url);
-    log('Starting snowflake');
-    snowflake.setRelayAddr(config.relayAddr);
-    return snowflake.beginWebRTC();
-  };
-
-  // Notification of closing tab with active proxy.
-  window.onbeforeunload = function() {
-    if (
-      !silenceNotifications &&
-      snowflake !== null &&
-      Snowflake.MODE.WEBRTC_READY === snowflake.state
-    ) {
-      return Snowflake.MESSAGE.CONFIRMATION;
-    }
-    return null;
-  };
-
-  window.onunload = function() {
-    if (snowflake !== null) { snowflake.disable(); }
-    return null;
-  };
-
-  window.onload = init;
-
-}());
diff --git a/proxy/lib/metrics.go b/proxy/lib/metrics.go
new file mode 100644
index 0000000..24dd776
--- /dev/null
+++ b/proxy/lib/metrics.go
@@ -0,0 +1,92 @@
+package snowflake_proxy
+
+import (
+	"net/http"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+const (
+	// metricNamespace is the Prometheus namespace shared by all proxy metrics
+	metricNamespace = "tor_snowflake_proxy"
+)
+
+type Metrics struct {
+	totalInBoundTraffic    prometheus.Counter
+	totalOutBoundTraffic   prometheus.Counter
+	totalConnections       *prometheus.CounterVec
+	totalFailedConnections prometheus.Counter
+}
+
+func NewMetrics() *Metrics {
+	return &Metrics{
+		totalConnections: prometheus.NewCounterVec(prometheus.CounterOpts{
+			Namespace: metricNamespace,
+			Name:      "connections_total",
+			Help:      "The total number of successful connections handled by the snowflake proxy",
+		},
+			[]string{"country"},
+		),
+		totalFailedConnections: prometheus.NewCounter(prometheus.CounterOpts{
+			Namespace: metricNamespace,
+			Name:      "connection_timeouts_total",
+			Help:      "The total number of client connection attempts that failed after successful rendezvous. Note that failures can occur for reasons outside of the proxy's control, such as the client's NAT and censorship situation.",
+		}),
+		totalInBoundTraffic: prometheus.NewCounter(prometheus.CounterOpts{
+			Namespace: metricNamespace,
+			Name:      "traffic_inbound_bytes_total",
+			Help:      "The total inbound traffic through the snowflake proxy (KB)",
+		}),
+		totalOutBoundTraffic: prometheus.NewCounter(prometheus.CounterOpts{
+			Namespace: metricNamespace,
+			Name:      "traffic_outbound_bytes_total",
+			Help:      "The total outbound traffic through the snowflake proxy (KB)",
+		}),
+	}
+}
+
+// Start registers the metrics collector and serves the metrics over HTTP on the given address
+func (m *Metrics) Start(addr string) error {
+	go func() {
+		http.Handle("/internal/metrics", promhttp.Handler())
+		if err := http.ListenAndServe(addr, nil); err != nil {
+			panic(err)
+		}
+	}()
+
+	return prometheus.Register(m)
+}
+
+func (m *Metrics) Collect(ch chan<- prometheus.Metric) {
+	m.totalConnections.Collect(ch)
+	m.totalFailedConnections.Collect(ch)
+	m.totalInBoundTraffic.Collect(ch)
+	m.totalOutBoundTraffic.Collect(ch)
+}
+
+func (m *Metrics) Describe(descs chan<- *prometheus.Desc) {
+	prometheus.DescribeByCollect(m, descs)
+}
+
+// TrackInBoundTraffic counts the traffic received by the snowflake proxy
+func (m *Metrics) TrackInBoundTraffic(value int64) {
+	m.totalInBoundTraffic.Add(float64(value))
+}
+
+// TrackOutBoundTraffic counts the transmitted traffic by the snowflake proxy
+func (m *Metrics) TrackOutBoundTraffic(value int64) {
+	m.totalOutBoundTraffic.Add(float64(value))
+}
+
+// TrackNewConnection counts the new connections
+func (m *Metrics) TrackNewConnection(country string) {
+	m.totalConnections.
+		With(prometheus.Labels{"country": country}).
+		Inc()
+}
+
+// TrackFailedConnection counts failed connection attempts
+func (m *Metrics) TrackFailedConnection() {
+	m.totalFailedConnections.Inc()
+}
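+
+// A minimal usage sketch (illustrative only; the functions above are this
+// file's API, but the wiring shown here is hypothetical):
+//
+//	m := NewMetrics()
+//	if err := m.Start("localhost:9999"); err != nil {
+//		log.Fatal(err)
+//	}
+//	m.TrackNewConnection("us")
+//	m.TrackInBoundTraffic(1024)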
diff --git a/proxy/lib/proxy-go_test.go b/proxy/lib/proxy-go_test.go
new file mode 100644
index 0000000..e8e50db
--- /dev/null
+++ b/proxy/lib/proxy-go_test.go
@@ -0,0 +1,577 @@
+package snowflake_proxy
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+	"testing"
+
+	"github.com/pion/webrtc/v4"
+	. "github.com/smartystreets/goconvey/convey"
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages"
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/util"
+)
+
+// Set up a mock broker to communicate with
+type MockTransport struct {
+	statusOverride int
+	body           []byte
+}
+
+// Just returns a response with a fake SDP answer.
+func (m *MockTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	s := io.NopCloser(bytes.NewReader(m.body))
+	r := &http.Response{
+		StatusCode: m.statusOverride,
+		Body:       s,
+	}
+	return r, nil
+}
+
+// Set up a mock faulty transport
+type FaultyTransport struct {
+	statusOverride int
+	body           []byte
+}
+
+// Always returns an error, to simulate a failed round trip.
+func (f *FaultyTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	return nil, fmt.Errorf("TransportFailed")
+}
+
+func TestRemoteIPFromSDP(t *testing.T) {
+	tests := []struct {
+		sdp      string
+		expected net.IP
+	}{
+		// https://tools.ietf.org/html/rfc4566#section-5
+		{`v=0
+o=jdoe 2890844526 2890842807 IN IP4 10.47.16.5
+s=SDP Seminar
+i=A Seminar on the session description protocol
+u=http://www.example.com/seminars/sdp.pdf
+e=j.doe@example.com (Jane Doe)
+c=IN IP4 224.2.17.12/127
+t=2873397496 2873404696
+a=recvonly
+m=audio 49170 RTP/AVP 0
+m=video 51372 RTP/AVP 99
+a=rtpmap:99 h263-1998/90000
+`, net.ParseIP("224.2.17.12")},
+		// local addresses only
+		{`v=0
+o=jdoe 2890844526 2890842807 IN IP4 10.47.16.5
+s=SDP Seminar
+i=A Seminar on the session description protocol
+u=http://www.example.com/seminars/sdp.pdf
+e=j.doe@example.com (Jane Doe)
+c=IN IP4 10.47.16.5/127
+t=2873397496 2873404696
+a=recvonly
+m=audio 49170 RTP/AVP 0
+m=video 51372 RTP/AVP 99
+a=rtpmap:99 h263-1998/90000
+`, nil},
+		// Remote IP in candidate attribute only
+		{`v=0
+o=- 4358805017720277108 2 IN IP4 0.0.0.0
+s=-
+t=0 0
+a=group:BUNDLE data
+a=msid-semantic: WMS
+m=application 56688 DTLS/SCTP 5000
+c=IN IP4 0.0.0.0
+a=candidate:3769337065 1 udp 2122260223 1.2.3.4 56688 typ host generation 0 network-id 1 network-cost 50
+a=ice-ufrag:aMAZ
+a=ice-pwd:jcHb08Jjgrazp2dzjdrvPPvV
+a=ice-options:trickle
+a=fingerprint:sha-256 C8:88:EE:B9:E7:02:2E:21:37:ED:7A:D1:EB:2B:A3:15:A2:3B:5B:1C:3D:D4:D5:1F:06:CF:52:40:03:F8:DD:66
+a=setup:actpass
+a=mid:data
+a=sctpmap:5000 webrtc-datachannel 1024
+`, net.ParseIP("1.2.3.4")},
+		// Unspecified address
+		{`v=0
+o=jdoe 2890844526 2890842807 IN IP4 0.0.0.0
+s=SDP Seminar
+i=A Seminar on the session description protocol
+u=http://www.example.com/seminars/sdp.pdf
+e=j.doe@example.com (Jane Doe)
+t=2873397496 2873404696
+a=recvonly
+m=audio 49170 RTP/AVP 0
+m=video 51372 RTP/AVP 99
+a=rtpmap:99 h263-1998/90000
+`, nil},
+		// Missing c= line
+		{`v=0
+o=jdoe 2890844526 2890842807 IN IP4 10.47.16.5
+s=SDP Seminar
+i=A Seminar on the session description protocol
+u=http://www.example.com/seminars/sdp.pdf
+e=j.doe@example.com (Jane Doe)
+t=2873397496 2873404696
+a=recvonly
+m=audio 49170 RTP/AVP 0
+m=video 51372 RTP/AVP 99
+a=rtpmap:99 h263-1998/90000
+`, nil},
+		// Single line, IP address only
+		{`v=0
+o=- 4358805017720277108 2 IN IP4 0.0.0.0
+s=-
+t=0 0
+a=group:BUNDLE data
+a=msid-semantic: WMS
+m=application 56688 DTLS/SCTP 5000
+c=IN IP4 224.2.1.1
+`, net.ParseIP("224.2.1.1")},
+		// Same, with TTL
+		{`v=0
+o=- 4358805017720277108 2 IN IP4 0.0.0.0
+s=-
+t=0 0
+a=group:BUNDLE data
+a=msid-semantic: WMS
+m=application 56688 DTLS/SCTP 5000
+c=IN IP4 224.2.1.1/127
+`, net.ParseIP("224.2.1.1")},
+		// Same, with TTL and multicast addresses
+		{`v=0
+o=- 4358805017720277108 2 IN IP4 0.0.0.0
+s=-
+t=0 0
+a=group:BUNDLE data
+a=msid-semantic: WMS
+m=application 56688 DTLS/SCTP 5000
+c=IN IP4 224.2.1.1/127/3
+`, net.ParseIP("224.2.1.1")},
+		// IPv6, address only
+		{`v=0
+o=- 4358805017720277108 2 IN IP4 0.0.0.0
+s=-
+t=0 0
+a=group:BUNDLE data
+a=msid-semantic: WMS
+m=application 56688 DTLS/SCTP 5000
+c=IN IP6 FF15::101
+`, net.ParseIP("ff15::101")},
+		// Same, with multicast addresses
+		{`v=0
+o=- 4358805017720277108 2 IN IP4 0.0.0.0
+s=-
+t=0 0
+a=group:BUNDLE data
+a=msid-semantic: WMS
+m=application 56688 DTLS/SCTP 5000
+c=IN IP6 FF15::101/3
+`, net.ParseIP("ff15::101")},
+		// Multiple c= lines
+		{`v=0
+o=- 4358805017720277108 2 IN IP4 0.0.0.0
+s=-
+t=0 0
+a=group:BUNDLE data
+a=msid-semantic: WMS
+m=application 56688 DTLS/SCTP 5000
+c=IN IP4 1.2.3.4
+c=IN IP4 5.6.7.8
+`, net.ParseIP("1.2.3.4")},
+		// Modified from SDP sent by snowflake-client.
+		{`v=0
+o=- 7860378660295630295 2 IN IP4 127.0.0.1
+s=-
+t=0 0
+a=group:BUNDLE data
+a=msid-semantic: WMS
+m=application 54653 DTLS/SCTP 5000
+c=IN IP4 1.2.3.4
+a=candidate:3581707038 1 udp 2122260223 192.168.0.1 54653 typ host generation 0 network-id 1 network-cost 50
+a=candidate:2617212910 1 tcp 1518280447 192.168.0.1 59673 typ host tcptype passive generation 0 network-id 1 network-cost 50
+a=candidate:2082671819 1 udp 1686052607 1.2.3.4 54653 typ srflx raddr 192.168.0.1 rport 54653 generation 0 network-id 1 network-cost 50
+a=ice-ufrag:IBdf
+a=ice-pwd:G3lTrrC9gmhQx481AowtkhYz
+a=fingerprint:sha-256 53:F8:84:D9:3C:1F:A0:44:AA:D6:3C:65:80:D3:CB:6F:23:90:17:41:06:F9:9C:10:D8:48:4A:A8:B6:FA:14:A1
+a=setup:actpass
+a=mid:data
+a=sctpmap:5000 webrtc-datachannel 1024
+`, net.ParseIP("1.2.3.4")},
+		// Improper character within IPv4
+		{`v=0
+o=- 4358805017720277108 2 IN IP4 0.0.0.0
+s=-
+t=0 0
+a=group:BUNDLE data
+a=msid-semantic: WMS
+m=application 56688 DTLS/SCTP 5000
+c=IN IP4 224.2z.1.1
+`, nil},
+		// Improper character within IPv6
+		{`v=0
+o=- 4358805017720277108 2 IN IP4 0.0.0.0
+s=-
+t=0 0
+a=group:BUNDLE data
+a=msid-semantic: WMS
+m=application 56688 DTLS/SCTP 5000
+c=IN IP6 ff15:g::101
+`, nil},
+		// Bogus "IP7" addrtype
+		{`v=0
+o=- 4358805017720277108 2 IN IP4 0.0.0.0
+s=-
+t=0 0
+a=group:BUNDLE data
+a=msid-semantic: WMS
+m=application 56688 DTLS/SCTP 5000
+c=IN IP7 1.2.3.4
+`, nil},
+	}
+
+	for _, test := range tests {
+		// https://tools.ietf.org/html/rfc4566#section-5: "The sequence
+		// CRLF (0x0d0a) is used to end a record, although parsers
+		// SHOULD be tolerant and also accept records terminated with a
+		// single newline character." We represent the test cases with
+		// LF line endings for convenience, and test them both that way
+		// and with CRLF line endings.
+		lfSDP := test.sdp
+		crlfSDP := strings.Replace(lfSDP, "\n", "\r\n", -1)
+
+		ip := remoteIPFromSDP(lfSDP)
+		if !ip.Equal(test.expected) {
+			t.Errorf("expected %q, got %q from %q", test.expected, ip, lfSDP)
+		}
+		ip = remoteIPFromSDP(crlfSDP)
+		if !ip.Equal(test.expected) {
+			t.Errorf("expected %q, got %q from %q", test.expected, ip, crlfSDP)
+		}
+	}
+}
+
+func TestSessionDescriptions(t *testing.T) {
+	Convey("Session description deserialization", t, func() {
+		for _, test := range []struct {
+			msg string
+			ret *webrtc.SessionDescription
+		}{
+			{
+				"test",
+				nil,
+			},
+			{
+				`{"type":"answer"}`,
+				nil,
+			},
+			{
+				`{"sdp":"test"}`,
+				nil,
+			},
+			{
+				`{"type":"test", "sdp":"test"}`,
+				nil,
+			},
+			{
+				`{"type":"answer", "sdp":"test"}`,
+				&webrtc.SessionDescription{
+					Type: webrtc.SDPTypeAnswer,
+					SDP:  "test",
+				},
+			},
+			{
+				`{"type":"pranswer", "sdp":"test"}`,
+				&webrtc.SessionDescription{
+					Type: webrtc.SDPTypePranswer,
+					SDP:  "test",
+				},
+			},
+			{
+				`{"type":"rollback", "sdp":"test"}`,
+				&webrtc.SessionDescription{
+					Type: webrtc.SDPTypeRollback,
+					SDP:  "test",
+				},
+			},
+			{
+				`{"type":"offer", "sdp":"test"}`,
+				&webrtc.SessionDescription{
+					Type: webrtc.SDPTypeOffer,
+					SDP:  "test",
+				},
+			},
+		} {
+			desc, _ := util.DeserializeSessionDescription(test.msg)
+			So(desc, ShouldResemble, test.ret)
+		}
+	})
+	Convey("Session description serialization", t, func() {
+		for _, test := range []struct {
+			desc *webrtc.SessionDescription
+			ret  string
+		}{
+			{
+				&webrtc.SessionDescription{
+					Type: webrtc.SDPTypeOffer,
+					SDP:  "test",
+				},
+				`{"type":"offer","sdp":"test"}`,
+			},
+		} {
+			msg, err := util.SerializeSessionDescription(test.desc)
+			So(msg, ShouldResemble, test.ret)
+			So(err, ShouldBeNil)
+		}
+	})
+}
+
+func TestBrokerInteractions(t *testing.T) {
+	const sampleSDP = `"v=0\r\no=- 4358805017720277108 2 IN IP4 8.8.8.8\r\ns=-\r\nt=0 0\r\na=group:BUNDLE data\r\na=msid-semantic: WMS\r\nm=application 56688 DTLS/SCTP 5000\r\nc=IN IP4 8.8.8.8\r\na=candidate:3769337065 1 udp 2122260223 8.8.8.8 56688 typ host generation 0 network-id 1 network-cost 50\r\na=candidate:2921887769 1 tcp 1518280447 8.8.8.8 35441 typ host tcptype passive generation 0 network-id 1 network-cost 50\r\na=ice-ufrag:aMAZ\r\na=ice-pwd:jcHb08Jjgrazp2dzjdrvPPvV\r\na=ice-options:trickle\r\na=fingerprint:sha-256 C8:88:EE:B9:E7:02:2E:21:37:ED:7A:D1:EB:2B:A3:15:A2:3B:5B:1C:3D:D4:D5:1F:06:CF:52:40:03:F8:DD:66\r\na=setup:actpass\r\na=mid:data\r\na=sctpmap:5000 webrtc-datachannel 1024\r\n"`
+
+	const sampleOffer = `{"type":"offer","sdp":` + sampleSDP + `}`
+	const sampleAnswer = `{"type":"answer","sdp":` + sampleSDP + `}`
+
+	Convey("Proxy connections to broker", t, func() {
+		var err error
+		broker, err = newSignalingServer("localhost")
+		So(err, ShouldBeNil)
+		tokens = newTokens(0)
+
+		// Mock peerConnection
+		config = webrtc.Configuration{
+			ICEServers: []webrtc.ICEServer{
+				{
+					URLs: []string{"stun:stun.l.google.com:19302"},
+				},
+			},
+		}
+		pc, _ := webrtc.NewPeerConnection(config)
+		offer, _ := util.DeserializeSessionDescription(sampleOffer)
+		pc.SetRemoteDescription(*offer)
+		answer, _ := pc.CreateAnswer(nil)
+		pc.SetLocalDescription(answer)
+
+		Convey("polls broker correctly", func() {
+			var err error
+
+			b, err := messages.EncodePollResponse(sampleOffer, true, "unknown")
+			So(err, ShouldBeNil)
+			broker.transport = &MockTransport{
+				http.StatusOK,
+				b,
+			}
+
+			sdp, _ := broker.pollOffer(sampleOffer, DefaultProxyType, "")
+			expectedSDP, _ := strconv.Unquote(sampleSDP)
+			So(sdp.SDP, ShouldResemble, expectedSDP)
+		})
+		Convey("handles poll error", func() {
+			b := []byte("test")
+			broker.transport = &MockTransport{
+				http.StatusOK,
+				b,
+			}
+
+			sdp, _ := broker.pollOffer(sampleOffer, DefaultProxyType, "")
+			So(sdp, ShouldBeNil)
+		})
+		Convey("sends answer to broker", func() {
+			var err error
+
+			b, err := messages.EncodeAnswerResponse(true)
+			So(err, ShouldBeNil)
+			broker.transport = &MockTransport{
+				http.StatusOK,
+				b,
+			}
+
+			err = broker.sendAnswer(sampleAnswer, pc)
+			So(err, ShouldBeNil)
+
+			b, err = messages.EncodeAnswerResponse(false)
+			So(err, ShouldBeNil)
+			broker.transport = &MockTransport{
+				http.StatusOK,
+				b,
+			}
+
+			err = broker.sendAnswer(sampleAnswer, pc)
+			So(err, ShouldNotBeNil)
+		})
+		Convey("handles answer error", func() {
+			// Error if faulty transport
+			broker.transport = &FaultyTransport{}
+			err := broker.sendAnswer(sampleAnswer, pc)
+			So(err, ShouldNotBeNil)
+
+			// Error if status code is not ok
+			broker.transport = &MockTransport{
+				http.StatusGone,
+				[]byte(""),
+			}
+			err = broker.sendAnswer("test", pc)
+			So(err, ShouldNotBeNil)
+			So(err.Error(), ShouldResemble,
+				"error sending answer to broker: remote returned status code 410")
+
+			// Error if we can't parse broker message
+			broker.transport = &MockTransport{
+				http.StatusOK,
+				[]byte("test"),
+			}
+			err = broker.sendAnswer("test", pc)
+			So(err, ShouldNotBeNil)
+
+			// Error if broker message surpasses read limit
+			broker.transport = &MockTransport{
+				http.StatusOK,
+				make([]byte, 100001),
+			}
+			err = broker.sendAnswer("test", pc)
+			So(err, ShouldNotBeNil)
+		})
+	})
+}
+
+func TestUtilityFuncs(t *testing.T) {
+	Convey("LimitedRead", t, func() {
+		c, s := net.Pipe()
+		Convey("Successful read", func() {
+			go func() {
+				bytes := make([]byte, 50)
+				c.Write(bytes)
+				c.Close()
+			}()
+			bytes, err := limitedRead(s, 60)
+			So(len(bytes), ShouldEqual, 50)
+			So(err, ShouldBeNil)
+		})
+		Convey("Large read", func() {
+			go func() {
+				bytes := make([]byte, 50)
+				c.Write(bytes)
+				c.Close()
+			}()
+			bytes, err := limitedRead(s, 49)
+			So(len(bytes), ShouldEqual, 49)
+			So(err, ShouldEqual, io.ErrUnexpectedEOF)
+		})
+		Convey("Failed read", func() {
+			s.Close()
+			bytes, err := limitedRead(s, 49)
+			So(len(bytes), ShouldEqual, 0)
+			So(err, ShouldEqual, io.ErrClosedPipe)
+		})
+	})
+	Convey("SessionID Generation", t, func() {
+		sid1 := genSessionID()
+		sid2 := genSessionID()
+		So(sid1, ShouldNotEqual, sid2)
+	})
+	Convey("CopyLoop", t, func() {
+		c1, s1 := net.Pipe()
+		c2, s2 := net.Pipe()
+		go copyLoop(s1, s2, nil)
+		go func() {
+			bytes := []byte("Hello!")
+			c1.Write(bytes)
+		}()
+		bytes := make([]byte, 6)
+		n, err := c2.Read(bytes)
+		So(n, ShouldEqual, 6)
+		So(err, ShouldBeNil)
+		So(bytes, ShouldResemble, []byte("Hello!"))
+		s1.Close()
+
+		// Check that the copy loop has closed the other connection
+		_, err = s2.Write(bytes)
+		So(err, ShouldNotBeNil)
+	})
+	Convey("isRelayURLAcceptable", t, func() {
+		testingVector := []struct {
+			pattern               string
+			allowPrivateAddresses bool
+			allowNonTLS           bool
+			targetURL             string
+			expects               error
+		}{
+			// These are copied from `TestMatchMember`.
+			{pattern: "^snowflake.torproject.net$", allowNonTLS: false, targetURL: "wss://snowflake.torproject.net", expects: nil},
+			{pattern: "^snowflake.torproject.net$", allowNonTLS: false, targetURL: "wss://faketorproject.net", expects: fmt.Errorf("")},
+			{pattern: "snowflake.torproject.net$", allowNonTLS: false, targetURL: "wss://faketorproject.net", expects: fmt.Errorf("")},
+			{pattern: "snowflake.torproject.net$", allowNonTLS: false, targetURL: "wss://snowflake.torproject.net", expects: nil},
+			{pattern: "snowflake.torproject.net$", allowNonTLS: false, targetURL: "wss://imaginary-01-snowflake.torproject.net", expects: nil},
+			{pattern: "snowflake.torproject.net$", allowNonTLS: false, targetURL: "wss://imaginary-aaa-snowflake.torproject.net", expects: nil},
+			{pattern: "snowflake.torproject.net$", allowNonTLS: false, targetURL: "wss://imaginary-aaa-snowflake.faketorproject.net", expects: fmt.Errorf("")},
+
+			{pattern: "^torproject.net$", allowNonTLS: false, targetURL: "wss://faketorproject.net", expects: fmt.Errorf("")},
+			// Yes, this is how it works if there is no "^".
+			{pattern: "torproject.net$", allowNonTLS: false, targetURL: "wss://faketorproject.net", expects: nil},
+
+			// NonTLS
+			{pattern: "snowflake.torproject.net$", allowNonTLS: false, targetURL: "ws://snowflake.torproject.net", expects: fmt.Errorf("")},
+			{pattern: "snowflake.torproject.net$", allowNonTLS: true, targetURL: "ws://snowflake.torproject.net", expects: nil},
+
+			// Sneaky attempt to use path
+			{pattern: "snowflake.torproject.net$", allowNonTLS: false, targetURL: "wss://evil.com/snowflake.torproject.net", expects: fmt.Errorf("")},
+			{pattern: "snowflake.torproject.net$", allowNonTLS: false, targetURL: "wss://evil.com/?test=snowflake.torproject.net", expects: fmt.Errorf("")},
+
+			// IP address
+			{pattern: "^1.1.1.1$", allowNonTLS: true, targetURL: "ws://1.1.1.1/test?test=test#test", expects: nil},
+			{pattern: "^1.1.1.1$", allowNonTLS: true, targetURL: "ws://231.1.1.1/test?test=test#test", expects: fmt.Errorf("")},
+			{pattern: "1.1.1.1$", allowNonTLS: true, targetURL: "ws://231.1.1.1/test?test=test#test", expects: nil},
+			// Private IP address
+			{pattern: "$", allowNonTLS: true, targetURL: "ws://192.168.1.1", expects: fmt.Errorf("")},
+			{pattern: "$", allowNonTLS: true, targetURL: "ws://127.0.0.1", expects: fmt.Errorf("")},
+			{pattern: "$", allowNonTLS: true, targetURL: "ws://[fc00::]/", expects: fmt.Errorf("")},
+			{pattern: "$", allowNonTLS: true, targetURL: "ws://[::1]/", expects: fmt.Errorf("")},
+			{pattern: "$", allowNonTLS: true, targetURL: "ws://0.0.0.0/", expects: fmt.Errorf("")},
+			{pattern: "$", allowNonTLS: true, targetURL: "ws://169.254.1.1/", expects: fmt.Errorf("")},
+			{pattern: "$", allowNonTLS: true, targetURL: "ws://100.111.1.1/", expects: fmt.Errorf("")},
+			{pattern: "192.168.1.100$", allowPrivateAddresses: true, allowNonTLS: true, targetURL: "ws://192.168.1.100/test?test=test", expects: nil},
+			{pattern: "localhost$", allowPrivateAddresses: true, allowNonTLS: true, targetURL: "ws://localhost/test?test=test", expects: nil},
+			{pattern: "::1$", allowPrivateAddresses: true, allowNonTLS: true, targetURL: "ws://[::1]/test?test=test", expects: nil},
+			// Broadcast IP address. `checkIsRelayURLAcceptable` allows it,
+			// but it's not valid in the context of WebSocket
+			{pattern: "255.255.255.255$", allowPrivateAddresses: true, allowNonTLS: true, targetURL: "ws://255.255.255.255/test?test=test", expects: nil},
+
+			// Port
+			{pattern: "^snowflake.torproject.net$", allowNonTLS: false, targetURL: "wss://snowflake.torproject.net:8080/test?test=test#test", expects: nil},
+			// This currently doesn't work as we only check hostname.
+			// {pattern: "^snowflake.torproject.net:443$", allowNonTLS: false, targetURL: "wss://snowflake.torproject.net:443", expects: nil},
+			// {pattern: "^snowflake.torproject.net:443$", allowNonTLS: false, targetURL: "wss://snowflake.torproject.net:9999", expects: fmt.Errorf("")},
+
+			// Any URL
+			{pattern: "$", allowNonTLS: false, targetURL: "wss://any.com/test?test=test#test", expects: nil},
+			{pattern: "$", allowNonTLS: false, targetURL: "wss://1.1.1.1/test?test=test#test", expects: nil},
+
+			// Weird / invalid / ambiguous URL
+			{pattern: "$", allowNonTLS: true, targetURL: "snowflake.torproject.net", expects: fmt.Errorf("")},
+			{pattern: "$", allowNonTLS: true, targetURL: "//snowflake.torproject.net", expects: fmt.Errorf("")},
+			{pattern: "$", allowNonTLS: true, targetURL: "/path", expects: fmt.Errorf("")},
+			{pattern: "$", allowNonTLS: true, targetURL: "wss://snowflake.torproject .net", expects: fmt.Errorf("")},
+			{pattern: "$", allowNonTLS: true, targetURL: "wss://😀", expects: nil},
+			{pattern: "$", allowNonTLS: true, targetURL: "wss://пример.рф", expects: nil},
+
+			// Non-websocket protocols
+			{pattern: "snowflake.torproject.net$", allowNonTLS: false, targetURL: "https://snowflake.torproject.net", expects: fmt.Errorf("")},
+			{pattern: "snowflake.torproject.net$", allowNonTLS: false, targetURL: "ftp://snowflake.torproject.net", expects: fmt.Errorf("")},
+			{pattern: "snowflake.torproject.net$", allowNonTLS: true, targetURL: "https://snowflake.torproject.net", expects: fmt.Errorf("")},
+			{pattern: "snowflake.torproject.net$", allowNonTLS: true, targetURL: "ftp://snowflake.torproject.net", expects: fmt.Errorf("")},
+		}
+		for _, v := range testingVector {
+			err := checkIsRelayURLAcceptable(v.pattern, v.allowPrivateAddresses, v.allowNonTLS, v.targetURL)
+			if v.expects != nil {
+				So(err, ShouldNotBeNil)
+			} else {
+				So(err, ShouldBeNil)
+			}
+		}
+	})
+}
diff --git a/proxy/lib/pt_event_logger.go b/proxy/lib/pt_event_logger.go
new file mode 100644
index 0000000..5edc5d3
--- /dev/null
+++ b/proxy/lib/pt_event_logger.go
@@ -0,0 +1,87 @@
+package snowflake_proxy
+
+import (
+	"io"
+	"log"
+	"sync/atomic"
+	"time"
+
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/event"
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/task"
+)
+
+func NewProxyEventLogger(output io.Writer, disableStats bool) event.SnowflakeEventReceiver {
+	logger := log.New(output, "", log.Flags())
+	return &proxyEventLogger{logger: logger, disableStats: disableStats}
+}
+
+type proxyEventLogger struct {
+	logger       *log.Logger
+	disableStats bool
+}
+
+func (p *proxyEventLogger) OnNewSnowflakeEvent(e event.SnowflakeEvent) {
+	switch e.(type) {
+	case event.EventOnProxyStarting:
+		p.logger.Println(e.String())
+
+		if p.logger.Flags()&log.LUTC == 0 {
+			p.logger.Println("Local time is being used for logging. If you want to " +
+				"share your log, consider to modify the date/time for more anonymity.")
+		}
+	case event.EventOnProxyStats:
+		if !p.disableStats {
+			p.logger.Println(e.String())
+		}
+	case event.EventOnCurrentNATTypeDetermined:
+		p.logger.Println(e.String())
+	default:
+		// Suppress logs of these events
+		// https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/issues/40310
+		// https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/issues/40413
+	}
+}
+
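+// periodicProxyStats accumulates connection counts and, once per
+// logPeriod, emits an EventOnProxyStats summarizing the traffic and
+// connections since the last summary, resetting the counters each time.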
+type periodicProxyStats struct {
+	bytesLogger bytesLogger
+	// Completed successful connections.
+	connectionCount atomic.Int32
+	// Connections that failed to establish.
+	failedConnectionCount atomic.Uint32
+	logPeriod             time.Duration
+	task                  *task.Periodic
+	dispatcher            event.SnowflakeEventDispatcher
+}
+
+func newPeriodicProxyStats(logPeriod time.Duration, dispatcher event.SnowflakeEventDispatcher, bytesLogger bytesLogger) *periodicProxyStats {
+	el := &periodicProxyStats{logPeriod: logPeriod, dispatcher: dispatcher, bytesLogger: bytesLogger}
+	el.task = &task.Periodic{Interval: logPeriod, Execute: el.logTick}
+	el.task.WaitThenStart()
+	return el
+}
+
+func (p *periodicProxyStats) OnNewSnowflakeEvent(e event.SnowflakeEvent) {
+	switch e.(type) {
+	case event.EventOnProxyConnectionOver:
+		p.connectionCount.Add(1)
+	case event.EventOnProxyConnectionFailed:
+		p.failedConnectionCount.Add(1)
+	}
+}
+
+func (p *periodicProxyStats) logTick() error {
+	inboundSum, outboundSum := p.bytesLogger.GetStat()
+	e := event.EventOnProxyStats{
+		SummaryInterval:       p.logPeriod,
+		ConnectionCount:       int(p.connectionCount.Swap(0)),
+		FailedConnectionCount: uint(p.failedConnectionCount.Swap(0)),
+	}
+	e.InboundBytes, e.InboundUnit = formatTraffic(inboundSum)
+	e.OutboundBytes, e.OutboundUnit = formatTraffic(outboundSum)
+	p.dispatcher.OnNewSnowflakeEvent(e)
+	return nil
+}
+
+func (p *periodicProxyStats) Close() error {
+	return p.task.Close()
+}
diff --git a/proxy/lib/pt_event_metrics.go b/proxy/lib/pt_event_metrics.go
new file mode 100644
index 0000000..4cfab78
--- /dev/null
+++ b/proxy/lib/pt_event_metrics.go
@@ -0,0 +1,34 @@
+package snowflake_proxy
+
+import (
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/event"
+)
+
+type EventCollector interface {
+	TrackInBoundTraffic(value int64)
+	TrackOutBoundTraffic(value int64)
+	TrackNewConnection(country string)
+	TrackFailedConnection()
+}
+
+type EventMetrics struct {
+	collector EventCollector
+}
+
+func NewEventMetrics(collector EventCollector) *EventMetrics {
+	return &EventMetrics{collector: collector}
+}
+
+func (em *EventMetrics) OnNewSnowflakeEvent(e event.SnowflakeEvent) {
+	switch e := e.(type) {
+	case event.EventOnProxyStats:
+		em.collector.TrackInBoundTraffic(e.InboundBytes)
+		em.collector.TrackOutBoundTraffic(e.OutboundBytes)
+	case event.EventOnProxyConnectionOver:
+		em.collector.TrackNewConnection(e.Country)
+	case event.EventOnProxyConnectionFailed:
+		em.collector.TrackFailedConnection()
+	}
+}
diff --git a/proxy/lib/snowflake.go b/proxy/lib/snowflake.go
new file mode 100644
index 0000000..bcdfbda
--- /dev/null
+++ b/proxy/lib/snowflake.go
@@ -0,0 +1,949 @@
+/*
+Package snowflake_proxy provides functionality for creating, starting, and stopping a snowflake
+proxy.
+
+To run a proxy, you must first create a proxy configuration. Unconfigured fields
+will be set to the defined defaults.
+
+	proxy := snowflake_proxy.SnowflakeProxy{
+		BrokerURL: "https://snowflake-broker.example.com",
+		STUNURL: "stun:stun.l.google.com:19302",
+		// ...
+	}
+
+You may then start and stop the proxy. Stopping the proxy will close existing connections and
+the proxy will not poll for more clients.
+
+	go func() {
+		err := proxy.Start()
+		// handle error
+	}
+
+	// ...
+
+	proxy.Stop()
+*/
+package snowflake_proxy
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"net/http"
+	"net/url"
+	"reflect"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/pion/ice/v4"
+
+	"github.com/gorilla/websocket"
+	"github.com/pion/transport/v3/stdnet"
+	"github.com/pion/webrtc/v4"
+
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/constants"
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/event"
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/messages"
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/namematcher"
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/task"
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/util"
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/websocketconn"
+)
+
+const (
+	DefaultPollInterval = 5 * time.Second
+	DefaultBrokerURL    = "https://snowflake-broker.torproject.net/"
+	DefaultNATProbeURL  = "https://snowflake-broker.torproject.net:8443/probe"
+	// This is rather a "DefaultDefaultRelayURL"
+	DefaultRelayURL  = "wss://snowflake.torproject.net/"
+	DefaultSTUNURL   = "stun:stun.l.google.com:19302,stun:stun.voip.blackberry.com:3478"
+	DefaultProxyType = "standalone"
+)
+
+const (
+	// NATUnknown is set if the proxy cannot connect to probetest.
+	NATUnknown = "unknown"
+
+	// NATRestricted is set if the proxy times out when connecting to a symmetric NAT.
+	NATRestricted = "restricted"
+
+	// NATUnrestricted is set if the proxy successfully connects to a symmetric NAT.
+	NATUnrestricted = "unrestricted"
+)
+
+const (
+	// Amount of time after sending an SDP answer before the proxy assumes the
+	// client is not going to connect
+	dataChannelTimeout = 20 * time.Second
+
+	// Maximum number of bytes to be read from an HTTP request
+	readLimit = 100000
+
+	sessionIDLength = 16
+)
+
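+// bufferedAmountLowThreshold pairs with maxBufferedAmount (webrtcconn.go)
+// for data channel flow control: webRTCConn.Write blocks once more than
+// maxBufferedAmount bytes are queued, and resumes when the buffered amount
+// drains below this threshold.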
+const bufferedAmountLowThreshold uint64 = 256 * 1024 // 256 KB
+
+var broker *SignalingServer
+
+var currentNATTypeAccess = &sync.RWMutex{}
+
+// currentNATType describes the local network environment.
+// Acquire currentNATTypeAccess before accessing it.
+var currentNATType = NATUnknown
+
+func getCurrentNATType() string {
+	currentNATTypeAccess.RLock()
+	defer currentNATTypeAccess.RUnlock()
+	return currentNATType
+}
+
+func setCurrentNATType(newType string) {
+	currentNATTypeAccess.Lock()
+	defer currentNATTypeAccess.Unlock()
+	currentNATType = newType
+}
+
+var (
+	tokens *tokens_t
+	config webrtc.Configuration
+	client http.Client
+)
+
+type GeoIP interface {
+	GetCountryByAddr(net.IP) (string, bool)
+}
+
+// SnowflakeProxy is used to configure an embedded
+// Snowflake in another Go application.
+// For some more info also see CLI parameter descriptions in README.
+type SnowflakeProxy struct {
+	// How often to ask the broker for a new client
+	PollInterval time.Duration
+	// Capacity is the maximum number of clients a Snowflake will serve.
+	// Proxies with a capacity of 0 will accept an unlimited number of clients.
+	Capacity uint
+	// STUNURL is a comma-separated list of STUN server URLs that the proxy will use
+	STUNURL string
+	// BrokerURL is the URL of the Snowflake broker
+	BrokerURL string
+	// KeepLocalAddresses indicates whether local SDP candidates will be sent to the broker
+	KeepLocalAddresses bool
+	// RelayURL is the default `URL` of the server (relay)
+	// that this proxy will forward client connections to,
+	// in case the broker itself did not specify the said URL
+	RelayURL string
+	// OutboundAddress specifies an IP address to use as the SDP host candidate
+	OutboundAddress string
+	// EphemeralMinPort and EphemeralMaxPort limit the range of ports that
+	// ICE UDP connections may allocate from.
+	// When specifying the range, make sure it's at least 2x as wide
+	// as the amount of clients that you are hoping to serve concurrently
+	// (see the `Capacity` property).
+	EphemeralMinPort uint16
+	EphemeralMaxPort uint16
+	// RelayDomainNamePattern is the pattern that specifies which domain
+	// names the relay (server) is allowed to have.
+	// If the pattern starts with ^ then an exact match is required;
+	// otherwise the pattern is matched as a suffix of the domain name.
+	// There is no lookbehind assertion when matching the suffix,
+	// so the string preceding the suffix does not need to be empty or end with a dot.
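+	// For example, the pattern "snowflake.torproject.net$" matches
+	// "snowflake.torproject.net" and "imaginary-01-snowflake.torproject.net",
+	// but "^snowflake.torproject.net$" matches only the former
+	// (see the test vectors in TestUtilityFuncs for more cases).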
+	RelayDomainNamePattern string
+	// AllowProxyingToPrivateAddresses determines whether to allow forwarding
+	// client connections to private IP addresses.
+	// Useful when a Snowflake server (relay) is hosted on the same private network
+	// as this proxy.
+	AllowProxyingToPrivateAddresses bool
+	AllowNonTLSRelay                bool
+	// NATProbeURL is the URL of the probe service we use for NAT checks
+	NATProbeURL string
+	// NATTypeMeasurementInterval is the time between NAT type retests
+	NATTypeMeasurementInterval time.Duration
+	// ProxyType is the type reported to the broker. If not provided, "standalone" will be used.
+	ProxyType       string
+	EventDispatcher event.SnowflakeEventDispatcher
+	shutdown        chan struct{}
+
+	// SummaryInterval is the time interval at which proxy stats will be logged
+	SummaryInterval time.Duration
+
+	// GeoIP will be used to detect the country of the clients if provided
+	GeoIP GeoIP
+
+	periodicProxyStats *periodicProxyStats
+	bytesLogger        bytesLogger
+}
+
+// isRemoteAddress reports whether ip is a remote address: not local,
+// not unspecified, and not loopback.
+func isRemoteAddress(ip net.IP) bool {
+	return !(util.IsLocal(ip) || ip.IsUnspecified() || ip.IsLoopback())
+}
+
+func genSessionID() string {
+	buf := make([]byte, sessionIDLength)
+	_, err := rand.Read(buf)
+	if err != nil {
+		panic(err.Error())
+	}
+	return strings.TrimRight(base64.StdEncoding.EncodeToString(buf), "=")
+}
+
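+// limitedRead reads from r until EOF or until limit bytes have been read.
+// If r yields more than limit bytes, the result is truncated to limit bytes
+// and io.ErrUnexpectedEOF is returned.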
+func limitedRead(r io.Reader, limit int64) ([]byte, error) {
+	p, err := io.ReadAll(&io.LimitedReader{R: r, N: limit + 1})
+	if err != nil {
+		return p, err
+	} else if int64(len(p)) == limit+1 {
+		return p[0:limit], io.ErrUnexpectedEOF
+	}
+	return p, err
+}
+
+// SignalingServer wraps a signaling endpoint (the broker, or the NAT probe
+// server) together with the HTTP transport used to reach it.
+type SignalingServer struct {
+	url       *url.URL
+	transport http.RoundTripper
+}
+
+func newSignalingServer(rawURL string) (*SignalingServer, error) {
+	var err error
+	s := new(SignalingServer)
+	s.url, err = url.Parse(rawURL)
+	if err != nil {
+		return nil, fmt.Errorf("invalid broker url: %s", err)
+	}
+
+	// Clone the default transport so that setting the timeout below does not
+	// mutate the shared http.DefaultTransport.
+	transport := http.DefaultTransport.(*http.Transport).Clone()
+	transport.ResponseHeaderTimeout = 30 * time.Second
+	s.transport = transport
+
+	return s, nil
+}
+
+// Post sends a POST request to the SignalingServer
+func (s *SignalingServer) Post(path string, payload io.Reader) ([]byte, error) {
+	req, err := http.NewRequest("POST", path, payload)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := s.transport.RoundTrip(req)
+	if err != nil {
+		return nil, err
+	}
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("remote returned status code %d", resp.StatusCode)
+	}
+
+	defer resp.Body.Close()
+	return limitedRead(resp.Body, readLimit)
+}
+
+// pollOffer communicates the proxy's capabilities to the broker
+// and retrieves a compatible SDP offer and relay URL.
+func (s *SignalingServer) pollOffer(sid string, proxyType string, acceptedRelayPattern string) (*webrtc.SessionDescription, string) {
+	brokerPath := s.url.ResolveReference(&url.URL{Path: "proxy"})
+
+	numClients := int((tokens.count() / 8) * 8) // Round down to a multiple of 8
+	currentNATTypeLoaded := getCurrentNATType()
+	body, err := messages.EncodeProxyPollRequestWithRelayPrefix(sid, proxyType, currentNATTypeLoaded, numClients, acceptedRelayPattern)
+	if err != nil {
+		log.Printf("Error encoding poll message: %s", err.Error())
+		return nil, ""
+	}
+
+	resp, err := s.Post(brokerPath.String(), bytes.NewBuffer(body))
+	if err != nil {
+		log.Printf("error polling broker: %s", err.Error())
+	}
+
+	offer, _, relayURL, err := messages.DecodePollResponseWithRelayURL(resp)
+	if err != nil {
+		log.Printf("Error reading broker response: %s", err.Error())
+		log.Printf("body: %s", resp)
+		return nil, ""
+	}
+	if offer != "" {
+		offer, err := util.DeserializeSessionDescription(offer)
+		if err != nil {
+			log.Printf("Error processing session description: %s", err.Error())
+			return nil, ""
+		}
+		return offer, relayURL
+	}
+	return nil, ""
+}
+
+// sendAnswer encodes an SDP answer, sends it to the broker
+// and waits for its response.
+func (s *SignalingServer) sendAnswer(sid string, pc *webrtc.PeerConnection) error {
+	ld := pc.LocalDescription()
+	answer, err := util.SerializeSessionDescription(ld)
+	if err != nil {
+		return err
+	}
+
+	body, err := messages.EncodeAnswerRequest(answer, sid)
+	if err != nil {
+		return err
+	}
+
+	brokerPath := s.url.ResolveReference(&url.URL{Path: "answer"})
+	resp, err := s.Post(brokerPath.String(), bytes.NewBuffer(body))
+	if err != nil {
+		return fmt.Errorf("error sending answer to broker: %s", err.Error())
+	}
+
+	success, err := messages.DecodeAnswerResponse(resp)
+	if err != nil {
+		return err
+	}
+	if !success {
+		return fmt.Errorf("broker returned client timeout")
+	}
+
+	return nil
+}
+
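+// copyLoop copies data in both directions between c1 and c2 until either
+// direction finishes or errors out, or until shutdown is closed.
+// Both connections are closed on return.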
+func copyLoop(c1 io.ReadWriteCloser, c2 io.ReadWriteCloser, shutdown chan struct{}) {
+	var once sync.Once
+	defer c2.Close()
+	defer c1.Close()
+	done := make(chan struct{})
+	copyer := func(dst io.ReadWriteCloser, src io.ReadWriteCloser) {
+		// Experimentally, each use of the buffer has been observed to stay
+		// below 2K; io.Copy defaults to 32K.
+		// This is probably determined by the MTU in the server's `newHTTPHandler`.
+		size := 2 * 1024
+		buffer := make([]byte, size)
+		// Ignore io.ErrClosedPipe because it is likely caused by the
+		// termination of copyer in the other direction.
+		if _, err := io.CopyBuffer(dst, src, buffer); err != nil && err != io.ErrClosedPipe {
+			log.Printf("io.CopyBuffer inside CopyLoop generated an error: %v", err)
+		}
+		once.Do(func() {
+			close(done)
+		})
+	}
+
+	go copyer(c1, c2)
+	go copyer(c2, c1)
+
+	select {
+	case <-done:
+	case <-shutdown:
+	}
+	log.Println("copy loop ended")
+}
+
+// We pass conn.RemoteAddr() as an additional parameter, rather than calling
+// conn.RemoteAddr() inside this function, as a workaround for a hang that
+// otherwise occurs inside conn.pc.RemoteDescription() (called by RemoteAddr).
+// https://bugs.torproject.org/18628#comment:8
+func (sf *SnowflakeProxy) datachannelHandler(conn *webRTCConn, remoteIP net.IP, relayURL string) {
+	defer conn.Close()
+	defer tokens.ret()
+
+	if relayURL == "" {
+		relayURL = sf.RelayURL
+	}
+
+	wsConn, err := connectToRelay(relayURL, remoteIP)
+	if err != nil {
+		log.Print(err)
+		return
+	}
+	defer wsConn.Close()
+
+	copyLoop(conn, wsConn, sf.shutdown)
+	log.Printf("datachannelHandler ends")
+}
+
+func connectToRelay(relayURL string, remoteIP net.IP) (*websocketconn.Conn, error) {
+	u, err := url.Parse(relayURL)
+	if err != nil {
+		return nil, fmt.Errorf("invalid relay url: %s", err)
+	}
+
+	if remoteIP != nil {
+		// Encode client IP address in relay URL
+		q := u.Query()
+		q.Set("client_ip", remoteIP.String())
+		u.RawQuery = q.Encode()
+	} else {
+		log.Printf("no remote address given in websocket")
+	}
+
+	ws, _, err := websocket.DefaultDialer.Dial(u.String(), nil)
+	if err != nil {
+		return nil, fmt.Errorf("error dialing relay: %s = %s", u.String(), err)
+	}
+
+	wsConn := websocketconn.New(ws)
+	log.Printf("Connected to relay: %v", relayURL)
+	return wsConn, nil
+}
+
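+// dataChannelHandlerWithRelayURL binds a broker-provided relay URL to a
+// SnowflakeProxy, adapting datachannelHandler to the handler signature
+// expected by makePeerConnectionFromOffer.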
+type dataChannelHandlerWithRelayURL struct {
+	RelayURL string
+	sf       *SnowflakeProxy
+}
+
+func (d dataChannelHandlerWithRelayURL) datachannelHandler(conn *webRTCConn, remoteIP net.IP) {
+	d.sf.datachannelHandler(conn, remoteIP, d.RelayURL)
+}
+
+func (sf *SnowflakeProxy) makeWebRTCAPI() *webrtc.API {
+	settingsEngine := webrtc.SettingEngine{}
+
+	if !sf.KeepLocalAddresses {
+		settingsEngine.SetIPFilter(func(ip net.IP) (keep bool) {
+			// `IsLoopback()` and `IsUnspecified` are likely not needed here,
+			// but let's keep them just in case.
+			// FYI there is similar code in other files in this project.
+			keep = !util.IsLocal(ip) && !ip.IsLoopback() && !ip.IsUnspecified()
+			return
+		})
+	}
+	settingsEngine.SetIncludeLoopbackCandidate(sf.KeepLocalAddresses)
+
+	// Use the SetNet setting https://pkg.go.dev/github.com/pion/webrtc/v4#SettingEngine.SetNet
+	// to get snowflake working in shadow (where the AF_NETLINK family is not implemented).
+	// These two lines functionally revert a change in pion by silently ignoring
+	// the case where net.Interfaces() fails, rather than returning an error.
+	vnet, _ := stdnet.NewNet()
+	settingsEngine.SetNet(vnet)
+
+	if sf.EphemeralMinPort != 0 && sf.EphemeralMaxPort != 0 {
+		err := settingsEngine.SetEphemeralUDPPortRange(sf.EphemeralMinPort, sf.EphemeralMaxPort)
+		if err != nil {
+			log.Fatal("Invalid port range: min > max")
+		}
+	}
+
+	if sf.OutboundAddress != "" {
+		// replace SDP host candidates with the given IP without validation
+		// still have server reflexive candidates to fall back on
+		settingsEngine.SetNAT1To1IPs([]string{sf.OutboundAddress}, webrtc.ICECandidateTypeHost)
+	}
+
+	settingsEngine.SetICEMulticastDNSMode(ice.MulticastDNSModeDisabled)
+
+	settingsEngine.SetDTLSInsecureSkipHelloVerify(true)
+
+	return webrtc.NewAPI(webrtc.WithSettingEngine(settingsEngine))
+}
+
+// Create a PeerConnection from an SDP offer. Blocks until the gathering of ICE
+// candidates is complete and the answer is available in LocalDescription.
+// Installs an OnDataChannel callback that creates a webRTCConn and passes it to
+// datachannelHandler.
+func (sf *SnowflakeProxy) makePeerConnectionFromOffer(
+	sdp *webrtc.SessionDescription,
+	config webrtc.Configuration, dataChan chan struct{},
+	handler func(conn *webRTCConn, remoteIP net.IP),
+) (*webrtc.PeerConnection, error) {
+	api := sf.makeWebRTCAPI()
+	pc, err := api.NewPeerConnection(config)
+	if err != nil {
+		return nil, fmt.Errorf("accept: NewPeerConnection: %s", err)
+	}
+
+	pc.OnDataChannel(func(dc *webrtc.DataChannel) {
+		log.Printf("New Data Channel %s-%d\n", dc.Label(), dc.ID())
+		close(dataChan)
+
+		pr, pw := io.Pipe()
+		conn := newWebRTCConn(pc, dc, pr, sf.bytesLogger)
+		remoteIP := conn.RemoteIP()
+
+		dc.SetBufferedAmountLowThreshold(bufferedAmountLowThreshold)
+
+		dc.OnBufferedAmountLow(func() {
+			select {
+			case conn.sendMoreCh <- struct{}{}:
+			default:
+			}
+		})
+
+		dc.OnOpen(func() {
+			log.Printf("Data Channel %s-%d open\n", dc.Label(), dc.ID())
+			sf.EventDispatcher.OnNewSnowflakeEvent(event.EventOnProxyClientConnected{})
+
+			if sf.OutboundAddress != "" {
+				selectedCandidatePair, err := pc.SCTP().Transport().ICETransport().GetSelectedCandidatePair()
+				if err != nil || selectedCandidatePair == nil {
+					// Guard against dereferencing a nil candidate pair.
+					log.Printf("Warning: couldn't get the selected candidate pair")
+				} else {
+					log.Printf("Selected Local Candidate: %s:%d", selectedCandidatePair.Local.Address, selectedCandidatePair.Local.Port)
+					if sf.OutboundAddress != selectedCandidatePair.Local.Address {
+						log.Printf("Warning: the IP address provided by --outbound-address is not used for establishing peerconnection")
+					}
+				}
+			}
+		})
+		dc.OnClose(func() {
+			// Make sure that the `Write()`s are not blocked any more.
+			dc.OnBufferedAmountLow(func() {})
+			close(conn.sendMoreCh)
+
+			conn.lock.Lock()
+			defer conn.lock.Unlock()
+			log.Printf("Data Channel %s-%d close\n", dc.Label(), dc.ID())
+
+			country := ""
+			if sf.GeoIP != nil && !reflect.ValueOf(sf.GeoIP).IsNil() && remoteIP != nil {
+				country, _ = sf.GeoIP.GetCountryByAddr(remoteIP)
+			}
+			sf.EventDispatcher.OnNewSnowflakeEvent(event.EventOnProxyConnectionOver{Country: country})
+
+			conn.dc = nil
+			dc.Close()
+			pw.Close()
+		})
+		dc.OnMessage(func(msg webrtc.DataChannelMessage) {
+			n, err := pw.Write(msg.Data)
+			if err != nil {
+				if inErr := pw.CloseWithError(err); inErr != nil {
+					log.Printf("close with error generated an error: %v", inErr)
+				}
+
+				return
+			}
+
+			conn.bytesLogger.AddOutbound(int64(n))
+
+			if n != len(msg.Data) {
+				// XXX: Maybe don't panic here and log an error instead?
+				panic("short write")
+			}
+		})
+
+		go handler(conn, remoteIP)
+	})
+	// As of v3.0.0, pion-webrtc uses trickle ICE by default.
+	// We have to wait for candidate gathering to complete
+	// before we send the answer
+	done := webrtc.GatheringCompletePromise(pc)
+	err = pc.SetRemoteDescription(*sdp)
+	if err != nil {
+		if inerr := pc.Close(); inerr != nil {
+			log.Printf("unable to call pc.Close after pc.SetRemoteDescription with error: %v", inerr)
+		}
+		return nil, fmt.Errorf("accept: SetRemoteDescription: %s", err)
+	}
+
+	log.Println("Generating answer...")
+	answer, err := pc.CreateAnswer(nil)
+	if err != nil {
+		if inerr := pc.Close(); inerr != nil {
+			log.Printf("ICE gathering has generated an error when calling pc.Close: %v", inerr)
+		}
+		return nil, err
+	}
+
+	err = pc.SetLocalDescription(answer)
+	if err != nil {
+		if err = pc.Close(); err != nil {
+			log.Printf("pc.Close after setting local description returned : %v", err)
+		}
+		return nil, err
+	}
+
+	// Wait for ICE candidate gathering to complete,
+	// or for whatever we managed to gather before the broker
+	// responds with an error to the client offer.
+	// See https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/issues/40230
+	select {
+	case <-done:
+	case <-time.After(constants.BrokerClientTimeout * time.Second * 3 / 4):
+		log.Print("ICE gathering is not yet complete, but let's send the answer" +
+			" before the client times out")
+	}
+
+	log.Printf("Answer: \n\t%s", strings.ReplaceAll(pc.LocalDescription().SDP, "\n", "\n\t"))
+
+	return pc, nil
+}
+
+// Create a new PeerConnection. Blocks until the gathering of ICE
+// candidates is complete and the answer is available in LocalDescription.
+func (sf *SnowflakeProxy) makeNewPeerConnection(
+	config webrtc.Configuration, dataChan chan struct{},
+) (*webrtc.PeerConnection, error) {
+	api := sf.makeWebRTCAPI()
+	pc, err := api.NewPeerConnection(config)
+	if err != nil {
+		return nil, fmt.Errorf("accept: NewPeerConnection: %s", err)
+	}
+	pc.OnConnectionStateChange(func(pcs webrtc.PeerConnectionState) {
+		log.Printf("NAT check: WebRTC: OnConnectionStateChange: %v", pcs)
+	})
+
+	// Must create a data channel before creating an offer
+	// https://github.com/pion/webrtc/wiki/Release-WebRTC@v3.0.0#a-data-channel-is-no-longer-implicitly-created-with-a-peerconnection
+	dc, err := pc.CreateDataChannel("test", &webrtc.DataChannelInit{})
+	if err != nil {
+		log.Printf("CreateDataChannel ERROR: %s", err)
+		return nil, err
+	}
+	dc.OnOpen(func() {
+		log.Println("WebRTC: DataChannel.OnOpen")
+		close(dataChan)
+	})
+	dc.OnClose(func() {
+		log.Println("WebRTC: DataChannel.OnClose")
+		go func() {
+			// A hack to make NAT testing more reliable and not mis-identify
+			// as "restricted".
+			// See https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/issues/40419#note_3141855.
+			// Instead we should just `dc.Close()` without waiting
+			// and without a goroutine.
+			// (or, perhaps, `dc.Close()` is not needed at all
+			// in the OnClose callback?)
+			<-time.After(5 * time.Second)
+
+			log.Print("NAT check: WebRTC: dc.Close()")
+			dc.Close()
+		}()
+	})
+
+	offer, err := pc.CreateOffer(nil)
+	// TODO: Potentially timeout and retry if ICE isn't working.
+	if err != nil {
+		log.Println("Failed to prepare offer", err)
+		pc.Close()
+		return nil, err
+	}
+	log.Println("Probetest: Created Offer")
+
+	// As of v3.0.0, pion-webrtc uses trickle ICE by default.
+	// We have to wait for candidate gathering to complete
+	// before we send the offer
+	done := webrtc.GatheringCompletePromise(pc)
+	// start the gathering of ICE candidates
+	err = pc.SetLocalDescription(offer)
+	if err != nil {
+		log.Println("Failed to apply offer", err)
+		pc.Close()
+		return nil, err
+	}
+	log.Println("Probetest: Set local description")
+
+	// Wait for ICE candidate gathering to complete
+	<-done
+
+	return pc, nil
+}
+
+func (sf *SnowflakeProxy) runSession(sid string) {
+	connectedToClient := false
+	defer func() {
+		if !connectedToClient {
+			tokens.ret()
+		}
+		// Otherwise we'll `tokens.ret()` when the connection finishes.
+	}()
+
+	offer, relayURL := broker.pollOffer(sid, sf.ProxyType, sf.RelayDomainNamePattern)
+	if offer == nil {
+		return
+	}
+	log.Printf("Received Offer From Broker: \n\t%s", strings.ReplaceAll(offer.SDP, "\n", "\n\t"))
+
+	if relayURL != "" {
+		if err := checkIsRelayURLAcceptable(sf.RelayDomainNamePattern, sf.AllowProxyingToPrivateAddresses, sf.AllowNonTLSRelay, relayURL); err != nil {
+			log.Printf("bad offer from broker: %v", err)
+			return
+		}
+	}
+
+	dataChan := make(chan struct{})
+	dataChannelAdaptor := dataChannelHandlerWithRelayURL{RelayURL: relayURL, sf: sf}
+	pc, err := sf.makePeerConnectionFromOffer(offer, config, dataChan, dataChannelAdaptor.datachannelHandler)
+	if err != nil {
+		log.Printf("error making WebRTC connection: %s", err)
+		return
+	}
+
+	err = broker.sendAnswer(sid, pc)
+	if err != nil {
+		log.Printf("error sending answer to client through broker: %s", err)
+		if inerr := pc.Close(); inerr != nil {
+			log.Printf("error calling pc.Close: %v", inerr)
+		}
+		return
+	}
+	// Set a timeout on peerconnection. If the connection state has not
+	// advanced to PeerConnectionStateConnected in this time,
+	// destroy the peer connection and return the token.
+	select {
+	case <-dataChan:
+		log.Println("Connection successful")
+		connectedToClient = true
+	case <-time.After(dataChannelTimeout):
+		log.Println("Timed out waiting for client to open data channel.")
+		sf.EventDispatcher.OnNewSnowflakeEvent(
+			event.EventOnProxyConnectionFailed{},
+		)
+		if err := pc.Close(); err != nil {
+			log.Printf("error calling pc.Close: %v", err)
+		}
+	}
+}
+
+// Returns nil if the relayURL is acceptable.
+// This is a pure function.
+// If the hostname in the `relayURL` is not an IP address
+// (but a name instead, e.g. `localhost`),
+// this function will _not_ perform a DNS request to figure out
+// if the name resolves to a private IP address,
+// i.e. the private / public check will effectively be skipped.
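+// For example, with allowedHostNamePattern "snowflake.torproject.net$",
+// "wss://snowflake.torproject.net" is accepted, while
+// "wss://evil.com/?test=snowflake.torproject.net" is rejected, since only
+// the hostname is matched against the pattern.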
+func checkIsRelayURLAcceptable(
+	allowedHostNamePattern string,
+	allowPrivateIPs bool,
+	allowNonTLSRelay bool,
+	relayURL string,
+) error {
+	parsedRelayURL, err := url.Parse(relayURL)
+	if err != nil {
+		return fmt.Errorf("bad Relay URL %w", err)
+	}
+	if !allowPrivateIPs {
+		ip := net.ParseIP(parsedRelayURL.Hostname())
+		// Otherwise it's a domain name, or an invalid IP.
+		if ip != nil {
+			// We should probably use a ready library for this.
+			if !isRemoteAddress(ip) {
+				return fmt.Errorf("rejected Relay URL: private IPs are not allowed")
+			}
+		}
+	}
+	if !allowNonTLSRelay && parsedRelayURL.Scheme != "wss" {
+		return fmt.Errorf("rejected Relay URL protocol: non-TLS not allowed")
+	}
+	// FYI our websocket library also rejects other protocols
+	// https://github.com/gorilla/websocket/blob/5e002381133d322c5f1305d171f3bdd07decf229/client.go#L174-L181
+	if parsedRelayURL.Scheme != "wss" && parsedRelayURL.Scheme != "ws" {
+		return fmt.Errorf("rejected Relay URL protocol: only WebSocket is allowed")
+	}
+	matcher := namematcher.NewNameMatcher(allowedHostNamePattern)
+	if !matcher.IsMember(parsedRelayURL.Hostname()) {
+		return fmt.Errorf("rejected Relay URL: hostname does not match allowed pattern \"%v\"", allowedHostNamePattern)
+	}
+	return nil
+}
+
+// Start configures and starts a Snowflake, fully formed and special. Configuration
+// values that are unset will default to their corresponding default values.
+func (sf *SnowflakeProxy) Start() error {
+	var err error
+
+	sf.EventDispatcher.OnNewSnowflakeEvent(event.EventOnProxyStarting{})
+	sf.shutdown = make(chan struct{})
+
+	// blank configurations revert to default
+	if sf.PollInterval == 0 {
+		sf.PollInterval = DefaultPollInterval
+	}
+	if sf.BrokerURL == "" {
+		sf.BrokerURL = DefaultBrokerURL
+	}
+	if sf.RelayURL == "" {
+		sf.RelayURL = DefaultRelayURL
+	}
+	if sf.STUNURL == "" {
+		sf.STUNURL = DefaultSTUNURL
+	}
+	if sf.NATProbeURL == "" {
+		sf.NATProbeURL = DefaultNATProbeURL
+	}
+	if sf.ProxyType == "" {
+		sf.ProxyType = DefaultProxyType
+	}
+	if sf.EventDispatcher == nil {
+		sf.EventDispatcher = event.NewSnowflakeEventDispatcher()
+	}
+
+	sf.bytesLogger = newBytesSyncLogger()
+	sf.periodicProxyStats = newPeriodicProxyStats(sf.SummaryInterval, sf.EventDispatcher, sf.bytesLogger)
+	sf.EventDispatcher.AddSnowflakeEventListener(sf.periodicProxyStats)
+
+	broker, err = newSignalingServer(sf.BrokerURL)
+	if err != nil {
+		return fmt.Errorf("error configuring broker: %s", err)
+	}
+
+	_, err = url.Parse(sf.STUNURL)
+	if err != nil {
+		return fmt.Errorf("invalid stun url: %s", err)
+	}
+	_, err = url.Parse(sf.RelayURL)
+	if err != nil {
+		return fmt.Errorf("invalid default relay url: %s", err)
+	}
+
+	if !namematcher.IsValidRule(sf.RelayDomainNamePattern) {
+		return fmt.Errorf("invalid relay domain name pattern")
+	}
+
+	if sf.EphemeralMaxPort != 0 {
+		rangeWidth := sf.EphemeralMaxPort - sf.EphemeralMinPort
+		expectedNumConcurrentClients := sf.Capacity
+		if sf.Capacity == 0 {
+			// Just a guess, since 0 means "unlimited".
+			expectedNumConcurrentClients = 10
+		}
+		// See https://forum.torproject.org/t/remote-returned-status-code-400/15026/9?u=wofwca
+		if uint(rangeWidth) < expectedNumConcurrentClients*2 {
+			log.Printf(
+				"Warning: ephemeral ports range seems narrow (%v-%v) "+
+					"for the client capacity (%v). "+
+					"Some client connections might fail. "+
+					"Please widen the port range, or limit the 'capacity'.",
+				sf.EphemeralMinPort,
+				sf.EphemeralMaxPort,
+				sf.Capacity,
+			)
+			// Instead of simply printing a warning, we could look into
+			// utilizing [SetICEUDPMux](https://pkg.go.dev/github.com/pion/webrtc/v4#SettingEngine.SetICEUDPMux)
+			// to multiplex multiple connections over one (or more?) ports.
+		}
+	}
+
+	config = webrtc.Configuration{
+		ICEServers: []webrtc.ICEServer{
+			{
+				URLs: strings.Split(sf.STUNURL, ","),
+			},
+		},
+	}
+	tokens = newTokens(sf.Capacity)
+
+	err = sf.checkNATType(config, sf.NATProbeURL)
+	if err != nil {
+		// non-fatal error. Log it and continue
+		log.Print(err.Error())
+		setCurrentNATType(NATUnknown)
+	}
+	sf.EventDispatcher.OnNewSnowflakeEvent(event.EventOnCurrentNATTypeDetermined{CurNATType: getCurrentNATType()})
+
+	NatRetestTask := task.Periodic{
+		Interval: sf.NATTypeMeasurementInterval,
+		Execute: func() error {
+			return sf.checkNATType(config, sf.NATProbeURL)
+		},
+		// Not setting OnError would shut down the periodic task on error by default.
+		OnError: func(err error) {
+			log.Printf("Periodic probetest failed: %s, retaining current NAT type: %s", err.Error(), getCurrentNATType())
+		},
+	}
+
+	if sf.NATTypeMeasurementInterval != 0 {
+		NatRetestTask.WaitThenStart()
+		defer NatRetestTask.Close()
+	}
+
+	ticker := time.NewTicker(sf.PollInterval)
+	defer ticker.Stop()
+
+	for ; true; <-ticker.C {
+		select {
+		case <-sf.shutdown:
+			return nil
+		default:
+			tokens.get()
+			sessionID := genSessionID()
+			sf.runSession(sessionID)
+		}
+	}
+	return nil
+}
+
+// Stop closes all existing connections and shuts down the Snowflake.
+func (sf *SnowflakeProxy) Stop() {
+	close(sf.shutdown)
+}
+
+// checkNATType uses probetest to determine NAT compatibility by
+// attempting to connect with a known symmetric NAT. If it succeeds, the
+// NAT is considered "unrestricted"; if it times out, "restricted".
+func (sf *SnowflakeProxy) checkNATType(config webrtc.Configuration, probeURL string) error {
+	log.Printf("Checking our NAT type, contacting NAT check probe server at \"%v\"...", probeURL)
+
+	probe, err := newSignalingServer(probeURL)
+	if err != nil {
+		return fmt.Errorf("Error parsing url: %w", err)
+	}
+
+	dataChan := make(chan struct{})
+	pc, err := sf.makeNewPeerConnection(config, dataChan)
+	if err != nil {
+		return fmt.Errorf("Error making WebRTC connection: %w", err)
+	}
+	defer func() {
+		if err := pc.Close(); err != nil {
+			log.Printf("Probetest: error calling pc.Close: %v", err)
+		}
+	}()
+
+	offer := pc.LocalDescription()
+	log.Printf("Probetest offer: \n\t%s", strings.ReplaceAll(offer.SDP, "\n", "\n\t"))
+	sdp, err := util.SerializeSessionDescription(offer)
+	if err != nil {
+		return fmt.Errorf("Error encoding probe message: %w", err)
+	}
+
+	// send offer
+	body, err := messages.EncodePollResponse(sdp, true, "")
+	if err != nil {
+		return fmt.Errorf("Error encoding probe message: %w", err)
+	}
+
+	resp, err := probe.Post(probe.url.String(), bytes.NewBuffer(body))
+	if err != nil {
+		return fmt.Errorf("Error polling probe: %w", err)
+	}
+
+	sdp, _, err = messages.DecodeAnswerRequest(resp)
+	if err != nil {
+		return fmt.Errorf("Error reading probe response: %w", err)
+	}
+
+	answer, err := util.DeserializeSessionDescription(sdp)
+	if err != nil {
+		return fmt.Errorf("Error setting answer: %w", err)
+	}
+	log.Printf("Probetest answer: \n\t%s", strings.ReplaceAll(answer.SDP, "\n", "\n\t"))
+
+	err = pc.SetRemoteDescription(*answer)
+	if err != nil {
+		return fmt.Errorf("Error setting answer: %w", err)
+	}
+
+	prevNATType := getCurrentNATType()
+
+	log.Printf("Waiting for a test WebRTC connection with NAT check probe server to establish...")
+	select {
+	case <-dataChan:
+		log.Printf(
+			"Test WebRTC connection with NAT check probe server established!"+
+				" This means our NAT is %v!",
+			NATUnrestricted,
+		)
+		setCurrentNATType(NATUnrestricted)
+	case <-time.After(dataChannelTimeout):
+		log.Printf(
+			"Test WebRTC connection with NAT check probe server timed out."+
+				" This means our NAT is %v.",
+			NATRestricted,
+		)
+		setCurrentNATType(NATRestricted)
+	}
+
+	log.Printf("NAT Type measurement: %v -> %v\n", prevNATType, getCurrentNATType())
+
+	return nil
+}
diff --git a/proxy/lib/tokens.go b/proxy/lib/tokens.go
new file mode 100644
index 0000000..7fc0c99
--- /dev/null
+++ b/proxy/lib/tokens.go
@@ -0,0 +1,44 @@
+package snowflake_proxy
+
+import (
+	"sync/atomic"
+)
+
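+// tokens_t tracks how many clients are currently being served. When
+// capacity is non-zero it also acts as a semaphore: get() blocks once
+// capacity clients are connected, until ret() returns a token.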
+type tokens_t struct {
+	ch       chan struct{}
+	capacity uint
+	clients  atomic.Int64
+}
+
+func newTokens(capacity uint) *tokens_t {
+	var ch chan struct{}
+	if capacity != 0 {
+		ch = make(chan struct{}, capacity)
+	}
+
+	return &tokens_t{
+		ch:       ch,
+		capacity: capacity,
+		clients:  atomic.Int64{},
+	}
+}
+
+func (t *tokens_t) get() {
+	t.clients.Add(1)
+
+	if t.capacity != 0 {
+		t.ch <- struct{}{}
+	}
+}
+
+func (t *tokens_t) ret() {
+	t.clients.Add(-1)
+
+	if t.capacity != 0 {
+		<-t.ch
+	}
+}
+
+func (t *tokens_t) count() int64 {
+	return t.clients.Load()
+}
diff --git a/proxy/lib/tokens_test.go b/proxy/lib/tokens_test.go
new file mode 100644
index 0000000..4393a21
--- /dev/null
+++ b/proxy/lib/tokens_test.go
@@ -0,0 +1,28 @@
+package snowflake_proxy
+
+import (
+	"testing"
+
+	. "github.com/smartystreets/goconvey/convey"
+)
+
+func TestTokens(t *testing.T) {
+	Convey("Tokens", t, func() {
+		tokens := newTokens(2)
+		So(tokens.count(), ShouldEqual, 0)
+		tokens.get()
+		So(tokens.count(), ShouldEqual, 1)
+		tokens.ret()
+		So(tokens.count(), ShouldEqual, 0)
+	})
+	Convey("Tokens capacity 0", t, func() {
+		tokens := newTokens(0)
+		So(tokens.count(), ShouldEqual, 0)
+		for i := 0; i < 20; i++ {
+			tokens.get()
+		}
+		So(tokens.count(), ShouldEqual, 20)
+		tokens.ret()
+		So(tokens.count(), ShouldEqual, 19)
+	})
+}
diff --git a/proxy/lib/util.go b/proxy/lib/util.go
new file mode 100644
index 0000000..fcb301f
--- /dev/null
+++ b/proxy/lib/util.go
@@ -0,0 +1,86 @@
+package snowflake_proxy
+
+import (
+	"time"
+)
+
+// bytesLogger is an interface which is used to allow logging the throughput
+// of the Snowflake. A default bytesLogger(bytesNullLogger) does nothing.
+type bytesLogger interface {
+	AddOutbound(int64)
+	AddInbound(int64)
+	GetStat() (in int64, out int64)
+}
+
+// bytesNullLogger Default bytesLogger does nothing.
+type bytesNullLogger struct{}
+
+// AddOutbound in bytesNullLogger does nothing
+func (b bytesNullLogger) AddOutbound(amount int64) {}
+
+// AddInbound in bytesNullLogger does nothing
+func (b bytesNullLogger) AddInbound(amount int64) {}
+
+func (b bytesNullLogger) GetStat() (in int64, out int64) { return -1, -1 }
+
+// bytesSyncLogger uses channels to safely log from multiple sources with output
+// occurring at reasonable intervals.
+type bytesSyncLogger struct {
+	outboundChan, inboundChan chan int64
+	statsChan                 chan bytesLoggerStats
+	stats                     bytesLoggerStats
+	outEvents, inEvents       int
+	start                     time.Time
+}
+
+type bytesLoggerStats struct {
+	outbound, inbound int64
+}
+
+// newBytesSyncLogger returns a new bytesSyncLogger and starts it logging.
+func newBytesSyncLogger() *bytesSyncLogger {
+	b := &bytesSyncLogger{
+		outboundChan: make(chan int64, 5),
+		inboundChan:  make(chan int64, 5),
+		statsChan:    make(chan bytesLoggerStats),
+	}
+	go b.log()
+	b.start = time.Now()
+	return b
+}
+
+func (b *bytesSyncLogger) log() {
+	for {
+		select {
+		case amount := <-b.outboundChan:
+			b.stats.outbound += amount
+			b.outEvents++
+		case amount := <-b.inboundChan:
+			b.stats.inbound += amount
+			b.inEvents++
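+		// Handing the current stats to a reader of statsChan also resets the
+		// counters, so each GetStat call reports only the traffic accumulated
+		// since the previous call.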
+		case b.statsChan <- b.stats:
+			b.stats.inbound = 0
+			b.stats.outbound = 0
+			b.inEvents = 0
+			b.outEvents = 0
+		}
+	}
+}
+
+// AddOutbound add a number of bytes to the outbound total reported by the logger
+func (b *bytesSyncLogger) AddOutbound(amount int64) {
+	b.outboundChan <- amount
+}
+
+// AddInbound add a number of bytes to the inbound total reported by the logger
+func (b *bytesSyncLogger) AddInbound(amount int64) {
+	b.inboundChan <- amount
+}
+
+// GetStat returns the current inbound and outbound stats from the logger and then zeros the counts
+func (b *bytesSyncLogger) GetStat() (in int64, out int64) {
+	stats := <-b.statsChan
+	return stats.inbound, stats.outbound
+}
+
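+// formatTraffic converts a byte count into the value/unit pair used in
+// stats events; currently it always reports whole kilobytes (KB).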
+func formatTraffic(amount int64) (value int64, unit string) { return amount / 1000, "KB" }
diff --git a/proxy/lib/webrtcconn.go b/proxy/lib/webrtcconn.go
new file mode 100644
index 0000000..f849bfa
--- /dev/null
+++ b/proxy/lib/webrtcconn.go
@@ -0,0 +1,165 @@
+package snowflake_proxy
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"regexp"
+	"sync"
+	"time"
+
+	"github.com/pion/ice/v4"
+	"github.com/pion/sdp/v3"
+	"github.com/pion/webrtc/v4"
+)
+
+const maxBufferedAmount uint64 = 512 * 1024 // 512 KB
+
+var remoteIPPatterns = []*regexp.Regexp{
+	/* IPv4 */
+	regexp.MustCompile(`(?m)^c=IN IP4 ([\d.]+)(?:(?:\/\d+)?\/\d+)?(?: |\r?\n)`),
+	/* IPv6 */
+	regexp.MustCompile(`(?m)^c=IN IP6 ([0-9A-Fa-f:.]+)(?:\/\d+)?(?: |\r?\n)`),
+}
+
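+// webRTCConn adapts a WebRTC data channel to an io.ReadWriteCloser with
+// net.Conn-style helpers. Reads come from the pipe that the data channel's
+// OnMessage callback writes into; writes go to the data channel, subject to
+// flow control and an inactivity timeout.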
+type webRTCConn struct {
+	dc *webrtc.DataChannel
+	pc *webrtc.PeerConnection
+	pr *io.PipeReader
+
+	lock sync.Mutex // Synchronization for DataChannel destruction
+	once sync.Once  // Synchronization for PeerConnection destruction
+
+	inactivityTimeout time.Duration
+	activity          chan struct{}
+	sendMoreCh        chan struct{}
+	cancelTimeoutLoop context.CancelFunc
+
+	bytesLogger bytesLogger
+}
+
+func newWebRTCConn(pc *webrtc.PeerConnection, dc *webrtc.DataChannel, pr *io.PipeReader, bytesLogger bytesLogger) *webRTCConn {
+	conn := &webRTCConn{pc: pc, dc: dc, pr: pr, bytesLogger: bytesLogger}
+	conn.activity = make(chan struct{}, 100)
+	conn.sendMoreCh = make(chan struct{}, 1)
+	conn.inactivityTimeout = 30 * time.Second
+	ctx, cancel := context.WithCancel(context.Background())
+	conn.cancelTimeoutLoop = cancel
+	go conn.timeoutLoop(ctx)
+	return conn
+}
+
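+// timeoutLoop closes the connection if no Write activity is observed for
+// inactivityTimeout. Write signals activity on each call; cancelling ctx
+// stops the loop.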
+func (c *webRTCConn) timeoutLoop(ctx context.Context) {
+	timer := time.NewTimer(c.inactivityTimeout)
+	for {
+		select {
+		case <-timer.C:
+			_ = c.Close()
+			log.Println("Closed connection due to inactivity")
+			return
+		case <-c.activity:
+			if !timer.Stop() {
+				<-timer.C
+			}
+			timer.Reset(c.inactivityTimeout)
+			continue
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+func (c *webRTCConn) Read(b []byte) (int, error) {
+	return c.pr.Read(b)
+}
+
+func (c *webRTCConn) Write(b []byte) (int, error) {
+	c.bytesLogger.AddInbound(int64(len(b)))
+	select {
+	case c.activity <- struct{}{}:
+	default:
+	}
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if c.dc != nil {
+		_ = c.dc.Send(b)
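+		// Flow control: if too much data is queued on the data channel, block
+		// until OnBufferedAmountLow signals via sendMoreCh that the buffered
+		// amount has drained below bufferedAmountLowThreshold.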
+		if c.dc.BufferedAmount() >= maxBufferedAmount {
+			<-c.sendMoreCh
+		}
+	}
+	return len(b), nil
+}
+
+func (c *webRTCConn) Close() (err error) {
+	c.once.Do(func() {
+		c.cancelTimeoutLoop()
+		err = errors.Join(c.pr.Close(), c.pc.Close())
+	})
+	return
+}
+
+func (c *webRTCConn) LocalAddr() net.Addr {
+	return nil
+}
+
+func (c *webRTCConn) RemoteIP() net.IP {
+	// Parse the remote SDP offer and extract the client IP
+	return remoteIPFromSDP(c.pc.RemoteDescription().SDP)
+}
+
+func (c *webRTCConn) SetDeadline(t time.Time) error {
+	// nolint: golint
+	return fmt.Errorf("SetDeadline not implemented")
+}
+
+func (c *webRTCConn) SetReadDeadline(t time.Time) error {
+	// nolint: golint
+	return fmt.Errorf("SetReadDeadline not implemented")
+}
+
+func (c *webRTCConn) SetWriteDeadline(t time.Time) error {
+	// nolint: golint
+	return fmt.Errorf("SetWriteDeadline not implemented")
+}
+
+func remoteIPFromSDP(str string) net.IP {
+	// Look for remote IP in "a=candidate" attribute fields
+	// https://tools.ietf.org/html/rfc5245#section-15.1
+	var desc sdp.SessionDescription
+	err := desc.Unmarshal([]byte(str))
+	if err != nil {
+		log.Println("Error parsing SDP: ", err.Error())
+		return nil
+	}
+	for _, m := range desc.MediaDescriptions {
+		for _, a := range m.Attributes {
+			if a.IsICECandidate() {
+				c, err := ice.UnmarshalCandidate(a.Value)
+				if err == nil {
+					ip := net.ParseIP(c.Address())
+					if ip != nil && isRemoteAddress(ip) {
+						return ip
+					}
+				}
+			}
+		}
+	}
+	// Finally look for remote IP in "c=" Connection Data field
+	// https://tools.ietf.org/html/rfc4566#section-5.7
+	for _, pattern := range remoteIPPatterns {
+		m := pattern.FindStringSubmatch(str)
+		if m != nil {
+			// Ignore parsing errors, ParseIP returns nil.
+			ip := net.ParseIP(m[1])
+			if ip != nil && isRemoteAddress(ip) {
+				return ip
+			}
+
+		}
+	}
+
+	return nil
+}
diff --git a/proxy/main.go b/proxy/main.go
new file mode 100644
index 0000000..360703e
--- /dev/null
+++ b/proxy/main.go
@@ -0,0 +1,180 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	"gitlab.torproject.org/tpo/anti-censorship/geoip"
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil/safelog"
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/event"
+	"gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/version"
+	sf "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/proxy/lib"
+)
+
+const minPollInterval = 2 * time.Second
+
+func main() {
+	pollInterval := flag.Duration("poll-interval", sf.DefaultPollInterval,
+		fmt.Sprint("how often to ask the broker for a new client. Keep in mind that asking for a client will not always result in getting one. Minumum value is ", minPollInterval, ". Valid time units are \"ms\", \"s\", \"m\", \"h\"."))
+	capacity := flag.Uint("capacity", 0, "maximum concurrent clients (default is to accept an unlimited number of clients)")
+	stunURL := flag.String("stun", sf.DefaultSTUNURL, "Comma-separated STUN server `URL`s that this proxy will use to, among other things, determine its public IP address")
+	logFilename := flag.String("log", "", "log `filename`. If not specified, logs will be output to stderr (console).")
+	rawBrokerURL := flag.String("broker", sf.DefaultBrokerURL, "The `URL` of the broker server that the proxy will be using to find clients")
+	unsafeLogging := flag.Bool("unsafe-logging", false, "keep IP addresses and other sensitive info in the logs")
+	logLocalTime := flag.Bool("log-local-time", false, "Use local time for logging (default: UTC)")
+	keepLocalAddresses := flag.Bool("keep-local-addresses", false, "keep local LAN address ICE candidates.\nThis is usually pointless because Snowflake clients don't usually reside on the same local network as the proxy.")
+	defaultRelayURL := flag.String("relay", sf.DefaultRelayURL, "The default `URL` of the server (relay) that this proxy will forward client connections to, in case the broker itself did not specify the said URL")
+	probeURL := flag.String("nat-probe-server", sf.DefaultNATProbeURL, "The `URL` of the server that this proxy will use to check its network NAT type.\nDetermining NAT type helps to understand whether this proxy is compatible with certain clients' NAT")
+	outboundAddress := flag.String("outbound-address", "", "prefer the given `address` as outbound address for client connections")
+	allowedRelayHostNamePattern := flag.String("allowed-relay-hostname-pattern", "snowflake.torproject.net$", "this proxy will only be allowed to forward client connections to relays (servers) whose URL matches this pattern.\nNote that a pattern \"example.com$\" will match \"subdomain.example.com\" as well as \"other-domain-example.com\".\nIn order to only match \"example.com\", prefix the pattern with \"^\": \"^example.com$\"")
+	allowProxyingToPrivateAddresses := flag.Bool("allow-proxying-to-private-addresses", false, "allow forwarding client connections to private IP addresses.\nUseful when a Snowflake server (relay) is hosted on the same private network as this proxy.")
+	allowNonTLSRelay := flag.Bool("allow-non-tls-relay", false, "allow this proxy to pass client's data to the relay in an unencrypted form.\nThis is only useful if the relay doesn't support encryption, e.g. for testing / development purposes.")
+	NATTypeMeasurementInterval := flag.Duration("nat-retest-interval", time.Hour*24,
+		"the time interval between NAT type is retests (see \"nat-probe-server\"). 0s disables retest. Valid time units are \"s\", \"m\", \"h\".")
+	summaryInterval := flag.Duration("summary-interval", time.Hour,
+		"the time interval between summary log outputs, 0s disables summaries. Valid time units are \"s\", \"m\", \"h\".")
+	disableStatsLogger := flag.Bool("disable-stats-logger", false, "disable exposing proxy stats through the logs")
+	enableMetrics := flag.Bool("metrics", false, "enable exposing proxy stats through a metrics endpoint")
+	metricsAddress := flag.String("metrics-address", "localhost", "set listen `address` for metrics service")
+	metricsPort := flag.Int("metrics-port", 9999, "set port for the metrics service")
+	verboseLogging := flag.Bool("verbose", false, "increase log verbosity")
+	ephemeralPortsRangeFlag := flag.String("ephemeral-ports-range", "", "Set the `range` of ports used for client connections (format: \"<min>:<max>\").\nUseful in conjunction with port forwarding, in order to make the proxy NAT type \"unrestricted\".\nIf omitted, the ports will be chosen automatically from a wide range.\nWhen specifying the range, make sure it's at least twice as wide as the number of clients that you are hoping to serve concurrently (see the \"capacity\" flag).")
+	geoipDatabase := flag.String("geoipdb", "/usr/share/tor/geoip", "path to correctly formatted geoip database mapping IPv4 address ranges to country codes")
+	geoip6Database := flag.String("geoip6db", "/usr/share/tor/geoip6", "path to correctly formatted geoip database mapping IPv6 address ranges to country codes")
+	versionFlag := flag.Bool("version", false, "display version info to stderr and quit")
+
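+	// Illustrative invocation (the flag values below are examples, not
+	// defaults):
+	//
+	//	snowflake-proxy -capacity 10 -summary-interval 30m \
+	//		-ephemeral-ports-range 50000:51000 -metrics -metrics-port 9999
+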
+	var ephemeralPortsRange []uint16 = []uint16{0, 0}
+
+	flag.Parse()
+
+	if *versionFlag {
+		fmt.Fprintf(os.Stderr, "snowflake-proxy %s", version.ConstructResult())
+		os.Exit(0)
+	}
+
+	if *pollInterval < minPollInterval {
+		log.Fatalf("poll-interval must be >= %v", minPollInterval)
+	}
+
+	if *outboundAddress != "" && *keepLocalAddresses {
+		log.Fatal("Cannot keep local address candidates when outbound address is specified")
+	}
+
+	eventLogger := event.NewSnowflakeEventDispatcher()
+
+	if *ephemeralPortsRangeFlag != "" {
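+		// The value is expected to look like "50000:51000" (an illustrative
+		// range): two base-10 port numbers separated by ":", each nonzero
+		// and fitting in a uint16, with min <= max.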
+		ephemeralPortsRangeParts := strings.Split(*ephemeralPortsRangeFlag, ":")
+		if len(ephemeralPortsRangeParts) == 2 {
+			ephemeralMinPort, err := strconv.ParseUint(ephemeralPortsRangeParts[0], 10, 16)
+			if err != nil {
+				log.Fatal(err)
+			}
+
+			ephemeralMaxPort, err := strconv.ParseUint(ephemeralPortsRangeParts[1], 10, 16)
+			if err != nil {
+				log.Fatal(err)
+			}
+
+			if ephemeralMinPort == 0 || ephemeralMaxPort == 0 {
+				log.Fatal("Ephemeral port cannot be zero")
+			}
+			if ephemeralMinPort > ephemeralMaxPort {
+				log.Fatal("Invalid port range: min > max")
+			}
+
+			ephemeralPortsRange = []uint16{uint16(ephemeralMinPort), uint16(ephemeralMaxPort)}
+		} else {
+			log.Fatalf("Bad range port format: %v", *ephemeralPortsRangeFlag)
+		}
+	}
+
+	gip, err := geoip.New(*geoipDatabase, *geoip6Database)
+	if *enableMetrics && err != nil {
+		// The geoip DB is only used for metrics; only report the error when metrics are enabled.
+		log.Println("Error loading geoip db for country based metrics:", err)
+	}
+
+	proxy := sf.SnowflakeProxy{
+		PollInterval:       *pollInterval,
+		Capacity:           uint(*capacity),
+		STUNURL:            *stunURL,
+		BrokerURL:          *rawBrokerURL,
+		KeepLocalAddresses: *keepLocalAddresses,
+		RelayURL:           *defaultRelayURL,
+		NATProbeURL:        *probeURL,
+		OutboundAddress:    *outboundAddress,
+		EphemeralMinPort:   ephemeralPortsRange[0],
+		EphemeralMaxPort:   ephemeralPortsRange[1],
+
+		NATTypeMeasurementInterval: *NATTypeMeasurementInterval,
+		EventDispatcher:            eventLogger,
+
+		RelayDomainNamePattern:          *allowedRelayHostNamePattern,
+		AllowProxyingToPrivateAddresses: *allowProxyingToPrivateAddresses,
+		AllowNonTLSRelay:                *allowNonTLSRelay,
+
+		SummaryInterval: *summaryInterval,
+		GeoIP:           gip,
+	}
+
+	var logOutput = io.Discard
+	var eventlogOutput io.Writer = os.Stderr
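+	// logOutput carries the regular log and is discarded unless -verbose is
+	// set; eventlogOutput carries proxy events and summaries and defaults to
+	// stderr. With -log, eventlogOutput is duplicated to the file, and so is
+	// logOutput when -verbose is also set.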
+
+	loggerFlags := log.LstdFlags
+
+	if !*logLocalTime {
+		loggerFlags |= log.LUTC
+	}
+
+	log.SetFlags(loggerFlags)
+
+	if *verboseLogging {
+		logOutput = os.Stderr
+	}
+
+	if *logFilename != "" {
+		f, err := os.OpenFile(*logFilename, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600)
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer f.Close()
+		if *verboseLogging {
+			logOutput = io.MultiWriter(logOutput, f)
+		}
+		eventlogOutput = io.MultiWriter(eventlogOutput, f)
+	}
+
+	if *unsafeLogging {
+		log.SetOutput(logOutput)
+	} else {
+		log.SetOutput(&safelog.LogScrubber{Output: logOutput})
+	}
+
+	proxyEventLogger := sf.NewProxyEventLogger(eventlogOutput, *disableStatsLogger)
+	eventLogger.AddSnowflakeEventListener(proxyEventLogger)
+
+	if *enableMetrics {
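+		// Expose stats at the address given by -metrics-address and
+		// -metrics-port, and feed connection events into the collector.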
+		metrics := sf.NewMetrics()
+
+		err := metrics.Start(net.JoinHostPort(*metricsAddress, strconv.Itoa(*metricsPort)))
+		if err != nil {
+			log.Fatalf("could not enable metrics: %v", err)
+		}
+
+		eventLogger.AddSnowflakeEventListener(sf.NewEventMetrics(metrics))
+	}
+
+	log.Printf("snowflake-proxy %s\n", version.GetVersion())
+
+	err = proxy.Start()
+	if err != nil {
+		log.Fatal(err)
+	}
+}
diff --git a/proxy/make.js b/proxy/make.js
deleted file mode 100755
index b614864..0000000
--- a/proxy/make.js
+++ /dev/null
@@ -1,103 +0,0 @@
-#!/usr/bin/env node
-
-/* global require, process */
-
-var { exec, spawn } = require('child_process');
-
-// All files required.
-var FILES = [
-  'broker.js',
-  'config.js',
-  'proxypair.js',
-  'snowflake.js',
-  'ui.js',
-  'util.js',
-  'websocket.js',
-  'shims.js'
-];
-
-var FILES_SPEC = [
-  'spec/broker.spec.js',
-  'spec/init.spec.js',
-  'spec/proxypair.spec.js',
-  'spec/snowflake.spec.js',
-  'spec/ui.spec.js',
-  'spec/util.spec.js',
-  'spec/websocket.spec.js'
-];
-
-var OUTFILE = 'snowflake.js';
-
-var STATIC = 'static';
-
-var copyStaticFiles = function() {
-  exec('cp ' + STATIC + '/* build/');
-};
-
-var concatJS = function(outDir, init) {
-  var files;
-  files = FILES.concat(`init-${init}.js`);
-  return exec(`cat ${files.join(' ')} > ${outDir}/${OUTFILE}`, function(err) {
-    if (err) {
-      throw err;
-    }
-  });
-};
-
-var tasks = new Map();
-
-var task = function(key, msg, func) {
-  tasks.set(key, {
-    msg, func
-  });
-};
-
-task('test', 'snowflake unit tests', function() {
-  var jasmineFiles, outFile, proc;
-  exec('mkdir -p test');
-  exec('jasmine init >&-');
-  // Simply concat all the files because we're not using node exports.
-  jasmineFiles = FILES.concat('init-badge.js', FILES_SPEC);
-  outFile = 'test/bundle.spec.js';
-  exec('echo "TESTING = true" > ' + outFile);
-  exec('cat ' + jasmineFiles.join(' ') + ' | cat >> ' + outFile);
-  proc = spawn('jasmine', ['test/bundle.spec.js'], {
-    stdio: 'inherit'
-  });
-  proc.on("exit", function(code) {
-    process.exit(code);
-  });
-});
-
-task('build', 'build the snowflake proxy', function() {
-  exec('mkdir -p build');
-  copyStaticFiles();
-  concatJS('build', 'badge');
-  console.log('Snowflake prepared.');
-});
-
-task('webext', 'build the webextension', function() {
-  exec('mkdir -p webext');
-  concatJS('webext', 'webext');
-  console.log('Webextension prepared.');
-});
-
-task('node', 'build the node binary', function() {
-  exec('mkdir -p build');
-  concatJS('build', 'node');
-  console.log('Node prepared.');
-});
-
-task('clean', 'remove all built files', function() {
-  exec('rm -r build test spec/support');
-});
-
-var cmd = process.argv[2];
-
-if (tasks.has(cmd)) {
-  var t = tasks.get(cmd);
-  console.log(t.msg);
-  t.func();
-} else {
-  console.error('Command not supported.');
-}
diff --git a/proxy/package.json b/proxy/package.json
deleted file mode 100644
index c5a571b..0000000
--- a/proxy/package.json
+++ /dev/null
@@ -1,32 +0,0 @@
-{
-  "name": "snowflake-pt",
-  "version": "0.0.0-git",
-  "description": "Snowflake is a WebRTC pluggable transport for Tor.",
-  "main": "build/snowflake.js",
-  "directories": {
-    "test": "test"
-  },
-  "scripts": {
-    "test": "node make.js test",
-    "build": "node make.js build",
-    "webext": "node make.js webext",
-    "clean": "node make.js clean",
-    "prepublish": "node make.js node",
-    "start": "node build/snowflake.js",
-    "lint": "eslint . --ext .js"
-  },
-  "bin": {
-    "snowflake": "build/snowflake.js"
-  },
-  "author": "Serene Han",
-  "license": "BSD-3-Clause",
-  "devDependencies": {
-    "eslint": "^6.0.1",
-    "jasmine": "2.5.2"
-  },
-  "dependencies": {
-    "wrtc": "^0.0.61",
-    "ws": "^3.3.1",
-    "xmlhttprequest": "^1.8.0"
-  }
-}
diff --git a/proxy/proxypair.js b/proxy/proxypair.js
deleted file mode 100644
index 6ffa9e5..0000000
--- a/proxy/proxypair.js
+++ /dev/null
@@ -1,256 +0,0 @@
-/* global snowflake, log, dbg, Util, PeerConnection, Snowflake, Parse, WS */
-
-/*
-Represents a single:
-
-   client <-- webrtc --> snowflake <-- websocket --> relay
-
-Every ProxyPair has a Snowflake ID, which is necessary when responding to the
-Broker with an WebRTC answer.
-*/
-
-class ProxyPair {
-
-  /*
-  Constructs a ProxyPair where:
-  - @relayAddr is the destination relay
-  - @rateLimit specifies a rate limit on traffic
-  */
-  constructor(relayAddr, rateLimit, pcConfig) {
-    // Given a WebRTC DataChannel, prepare callbacks.
-    this.prepareDataChannel = this.prepareDataChannel.bind(this);
-    // Assumes WebRTC datachannel is connected.
-    this.connectRelay = this.connectRelay.bind(this);
-    // WebRTC --> websocket
-    this.onClientToRelayMessage = this.onClientToRelayMessage.bind(this);
-    // websocket --> WebRTC
-    this.onRelayToClientMessage = this.onRelayToClientMessage.bind(this);
-    this.onError = this.onError.bind(this);
-    // Send as much data in both directions as the rate limit currently allows.
-    this.flush = this.flush.bind(this);
-    this.relayAddr = relayAddr;
-    this.rateLimit = rateLimit;
-    this.pcConfig = pcConfig;
-    this.id = Util.genSnowflakeID();
-    this.c2rSchedule = [];
-    this.r2cSchedule = [];
-  }
-
-  // Prepare a WebRTC PeerConnection and await an SDP offer.
-  begin() {
-    this.pc = new PeerConnection(this.pcConfig, {
-      optional: [
-        {
-          DtlsSrtpKeyAgreement: true
-        },
-        {
-          RtpDataChannels: false
-        }
-      ]
-    });
-    this.pc.onicecandidate = (evt) => {
-      // Browser sends a null candidate once the ICE gathering completes.
-      if (null === evt.candidate) {
-        // TODO: Use a promise.all to tell Snowflake about all offers at once,
-        // once multiple proxypairs are supported.
-        dbg('Finished gathering ICE candidates.');
-        return snowflake.broker.sendAnswer(this.id, this.pc.localDescription);
-      }
-    };
-    // OnDataChannel triggered remotely from the client when connection succeeds.
-    return this.pc.ondatachannel = (dc) => {
-      var channel;
-      channel = dc.channel;
-      dbg('Data Channel established...');
-      this.prepareDataChannel(channel);
-      return this.client = channel;
-    };
-  }
-
-  receiveWebRTCOffer(offer) {
-    if ('offer' !== offer.type) {
-      log('Invalid SDP received -- was not an offer.');
-      return false;
-    }
-    try {
-      this.pc.setRemoteDescription(offer);
-    } catch (error) {
-      log('Invalid SDP message.');
-      return false;
-    }
-    dbg('SDP ' + offer.type + ' successfully received.');
-    return true;
-  }
-
-  prepareDataChannel(channel) {
-    channel.onopen = () => {
-      log('WebRTC DataChannel opened!');
-      snowflake.state = Snowflake.MODE.WEBRTC_READY;
-      snowflake.ui.setActive(true);
-      // This is the point when the WebRTC datachannel is done, so the next step
-      // is to establish websocket to the server.
-      return this.connectRelay();
-    };
-    channel.onclose = () => {
-      log('WebRTC DataChannel closed.');
-      snowflake.ui.setStatus('disconnected by webrtc.');
-      snowflake.ui.setActive(false);
-      snowflake.state = Snowflake.MODE.INIT;
-      this.flush();
-      return this.close();
-    };
-    channel.onerror = function() {
-      return log('Data channel error!');
-    };
-    channel.binaryType = "arraybuffer";
-    return channel.onmessage = this.onClientToRelayMessage;
-  }
-
-  connectRelay() {
-    var params, peer_ip, ref;
-    dbg('Connecting to relay...');
-    // Get a remote IP address from the PeerConnection, if possible. Add it to
-    // the WebSocket URL's query string if available.
-    // MDN marks remoteDescription as "experimental". However the other two
-    // options, currentRemoteDescription and pendingRemoteDescription, which
-    // are not marked experimental, were undefined when I tried them in Firefox
-    // 52.2.0.
-    // https://developer.mozilla.org/en-US/docs/Web/API/RTCPeerConnection/remoteDescription
-    peer_ip = Parse.ipFromSDP((ref = this.pc.remoteDescription) != null ? ref.sdp : void 0);
-    params = [];
-    if (peer_ip != null) {
-      params.push(["client_ip", peer_ip]);
-    }
-    var relay = this.relay = WS.makeWebsocket(this.relayAddr, params);
-    this.relay.label = 'websocket-relay';
-    this.relay.onopen = () => {
-      if (this.timer) {
-        clearTimeout(this.timer);
-        this.timer = 0;
-      }
-      log(relay.label + ' connected!');
-      return snowflake.ui.setStatus('connected');
-    };
-    this.relay.onclose = () => {
-      log(relay.label + ' closed.');
-      snowflake.ui.setStatus('disconnected.');
-      snowflake.ui.setActive(false);
-      snowflake.state = Snowflake.MODE.INIT;
-      this.flush();
-      return this.close();
-    };
-    this.relay.onerror = this.onError;
-    this.relay.onmessage = this.onRelayToClientMessage;
-    // TODO: Better websocket timeout handling.
-    return this.timer = setTimeout((() => {
-      if (0 === this.timer) {
-        return;
-      }
-      log(relay.label + ' timed out connecting.');
-      return relay.onclose();
-    }), 5000);
-  }
-
-  onClientToRelayMessage(msg) {
-    dbg('WebRTC --> websocket data: ' + msg.data.byteLength + ' bytes');
-    this.c2rSchedule.push(msg.data);
-    return this.flush();
-  }
-
-  onRelayToClientMessage(event) {
-    dbg('websocket --> WebRTC data: ' + event.data.byteLength + ' bytes');
-    this.r2cSchedule.push(event.data);
-    return this.flush();
-  }
-
-  onError(event) {
-    var ws;
-    ws = event.target;
-    log(ws.label + ' error.');
-    return this.close();
-  }
-
-  // Close both WebRTC and websocket.
-  close() {
-    if (this.timer) {
-      clearTimeout(this.timer);
-      this.timer = 0;
-    }
-    this.running = false;
-    if (this.webrtcIsReady()) {
-      this.client.close();
-    }
-    this.client = null;
-    if (this.relayIsReady()) {
-      this.relay.close();
-    }
-    this.relay = null;
-    this.onCleanup();
-  }
-
-  flush() {
-    var busy, checkChunks;
-    if (this.flush_timeout_id) {
-      clearTimeout(this.flush_timeout_id);
-    }
-    this.flush_timeout_id = null;
-    busy = true;
-    checkChunks = () => {
-      var chunk;
-      busy = false;
-      // WebRTC --> websocket
-      if (this.relayIsReady() && this.relay.bufferedAmount < this.MAX_BUFFER && this.c2rSchedule.length > 0) {
-        chunk = this.c2rSchedule.shift();
-        this.rateLimit.update(chunk.byteLength);
-        this.relay.send(chunk);
-        busy = true;
-      }
-      // websocket --> WebRTC
-      if (this.webrtcIsReady() && this.client.bufferedAmount < this.MAX_BUFFER && this.r2cSchedule.length > 0) {
-        chunk = this.r2cSchedule.shift();
-        this.rateLimit.update(chunk.byteLength);
-        this.client.send(chunk);
-        return busy = true;
-      }
-    };
-    while (busy && !this.rateLimit.isLimited()) {
-      checkChunks();
-    }
-    if (this.r2cSchedule.length > 0 || this.c2rSchedule.length > 0 || (this.relayIsReady() && this.relay.bufferedAmount > 0) || (this.webrtcIsReady() && this.client.bufferedAmount > 0)) {
-      return this.flush_timeout_id = setTimeout(this.flush, this.rateLimit.when() * 1000);
-    }
-  }
-
-  webrtcIsReady() {
-    return null !== this.client && 'open' === this.client.readyState;
-  }
-
-  relayIsReady() {
-    return (null !== this.relay) && (WebSocket.OPEN === this.relay.readyState);
-  }
-
-  isClosed(ws) {
-    return void 0 === ws || WebSocket.CLOSED === ws.readyState;
-  }
-
-}
-
-ProxyPair.prototype.MAX_BUFFER = 10 * 1024 * 1024;
-
-ProxyPair.prototype.pc = null;
-
-ProxyPair.prototype.client = null; // WebRTC Data channel
-
-ProxyPair.prototype.relay = null; // websocket
-
-ProxyPair.prototype.timer = 0;
-
-ProxyPair.prototype.running = true;
-
-ProxyPair.prototype.active = false; // Whether serving a client.
-
-ProxyPair.prototype.flush_timeout_id = null;
-
-ProxyPair.prototype.onCleanup = null;
-
-ProxyPair.prototype.id = null;
diff --git a/proxy/shims.js b/proxy/shims.js
deleted file mode 100644
index 5d93183..0000000
--- a/proxy/shims.js
+++ /dev/null
@@ -1,31 +0,0 @@
-/* global module, require */
-
-/*
-WebRTC shims for multiple browsers.
-*/
-
-if (typeof module !== "undefined" && module !== null ? module.exports : void 0) {
-  window = {};
-  document = {
-    getElementById: function() {
-      return null;
-    }
-  };
-  chrome = {};
-  location = { search: '' };
-  ({ URLSearchParams } = require('url'));
-  if ((typeof TESTING === "undefined" || TESTING === null) || !TESTING) {
-    webrtc = require('wrtc');
-    PeerConnection = webrtc.RTCPeerConnection;
-    IceCandidate = webrtc.RTCIceCandidate;
-    SessionDescription = webrtc.RTCSessionDescription;
-    WebSocket = require('ws');
-    ({ XMLHttpRequest } = require('xmlhttprequest'));
-  }
-} else {
-  PeerConnection = window.RTCPeerConnection || window.mozRTCPeerConnection || window.webkitRTCPeerConnection;
-  IceCandidate = window.RTCIceCandidate || window.mozRTCIceCandidate;
-  SessionDescription = window.RTCSessionDescription || window.mozRTCSessionDescription;
-  WebSocket = window.WebSocket;
-  XMLHttpRequest = window.XMLHttpRequest;
-}
diff --git a/proxy/snowflake.js b/proxy/snowflake.js
deleted file mode 100644
index bbf3d8b..0000000
--- a/proxy/snowflake.js
+++ /dev/null
@@ -1,178 +0,0 @@
-/* global log, dbg, DummyRateLimit, BucketRateLimit, SessionDescription, ProxyPair */
-
-/*
-A JavaScript WebRTC snowflake proxy
-
-Uses WebRTC from the client, and Websocket to the server.
-
-Assume that the webrtc client plugin is always the offerer, in which case
-this proxy must always act as the answerer.
-
-TODO: More documentation
-*/
-
-// Minimum viable snowflake for now - just 1 client.
-class Snowflake {
-
-  // Prepare the Snowflake with a Broker (to find clients) and optional UI.
-  constructor(config, ui, broker) {
-    // Receive an SDP offer from some client assigned by the Broker,
-    // |pair| - an available ProxyPair.
-    this.receiveOffer = this.receiveOffer.bind(this);
-    this.config = config;
-    this.ui = ui;
-    this.broker = broker;
-    this.state = Snowflake.MODE.INIT;
-    this.proxyPairs = [];
-    if (void 0 === this.config.rateLimitBytes) {
-      this.rateLimit = new DummyRateLimit();
-    } else {
-      this.rateLimit = new BucketRateLimit(this.config.rateLimitBytes * this.config.rateLimitHistory, this.config.rateLimitHistory);
-    }
-    this.retries = 0;
-  }
-
-  // Set the target relay address spec, which is expected to be websocket.
-  // TODO: Should potentially fetch the target from broker later, or modify
-  // entirely for the Tor-independent version.
-  setRelayAddr(relayAddr) {
-    this.relayAddr = relayAddr;
-    log('Using ' + relayAddr.host + ':' + relayAddr.port + ' as Relay.');
-    return true;
-  }
-
-  // Initialize WebRTC PeerConnection, which requires beginning the signalling
-  // process. |pollBroker| automatically arranges signalling.
-  beginWebRTC() {
-    this.state = Snowflake.MODE.WEBRTC_CONNECTING;
-    log('ProxyPair Slots: ' + this.proxyPairs.length);
-    log('Snowflake IDs: ' + (this.proxyPairs.map(function(p) {
-      return p.id;
-    })).join(' | '));
-    this.pollBroker();
-    return this.pollInterval = setInterval((() => {
-      return this.pollBroker();
-    }), this.config.defaultBrokerPollInterval);
-  }
-
-  // Regularly poll Broker for clients to serve until this snowflake is
-  // serving at capacity, at which point stop polling.
-  pollBroker() {
-    var msg, pair, recv;
-    // Poll broker for clients.
-    pair = this.nextAvailableProxyPair();
-    if (!pair) {
-      log('At client capacity.');
-      return;
-    }
-    // Do nothing until a new proxyPair is available.
-    pair.active = true;
-    msg = 'Polling for client ... ';
-    if (this.retries > 0) {
-      msg += '[retries: ' + this.retries + ']';
-    }
-    this.ui.setStatus(msg);
-    recv = this.broker.getClientOffer(pair.id);
-    recv.then((desc) => {
-      if (pair.running) {
-        if (!this.receiveOffer(pair, desc)) {
-          return pair.active = false;
-        }
-      } else {
-        return pair.active = false;
-      }
-    }, function() {
-      return pair.active = false;
-    });
-    return this.retries++;
-  }
-
-  // Returns the first ProxyPair that's available to connect.
-  nextAvailableProxyPair() {
-    if (this.proxyPairs.length < this.config.connectionsPerClient) {
-      return this.makeProxyPair(this.relayAddr);
-    }
-    return this.proxyPairs.find(function(pp) {
-      return !pp.active;
-    });
-  }
-
-  receiveOffer(pair, desc) {
-    var e, offer, sdp;
-    try {
-      offer = JSON.parse(desc);
-      dbg('Received:\n\n' + offer.sdp + '\n');
-      sdp = new SessionDescription(offer);
-      if (pair.receiveWebRTCOffer(sdp)) {
-        this.sendAnswer(pair);
-        return true;
-      } else {
-        return false;
-      }
-    } catch (error) {
-      e = error;
-      log('ERROR: Unable to receive Offer: ' + e);
-      return false;
-    }
-  }
-
-  sendAnswer(pair) {
-    var fail, next;
-    next = function(sdp) {
-      dbg('webrtc: Answer ready.');
-      return pair.pc.setLocalDescription(sdp);
-    };
-    fail = function() {
-      return dbg('webrtc: Failed to create Answer');
-    };
-    return pair.pc.createAnswer().then(next).catch(fail);
-  }
-
-  makeProxyPair(relay) {
-    var pair;
-    pair = new ProxyPair(relay, this.rateLimit, this.config.pcConfig);
-    this.proxyPairs.push(pair);
-    pair.onCleanup = () => {
-      var ind;
-      // Delete from the list of active proxy pairs.
-      ind = this.proxyPairs.indexOf(pair);
-      if (ind > -1) {
-        return this.proxyPairs.splice(ind, 1);
-      }
-    };
-    pair.begin();
-    return pair;
-  }
-
-  // Stop all proxypairs.
-  disable() {
-    var results;
-    log('Disabling Snowflake.');
-    clearInterval(this.pollInterval);
-    results = [];
-    while (this.proxyPairs.length > 0) {
-      results.push(this.proxyPairs.pop().close());
-    }
-    return results;
-  }
-
-}
-
-Snowflake.prototype.relayAddr = null;
-
-Snowflake.prototype.rateLimit = null;
-
-Snowflake.prototype.pollInterval = null;
-
-Snowflake.prototype.retries = 0;
-
-// Janky state machine
-Snowflake.MODE = {
-  INIT: 0,
-  WEBRTC_CONNECTING: 1,
-  WEBRTC_READY: 2
-};
-
-Snowflake.MESSAGE = {
-  CONFIRMATION: 'You\'re currently serving a Tor user via Snowflake.'
-};
diff --git a/proxy/spec/broker.spec.js b/proxy/spec/broker.spec.js
deleted file mode 100644
index 4eb3029..0000000
--- a/proxy/spec/broker.spec.js
+++ /dev/null
@@ -1,119 +0,0 @@
-/* global expect, it, describe, spyOn, Broker */
-
-/*
-jasmine tests for Snowflake broker
-*/
-
-// fake xhr
-// class XMLHttpRequest
-class XMLHttpRequest {
-  constructor() {
-    this.onreadystatechange = null;
-  }
-  open() {}
-  setRequestHeader() {}
-  send() {}
-};
-
-XMLHttpRequest.prototype.DONE = 1;
-
-
-describe('Broker', function() {
-
-  it('can be created', function() {
-    var b;
-    b = new Broker('fake');
-    expect(b.url).toEqual('https://fake/');
-    expect(b.id).not.toBeNull();
-  });
-
-  describe('getClientOffer', function() {
-
-    it('polls and promises a client offer', function(done) {
-      var b, poll;
-      b = new Broker('fake');
-      // fake successful request and response from broker.
-      spyOn(b, '_postRequest').and.callFake(function() {
-        b._xhr.readyState = b._xhr.DONE;
-        b._xhr.status = Broker.STATUS.OK;
-        b._xhr.responseText = 'fake offer';
-        return b._xhr.onreadystatechange();
-      });
-      poll = b.getClientOffer();
-      expect(poll).not.toBeNull();
-      expect(b._postRequest).toHaveBeenCalled();
-      return poll.then(function(desc) {
-        expect(desc).toEqual('fake offer');
-        return done();
-      }).catch(function() {
-        fail('should not reject on Broker.STATUS.OK');
-        return done();
-      });
-    });
-
-    it('rejects if the broker timed-out', function(done) {
-      var b, poll;
-      b = new Broker('fake');
-      // fake timed-out request from broker
-      spyOn(b, '_postRequest').and.callFake(function() {
-        b._xhr.readyState = b._xhr.DONE;
-        b._xhr.status = Broker.STATUS.GATEWAY_TIMEOUT;
-        return b._xhr.onreadystatechange();
-      });
-      poll = b.getClientOffer();
-      expect(poll).not.toBeNull();
-      expect(b._postRequest).toHaveBeenCalled();
-      return poll.then(function(desc) {
-        fail('should not fulfill on Broker.STATUS.GATEWAY_TIMEOUT');
-        return done();
-      }, function(err) {
-        expect(err).toBe(Broker.MESSAGE.TIMEOUT);
-        return done();
-      });
-    });
-
-    it('rejects on any other status', function(done) {
-      var b, poll;
-      b = new Broker('fake');
-      // fake timed-out request from broker
-      spyOn(b, '_postRequest').and.callFake(function() {
-        b._xhr.readyState = b._xhr.DONE;
-        b._xhr.status = 1337;
-        return b._xhr.onreadystatechange();
-      });
-      poll = b.getClientOffer();
-      expect(poll).not.toBeNull();
-      expect(b._postRequest).toHaveBeenCalled();
-      return poll.then(function(desc) {
-        fail('should not fulfill on non-OK status');
-        return done();
-      }, function(err) {
-        expect(err).toBe(Broker.MESSAGE.UNEXPECTED);
-        expect(b._xhr.status).toBe(1337);
-        return done();
-      });
-
-    });
-
-  });
-
-  it('responds to the broker with answer', function() {
-    var b = new Broker('fake');
-    spyOn(b, '_postRequest');
-    b.sendAnswer('fake id', 123);
-    expect(b._postRequest).toHaveBeenCalledWith('fake id', jasmine.any(Object), 'answer', '123');
-  });
-
-  it('POST XMLHttpRequests to the broker', function() {
-    var b = new Broker('fake');
-    b._xhr = new XMLHttpRequest();
-    spyOn(b._xhr, 'open');
-    spyOn(b._xhr, 'setRequestHeader');
-    spyOn(b._xhr, 'send');
-    b._postRequest(0, b._xhr, 'test', 'data');
-    expect(b._xhr.open).toHaveBeenCalled();
-    expect(b._xhr.setRequestHeader).toHaveBeenCalled();
-    expect(b._xhr.send).toHaveBeenCalled();
-  });
-
-});
diff --git a/proxy/spec/init.spec.js b/proxy/spec/init.spec.js
deleted file mode 100644
index 748bc86..0000000
--- a/proxy/spec/init.spec.js
+++ /dev/null
@@ -1,35 +0,0 @@
-/* global expect, it, describe, Snowflake, UI */
-
-// Fake snowflake to interact with
-
-var snowflake = {
-  ui: new UI,
-  broker: {
-    sendAnswer: function() {}
-  },
-  state: Snowflake.MODE.INIT
-};
-
-describe('Init', function() {
-
-  it('gives a dialog when closing, only while active', function() {
-    silenceNotifications = false;
-    snowflake.state = Snowflake.MODE.WEBRTC_READY;
-    var msg = window.onbeforeunload();
-    expect(snowflake.state).toBe(Snowflake.MODE.WEBRTC_READY);
-    expect(msg).toBe(Snowflake.MESSAGE.CONFIRMATION);
-    snowflake.state = Snowflake.MODE.INIT;
-    msg = window.onbeforeunload();
-    expect(snowflake.state).toBe(Snowflake.MODE.INIT);
-    expect(msg).toBe(null);
-  });
-
-  it('does not give a dialog when silent flag is on', function() {
-    silenceNotifications = true;
-    snowflake.state = Snowflake.MODE.WEBRTC_READY;
-    var msg = window.onbeforeunload();
-    expect(snowflake.state).toBe(Snowflake.MODE.WEBRTC_READY);
-    expect(msg).toBe(null);
-  });
-
-});
diff --git a/proxy/spec/proxypair.spec.js b/proxy/spec/proxypair.spec.js
deleted file mode 100644
index 3716f2d..0000000
--- a/proxy/spec/proxypair.spec.js
+++ /dev/null
@@ -1,162 +0,0 @@
-/* global expect, it, describe, spyOn */
-
-/*
-jasmine tests for Snowflake proxypair
-*/
-
-// Replacement for MessageEvent constructor.
-// https://developer.mozilla.org/en-US/docs/Web/API/MessageEvent/MessageEvent
-var MessageEvent = function(type, init) {
-  return init;
-};
-
-// Asymmetric matcher that checks that two arrays have the same contents.
-var arrayMatching = function(sample) {
-  return {
-    asymmetricMatch: function(other) {
-      var _, a, b, i, j, len;
-      a = new Uint8Array(sample);
-      b = new Uint8Array(other);
-      if (a.length !== b.length) {
-        return false;
-      }
-      for (i = j = 0, len = a.length; j < len; i = ++j) {
-        _ = a[i];
-        if (a[i] !== b[i]) {
-          return false;
-        }
-      }
-      return true;
-    },
-    jasmineToString: function() {
-      return '';
-    }
-  };
-};
-
-describe('ProxyPair', function() {
-
-  var config, destination, fakeRelay, pp, rateLimit;
-  fakeRelay = Parse.address('0.0.0.0:12345');
-  rateLimit = new DummyRateLimit;
-  config = new Config;
-  destination = [];
-
-  // Using the mock PeerConnection definition from spec/snowflake.spec.js
-  var pp = new ProxyPair(fakeRelay, rateLimit, config.pcConfig);
-
-  beforeEach(function() {
-    return pp.begin();
-  });
-
-  it('begins webrtc connection', function() {
-    return expect(pp.pc).not.toBeNull();
-  });
-
-  describe('accepts WebRTC offer from some client', function() {
-
-    beforeEach(function() {
-      return pp.begin();
-    });
-
-    it('rejects invalid offers', function() {
-      expect(typeof pp.pc.setRemoteDescription).toBe("function");
-      expect(pp.pc).not.toBeNull();
-      expect(pp.receiveWebRTCOffer({})).toBe(false);
-      expect(pp.receiveWebRTCOffer({
-        type: 'answer'
-      })).toBe(false);
-    });
-
-    it('accepts valid offers', function() {
-      expect(pp.pc).not.toBeNull();
-      expect(pp.receiveWebRTCOffer({
-        type: 'offer',
-        sdp: 'foo'
-      })).toBe(true);
-    });
-
-  });
-
-  it('responds with a WebRTC answer correctly', function() {
-    spyOn(snowflake.broker, 'sendAnswer');
-    pp.pc.onicecandidate({
-      candidate: null
-    });
-    expect(snowflake.broker.sendAnswer).toHaveBeenCalled();
-  });
-
-  it('handles a new data channel correctly', function() {
-    expect(pp.client).toBeNull();
-    pp.pc.ondatachannel({
-      channel: {}
-    });
-    expect(pp.client).not.toBeNull();
-    expect(pp.client.onopen).not.toBeNull();
-    expect(pp.client.onclose).not.toBeNull();
-    expect(pp.client.onerror).not.toBeNull();
-    expect(pp.client.onmessage).not.toBeNull();
-  });
-
-  it('connects to the relay once datachannel opens', function() {
-    spyOn(pp, 'connectRelay');
-    pp.client.onopen();
-    expect(pp.connectRelay).toHaveBeenCalled();
-  });
-
-  it('connects to a relay', function() {
-    pp.connectRelay();
-    expect(pp.relay.onopen).not.toBeNull();
-    expect(pp.relay.onclose).not.toBeNull();
-    expect(pp.relay.onerror).not.toBeNull();
-    expect(pp.relay.onmessage).not.toBeNull();
-  });
-
-  describe('flushes data between client and relay', function() {
-
-    it('proxies data from client to relay', function() {
-      var msg;
-      pp.pc.ondatachannel({
-        channel: {
-          bufferedAmount: 0,
-          readyState: "open",
-          send: function(data) {}
-        }
-      });
-      spyOn(pp.client, 'send');
-      spyOn(pp.relay, 'send');
-      msg = new MessageEvent("message", {
-        data: Uint8Array.from([1, 2, 3]).buffer
-      });
-      pp.onClientToRelayMessage(msg);
-      pp.flush();
-      expect(pp.client.send).not.toHaveBeenCalled();
-      expect(pp.relay.send).toHaveBeenCalledWith(arrayMatching([1, 2, 3]));
-    });
-
-    it('proxies data from relay to client', function() {
-      var msg;
-      spyOn(pp.client, 'send');
-      spyOn(pp.relay, 'send');
-      msg = new MessageEvent("message", {
-        data: Uint8Array.from([4, 5, 6]).buffer
-      });
-      pp.onRelayToClientMessage(msg);
-      pp.flush();
-      expect(pp.client.send).toHaveBeenCalledWith(arrayMatching([4, 5, 6]));
-      expect(pp.relay.send).not.toHaveBeenCalled();
-    });
-
-    it('sends nothing with nothing to flush', function() {
-      spyOn(pp.client, 'send');
-      spyOn(pp.relay, 'send');
-      pp.flush();
-      expect(pp.client.send).not.toHaveBeenCalled();
-      expect(pp.relay.send).not.toHaveBeenCalled();
-    });
-
-  });
-
-});
-
-// TODO: rate limit tests
diff --git a/proxy/spec/snowflake.spec.js b/proxy/spec/snowflake.spec.js
deleted file mode 100644
index 970947b..0000000
--- a/proxy/spec/snowflake.spec.js
+++ /dev/null
@@ -1,103 +0,0 @@
-/* global expect, it, describe, spyOn, Snowflake, Config, UI */
-
-/*
-jasmine tests for Snowflake
-*/
-
-// Fake browser functionality:
-class PeerConnection {
-  setRemoteDescription() {
-    return true;
-  }
-  send() {}
-}
-
-class SessionDescription {}
-SessionDescription.prototype.type = 'offer';
-
-class WebSocket {
-  constructor() {
-    this.bufferedAmount = 0;
-  }
-  send() {}
-}
-WebSocket.prototype.OPEN = 1;
-WebSocket.prototype.CLOSED = 0;
-
-var log = function() {};
-
-var config = new Config();
-
-var ui = new UI();
-
-class FakeBroker {
-  getClientOffer() {
-    return new Promise(function() {
-      return {};
-    });
-  }
-}
-
-describe('Snowflake', function() {
-
-  it('constructs correctly', function() {
-    var s;
-    s = new Snowflake(config, ui, {
-      fake: 'broker'
-    });
-    expect(s.rateLimit).not.toBeNull();
-    expect(s.broker).toEqual({
-      fake: 'broker'
-    });
-    expect(s.ui).not.toBeNull();
-    expect(s.retries).toBe(0);
-  });
-
-  it('sets relay address correctly', function() {
-    var s;
-    s = new Snowflake(config, ui, null);
-    s.setRelayAddr('foo');
-    expect(s.relayAddr).toEqual('foo');
-  });
-
-  it('initializes WebRTC connection', function() {
-    var s;
-    s = new Snowflake(config, ui, new FakeBroker());
-    spyOn(s.broker, 'getClientOffer').and.callThrough();
-    s.beginWebRTC();
-    expect(s.retries).toBe(1);
-    expect(s.broker.getClientOffer).toHaveBeenCalled();
-  });
-
-  it('receives SDP offer and sends answer', function() {
-    var pair, s;
-    s = new Snowflake(config, ui, new FakeBroker());
-    pair = {
-      receiveWebRTCOffer: function() {}
-    };
-    spyOn(pair, 'receiveWebRTCOffer').and.returnValue(true);
-    spyOn(s, 'sendAnswer');
-    s.receiveOffer(pair, '{"type":"offer","sdp":"foo"}');
-    expect(s.sendAnswer).toHaveBeenCalled();
-  });
-
-  it('does not send answer when receiving invalid offer', function() {
-    var pair, s;
-    s = new Snowflake(config, ui, new FakeBroker());
-    pair = {
-      receiveWebRTCOffer: function() {}
-    };
-    spyOn(pair, 'receiveWebRTCOffer').and.returnValue(false);
-    spyOn(s, 'sendAnswer');
-    s.receiveOffer(pair, '{"type":"not a good offer","sdp":"foo"}');
-    expect(s.sendAnswer).not.toHaveBeenCalled();
-  });
-
-  it('can make a proxypair', function() {
-    var s;
-    s = new Snowflake(config, ui, new FakeBroker());
-    s.makeProxyPair();
-    expect(s.proxyPairs.length).toBe(1);
-  });
-
-});
diff --git a/proxy/spec/ui.spec.js b/proxy/spec/ui.spec.js
deleted file mode 100644
index 3386a2d..0000000
--- a/proxy/spec/ui.spec.js
+++ /dev/null
@@ -1,92 +0,0 @@
-/* global expect, it, describe, spyOn, DebugUI, BadgeUI */
-/* eslint no-redeclare: 0 */
-
-/*
-jasmine tests for Snowflake UI
-*/
-
-var document = {
-  getElementById: function() {
-    return {};
-  },
-  createTextNode: function(txt) {
-    return txt;
-  }
-};
-
-describe('UI', function() {
-
-  it('activates debug mode when badge does not exist', function() {
-    var u;
-    spyOn(document, 'getElementById').and.callFake(function(id) {
-      if ('badge' === id) {
-        return null;
-      }
-      return {};
-    });
-    u = new DebugUI();
-    expect(document.getElementById.calls.count()).toEqual(2);
-    expect(u.$status).not.toBeNull();
-    expect(u.$msglog).not.toBeNull();
-  });
-
-  it('is not debug mode when badge exists', function() {
-    var u;
-    spyOn(document, 'getElementById').and.callFake(function(id) {
-      if ('badge' === id) {
-        return {};
-      }
-      return null;
-    });
-    u = new BadgeUI();
-    expect(document.getElementById).toHaveBeenCalled();
-    expect(document.getElementById.calls.count()).toEqual(1);
-    expect(u.$badge).not.toBeNull();
-  });
-
-  it('sets status message when in debug mode', function() {
-    var u;
-    u = new DebugUI();
-    u.$status = {
-      innerHTML: '',
-      appendChild: function(txt) {
-        return this.innerHTML = txt;
-      }
-    };
-    u.setStatus('test');
-    expect(u.$status.innerHTML).toEqual('Status: test');
-  });
-
-  it('sets message log css correctly for debug mode', function() {
-    var u;
-    u = new DebugUI();
-    u.setActive(true);
-    expect(u.$msglog.className).toEqual('active');
-    u.setActive(false);
-    expect(u.$msglog.className).toEqual('');
-  });
-
-  it('sets badge css correctly for non-debug mode', function() {
-    var u;
-    u = new BadgeUI();
-    u.$badge = {};
-    u.setActive(true);
-    expect(u.$badge.className).toEqual('active');
-    u.setActive(false);
-    expect(u.$badge.className).toEqual('');
-  });
-
-  it('logs to the textarea correctly when debug mode', function() {
-    var u;
-    u = new DebugUI();
-    u.$msglog = {
-      value: '',
-      scrollTop: 0,
-      scrollHeight: 1337
-    };
-    u.log('test');
-    expect(u.$msglog.value).toEqual('test\n');
-    expect(u.$msglog.scrollTop).toEqual(1337);
-  });
-
-});
diff --git a/proxy/spec/util.spec.js b/proxy/spec/util.spec.js
deleted file mode 100644
index 6eb5be4..0000000
--- a/proxy/spec/util.spec.js
+++ /dev/null
@@ -1,252 +0,0 @@
-/* global expect, it, describe, Parse, Params */
-
-/*
-jasmine tests for Snowflake utils
-*/
-
-describe('Parse', function() {
-
-  describe('cookie', function() {
-
-    it('parses correctly', function() {
-      expect(Parse.cookie('')).toEqual({});
-      expect(Parse.cookie('a=b')).toEqual({
-        a: 'b'
-      });
-      expect(Parse.cookie('a=b=c')).toEqual({
-        a: 'b=c'
-      });
-      expect(Parse.cookie('a=b; c=d')).toEqual({
-        a: 'b',
-        c: 'd'
-      });
-      expect(Parse.cookie('a=b ; c=d')).toEqual({
-        a: 'b',
-        c: 'd'
-      });
-      expect(Parse.cookie('a= b')).toEqual({
-        a: 'b'
-      });
-      expect(Parse.cookie('a=')).toEqual({
-        a: ''
-      });
-      expect(Parse.cookie('key')).toBeNull();
-      expect(Parse.cookie('key=%26%20')).toEqual({
-        key: '& '
-      });
-      expect(Parse.cookie('a=\'\'')).toEqual({
-        a: '\'\''
-      });
-    });
-
-  });
-
-  describe('address', function() {
-
-    it('parses IPv4', function() {
-      expect(Parse.address('')).toBeNull();
-      expect(Parse.address('3.3.3.3:4444')).toEqual({
-        host: '3.3.3.3',
-        port: 4444
-      });
-      expect(Parse.address('3.3.3.3')).toBeNull();
-      expect(Parse.address('3.3.3.3:0x1111')).toBeNull();
-      expect(Parse.address('3.3.3.3:-4444')).toBeNull();
-      expect(Parse.address('3.3.3.3:65536')).toBeNull();
-    });
-
-    it('parses IPv6', function() {
-      expect(Parse.address('[1:2::a:f]:4444')).toEqual({
-        host: '1:2::a:f',
-        port: 4444
-      });
-      expect(Parse.address('[1:2::a:f]')).toBeNull();
-      expect(Parse.address('[1:2::a:f]:0x1111')).toBeNull();
-      expect(Parse.address('[1:2::a:f]:-4444')).toBeNull();
-      expect(Parse.address('[1:2::a:f]:65536')).toBeNull();
-      expect(Parse.address('[1:2::ffff:1.2.3.4]:4444')).toEqual({
-        host: '1:2::ffff:1.2.3.4',
-        port: 4444
-      });
-    });
-
-  });
-
-  describe('byte count', function() {
-
-    it('returns null for bad inputs', function() {
-      expect(Parse.byteCount("")).toBeNull();
-      expect(Parse.byteCount("x")).toBeNull();
-      expect(Parse.byteCount("1x")).toBeNull();
-      expect(Parse.byteCount("1.x")).toBeNull();
-      expect(Parse.byteCount("1.2x")).toBeNull();
-      expect(Parse.byteCount("toString")).toBeNull();
-      expect(Parse.byteCount("1toString")).toBeNull();
-      expect(Parse.byteCount("1.toString")).toBeNull();
-      expect(Parse.byteCount("1.2toString")).toBeNull();
-      expect(Parse.byteCount("k")).toBeNull();
-      expect(Parse.byteCount("m")).toBeNull();
-      expect(Parse.byteCount("g")).toBeNull();
-      expect(Parse.byteCount("K")).toBeNull();
-      expect(Parse.byteCount("M")).toBeNull();
-      expect(Parse.byteCount("G")).toBeNull();
-      expect(Parse.byteCount("-1")).toBeNull();
-      expect(Parse.byteCount("-1k")).toBeNull();
-      expect(Parse.byteCount("1.2.3")).toBeNull();
-      expect(Parse.byteCount("1.2.3k")).toBeNull();
-    });
-
-    it('handles numbers without a suffix', function() {
-      expect(Parse.byteCount("10")).toEqual(10);
-      expect(Parse.byteCount("10.")).toEqual(10);
-      expect(Parse.byteCount("1.5")).toEqual(1.5);
-    });
-
-    it('handles lowercase suffixes', function() {
-      expect(Parse.byteCount("10k")).toEqual(10*1024);
-      expect(Parse.byteCount("10m")).toEqual(10*1024*1024);
-      expect(Parse.byteCount("10g")).toEqual(10*1024*1024*1024);
-      expect(Parse.byteCount("10.k")).toEqual(10*1024);
-      expect(Parse.byteCount("10.m")).toEqual(10*1024*1024);
-      expect(Parse.byteCount("10.g")).toEqual(10*1024*1024*1024);
-      expect(Parse.byteCount("1.5k")).toEqual(1.5*1024);
-      expect(Parse.byteCount("1.5m")).toEqual(1.5*1024*1024);
-      expect(Parse.byteCount("1.5G")).toEqual(1.5*1024*1024*1024);
-    });
-
-    it('handles uppercase suffixes', function() {
-      expect(Parse.byteCount("10K")).toEqual(10*1024);
-      expect(Parse.byteCount("10M")).toEqual(10*1024*1024);
-      expect(Parse.byteCount("10G")).toEqual(10*1024*1024*1024);
-      expect(Parse.byteCount("10.K")).toEqual(10*1024);
-      expect(Parse.byteCount("10.M")).toEqual(10*1024*1024);
-      expect(Parse.byteCount("10.G")).toEqual(10*1024*1024*1024);
-      expect(Parse.byteCount("1.5K")).toEqual(1.5*1024);
-      expect(Parse.byteCount("1.5M")).toEqual(1.5*1024*1024);
-      expect(Parse.byteCount("1.5G")).toEqual(1.5*1024*1024*1024);
-    });
-
-  });
-
-  describe('ipFromSDP', function() {
-
-    var testCases = [
-      {
-        // https://tools.ietf.org/html/rfc4566#section-5
-        sdp: "v=0\no=jdoe 2890844526 2890842807 IN IP4 10.47.16.5\ns=SDP Seminar\ni=A Seminar on the session description protocol\nu=http://www.example.com/seminars/sdp.pdf\ne=j.doe@example.com (Jane Doe)\nc=IN IP4 224.2.17.12/127\nt=2873397496 2873404696\na=recvonly\nm=audio 49170 RTP/AVP 0\nm=video 51372 RTP/AVP 99\na=rtpmap:99 h263-1998/90000",
-        expected: '224.2.17.12'
-      },
-      {
-        // Missing c= line
-        sdp: "v=0\no=jdoe 2890844526 2890842807 IN IP4 10.47.16.5\ns=SDP Seminar\ni=A Seminar on the session description protocol\nu=http://www.example.com/seminars/sdp.pdf\ne=j.doe@example.com (Jane Doe)\nt=2873397496 2873404696\na=recvonly\nm=audio 49170 RTP/AVP 0\nm=video 51372 RTP/AVP 99\na=rtpmap:99 h263-1998/90000",
-        expected: void 0
-      },
-      {
-        // Single line, IP address only
-        sdp: "c=IN IP4 224.2.1.1\n",
-        expected: '224.2.1.1'
-      },
-      {
-        // Same, with TTL
-        sdp: "c=IN IP4 224.2.1.1/127\n",
-        expected: '224.2.1.1'
-      },
-      {
-        // Same, with TTL and multicast addresses
-        sdp: "c=IN IP4 224.2.1.1/127/3\n",
-        expected: '224.2.1.1'
-      },
-      {
-        // IPv6, address only
-        sdp: "c=IN IP6 FF15::101\n",
-        expected: 'ff15::101'
-      },
-      {
-        // Same, with multicast addresses
-        sdp: "c=IN IP6 FF15::101/3\n",
-        expected: 'ff15::101'
-      },
-      {
-        // Multiple c= lines
-        sdp: "c=IN IP4 1.2.3.4\nc=IN IP4 5.6.7.8",
-        expected: '1.2.3.4'
-      },
-      {
-        // Modified from SDP sent by snowflake-client.
-        sdp: "v=0\no=- 7860378660295630295 2 IN IP4 127.0.0.1\ns=-\nt=0 0\na=group:BUNDLE data\na=msid-semantic: WMS\nm=application 54653 DTLS/SCTP 5000\nc=IN IP4 1.2.3.4\na=candidate:3581707038 1 udp 2122260223 192.168.0.1 54653 typ host generation 0 network-id 1 network-cost 50\na=candidate:2617212910 1 tcp 1518280447 192.168.0.1 59673 typ host tcptype passive generation 0 network-id 1 network-cost 50\na=candidate:2082671819 1 udp 1686052607 1.2.3.4 54653 typ srflx raddr 192.168.0.1 rport 54653 generation 0 network-id 1 network-cost 50\na=ice-ufrag:IBdf\na=ice-pwd:G3lTrrC9gmhQx481AowtkhYz\na=fingerprint:sha-256 53:F8:84:D9:3C:1F:A0:44:AA:D6:3C:65:80:D3:CB:6F:23:90:17:41:06:F9:9C:10:D8:48:4A:A8:B6:FA:14:A1\na=setup:actpass\na=mid:data\na=sctpmap:5000 webrtc-datachannel 1024",
-        expected: '1.2.3.4'
-      },
-      {
-        // Improper character within IPv4
-        sdp: "c=IN IP4 224.2z.1.1",
-        expected: void 0
-      },
-      {
-        // Improper character within IPv6
-        sdp: "c=IN IP6 ff15:g::101",
-        expected: void 0
-      },
-      {
-        // Bogus "IP7" addrtype
-        sdp: "c=IN IP7 1.2.3.4\n",
-        expected: void 0
-      }
-    ];
-
-    it('parses SDP', function() {
-      var i, len, ref, ref1, results, test;
-      results = [];
-      for (i = 0, len = testCases.length; i < len; i++) {
-        test = testCases[i];
-        // https://tools.ietf.org/html/rfc4566#section-5: "The sequence CRLF
-        // (0x0d0a) is used to end a record, although parsers SHOULD be tolerant
-        // and also accept records terminated with a single newline character."
-        // We represent the test cases with LF line endings for convenience, and
-        // test them both that way and with CRLF line endings.
-        expect((ref = Parse.ipFromSDP(test.sdp)) != null ? ref.toLowerCase() : void 0).toEqual(test.expected);
-        results.push(expect((ref1 = Parse.ipFromSDP(test.sdp.replace(/\n/g, "\r\n"))) != null ? ref1.toLowerCase() : void 0).toEqual(test.expected));
-      }
-      return results;
-    });
-
-  });
-
-});
-
-describe('Params', function() {
-
-  describe('bool', function() {
-
-    var getBool = function(query) {
-      return Params.getBool(new URLSearchParams(query), 'param', false);
-    };
-
-    it('parses correctly', function() {
-      expect(getBool('param=true')).toBe(true);
-      expect(getBool('param')).toBe(true);
-      expect(getBool('param=')).toBe(true);
-      expect(getBool('param=1')).toBe(true);
-      expect(getBool('param=0')).toBe(false);
-      expect(getBool('param=false')).toBe(false);
-      expect(getBool('param=unexpected')).toBeNull();
-      expect(getBool('pram=true')).toBe(false);
-    });
-
-  });
-
-  describe('byteCount', function() {
-
-    var DEFAULT = 77;
-    var getByteCount = function(query) {
-      return Params.getByteCount(new URLSearchParams(query), 'param', DEFAULT);
-    };
-
-    it('supports default values', function() {
-      expect(getByteCount('param=x')).toBeNull();
-      expect(getByteCount('param=10')).toEqual(10);
-      expect(getByteCount('foo=10k')).toEqual(DEFAULT);
-    });
-
-  });
-
-});
diff --git a/proxy/spec/websocket.spec.js b/proxy/spec/websocket.spec.js
deleted file mode 100644
index 6c2ef2e..0000000
--- a/proxy/spec/websocket.spec.js
+++ /dev/null
@@ -1,41 +0,0 @@
-/* global expect, it, describe, WS */
-
-/*
-jasmine tests for Snowflake websocket
-*/
-
-describe('BuildUrl', function() {
-
-  it('should parse just protocol and host', function() {
-    expect(WS.buildUrl('http', 'example.com')).toBe('http://example.com');
-  });
-
-  it('should handle different ports', function() {
-    expect(WS.buildUrl('http', 'example.com', 80)).toBe('http://example.com');
-    expect(WS.buildUrl('http', 'example.com', 81)).toBe('http://example.com:81');
-    expect(WS.buildUrl('http', 'example.com', 443)).toBe('http://example.com:443');
-    expect(WS.buildUrl('http', 'example.com', 444)).toBe('http://example.com:444');
-  });
-
-  it('should handle paths', function() {
-    expect(WS.buildUrl('http', 'example.com', 80, '/')).toBe('http://example.com/');
-    expect(WS.buildUrl('http', 'example.com', 80, '/test?k=%#v')).toBe('http://example.com/test%3Fk%3D%25%23v');
-    expect(WS.buildUrl('http', 'example.com', 80, '/test')).toBe('http://example.com/test');
-  });
-
-  it('should handle params', function() {
-    expect(WS.buildUrl('http', 'example.com', 80, '/test', [['k', '%#v']])).toBe('http://example.com/test?k=%25%23v');
-    expect(WS.buildUrl('http', 'example.com', 80, '/test', [['a', 'b'], ['c', 'd']])).toBe('http://example.com/test?a=b&c=d');
-  });
-
-  it('should handle ips', function() {
-    expect(WS.buildUrl('http', '1.2.3.4')).toBe('http://1.2.3.4');
-    expect(WS.buildUrl('http', '1:2::3:4')).toBe('http://[1:2::3:4]');
-  });
-
-  it('should handle bogus', function() {
-    expect(WS.buildUrl('http', 'bog][us')).toBe('http://bog%5D%5Bus');
-    expect(WS.buildUrl('http', 'bog:u]s')).toBe('http://bog%3Au%5Ds');
-  });
-
-});
diff --git a/proxy/static/.htaccess b/proxy/static/.htaccess
deleted file mode 100644
index 3dd217d..0000000
--- a/proxy/static/.htaccess
+++ /dev/null
@@ -1,4 +0,0 @@
-<IfModule mod_headers.c>
-    Header always unset X-Frame-Options
-</IfModule>
-Redirect permanent /options.html /index.html
diff --git a/proxy/static/SourceSansPro-Regular.ttf b/proxy/static/SourceSansPro-Regular.ttf
deleted file mode 100644
index 278ad8a..0000000
Binary files a/proxy/static/SourceSansPro-Regular.ttf and /dev/null differ
diff --git a/proxy/static/chrome150.jpg b/proxy/static/chrome150.jpg
deleted file mode 100644
index fc8a83f..0000000
Binary files a/proxy/static/chrome150.jpg and /dev/null differ
diff --git a/proxy/static/embed.html b/proxy/static/embed.html
deleted file mode 100644
index 32c26ca..0000000
--- a/proxy/static/embed.html
+++ /dev/null
@@ -1,52 +0,0 @@
-
-
-
-  
-  
-  
-  
-
-
-  
-    
- Internet Freedom
-
-
-
-
diff --git a/proxy/static/firefox150.jpg b/proxy/static/firefox150.jpg
deleted file mode 100644
index 1eda543..0000000
Binary files a/proxy/static/firefox150.jpg and /dev/null differ
diff --git a/proxy/static/index.css b/proxy/static/index.css
deleted file mode 100644
index 70b5a24..0000000
--- a/proxy/static/index.css
+++ /dev/null
@@ -1,78 +0,0 @@
-@font-face {
-  font-family: Source Sans Pro;
-  src: url("SourceSansPro-Regular.ttf");
-}
-
-body {
-  margin: 0;
-  font-family: "Source Sans Pro", -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol";
-  font-size: 1.3rem;
-  font-weight: 400;
-  line-height: 1.5;
-  background-color: #59316B;
-  color: #212529;
-}
-
-#header {
-  margin: 0;
-  line-height: 0;
-  padding: 1.3rem 2.6rem;
-}
-
-#header a {
-  border-style: none;
-  display: inline-block;
-  line-height: 0;
-}
-
-#content {
-  margin: 0 0 2.6rem 0;
-  padding: 2.6rem 5.2rem;
-  background-color: #FFFFFF;
-}
-
-h1 {
-  margin: 0;
-  font-size: 2.6rem;
-  color: #7D4698;
-  text-align: center;
-}
-
-h3 {
-  margin: 0;
-  font-size: 2rem;
-  color: #7D4698;
-}
-
-.clear {
-  margin: 2.6rem 0;
-  overflow: auto;
-}
-
-@media only screen and (min-width: 600px) {
-  .browser, .extension {
-    float: left;
-    width: 50%;
-  }
-  .padding {
-    padding: 0 1.3rem;
-  }
-}
-
-.addon {
-  margin-top: 2.6rem 0;
-  text-align: center;
-}
-
-.addon a {
-  display: inline-block;
-  padding: 0 1.3rem;
-}
-
-.diagram, .screenshot {
-  text-align: center;
-}
-
-.diagram img, .screenshot img {
-  max-width: 100%;
-}
diff --git a/proxy/static/index.html b/proxy/static/index.html
deleted file mode 100644
index e46c373..0000000
--- a/proxy/static/index.html
+++ /dev/null
@@ -1,82 +0,0 @@
-
-
-
-
-
-  Snowflake
-
-
-
-
-SNOWFLAKE
-
-Diagram
-
-Snowflake is a system to defeat internet censorship. People who are
-censored can use Snowflake to access the internet. Their connection goes
-through Snowflake proxies, which are run by volunteers. For more detailed
-information about how Snowflake works, see our
-documentation wiki.
-
-Browser
-
-If your internet access is censored, you should download
-Tor Browser.
-
-Tor Browser screenshot
-
-Extension
-
-If your internet access is not censored, you should
-consider installing the Snowflake extension to help users in censored
-networks. There is no need to worry about which websites people are
-accessing through your proxy. Their visible browsing IP address will
-match their Tor exit node, not yours.
-
-Install in Firefox
-Install in Chrome
-
-REPORTING BUGS
-
-If you encounter problems with Snowflake as a client or a proxy,
-please consider filing a bug. To do so, you will have to:
-
-  1. Either create an account or log in
-     using the shared cypherpunks account with password writecode.
-  2. File a ticket using our bug tracker.
-
-Please try to be as descriptive as possible with your ticket and if
-possible include log messages that will help us reproduce the bug.
-Consider adding keywords snowflake-webextension or snowflake-client
-to let us know which part of the Snowflake system is experiencing
-problems.
-
-
-
diff --git a/proxy/static/koch.jpg b/proxy/static/koch.jpg
deleted file mode 100644
index 1a3c1e3..0000000
Binary files a/proxy/static/koch.jpg and /dev/null differ
diff --git a/proxy/static/screenshot.png b/proxy/static/screenshot.png
deleted file mode 100644
index 58c0540..0000000
Binary files a/proxy/static/screenshot.png and /dev/null differ
diff --git a/proxy/static/snowflake.html b/proxy/static/snowflake.html
deleted file mode 100644
index 84790ed..0000000
--- a/proxy/static/snowflake.html
+++ /dev/null
@@ -1,60 +0,0 @@
-
-
-
- Snowflake
-
-
-
-
-
-
- Timeout...
-
-
-
-
diff --git a/proxy/static/tor-logo@2x.png b/proxy/static/tor-logo@2x.png
deleted file mode 100644
index 5a459de..0000000
Binary files a/proxy/static/tor-logo@2x.png and /dev/null differ
diff --git a/proxy/ui.js b/proxy/ui.js
deleted file mode 100644
index 54e0897..0000000
--- a/proxy/ui.js
+++ /dev/null
@@ -1,178 +0,0 @@
-/* global chrome, log, update */
-
-/*
-All of Snowflake's DOM manipulation and inputs.
-*/
-
-class UI {
-
-  setStatus() {}
-
-  setActive(connected) {
-    return this.active = connected;
-  }
-
-  log() {}
-
-}
-
-UI.prototype.active = false;
-
-UI.prototype.enabled = true;
-
-
-class BadgeUI extends UI {
-
-  constructor() {
-    super();
-    this.$badge = document.getElementById('badge');
-  }
-
-  setActive(connected) {
-    super.setActive(connected);
-    return this.$badge.className = connected ? 'active' : '';
-  }
-
-}
-
-BadgeUI.prototype.$badge = null;
-
-
-class DebugUI extends UI {
-
-  constructor() {
-    super();
-    // Setup other DOM handlers if it's debug mode.
-    this.$status = document.getElementById('status');
-    this.$msglog = document.getElementById('msglog');
-    this.$msglog.value = '';
-  }
-
-  // Status bar
-  setStatus(msg) {
-    var txt;
-    txt = document.createTextNode('Status: ' + msg);
-    while (this.$status.firstChild) {
-      this.$status.removeChild(this.$status.firstChild);
-    }
-    return this.$status.appendChild(txt);
-  }
-
-  setActive(connected) {
-    super.setActive(connected);
-    return this.$msglog.className = connected ? 'active' : '';
-  }
-
-  log(msg) {
-    // Scroll to latest
-    this.$msglog.value += msg + '\n';
-    return this.$msglog.scrollTop = this.$msglog.scrollHeight;
-  }
-
-}
-
-// DOM elements references.
-DebugUI.prototype.$msglog = null;
-
-DebugUI.prototype.$status = null;
-
-
-class WebExtUI extends UI {
-
-  constructor() {
-    super();
-    this.onConnect = this.onConnect.bind(this);
-    this.onMessage = this.onMessage.bind(this);
-    this.onDisconnect = this.onDisconnect.bind(this);
-    this.initStats();
-    chrome.runtime.onConnect.addListener(this.onConnect);
-  }
-
-  initStats() {
-    this.stats = [0];
-    return setInterval((() => {
-      this.stats.unshift(0);
-      this.stats.splice(24);
-      return this.postActive();
-    }), 60 * 60 * 1000);
-  }
-
-  initToggle() {
-    chrome.storage.local.get("snowflake-enabled", (result) => {
-      if (result['snowflake-enabled'] !== void 0) {
-        this.enabled = result['snowflake-enabled'];
-      } else {
-        log("Toggle state not yet saved");
-      }
-      this.setEnabled(this.enabled);
-    });
-  }
-
-  postActive() {
-    var ref;
-    return (ref = this.port) != null ? ref.postMessage({
-      active: this.active,
-      total: this.stats.reduce((function(t, c) {
-        return t + c;
-      }), 0),
-      enabled: this.enabled
-    }) : void 0;
-  }
-
-  onConnect(port) {
-    this.port = port;
-    port.onDisconnect.addListener(this.onDisconnect);
-    port.onMessage.addListener(this.onMessage);
-    return this.postActive();
-  }
-
-  onMessage(m) {
-    this.enabled = m.enabled;
-    this.setEnabled(this.enabled);
-    this.postActive();
-    chrome.storage.local.set({
-      "snowflake-enabled": this.enabled
-    }, function() {
-      log("Stored toggle state");
-    });
-  }
-
-  onDisconnect() {
-    this.port = null;
-  }
-
-  setActive(connected) {
-    super.setActive(connected);
-    if (connected) {
-      this.stats[0] += 1;
-    }
-    this.postActive();
-    if (this.active) {
-      return chrome.browserAction.setIcon({
-        path: {
-          32: "icons/status-running.png"
-        }
-      });
-    } else {
-      return chrome.browserAction.setIcon({
-        path: {
-          32: "icons/status-on.png"
-        }
-      });
-    }
-  }
-
-  setEnabled(enabled) {
-    update();
-    return chrome.browserAction.setIcon({
-      path: {
-        32: "icons/status-" + (enabled ? "on" : "off") + ".png"
-      }
-    });
-  }
-
-}
-
-WebExtUI.prototype.port = null;
-
-WebExtUI.prototype.stats = null;
diff --git a/proxy/util.js b/proxy/util.js
deleted file mode 100644
index 387f0a1..0000000
--- a/proxy/util.js
+++ /dev/null
@@ -1,243 +0,0 @@
-/* global log */
-/* exported Params, DummyRateLimit */
-
-/*
-A JavaScript WebRTC snowflake proxy
-
-Contains helpers for parsing query strings and other utilities.
-*/
-
-class Util {
-
-  static mightBeTBB() {
-    return Util.TBB_UAS.indexOf(window.navigator.userAgent) > -1 && (window.navigator.mimeTypes && window.navigator.mimeTypes.length === 0);
-  }
-
-  static genSnowflakeID() {
-    return Math.random().toString(36).substring(2);
-  }
-
-  static snowflakeIsDisabled(cookieName) {
-    var cookies;
-    cookies = Parse.cookie(document.cookie);
-    // Do nothing if snowflake has not been opted in by user.
-    if (cookies[cookieName] !== '1') {
-      log('Not opted-in. Please click the badge to change options.');
-      return true;
-    }
-    // Also do nothing if running in Tor Browser.
-    if (Util.mightBeTBB()) {
-      log('Will not run within Tor Browser.');
-      return true;
-    }
-    return false;
-  }
-
-  static featureDetect() {
-    return typeof PeerConnection === 'function';
-  }
-
-}
-
-// It would not be effective for Tor Browser users to run the proxy.
-// Do we seem to be running in Tor Browser? Check the user-agent string and for
-// no listing of supported MIME types.
-Util.TBB_UAS = [
-  'Mozilla/5.0 (Windows NT 6.1; rv:10.0) Gecko/20100101 Firefox/10.0',
-  'Mozilla/5.0 (Windows NT 6.1; rv:17.0) Gecko/20100101 Firefox/17.0',
-  'Mozilla/5.0 (Windows NT 6.1; rv:24.0) Gecko/20100101 Firefox/24.0',
-  'Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0'
-];
-
-
-class Parse {
-
-  // Parse a cookie data string (usually document.cookie). The return type is an
-  // object mapping cookies names to values. Returns null on error.
-  // http://www.w3.org/TR/DOM-Level-2-HTML/html.html#ID-8747038
-  static cookie(cookies) {
-    var i, j, len, name, result, string, strings, value;
-    result = {};
-    strings = [];
-    if (cookies) {
-      strings = cookies.split(';');
-    }
-    for (i = 0, len = strings.length; i < len; i++) {
-      string = strings[i];
-      j = string.indexOf('=');
-      if (-1 === j) {
-        return null;
-      }
-      name = decodeURIComponent(string.substr(0, j).trim());
-      value = decodeURIComponent(string.substr(j + 1).trim());
-      if (!(name in result)) {
-        result[name] = value;
-      }
-    }
-    return result;
-  }
-
-  // Parse an address in the form 'host:port'. Returns an Object with keys 'host'
-  // (String) and 'port' (int). Returns null on error.
-  static address(spec) {
-    var host, m, port;
-    m = null;
-    if (!m) {
-      // IPv6 syntax.
-      m = spec.match(/^\[([0-9a-fA-F:.]+)\]:([0-9]+)$/);
-    }
-    if (!m) {
-      // IPv4 syntax.
-      m = spec.match(/^([0-9.]+):([0-9]+)$/);
-    }
-    if (!m) {
-      // TODO: Domain match
-      return null;
-    }
-    host = m[1];
-    port = parseInt(m[2], 10);
-    if (isNaN(port) || port < 0 || port > 65535) {
-      return null;
-    }
-    return {
-      host: host,
-      port: port
-    };
-  }
-
-  // Parse a count of bytes. A suffix of 'k', 'm', or 'g' (or uppercase)
-  // does what you would think. Returns null on error.
-  static byteCount(spec) {
-    let matches = spec.match(/^(\d+(?:\.\d*)?)(\w*)$/);
-    if (matches === null) {
-      return null;
-    }
-    let count = Number(matches[1]);
-    if (isNaN(count)) {
-      return null;
-    }
-    const UNITS = new Map([
-      ['', 1],
-      ['k', 1024],
-      ['m', 1024*1024],
-      ['g', 1024*1024*1024],
-    ]);
-    let unit = matches[2].toLowerCase();
-    if (!UNITS.has(unit)) {
-      return null;
-    }
-    let multiplier = UNITS.get(unit);
-    return count * multiplier;
-  }
-
-  // Parse a connection-address out of the "c=" Connection Data field of a
-  // session description. Return undefined if none is found.
-  // https://tools.ietf.org/html/rfc4566#section-5.7
-  static ipFromSDP(sdp) {
-    var i, len, m, pattern, ref;
-    ref = [/^c=IN IP4 ([\d.]+)(?:(?:\/\d+)?\/\d+)?(:? |$)/m, /^c=IN IP6 ([0-9A-Fa-f:.]+)(?:\/\d+)?(:? |$)/m];
-    for (i = 0, len = ref.length; i < len; i++) {
-      pattern = ref[i];
-      m = pattern.exec(sdp);
-      if (m != null) {
-        return m[1];
-      }
-    }
-  }
-
-}
-
-
-class Params {
-
-  static getBool(query, param, defaultValue) {
-    if (!query.has(param)) {
-      return defaultValue;
-    }
-    var val;
-    val = query.get(param);
-    if ('true' === val || '1' === val || '' === val) {
-      return true;
-    }
-    if ('false' === val || '0' === val) {
-      return false;
-    }
-    return null;
-  }
-
-  // Get an object value and parse it as a byte count. Example byte counts are
-  // '100' and '1.3m'. Returns |defaultValue| if param is not a key. Return null
-  // on a parsing error.
-  static getByteCount(query, param, defaultValue) {
-    if (!query.has(param)) {
-      return defaultValue;
-    }
-    return Parse.byteCount(query.get(param));
-  }
-
-}
-
-
-class BucketRateLimit {
-
-  constructor(capacity, time) {
-    this.capacity = capacity;
-    this.time = time;
-  }
-
-  age() {
-    var delta, now;
-    now = new Date();
-    delta = (now - this.lastUpdate) / 1000.0;
-    this.lastUpdate = now;
-    this.amount -= delta * this.capacity / this.time;
-    if (this.amount < 0.0) {
-      return this.amount = 0.0;
-    }
-  }
-
-  update(n) {
-    this.age();
-    this.amount += n;
-    return this.amount <= this.capacity;
-  }
-
-  // How many seconds in the future will the limit expire?
- when() { - this.age(); - return (this.amount - this.capacity) / (this.capacity / this.time); - } - - isLimited() { - this.age(); - return this.amount > this.capacity; - } - -} - -BucketRateLimit.prototype.amount = 0.0; - -BucketRateLimit.prototype.lastUpdate = new Date(); - - -// A rate limiter that never limits. -class DummyRateLimit { - - constructor(capacity, time) { - this.capacity = capacity; - this.time = time; - } - - update() { - return true; - } - - when() { - return 0.0; - } - - isLimited() { - return false; - } - -} diff --git a/proxy/webext/README.md b/proxy/webext/README.md deleted file mode 100644 index cd53ff1..0000000 --- a/proxy/webext/README.md +++ /dev/null @@ -1,11 +0,0 @@ -Build it, - -``` -cd .. -npm install -npm run webext -``` - -and then load this directory as an unpacked extension. - * https://developer.mozilla.org/en-US/docs/Tools/about:debugging#Loading_a_temporary_extension - * https://developer.chrome.com/extensions/getstarted#manifest diff --git a/proxy/webext/icons/arrowhead-right-12.svg b/proxy/webext/icons/arrowhead-right-12.svg deleted file mode 100644 index 54afc2e..0000000 --- a/proxy/webext/icons/arrowhead-right-12.svg +++ /dev/null @@ -1,4 +0,0 @@ - - \ No newline at end of file diff --git a/proxy/webext/icons/status-off.png b/proxy/webext/icons/status-off.png deleted file mode 100644 index dd4065e..0000000 Binary files a/proxy/webext/icons/status-off.png and /dev/null differ diff --git a/proxy/webext/icons/status-off.svg b/proxy/webext/icons/status-off.svg deleted file mode 100644 index f6605ea..0000000 --- a/proxy/webext/icons/status-off.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - status-off - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/proxy/webext/icons/status-off@2x.png b/proxy/webext/icons/status-off@2x.png deleted file mode 100644 index 902d1b5..0000000 Binary files a/proxy/webext/icons/status-off@2x.png and /dev/null differ diff --git a/proxy/webext/icons/status-off@3x.png b/proxy/webext/icons/status-off@3x.png deleted file mode 100644 index 71a6dc8..0000000 Binary files a/proxy/webext/icons/status-off@3x.png and /dev/null differ diff --git a/proxy/webext/icons/status-on.png b/proxy/webext/icons/status-on.png deleted file mode 100644 index ee1856b..0000000 Binary files a/proxy/webext/icons/status-on.png and /dev/null differ diff --git a/proxy/webext/icons/status-on.svg b/proxy/webext/icons/status-on.svg deleted file mode 100644 index 6c7df00..0000000 --- a/proxy/webext/icons/status-on.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - status-on - Created with Sketch. - - - - - - \ No newline at end of file diff --git a/proxy/webext/icons/status-on@2x.png b/proxy/webext/icons/status-on@2x.png deleted file mode 100644 index 619feac..0000000 Binary files a/proxy/webext/icons/status-on@2x.png and /dev/null differ diff --git a/proxy/webext/icons/status-on@3x.png b/proxy/webext/icons/status-on@3x.png deleted file mode 100644 index cb86fe0..0000000 Binary files a/proxy/webext/icons/status-on@3x.png and /dev/null differ diff --git a/proxy/webext/icons/status-running.png b/proxy/webext/icons/status-running.png deleted file mode 100644 index 96ff7b2..0000000 Binary files a/proxy/webext/icons/status-running.png and /dev/null differ diff --git a/proxy/webext/icons/status-running.svg b/proxy/webext/icons/status-running.svg deleted file mode 100644 index 4cc9602..0000000 --- a/proxy/webext/icons/status-running.svg +++ /dev/null @@ -1,11 +0,0 @@ - - - - status-on - Created with Sketch. 
- - - - - - diff --git a/proxy/webext/manifest.json b/proxy/webext/manifest.json deleted file mode 100644 index fa96a99..0000000 --- a/proxy/webext/manifest.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "manifest_version": 2, - "name": "Snowflake", - "version": "0.0.7", - "description": "Snowflake is a WebRTC pluggable transport for Tor.", - "background": { - "scripts": ["snowflake.js"], - "persistent": true - }, - "browser_action": { - "default_icon": { - "32": "icons/status-on.png" - }, - "default_title": "Snowflake", - "default_popup": "popup.html" - }, - "permissions": ["storage"] -} diff --git a/proxy/webext/popup.css b/proxy/webext/popup.css deleted file mode 100644 index 856e855..0000000 --- a/proxy/webext/popup.css +++ /dev/null @@ -1,108 +0,0 @@ -body { - margin: 10px; - font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen-Sans, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; - width: 300px; - font-size:12px; -} - -#active { - margin: 20px 0; - text-align: center; -} - -.b { - border-top: 1px solid gainsboro; - padding: 10px; - position: relative; -} - -.b a { - color: black; - display: inline-block; - text-decoration: none; -} - -.learn:before { - content : " "; - display: block; - position: absolute; - top: 12px; - background-image: url(../icons/arrowhead-right-12.svg); - width: 12px; - height: 12px; - opacity : 0.6; - z-index: 9999; - right: 0px; - margin-right: 10px; - -} - -/* Snowflake Status */ - -.transfering { - -webkit-animation:spin 8s linear infinite; - -moz-animation:spin 8s linear infinite; - animation:spin 8s linear infinite; - - fill: BlueViolet; -} -@-moz-keyframes spin { 100% { -moz-transform: rotate(360deg); } } -@-webkit-keyframes spin { 100% { -webkit-transform: rotate(360deg); } } -@keyframes spin { 100% { -webkit-transform: rotate(360deg); transform:rotate(360deg); } } - - -/* Toggle */ - -.switch { - position: relative; - display: inline-block; - width: 30px; - height: 17px; - float: right; -} - -.switch input { - opacity: 0; - width: 0; - height: 0; -} - -.slider { - position: absolute; - cursor: pointer; - top: 0; - left: 0; - right: 0; - bottom: 0; - background-color: #ccc; - -webkit-transition: .4s; - transition: .4s; - border-radius: 17px; -} - -.slider:before { - position: absolute; - content: ""; - height: 13px; - width: 13px; - left: 2px; - bottom: 2px; - background-color: white; - -webkit-transition: .4s; - transition: .4s; - border-radius: 50%; -} - -input:checked + .slider { - background-color: BlueViolet; -} - -input:focus + .slider { - box-shadow: 0 0 1px BlueViolet; -} - -input:checked + .slider:before { - -webkit-transform: translateX(13px); - -ms-transform: translateX(13px); - transform: translateX(13px); -} diff --git a/proxy/webext/popup.html b/proxy/webext/popup.html deleted file mode 100644 index e3ba2f5..0000000 --- a/proxy/webext/popup.html +++ /dev/null @@ -1,25 +0,0 @@ - - - - - - - - -
- - - diff --git a/proxy/webext/popup.js b/proxy/webext/popup.js deleted file mode 100644 index d8d6464..0000000 --- a/proxy/webext/popup.js +++ /dev/null @@ -1,65 +0,0 @@ -/* global chrome */ - -const port = chrome.runtime.connect({ - name: "popup" -}); - -class Popup { - constructor() { - this.div = document.getElementById('active'); - this.ps = this.div.querySelectorAll('p'); - this.img = this.div.querySelector('img'); - } - setImgSrc(src) { - this.img.src = `icons/status-${src}.png`; - } - setStatusText(txt) { - this.ps[0].innerText = txt; - } - setStatusDesc(desc, color) { - this.ps[1].innerText = desc; - this.ps[1].style.color = color || 'black'; - } - hideButton() { - document.querySelector('.button').style.display = 'none'; - } - setChecked(checked) { - document.getElementById('enabled').checked = checked; - } - setToggleText(txt) { - document.getElementById('toggle').innerText = txt; - } -} - -port.onMessage.addListener((m) => { - const { active, enabled, total, missingFeature } = m; - const popup = new Popup(); - - if (missingFeature) { - popup.setImgSrc('off'); - popup.setStatusText("Snowflake is off"); - popup.setStatusDesc("WebRTC feature is not detected.", 'firebrick'); - popup.hideButton(); - return; - } - - const clients = active ? 1 : 0; - - if (enabled) { - popup.setChecked(true); - popup.setToggleText('Turn Off'); - popup.setStatusText(`${clients} client${(clients !== 1) ? 's' : ''} connected.`); - popup.setStatusDesc(`Your snowflake has helped ${total} user${(total !== 1) ? 's' : ''} circumvent censorship in the last 24 hours.`); - } else { - popup.setChecked(false); - popup.setToggleText('Turn On'); - popup.setStatusText("Snowflake is off"); - popup.setStatusDesc(""); - } - - popup.setImgSrc(active ? "running" : enabled ? "on" : "off"); -}); - -document.addEventListener('change', (event) => { - port.postMessage({ enabled: event.target.checked }); -}) diff --git a/proxy/websocket.js b/proxy/websocket.js deleted file mode 100644 index 410fdc0..0000000 --- a/proxy/websocket.js +++ /dev/null @@ -1,64 +0,0 @@ -/* -Only websocket-specific stuff. -*/ - -class WS { - - // Build an escaped URL string from unescaped components. Only scheme and host - // are required. See RFC 3986, section 3. - static buildUrl(scheme, host, port, path, params) { - var parts; - parts = []; - parts.push(encodeURIComponent(scheme)); - parts.push('://'); - // If it contains a colon but no square brackets, treat it as IPv6. - if (host.match(/:/) && !host.match(/[[\]]/)) { - parts.push('['); - parts.push(host); - parts.push(']'); - } else { - parts.push(encodeURIComponent(host)); - } - if (void 0 !== port && this.DEFAULT_PORTS[scheme] !== port) { - parts.push(':'); - parts.push(encodeURIComponent(port.toString())); - } - if (void 0 !== path && '' !== path) { - if (!path.match(/^\//)) { - path = '/' + path; - } - path = path.replace(/[^/]+/, function(m) { - return encodeURIComponent(m); - }); - parts.push(path); - } - if (void 0 !== params) { - parts.push('?'); - parts.push(new URLSearchParams(params).toString()); - } - return parts.join(''); - } - - static makeWebsocket(addr, params) { - var url, ws, wsProtocol; - wsProtocol = this.WSS_ENABLED ? 'wss' : 'ws'; - url = this.buildUrl(wsProtocol, addr.host, addr.port, '/', params); - ws = new WebSocket(url); - /* - 'User agents can use this as a hint for how to handle incoming binary data: - if the attribute is set to 'blob', it is safe to spool it to disk, and if it - is set to 'arraybuffer', it is likely more efficient to keep the data in - memory.' 
- */ - ws.binaryType = 'arraybuffer'; - return ws; - } - -} - -WS.WSS_ENABLED = true; - -WS.DEFAULT_PORTS = { - http: 80, - https: 443 -}; diff --git a/renovate.json b/renovate.json new file mode 100644 index 0000000..c78f869 --- /dev/null +++ b/renovate.json @@ -0,0 +1,8 @@ +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "constraints": { + "go": "1.21" + }, + "postUpdateOptions": ["gomodTidy", "gomodUpdateImportPaths"], + "osvVulnerabilityAlerts": true +} diff --git a/server-webrtc/README.md b/server-webrtc/README.md deleted file mode 100644 index 53cad14..0000000 --- a/server-webrtc/README.md +++ /dev/null @@ -1,26 +0,0 @@ -Ordinarily, the WebRTC client plugin speaks with a Broker which helps -match and signal with a browser proxy, which ultimately speaks with a default -websocket server. - - -However, this directory contains a WebRTC server plugin which uses an -HTTP server that simulates the interaction that a client would have with -the broker, for direct testing. - -Edit server-webrtc/torrc and add "-http 127.0.0.1:8080" to the end of the -ServerTransportPlugin line: -``` -ServerTransportPlugin snowflake exec ./server-webrtc -http 127.0.0.1:8080 -``` - -``` -cd server-webrtc/ -go build -tor -f torrc -``` - -Edit client/torrc and add "-url http://127.0.0.1:8080" to the end of the -ClientTransportPlugin line: -``` -ClientTransportPlugin snowflake exec ./client -url http://127.0.0.1:8080/ -``` diff --git a/server-webrtc/http.go b/server-webrtc/http.go deleted file mode 100644 index f0ecb88..0000000 --- a/server-webrtc/http.go +++ /dev/null @@ -1,67 +0,0 @@ -// An HTTP-based signaling channel for the WebRTC server. It imitates the -// broker as seen by clients, but it doesn't connect them to an -// intermediate WebRTC proxy, rather connects them directly to this WebRTC -// server. This code should be deleted when we have proxies in place. - -package main - -import ( - "fmt" - "io/ioutil" - "log" - "net/http" - - "github.com/keroserene/go-webrtc" -) - -type httpHandler struct { - config *webrtc.Configuration -} - -func (h *httpHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - switch req.Method { - case "GET": - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - w.WriteHeader(http.StatusOK) - w.Write([]byte(`HTTP signaling channel - -Send a POST request containing an SDP offer. The response will -contain an SDP answer. -`)) - return - case "POST": - break - default: - http.Error(w, "Bad request.", http.StatusBadRequest) - return - } - - // POST handling begins here. 
- body, err := ioutil.ReadAll(http.MaxBytesReader(w, req.Body, 100000)) - if err != nil { - http.Error(w, "Bad request.", http.StatusBadRequest) - return - } - offer := webrtc.DeserializeSessionDescription(string(body)) - if offer == nil { - http.Error(w, "Bad request.", http.StatusBadRequest) - return - } - - pc, err := makePeerConnectionFromOffer(offer, h.config) - if err != nil { - http.Error(w, fmt.Sprintf("Cannot create offer: %s", err), http.StatusInternalServerError) - return - } - - log.Println("answering HTTP POST") - - w.WriteHeader(http.StatusOK) - w.Write([]byte(pc.LocalDescription().Serialize())) -} - -func receiveSignalsHTTP(addr string, config *webrtc.Configuration) error { - http.Handle("/", &httpHandler{config}) - log.Printf("listening HTTP on %s", addr) - return http.ListenAndServe(addr, nil) -} diff --git a/server-webrtc/snowflake.go b/server-webrtc/snowflake.go deleted file mode 100644 index 5923d6b..0000000 --- a/server-webrtc/snowflake.go +++ /dev/null @@ -1,258 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "os" - "os/signal" - "sync" - "syscall" - "time" - - "git.torproject.org/pluggable-transports/goptlib.git" - "github.com/keroserene/go-webrtc" -) - -var ptMethodName = "snowflake" -var ptInfo pt.ServerInfo -var logFile *os.File - -// When a datachannel handler starts, +1 is written to this channel; -// when it ends, -1 is written. -var handlerChan = make(chan int) - -func copyLoop(a, b net.Conn) { - var wg sync.WaitGroup - wg.Add(2) - go func() { - io.Copy(b, a) - wg.Done() - }() - go func() { - io.Copy(a, b) - wg.Done() - }() - wg.Wait() -} - -type webRTCConn struct { - dc *webrtc.DataChannel - pc *webrtc.PeerConnection - pr *io.PipeReader - - lock sync.Mutex // Synchronization for DataChannel destruction - once sync.Once // Synchronization for PeerConnection destruction -} - -func (c *webRTCConn) Read(b []byte) (int, error) { - return c.pr.Read(b) -} - -func (c *webRTCConn) Write(b []byte) (int, error) { - c.lock.Lock() - defer c.lock.Unlock() - // log.Printf("webrtc Write %d %+q", len(b), string(b)) - log.Printf("Write %d bytes --> WebRTC", len(b)) - if c.dc != nil { - c.dc.Send(b) - } - return len(b), nil -} - -func (c *webRTCConn) Close() (err error) { - c.once.Do(func() { - err = c.pc.Destroy() - }) - return -} - -func (c *webRTCConn) LocalAddr() net.Addr { - return nil -} - -func (c *webRTCConn) RemoteAddr() net.Addr { - return nil -} - -func (c *webRTCConn) SetDeadline(t time.Time) error { - return fmt.Errorf("SetDeadline not implemented") -} - -func (c *webRTCConn) SetReadDeadline(t time.Time) error { - return fmt.Errorf("SetReadDeadline not implemented") -} - -func (c *webRTCConn) SetWriteDeadline(t time.Time) error { - return fmt.Errorf("SetWriteDeadline not implemented") -} - -func datachannelHandler(conn *webRTCConn) { - defer conn.Close() - - handlerChan <- 1 - defer func() { - handlerChan <- -1 - }() - - or, err := pt.DialOr(&ptInfo, "", ptMethodName) // TODO: Extended OR - if err != nil { - log.Printf("Failed to connect to ORPort: " + err.Error()) - return - } - defer or.Close() - - copyLoop(conn, or) -} - -// Create a PeerConnection from an SDP offer. Blocks until the gathering of ICE -// candidates is complete and the answer is available in LocalDescription. -// Installs an OnDataChannel callback that creates a webRTCConn and passes it to -// datachannelHandler. 
-func makePeerConnectionFromOffer(sdp *webrtc.SessionDescription, config *webrtc.Configuration) (*webrtc.PeerConnection, error) { - pc, err := webrtc.NewPeerConnection(config) - if err != nil { - return nil, fmt.Errorf("accept: NewPeerConnection: %s", err) - } - pc.OnNegotiationNeeded = func() { - panic("OnNegotiationNeeded") - } - pc.OnDataChannel = func(dc *webrtc.DataChannel) { - log.Println("OnDataChannel") - - pr, pw := io.Pipe() - conn := &webRTCConn{pc: pc, dc: dc, pr: pr} - - dc.OnOpen = func() { - log.Println("OnOpen channel") - } - dc.OnClose = func() { - conn.lock.Lock() - defer conn.lock.Unlock() - log.Println("OnClose channel") - conn.dc = nil - pc.DeleteDataChannel(dc) - pw.Close() - } - dc.OnMessage = func(msg []byte) { - log.Printf("OnMessage <--- %d bytes", len(msg)) - n, err := pw.Write(msg) - if err != nil { - pw.CloseWithError(err) - } - if n != len(msg) { - panic("short write") - } - } - - go datachannelHandler(conn) - } - - err = pc.SetRemoteDescription(sdp) - if err != nil { - pc.Destroy() - return nil, fmt.Errorf("accept: SetRemoteDescription: %s", err) - } - log.Println("sdp offer successfully received.") - - log.Println("Generating answer...") - answer, err := pc.CreateAnswer() - if err != nil { - pc.Destroy() - return nil, err - } - - if answer == nil { - pc.Destroy() - return nil, fmt.Errorf("Failed gathering ICE candidates.") - } - - err = pc.SetLocalDescription(answer) - if err != nil { - pc.Destroy() - return nil, err - } - - return pc, nil -} - -func main() { - var err error - var httpAddr string - var logFilename string - - flag.StringVar(&httpAddr, "http", "", "listen for HTTP signaling") - flag.StringVar(&logFilename, "log", "", "log file to write to") - flag.Parse() - - log.SetFlags(log.LstdFlags | log.LUTC) - if logFilename != "" { - f, err := os.OpenFile(logFilename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) - if err != nil { - log.Fatalf("can't open log file: %s", err) - } - defer logFile.Close() - log.SetOutput(f) - } - - log.Println("starting") - webrtc.SetLoggingVerbosity(1) - - ptInfo, err = pt.ServerSetup(nil) - if err != nil { - log.Fatal(err) - } - - webRTCConfig := webrtc.NewConfiguration(webrtc.OptionIceServer("stun:stun.l.google.com:19302")) - - // Start HTTP-based signaling receiver. - go func() { - err := receiveSignalsHTTP(httpAddr, webRTCConfig) - if err != nil { - log.Printf("receiveSignalsHTTP: %s", err) - } - }() - - for _, bindaddr := range ptInfo.Bindaddrs { - switch bindaddr.MethodName { - case ptMethodName: - bindaddr.Addr.Port = 12345 // lies!!! - pt.Smethod(bindaddr.MethodName, bindaddr.Addr) - default: - pt.SmethodError(bindaddr.MethodName, "no such method") - } - } - pt.SmethodsDone() - - var numHandlers int = 0 - var sig os.Signal - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGTERM) - - if os.Getenv("TOR_PT_EXIT_ON_STDIN_CLOSE") == "1" { - // This environment variable means we should treat EOF on stdin - // just like SIGTERM: https://bugs.torproject.org/15435. 
- go func() { - io.Copy(ioutil.Discard, os.Stdin) - log.Printf("synthesizing SIGTERM because of stdin close") - sigChan <- syscall.SIGTERM - }() - } - - // keep track of handlers and wait for a signal - sig = nil - for sig == nil { - select { - case n := <-handlerChan: - numHandlers += n - case sig = <-sigChan: - } - } - - for numHandlers > 0 { - numHandlers += <-handlerChan - } -} diff --git a/server-webrtc/torrc b/server-webrtc/torrc deleted file mode 100644 index e037c97..0000000 --- a/server-webrtc/torrc +++ /dev/null @@ -1,8 +0,0 @@ -BridgeRelay 1 -ORPort 9001 -ExtORPort auto -SocksPort 0 -ExitPolicy reject *:* -DataDirectory datadir - -ServerTransportPlugin snowflake exec ./server-webrtc diff --git a/server/README.md b/server/README.md index 312a506..2b1fb7a 100644 --- a/server/README.md +++ b/server/README.md @@ -1,3 +1,12 @@ + + +**Table of Contents** + +- [Setup](#setup) +- [TLS](#tls) + + + This is the server transport plugin for Snowflake. The actual transport protocol it uses is [WebSocket](https://tools.ietf.org/html/rfc6455). @@ -59,3 +68,47 @@ without having to run as root: ``` setcap 'cap_net_bind_service=+ep' /usr/local/bin/snowflake-server ``` + + +# Multiple KCP state machines + +The server internally uses a network protocol called KCP +to manage and persist client sessions. +Each KCP scheduler runs on a single thread. +When there are many simultaneous users (thousands), +a single KCP scheduler can be a bottleneck. +The `num-turbotunnel` pluggable transport option +lets you control the number of KCP instances, +which can help with CPU scaling: +https://bugs.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/40200 + +There is currently no way to set this option automatically. +You have to tune it manually. + +``` +ServerTransportOptions snowflake num-turbotunnel=2 +``` + + +# Controlling source addresses + +Use the `orport-srcaddr` pluggable transport option to control what source addresses +are used when connecting to the upstream Tor ExtORPort or ORPort. +The value of the option may be a single IP address (e.g. "127.0.0.2") +or a CIDR range (e.g. "127.0.2.0/24"). If a range is given, +an IP address from the range is randomly chosen for each new connection. + +Use `ServerTransportOptions` in torrc to set the option: +``` +ServerTransportOptions snowflake orport-srcaddr=127.0.2.0/24 +``` + +You can combine it with other options: +``` +ServerTransportOptions snowflake num-turbotunnel=2 orport-srcaddr=127.0.2.0/24 +``` + +Specifying a source address range other than the default 127.0.0.1 +can help with conserving localhost ephemeral ports on servers +that receive a lot of connections: +https://bugs.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/40198 diff --git a/server/dial.go b/server/dial.go new file mode 100644 index 0000000..7e514db --- /dev/null +++ b/server/dial.go @@ -0,0 +1,14 @@ +//go:build !linux +// +build !linux + +package main + +import "syscall" + +// dialerControl does nothing. +// +// On Linux, this function would set the IP_BIND_ADDRESS_NO_PORT socket option +// in preparation for a future bind-before-connect. 
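+//
+// A hypothetical usage sketch (names below are illustrative, not part of
+// this change): the server would install dialerControl as the Control hook
+// of a net.Dialer when binding a specific source address before connecting
+// to the upstream ORPort:
+//
+//	dialer := net.Dialer{
+//		Control:   dialerControl,
+//		LocalAddr: &net.TCPAddr{IP: net.ParseIP("127.0.2.5")},
+//	}
+//	conn, err := dialer.Dial("tcp", orPortAddr)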
+func dialerControl(network, address string, c syscall.RawConn) error { + return nil +} diff --git a/server/dial_linux.go b/server/dial_linux.go new file mode 100644 index 0000000..b5a53c1 --- /dev/null +++ b/server/dial_linux.go @@ -0,0 +1,45 @@ +//go:build linux +// +build linux + +package main + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// dialerControl prepares a syscall.RawConn for a future bind-before-connect by +// setting the IP_BIND_ADDRESS_NO_PORT socket option. +// +// On Linux, setting the IP_BIND_ADDRESS_NO_PORT socket option helps conserve +// ephemeral ports when binding to a specific IP addresses before connecting +// (bind before connect), by not assigning the port number when bind is called, +// but waiting until connect. But problems arise if there are multiple processes +// doing bind-before-connect, and some of them use IP_BIND_ADDRESS_NO_PORT and +// some of them do not. When there is a mix, the ones that do will have their +// ephemeral ports reserved by the ones that do not, leading to EADDRNOTAVAIL +// errors. +// +// tor does bind-before-connect when the OutboundBindAddress option is set in +// torrc. Since version 0.4.7.13 (January 2023), tor sets +// IP_BIND_ADDRESS_NO_PORT unconditionally on platforms that support it, and +// therefore we must do the same, to avoid EADDRNOTAVAIL errors. +// +// # References +// +// https://bugs.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/40201#note_2839472 +// https://forum.torproject.net/t/tor-relays-inet-csk-bind-conflict/5757/10 +// https://blog.cloudflare.com/how-to-stop-running-out-of-ephemeral-ports-and-start-to-love-long-lived-connections/ +// https://blog.cloudflare.com/the-quantum-state-of-a-tcp-port/ +// https://forum.torproject.net/t/stable-release-0-4-5-16-and-0-4-7-13/6216 +func dialerControl(network, address string, c syscall.RawConn) error { + var sockErr error + err := c.Control(func(fd uintptr) { + sockErr = syscall.SetsockoptInt(int(fd), unix.SOL_IP, unix.IP_BIND_ADDRESS_NO_PORT, 1) + }) + if err == nil { + err = sockErr + } + return err +} diff --git a/server/lib/http.go b/server/lib/http.go new file mode 100644 index 0000000..403aeb1 --- /dev/null +++ b/server/lib/http.go @@ -0,0 +1,252 @@ +package snowflake_server + +import ( + "bufio" + "bytes" + "crypto/hmac" + "crypto/rand" + "crypto/sha256" + "encoding/binary" + "fmt" + "io" + "log" + "net" + "net/http" + "sync" + "time" + + "github.com/gorilla/websocket" + + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/encapsulation" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/turbotunnel" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/websocketconn" +) + +const requestTimeout = 10 * time.Second + +// How long to remember outgoing packets for a client, when we don't currently +// have an active WebSocket connection corresponding to that client. Because a +// client session may span multiple WebSocket connections, we keep packets we +// aren't able to send immediately in memory, for a little while but not +// indefinitely. +const clientMapTimeout = 1 * time.Minute + +// How big to make the map of ClientIDs to IP addresses. The map is used in +// turbotunnelMode to store a reasonable IP address for a client session that +// may outlive any single WebSocket connection. 
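+// (98304 = 3 × 2^15. The exact value is a tuning knob rather than a protocol
+// constant; a larger map only costs memory.)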
+const clientIDAddrMapCapacity = 98304 + +// How long to wait for ListenAndServe or ListenAndServeTLS to return an error +// before deciding that it's not going to return. +const listenAndServeErrorTimeout = 100 * time.Millisecond + +var upgrader = websocket.Upgrader{ + CheckOrigin: func(r *http.Request) bool { return true }, +} + +// clientIDAddrMap stores short-term mappings from ClientIDs to IP addresses. +// When we call pt.DialOr, tor wants us to provide a USERADDR string that +// represents the remote IP address of the client (for metrics purposes, etc.). +// This data structure bridges the gap between ServeHTTP, which knows about IP +// addresses, and handleStream, which is what calls pt.DialOr. The common piece +// of information linking both ends of the chain is the ClientID, which is +// attached to the WebSocket connection and every session. +var clientIDAddrMap = newClientIDMap(clientIDAddrMapCapacity) + +type httpHandler struct { + // pconns is the adapter layer between stream-oriented WebSocket + // connections and the packet-oriented KCP layer. There are multiple of + // these, corresponding to the multiple kcp.ServeConn in + // Transport.Listen. Clients are assigned to a particular instance by a + // hash of ClientID, indexed by a hash of the ClientID, in order to + // distribute KCP processing load across CPU cores. + pconns []*turbotunnel.QueuePacketConn + + // clientIDLookupKey is a secret key used to tweak the hash-based + // assignment of ClientID to pconn, in order to avoid manipulation of + // hash assignments. + clientIDLookupKey []byte +} + +// newHTTPHandler creates a new http.Handler that exchanges encapsulated packets +// over incoming WebSocket connections. +func newHTTPHandler(localAddr net.Addr, numInstances int, mtu int) *httpHandler { + pconns := make([]*turbotunnel.QueuePacketConn, 0, numInstances) + for i := 0; i < numInstances; i++ { + pconns = append(pconns, turbotunnel.NewQueuePacketConn(localAddr, clientMapTimeout, mtu)) + } + + clientIDLookupKey := make([]byte, 16) + _, err := rand.Read(clientIDLookupKey) + if err != nil { + panic(err) + } + + return &httpHandler{ + pconns: pconns, + clientIDLookupKey: clientIDLookupKey, + } +} + +// lookupPacketConn returns the element of pconns that corresponds to client ID, +// according to the hash-based mapping. +func (handler *httpHandler) lookupPacketConn(clientID turbotunnel.ClientID) *turbotunnel.QueuePacketConn { + s := hmac.New(sha256.New, handler.clientIDLookupKey).Sum(clientID[:]) + return handler.pconns[binary.LittleEndian.Uint64(s)%uint64(len(handler.pconns))] +} + +func (handler *httpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ws, err := upgrader.Upgrade(w, r, nil) + if err != nil { + log.Println(err) + return + } + + conn := websocketconn.New(ws) + defer conn.Close() + + // Pass the address of client as the remote address of incoming connection + clientIPParam := r.URL.Query().Get("client_ip") + addr := clientAddr(clientIPParam) + + var token [len(turbotunnel.Token)]byte + _, err = io.ReadFull(conn, token[:]) + if err != nil { + // Don't bother logging EOF: that happens with an unused + // connection, which clients make frequently as they maintain a + // pool of proxies. 
+ if err != io.EOF { + log.Printf("reading token: %v", err) + } + return + } + + switch { + case bytes.Equal(token[:], turbotunnel.Token[:]): + err = handler.turbotunnelMode(conn, addr) + default: + // We didn't find a matching token, which means that we are + // dealing with a client that doesn't know about such things. + // Close the conn as we no longer support the old + // one-session-per-WebSocket mode. + log.Println("Received unsupported oneshot connection") + return + } + if err != nil { + log.Println(err) + return + } +} + +// turbotunnelMode handles clients that sent turbotunnel.Token at the start of +// their stream. These clients expect to send and receive encapsulated packets, +// with a long-lived session identified by ClientID. +func (handler *httpHandler) turbotunnelMode(conn net.Conn, addr net.Addr) error { + // Read the ClientID prefix. Every packet encapsulated in this WebSocket + // connection pertains to the same ClientID. + var clientID turbotunnel.ClientID + _, err := io.ReadFull(conn, clientID[:]) + if err != nil { + return fmt.Errorf("reading ClientID: %w", err) + } + + // Store a short-term mapping from the ClientID to the client IP + // address attached to this WebSocket connection. tor will want us to + // provide a client IP address when we call pt.DialOr. But a KCP session + // does not necessarily correspond to any single IP address--it's + // composed of packets that are carried in possibly multiple WebSocket + // streams. We apply the heuristic that the IP address of the most + // recent WebSocket connection that has had to do with a session, at the + // time the session is established, is the IP address that should be + // credited for the entire KCP session. + clientIDAddrMap.Set(clientID, addr) + + pconn := handler.lookupPacketConn(clientID) + + var wg sync.WaitGroup + wg.Add(2) + done := make(chan struct{}) + + // The remainder of the WebSocket stream consists of encapsulated + // packets. We read them one by one and feed them into the + // QueuePacketConn on which kcp.ServeConn was set up, which eventually + // leads to KCP-level sessions in the acceptSessions function. + go func() { + defer wg.Done() + defer close(done) // Signal the write loop to finish + var p [2048]byte + for { + n, err := encapsulation.ReadData(conn, p[:]) + if err == io.ErrShortBuffer { + err = nil + } + if err != nil { + return + } + pconn.QueueIncoming(p[:n], clientID) + } + }() + + // At the same time, grab packets addressed to this ClientID and + // encapsulate them into the downstream. + go func() { + defer wg.Done() + defer conn.Close() // Signal the read loop to finish + + // Buffer encapsulation.WriteData operations to keep length + // prefixes in the same send as the data that follows. + bw := bufio.NewWriter(conn) + for { + select { + case <-done: + return + case p, ok := <-pconn.OutgoingQueue(clientID): + if !ok { + return + } + _, err := encapsulation.WriteData(bw, p) + pconn.Restore(p) + if err == nil { + err = bw.Flush() + } + if err != nil { + return + } + } + } + }() + + wg.Wait() + + return nil +} + +// ClientMapAddr is a string that represents a connecting client. 
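+// It implements net.Addr (via the Network and String methods below), so it
+// can stand in for a real remote address even when the proxy supplied no
+// usable client_ip.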
+type ClientMapAddr string + +func (addr ClientMapAddr) Network() string { + return "snowflake" +} + +func (addr ClientMapAddr) String() string { + return string(addr) +} + +// Return a client address +func clientAddr(clientIPParam string) net.Addr { + if clientIPParam == "" { + return ClientMapAddr("") + } + // Check if client addr is a valid IP + clientIP := net.ParseIP(clientIPParam) + if clientIP == nil { + return ClientMapAddr("") + } + // Check if client addr is 0.0.0.0 or [::]. Some proxies erroneously + // report an address of 0.0.0.0: https://bugs.torproject.org/33157. + if clientIP.IsUnspecified() { + return ClientMapAddr("") + } + // Add a stub port number. USERADDR requires a port number. + return ClientMapAddr((&net.TCPAddr{IP: clientIP, Port: 1, Zone: ""}).String()) +} diff --git a/server/lib/server_test.go b/server/lib/server_test.go new file mode 100644 index 0000000..8e0deb4 --- /dev/null +++ b/server/lib/server_test.go @@ -0,0 +1,55 @@ +package snowflake_server + +import ( + "net" + "strconv" + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func TestClientAddr(t *testing.T) { + Convey("Testing clientAddr", t, func() { + // good tests + for _, test := range []struct { + input string + expected net.IP + }{ + {"1.2.3.4", net.ParseIP("1.2.3.4")}, + {"1:2::3:4", net.ParseIP("1:2::3:4")}, + } { + useraddr := clientAddr(test.input).String() + host, port, err := net.SplitHostPort(useraddr) + if err != nil { + t.Errorf("clientAddr(%q) → SplitHostPort error %v", test.input, err) + continue + } + if !test.expected.Equal(net.ParseIP(host)) { + t.Errorf("clientAddr(%q) → host %q, not %v", test.input, host, test.expected) + } + portNo, err := strconv.Atoi(port) + if err != nil { + t.Errorf("clientAddr(%q) → port %q", test.input, port) + continue + } + if portNo == 0 { + t.Errorf("clientAddr(%q) → port %d", test.input, portNo) + } + } + + // bad tests + for _, input := range []string{ + "", + "abc", + "1.2.3.4.5", + "[12::34]", + "0.0.0.0", + "[::]", + } { + useraddr := clientAddr(input).String() + if useraddr != "" { + t.Errorf("clientAddr(%q) → %q, not %q", input, useraddr, "") + } + } + }) +} diff --git a/server/lib/snowflake.go b/server/lib/snowflake.go new file mode 100644 index 0000000..bcf9dd6 --- /dev/null +++ b/server/lib/snowflake.go @@ -0,0 +1,319 @@ +/* +Package snowflake_server implements the functionality necessary to accept Snowflake +connections from Snowflake clients. + +Included in the package is a Transport type that implements the Pluggable Transports v2.1 Go API +specification. To start a TLS Snowflake server using the golang.org/x/crypto/acme/autocert +library, configure a certificate manager for the server's domain name and then create a new +Transport as follows: + + // The snowflake server runs a websocket server. To run this securely, you will + // need a valid certificate. 
+ certManager := &autocert.Manager{ + Prompt: autocert.AcceptTOS, + HostPolicy: autocert.HostWhitelist("snowflake.yourdomain.com"), + Email: "you@yourdomain.com", + } + + transport := snowflake_server.NewSnowflakeServer(certManager.GetCertificate) + +The Listen function starts a new listener, and Accept will return incoming Snowflake connections: + + ln, err := transport.Listen(addr) + if err != nil { + // handle error + } + for { + conn, err := ln.Accept() + if err != nil { + // handle error + } + // handle conn + } +*/ +package snowflake_server + +import ( + "crypto/tls" + "errors" + "fmt" + "io" + "log" + "net" + "net/http" + "sync" + "time" + + "github.com/xtaci/kcp-go/v5" + "github.com/xtaci/smux" + "golang.org/x/net/http2" + + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/turbotunnel" +) + +const ( + // WindowSize is the number of packets in the send and receive window of a KCP connection. + WindowSize = 65535 + // StreamSize controls the maximum amount of in flight data between a client and server. + StreamSize = 1048576 // 1MB +) + +// Transport is a structure with methods that conform to the Go PT v2.1 API +// https://github.com/Pluggable-Transports/Pluggable-Transports-spec/blob/master/releases/PTSpecV2.1/Pluggable%20Transport%20Specification%20v2.1%20-%20Go%20Transport%20API.pdf +type Transport struct { + getCertificate func(*tls.ClientHelloInfo) (*tls.Certificate, error) +} + +// NewSnowflakeServer returns a new server-side Transport for Snowflake. +func NewSnowflakeServer(getCertificate func(*tls.ClientHelloInfo) (*tls.Certificate, error)) *Transport { + return &Transport{getCertificate: getCertificate} +} + +// Listen starts a listener on addr that will accept both turbotunnel +// and legacy Snowflake connections. +func (t *Transport) Listen(addr net.Addr, numKCPInstances int) (*SnowflakeListener, error) { + listener := &SnowflakeListener{ + addr: addr, + queue: make(chan net.Conn, 65534), + closed: make(chan struct{}), + ln: make([]*kcp.Listener, 0, numKCPInstances), + } + + // kcp-go doesn't provide an accessor for the current MTU setting (and + // anyway we could not create a kcp.Listener without creating a + // net.PacketConn for it first), so assume the default kcp.IKCP_MTU_DEF + // (1400 bytes) and don't increase it elsewhere. + handler := newHTTPHandler(addr, numKCPInstances, kcp.IKCP_MTU_DEF) + server := &http.Server{ + Addr: addr.String(), + Handler: handler, + ReadTimeout: requestTimeout, + } + // We need to override server.TLSConfig.GetCertificate--but first + // server.TLSConfig needs to be non-nil. If we just create our own new + // &tls.Config, it will lack the default settings that the net/http + // package sets up for things like HTTP/2. Therefore we first call + // http2.ConfigureServer for its side effect of initializing + // server.TLSConfig properly. An alternative would be to make a dummy + // net.Listener, call Serve on it, and let it return. + // https://github.com/golang/go/issues/16588#issuecomment-237386446 + err := http2.ConfigureServer(server, nil) + if err != nil { + return nil, err + } + server.TLSConfig.GetCertificate = t.getCertificate + + // Another unfortunate effect of the inseparable net/http ListenAndServe + // is that we can't check for Listen errors like "permission denied" and + // "address already in use" without potentially entering the infinite + // loop of Serve. 
The hack we apply here is to wait a short time, + // listenAndServeErrorTimeout, to see if an error is returned (because + // it's better if the error message goes to the tor log through + // SMETHOD-ERROR than if it only goes to the snowflake log). + errChan := make(chan error) + go func() { + if t.getCertificate == nil { + // TLS is disabled + log.Printf("listening with plain HTTP on %s", addr) + err := server.ListenAndServe() + if err != nil { + log.Printf("error in ListenAndServe: %s", err) + } + errChan <- err + } else { + log.Printf("listening with HTTPS on %s", addr) + err := server.ListenAndServeTLS("", "") + if err != nil { + log.Printf("error in ListenAndServeTLS: %s", err) + } + errChan <- err + } + }() + select { + case err = <-errChan: + break + case <-time.After(listenAndServeErrorTimeout): + break + } + if err != nil { + return nil, err + } + + listener.server = server + + // Start the KCP engines, set up to read and write its packets over the + // WebSocket connections that arrive at the web server. + // handler.ServeHTTP is responsible for encapsulation/decapsulation of + // packets on behalf of KCP. KCP takes those packets and turns them into + // sessions which appear in the acceptSessions function. + for i, pconn := range handler.pconns { + ln, err := kcp.ServeConn(nil, 0, 0, pconn) + if err != nil { + server.Close() + return nil, err + } + go func() { + defer ln.Close() + err := listener.acceptSessions(ln) + if err != nil { + log.Printf("acceptSessions %d: %v", i, err) + } + }() + listener.ln = append(listener.ln, ln) + } + + return listener, nil +} + +type SnowflakeListener struct { + addr net.Addr + queue chan net.Conn + server *http.Server + ln []*kcp.Listener + closed chan struct{} + closeOnce sync.Once +} + +// Accept allows the caller to accept incoming Snowflake connections. +// We accept connections from a queue to accommodate both incoming +// smux Streams and legacy non-turbotunnel connections. +func (l *SnowflakeListener) Accept() (net.Conn, error) { + select { + case <-l.closed: + // channel has been closed, no longer accepting connections + return nil, io.ErrClosedPipe + case conn := <-l.queue: + return conn, nil + } +} + +// Addr returns the address of the SnowflakeListener +func (l *SnowflakeListener) Addr() net.Addr { + return l.addr +} + +// Close closes the Snowflake connection. +func (l *SnowflakeListener) Close() error { + // Close our HTTP server and our KCP listener + l.closeOnce.Do(func() { + close(l.closed) + l.server.Close() + for _, ln := range l.ln { + ln.Close() + } + }) + return nil +} + +// acceptStreams layers an smux.Session on the KCP connection and awaits streams +// on it. Passes each stream to our SnowflakeListener accept queue. +func (l *SnowflakeListener) acceptStreams(conn *kcp.UDPSession) error { + // Look up the IP address associated with this KCP session, via the + // ClientID that is returned by the session's RemoteAddr method. + addr, ok := clientIDAddrMap.Get(conn.RemoteAddr().(turbotunnel.ClientID)) + if !ok { + // This means that the map is tending to run over capacity, not + // just that there was not client_ip on the incoming connection. + // We store "" in the map in the absence of client_ip. This log + // message means you should increase clientIDAddrMapCapacity. 
+ log.Printf("no address in clientID-to-IP map (capacity %d)", clientIDAddrMapCapacity) + } + + smuxConfig := smux.DefaultConfig() + smuxConfig.Version = 2 + smuxConfig.KeepAliveTimeout = 4 * time.Minute + smuxConfig.MaxStreamBuffer = StreamSize + sess, err := smux.Server(conn, smuxConfig) + if err != nil { + return err + } + + for { + stream, err := sess.AcceptStream() + if err != nil { + if err, ok := err.(net.Error); ok && err.Temporary() { + continue + } + return err + } + l.queueConn(&SnowflakeClientConn{stream: stream, address: addr}) + } +} + +// acceptSessions listens for incoming KCP connections and passes them to +// acceptStreams. It is handler.ServeHTTP that provides the network interface +// that drives this function. +func (l *SnowflakeListener) acceptSessions(ln *kcp.Listener) error { + for { + conn, err := ln.AcceptKCP() + if err != nil { + if err, ok := err.(net.Error); ok && err.Temporary() { + continue + } + return err + } + // Permit coalescing the payloads of consecutive sends. + conn.SetStreamMode(true) + // Set the maximum send and receive window sizes to a high number + // Removes KCP bottlenecks: https://gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/-/issues/40026 + conn.SetWindowSize(WindowSize, WindowSize) + // Disable the dynamic congestion window (limit only by the + // maximum of local and remote static windows). + conn.SetNoDelay( + 0, // default nodelay + 0, // default interval + 0, // default resend + 1, // nc=1 => congestion window off + ) + go func() { + defer conn.Close() + err := l.acceptStreams(conn) + if err != nil && !errors.Is(err, io.ErrClosedPipe) { + log.Printf("acceptStreams: %v", err) + } + }() + } +} + +func (l *SnowflakeListener) queueConn(conn net.Conn) error { + select { + case <-l.closed: + return fmt.Errorf("accepted connection on closed listener") + case l.queue <- conn: + return nil + } +} + +// SnowflakeClientConn is a wrapper for the underlying turbotunnel conn +// (smux.Stream). It implements the net.Conn and io.WriterTo interfaces. The +// RemoteAddr method is overridden to refer to a real IP address, looked up from +// the client address map, rather than an abstract client ID. +type SnowflakeClientConn struct { + stream *smux.Stream + address net.Addr +} + +// Forward net.Conn methods, other than RemoteAddr, to the inner stream. +func (conn *SnowflakeClientConn) Read(b []byte) (int, error) { return conn.stream.Read(b) } +func (conn *SnowflakeClientConn) Write(b []byte) (int, error) { return conn.stream.Write(b) } +func (conn *SnowflakeClientConn) Close() error { return conn.stream.Close() } +func (conn *SnowflakeClientConn) LocalAddr() net.Addr { return conn.stream.LocalAddr() } +func (conn *SnowflakeClientConn) SetDeadline(t time.Time) error { return conn.stream.SetDeadline(t) } +func (conn *SnowflakeClientConn) SetReadDeadline(t time.Time) error { + return conn.stream.SetReadDeadline(t) +} + +func (conn *SnowflakeClientConn) SetWriteDeadline(t time.Time) error { + return conn.stream.SetWriteDeadline(t) +} + +// RemoteAddr returns the mapped client address of the Snowflake connection. +func (conn *SnowflakeClientConn) RemoteAddr() net.Addr { + return conn.address +} + +// WriteTo implements the io.WriterTo interface by passing the call to the +// underlying smux.Stream. 
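+// Implementing io.WriterTo lets callers such as io.Copy detect it and reuse
+// the stream's own copy loop instead of an extra intermediate buffer.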
+func (conn *SnowflakeClientConn) WriteTo(w io.Writer) (int64, error) { + return conn.stream.WriteTo(w) +} diff --git a/server/lib/turbotunnel.go b/server/lib/turbotunnel.go new file mode 100644 index 0000000..994b61e --- /dev/null +++ b/server/lib/turbotunnel.go @@ -0,0 +1,86 @@ +package snowflake_server + +import ( + "net" + "sync" + + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/turbotunnel" +) + +// clientIDMap is a fixed-capacity mapping from ClientIDs to a net.Addr. +// Adding a new entry using the Set method causes the oldest existing entry to +// be forgotten. +// +// This data type is meant to be used to remember the IP address associated with +// a ClientID, during the short period of time between when a WebSocket +// connection with that ClientID began, and when a KCP session is established. +// +// The design requirements of this type are that it needs to remember a mapping +// for only a short time, and old entries should expire so as not to consume +// unbounded memory. It is not a critical error if an entry is forgotten before +// it is needed; better to forget entries than to use too much memory. +type clientIDMap struct { + lock sync.Mutex + // entries is a circular buffer of (ClientID, addr) pairs. + entries []struct { + clientID turbotunnel.ClientID + addr net.Addr + } + // oldest is the index of the oldest member of the entries buffer, the + // one that will be overwritten at the next call to Set. + oldest int + // current points to the index of the most recent entry corresponding to + // each ClientID. + current map[turbotunnel.ClientID]int +} + +// newClientIDMap makes a new clientIDMap with the given capacity. +func newClientIDMap(capacity int) *clientIDMap { + return &clientIDMap{ + entries: make([]struct { + clientID turbotunnel.ClientID + addr net.Addr + }, capacity), + oldest: 0, + current: make(map[turbotunnel.ClientID]int), + } +} + +// Set adds a mapping from clientID to addr, replacing any previous mapping for +// clientID. It may also cause the clientIDMap to forget at most one other +// mapping, the oldest one. +func (m *clientIDMap) Set(clientID turbotunnel.ClientID, addr net.Addr) { + m.lock.Lock() + defer m.lock.Unlock() + if len(m.entries) == 0 { + // The invariant m.oldest < len(m.entries) does not hold in this + // special case. + return + } + // m.oldest is the index of the entry we're about to overwrite. If it's + // the current entry for any ClientID, we need to delete that clientID + // from the current map (that ClientID is now forgotten). + if i, ok := m.current[m.entries[m.oldest].clientID]; ok && i == m.oldest { + delete(m.current, m.entries[m.oldest].clientID) + } + // Overwrite the oldest entry. + m.entries[m.oldest].clientID = clientID + m.entries[m.oldest].addr = addr + // Add the overwritten entry to the quick-lookup map. + m.current[clientID] = m.oldest + // What was the oldest entry is now the newest. + m.oldest = (m.oldest + 1) % len(m.entries) +} + +// Get returns a previously stored mapping. The second return value indicates +// whether clientID was actually present in the map. If it is false, then the +// returned address will be nil. 
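+//
+// A minimal usage sketch (names are illustrative):
+//
+//	if addr, ok := m.Get(clientID); ok {
+//		log.Printf("session %v maps to %v", clientID, addr)
+//	}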
+func (m *clientIDMap) Get(clientID turbotunnel.ClientID) (net.Addr, bool) { + m.lock.Lock() + defer m.lock.Unlock() + if i, ok := m.current[clientID]; ok { + return m.entries[i].addr, true + } else { + return nil, false + } +} diff --git a/server/lib/turbotunnel_test.go b/server/lib/turbotunnel_test.go new file mode 100644 index 0000000..26053ff --- /dev/null +++ b/server/lib/turbotunnel_test.go @@ -0,0 +1,129 @@ +package snowflake_server + +import ( + "encoding/binary" + "net" + "testing" + + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/turbotunnel" +) + +func TestClientIDMap(t *testing.T) { + // Convert a uint64 into a ClientID. + id := func(n uint64) turbotunnel.ClientID { + var clientID turbotunnel.ClientID + binary.PutUvarint(clientID[:], n) + return clientID + } + + // Does m.Get(key) and checks that the output matches what is expected. + expectGet := func(m *clientIDMap, clientID turbotunnel.ClientID, expectedAddr string, expectedOK bool) { + t.Helper() + addr, ok := m.Get(clientID) + if (ok && addr.String() != expectedAddr) || ok != expectedOK { + t.Errorf("expected (%+q, %v), got (%+q, %v)", expectedAddr, expectedOK, addr, ok) + } + } + + // Checks that the len of m.current is as expected. + expectSize := func(m *clientIDMap, expectedLen int) { + t.Helper() + if len(m.current) != expectedLen { + t.Errorf("expected map len %d, got %d %+v", expectedLen, len(m.current), m.current) + } + } + + // Convert a string to a net.Addr + ip := func(addr string) net.Addr { + ret, err := net.ResolveIPAddr("ip", addr) + if err != nil { + t.Errorf("received error: %s", err.Error()) + } + return ret + } + + // Zero-capacity map can't remember anything. + { + m := newClientIDMap(0) + expectSize(m, 0) + expectGet(m, id(0), "", false) + expectGet(m, id(1234), "", false) + + m.Set(id(0), ip("1.1.1.1")) + expectSize(m, 0) + expectGet(m, id(0), "", false) + expectGet(m, id(1234), "", false) + + m.Set(id(1234), ip("1.1.1.1")) + expectSize(m, 0) + expectGet(m, id(0), "", false) + expectGet(m, id(1234), "", false) + } + + { + m := newClientIDMap(1) + expectSize(m, 0) + expectGet(m, id(0), "", false) + expectGet(m, id(1), "", false) + + m.Set(id(0), ip("1.1.1.1")) + expectSize(m, 1) + expectGet(m, id(0), "1.1.1.1", true) + expectGet(m, id(1), "", false) + + m.Set(id(1), ip("1.1.1.2")) // forgets the (0, "1.1.1.1") entry + expectSize(m, 1) + expectGet(m, id(0), "", false) + expectGet(m, id(1), "1.1.1.2", true) + + m.Set(id(1), ip("1.1.1.3")) // forgets the (1, "1.1.1.2") entry + expectSize(m, 1) + expectGet(m, id(0), "", false) + expectGet(m, id(1), "1.1.1.3", true) + } + + { + m := newClientIDMap(5) + m.Set(id(0), ip("1.1.1.1")) + m.Set(id(1), ip("1.1.1.2")) + m.Set(id(2), ip("1.1.1.3")) + m.Set(id(0), ip("1.1.1.4")) // shadows the (0, "1.1.1.1") entry + m.Set(id(3), ip("1.1.1.5")) + expectSize(m, 4) + expectGet(m, id(0), "1.1.1.4", true) + expectGet(m, id(1), "1.1.1.2", true) + expectGet(m, id(2), "1.1.1.3", true) + expectGet(m, id(3), "1.1.1.5", true) + expectGet(m, id(4), "", false) + + m.Set(id(4), ip("1.1.1.6")) // forgets the (0, "1.1.1.1") entry but should preserve (0, "1.1.1.4") + expectSize(m, 5) + expectGet(m, id(0), "1.1.1.4", true) + expectGet(m, id(1), "1.1.1.2", true) + expectGet(m, id(2), "1.1.1.3", true) + expectGet(m, id(3), "1.1.1.5", true) + expectGet(m, id(4), "1.1.1.6", true) + + m.Set(id(5), ip("1.1.1.7")) // forgets the (1, "1.1.1.2") entry + m.Set(id(0), ip("1.1.1.8")) // forgets the (2, "1.1.1.3") entry and shadows (0, "1.1.1.4") + 
expectSize(m, 4) + expectGet(m, id(0), "1.1.1.8", true) + expectGet(m, id(1), "", false) + expectGet(m, id(2), "", false) + expectGet(m, id(3), "1.1.1.5", true) + expectGet(m, id(4), "1.1.1.6", true) + expectGet(m, id(5), "1.1.1.7", true) + + m.Set(id(0), ip("1.1.1.9")) // forgets the (0, "1.1.1.4") entry and shadows (0, "1.1.1.8") + m.Set(id(0), ip("1.1.1.10")) // forgets the (3, "1.1.1.5") entry and shadows (0, "1.1.1.9") + m.Set(id(0), ip("1.1.1.11")) // forgets the (4, "1.1.1.6") entry and shadows (0, "1.1.1.10") + m.Set(id(0), ip("1.1.1.12")) // forgets the (5, "1.1.1.7") entry and shadows (0, "1.1.1.11") + expectSize(m, 1) + expectGet(m, id(0), "1.1.1.12", true) + expectGet(m, id(1), "", false) + expectGet(m, id(2), "", false) + expectGet(m, id(3), "", false) + expectGet(m, id(4), "", false) + expectGet(m, id(5), "", false) + } +} diff --git a/server/randaddr.go b/server/randaddr.go new file mode 100644 index 0000000..d739154 --- /dev/null +++ b/server/randaddr.go @@ -0,0 +1,41 @@ +package main + +import ( + "crypto/rand" + "fmt" + "net" +) + +// randIPAddr generates a random IP address within the network represented by +// ipnet. +func randIPAddr(ipnet *net.IPNet) (net.IP, error) { + if len(ipnet.IP) != len(ipnet.Mask) { + return nil, fmt.Errorf("IP and mask have unequal lengths (%v and %v)", len(ipnet.IP), len(ipnet.Mask)) + } + ip := make(net.IP, len(ipnet.IP)) + _, err := rand.Read(ip) + if err != nil { + return nil, err + } + for i := 0; i < len(ipnet.IP); i++ { + ip[i] = (ipnet.IP[i] & ipnet.Mask[i]) | (ip[i] & ^ipnet.Mask[i]) + } + return ip, nil +} + +// parseIPCIDR parses a CIDR-notation IP address and prefix length; or if that +// fails, as a plain IP address (with the prefix length equal to the address +// length). +func parseIPCIDR(s string) (*net.IPNet, error) { + _, ipnet, err := net.ParseCIDR(s) + if err == nil { + return ipnet, nil + } + // IP/mask failed; try just IP now, but remember err, to return it in + // case that fails too. + ip := net.ParseIP(s) + if ip != nil { + return &net.IPNet{IP: ip, Mask: net.CIDRMask(len(ip)*8, len(ip)*8)}, nil + } + return nil, err +} diff --git a/server/randaddr_test.go b/server/randaddr_test.go new file mode 100644 index 0000000..31bc97b --- /dev/null +++ b/server/randaddr_test.go @@ -0,0 +1,159 @@ +package main + +import ( + "bytes" + "net" + "testing" +) + +func mustParseCIDR(s string) *net.IPNet { + _, ipnet, err := net.ParseCIDR(s) + if err != nil { + panic(err) + } + return ipnet +} + +func TestRandAddr(t *testing.T) { +outer: + for _, ipnet := range []*net.IPNet{ + mustParseCIDR("127.0.0.1/0"), + mustParseCIDR("127.0.0.1/24"), + mustParseCIDR("127.0.0.55/32"), + mustParseCIDR("2001:db8::1234/0"), + mustParseCIDR("2001:db8::1234/32"), + mustParseCIDR("2001:db8::1234/128"), + // Non-canonical masks (that don't consist of 1s followed by 0s) + // work too, why not. 
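+		// In this entry, randIPAddr keeps exactly the bits selected by
+		// the mask: the last two octets stay 3.4, the low 3 bits of the
+		// second octet stay those of 2, and all other bits are random.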
+		&net.IPNet{
+			IP:   net.IP{1, 2, 3, 4},
+			Mask: net.IPMask{0x00, 0x07, 0xff, 0xff},
+		},
+	} {
+		for i := 0; i < 100; i++ {
+			ip, err := randIPAddr(ipnet)
+			if err != nil {
+				t.Errorf("%v returned error %v", ipnet, err)
+				continue outer
+			}
+			if !ipnet.Contains(ip) {
+				t.Errorf("%v does not contain %v", ipnet, ip)
+				continue outer
+			}
+		}
+	}
+}
+
+func TestRandAddrUnequalLengths(t *testing.T) {
+	for _, ipnet := range []*net.IPNet{
+		&net.IPNet{
+			IP:   net.IP{1, 2, 3, 4},
+			Mask: net.CIDRMask(32, 128),
+		},
+		&net.IPNet{
+			IP:   net.IP{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+			Mask: net.CIDRMask(24, 32),
+		},
+		&net.IPNet{
+			IP:   net.IP{1, 2, 3, 4},
+			Mask: net.IPMask{},
+		},
+		&net.IPNet{
+			IP:   net.IP{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+			Mask: net.IPMask{},
+		},
+	} {
+		_, err := randIPAddr(ipnet)
+		if err == nil {
+			t.Errorf("%v did not result in error, but should have", ipnet)
+		}
+	}
+}
+
+func BenchmarkRandAddr(b *testing.B) {
+	for _, test := range []struct {
+		label string
+		ipnet net.IPNet
+	}{
+		{"IPv4/32", net.IPNet{IP: net.IP{127, 0, 0, 1}, Mask: net.CIDRMask(32, 32)}},
+		{"IPv4/24", net.IPNet{IP: net.IP{127, 0, 0, 1}, Mask: net.CIDRMask(24, 32)}},
+		{"IPv6/64", net.IPNet{
+			IP:   net.IP{0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x12, 0x34},
+			Mask: net.CIDRMask(64, 128),
+		}},
+		{"IPv6/128", net.IPNet{
+			IP:   net.IP{0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x12, 0x34},
+			Mask: net.CIDRMask(128, 128),
+		}},
+	} {
+		b.Run(test.label, func(b *testing.B) {
+			for i := 0; i < b.N; i++ {
+				_, err := randIPAddr(&test.ipnet)
+				if err != nil {
+					b.Fatal(err)
+				}
+			}
+		})
+	}
+}
+
+func ipNetEqual(a, b *net.IPNet) bool {
+	if !a.IP.Equal(b.IP) {
+		return false
+	}
+	// Comparing masks for equality is a little tricky because they may be
+	// different lengths. For masks in canonical form (those for which
+	// Size() returns other than (0, 0)), we consider two masks equal if the
+	// numbers of bits *not* covered by the prefix are equal; e.g.
+	// (120, 128) is equal to (24, 32), because they both have 8 bits not in
+	// the prefix. If either mask is not in canonical form, we require them
+	// to be equal as byte arrays (which includes length).
+	aOnes, aBits := a.Mask.Size()
+	bOnes, bBits := b.Mask.Size()
+	if aBits == 0 || bBits == 0 {
+		return bytes.Equal(a.Mask, b.Mask)
+	} else {
+		return aBits-aOnes == bBits-bOnes
+	}
+}
+
+func TestParseIPCIDR(t *testing.T) {
+	// Well-formed inputs.
+	for _, test := range []struct {
+		input    string
+		expected *net.IPNet
+	}{
+		{"127.0.0.123", mustParseCIDR("127.0.0.123/32")},
+		{"127.0.0.123/0", mustParseCIDR("127.0.0.123/0")},
+		{"127.0.0.123/24", mustParseCIDR("127.0.0.123/24")},
+		{"127.0.0.123/32", mustParseCIDR("127.0.0.123/32")},
+		{"2001:db8::1234", mustParseCIDR("2001:db8::1234/128")},
+		{"2001:db8::1234/0", mustParseCIDR("2001:db8::1234/0")},
+		{"2001:db8::1234/32", mustParseCIDR("2001:db8::1234/32")},
+		{"2001:db8::1234/128", mustParseCIDR("2001:db8::1234/128")},
+	} {
+		ipnet, err := parseIPCIDR(test.input)
+		if err != nil {
+			t.Errorf("%q returned error %v", test.input, err)
+			continue
+		}
+		if !ipNetEqual(ipnet, test.expected) {
+			t.Errorf("%q → %v, expected %v", test.input, ipnet, test.expected)
+		}
+	}
+
+	// Bad inputs.
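+	// Each of these fails net.ParseCIDR, and the plain-IP fallback also
+	// fails (net.ParseIP returns nil), so parseIPCIDR returns the original
+	// ParseCIDR error.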
+ for _, input := range []string{ + "", + "1.2.3", + "1.2.3/16", + "2001:db8:1234", + "2001:db8:1234/64", + "localhost", + } { + _, err := parseIPCIDR(input) + if err == nil { + t.Errorf("%q did not result in error, but should have", input) + } + } +} diff --git a/server/server.go b/server/server.go index cb58efa..188fe6e 100644 --- a/server/server.go +++ b/server/server.go @@ -3,45 +3,33 @@ package main import ( - "crypto/tls" "errors" "flag" "fmt" "io" - "io/ioutil" "log" "net" "net/http" "os" "os/signal" "path/filepath" + "strconv" "strings" "sync" "syscall" - "time" - "git.torproject.org/pluggable-transports/goptlib.git" - "git.torproject.org/pluggable-transports/snowflake.git/common/safelog" - "git.torproject.org/pluggable-transports/websocket.git/websocket" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/ptutil/safelog" + "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/version" "golang.org/x/crypto/acme/autocert" - "golang.org/x/net/http2" + + pt "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/goptlib" + sf "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/server/lib" ) const ptMethodName = "snowflake" -const requestTimeout = 10 * time.Second - -const maxMessageSize = 64 * 1024 - -// How long to wait for ListenAndServe or ListenAndServeTLS to return an error -// before deciding that it's not going to return. -const listenAndServeErrorTimeout = 100 * time.Millisecond var ptInfo pt.ServerInfo -// When a connection handler starts, +1 is written to this channel; when it -// ends, -1 is written. -var handlerChan = make(chan int) - func usage() { fmt.Fprintf(os.Stderr, `Usage: %s [OPTIONS] @@ -55,76 +43,22 @@ additional HTTP listener on port 80 to work with ACME. flag.PrintDefaults() } -// An abstraction that makes an underlying WebSocket connection look like an -// io.ReadWriteCloser. -type webSocketConn struct { - Ws *websocket.WebSocket - messageBuf []byte -} - -// Implements io.Reader. -func (conn *webSocketConn) Read(b []byte) (n int, err error) { - for len(conn.messageBuf) == 0 { - var m websocket.Message - m, err = conn.Ws.ReadMessage() - if err != nil { - return - } - if m.Opcode == 8 { - err = io.EOF - return - } - if m.Opcode != 2 { - err = errors.New(fmt.Sprintf("got non-binary opcode %d", m.Opcode)) - return - } - conn.messageBuf = m.Payload - } - - n = copy(b, conn.messageBuf) - conn.messageBuf = conn.messageBuf[n:] - - return -} - -// Implements io.Writer. -func (conn *webSocketConn) Write(b []byte) (int, error) { - err := conn.Ws.WriteMessage(2, b) - return len(b), err -} - -// Implements io.Closer. -func (conn *webSocketConn) Close() error { - // Ignore any error in trying to write a Close frame. - _ = conn.Ws.WriteFrame(8, nil) - return conn.Ws.Conn.Close() -} - -// Create a new webSocketConn. -func newWebSocketConn(ws *websocket.WebSocket) webSocketConn { - var conn webSocketConn - conn.Ws = ws - return conn -} - -// Copy from WebSocket to socket and vice versa. -func proxy(local *net.TCPConn, conn *webSocketConn) { +// proxy copies data bidirectionally from one connection to another. 
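+// Each direction is copied in its own goroutine; when one direction finishes,
+// the corresponding half of the ORPort connection is closed along with the
+// snowflake connection, and proxy returns once both copies have finished.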
+func proxy(local *net.TCPConn, conn net.Conn) {
 	var wg sync.WaitGroup
 	wg.Add(2)
 	go func() {
-		_, err := io.Copy(conn, local)
-		if err != nil {
-			log.Printf("error copying ORPort to WebSocket")
+		if _, err := io.Copy(conn, local); err != nil && !errors.Is(err, io.ErrClosedPipe) {
+			log.Printf("error copying ORPort to WebSocket: %v", err)
 		}
 		local.CloseRead()
 		conn.Close()
 		wg.Done()
 	}()
 	go func() {
-		_, err := io.Copy(local, conn)
-		if err != nil {
-			log.Printf("error copying WebSocket to ORPort")
+		if _, err := io.Copy(local, conn); err != nil && !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrClosedPipe) {
+			log.Printf("error copying WebSocket to ORPort: %v", err)
 		}
 		local.CloseWrite()
 		conn.Close()
@@ -134,123 +68,55 @@ func proxy(local *net.TCPConn, conn *webSocketConn) {
 	wg.Wait()
 }
 
-// Return an address string suitable to pass into pt.DialOr.
-func clientAddr(clientIPParam string) string {
-	if clientIPParam == "" {
-		return ""
+// handleConn bidirectionally connects a client snowflake connection with the
+// ORPort. If orPortSrcAddr is not nil, addresses from the given range are used
+// when dialing the ORPort.
+func handleConn(conn net.Conn, orPortSrcAddr *net.IPNet) error {
+	addr := conn.RemoteAddr().String()
+	statsChannel <- addr != ""
+
+	dialer := net.Dialer{
+		Control: dialerControl,
 	}
-	// Check if client addr is a valid IP
-	clientIP := net.ParseIP(clientIPParam)
-	if clientIP == nil {
-		return ""
+	if orPortSrcAddr != nil {
+		// Use a random source IP address in the given range.
+		ip, err := randIPAddr(orPortSrcAddr)
+		if err != nil {
+			return err
+		}
+		dialer.LocalAddr = &net.TCPAddr{IP: ip}
 	}
-	// Add a dummy port number. USERADDR requires a port number.
-	return (&net.TCPAddr{IP: clientIP, Port: 1, Zone: ""}).String()
-}
-
-func webSocketHandler(ws *websocket.WebSocket) {
-	// Undo timeouts on HTTP request handling.
-	ws.Conn.SetDeadline(time.Time{})
-	conn := newWebSocketConn(ws)
-	defer conn.Close()
-
-	handlerChan <- 1
-	defer func() {
-		handlerChan <- -1
-	}()
-
-	// Pass the address of client as the remote address of incoming connection
-	clientIPParam := ws.Request().URL.Query().Get("client_ip")
-	addr := clientAddr(clientIPParam)
-	if addr == "" {
-		statsChannel <- false
-	} else {
-		statsChannel <- true
-	}
-	or, err := pt.DialOr(&ptInfo, addr, ptMethodName)
-
+	or, err := pt.DialOrWithDialer(&dialer, &ptInfo, addr, ptMethodName)
 	if err != nil {
-		log.Printf("failed to connect to ORPort: %s", err)
-		return
+		return fmt.Errorf("failed to connect to ORPort: %s", err)
 	}
 	defer or.Close()
 
-	proxy(or, &conn)
+	proxy(or.(*net.TCPConn), conn)
+	return nil
 }
 
-func initServer(addr *net.TCPAddr,
-	getCertificate func(*tls.ClientHelloInfo) (*tls.Certificate, error),
-	listenAndServe func(*http.Server, chan<- error)) (*http.Server, error) {
-	// We're not capable of listening on port 0 (i.e., an ephemeral port
-	// unknown in advance). The reason is that while the net/http package
-	// exposes ListenAndServe and ListenAndServeTLS, those functions never
-	// return, so there's no opportunity to find out what the port number
-	// is, in between the Listen and Serve steps.
-	// https://groups.google.com/d/msg/Golang-nuts/3F1VRCCENp8/3hcayZiwYM8J
-	if addr.Port == 0 {
-		return nil, fmt.Errorf("cannot listen on port %d; configure a port using ServerTransportListenAddr", addr.Port)
-	}
-
-	var config websocket.Config
-	config.MaxMessageSize = maxMessageSize
-	server := &http.Server{
-		Addr:        addr.String(),
-		Handler:     config.Handler(webSocketHandler),
-		ReadTimeout: requestTimeout,
-	}
-	// We need to override server.TLSConfig.GetCertificate--but first
-	// server.TLSConfig needs to be non-nil. If we just create our own new
-	// &tls.Config, it will lack the default settings that the net/http
-	// package sets up for things like HTTP/2. Therefore we first call
-	// http2.ConfigureServer for its side effect of initializing
-	// server.TLSConfig properly. An alternative would be to make a dummy
-	// net.Listener, call Serve on it, and let it return.
-	// https://github.com/golang/go/issues/16588#issuecomment-237386446
-	err := http2.ConfigureServer(server, nil)
-	if err != nil {
-		return server, err
-	}
-	server.TLSConfig.GetCertificate = getCertificate
-
-	// Another unfortunate effect of the inseparable net/http ListenAndServe
-	// is that we can't check for Listen errors like "permission denied" and
-	// "address already in use" without potentially entering the infinite
-	// loop of Serve. The hack we apply here is to wait a short time,
-	// listenAndServeErrorTimeout, to see if an error is returned (because
-	// it's better if the error message goes to the tor log through
-	// SMETHOD-ERROR than if it only goes to the snowflake log).
-	errChan := make(chan error)
-	go listenAndServe(server, errChan)
-	select {
-	case err = <-errChan:
-		break
-	case <-time.After(listenAndServeErrorTimeout):
-		break
-	}
-
-	return server, err
-}
-
-func startServer(addr *net.TCPAddr) (*http.Server, error) {
-	return initServer(addr, nil, func(server *http.Server, errChan chan<- error) {
-		log.Printf("listening with plain HTTP on %s", addr)
-		err := server.ListenAndServe()
+// acceptLoop accepts incoming client snowflake connections and passes them to
+// handleConn. If orPortSrcAddr is not nil, addresses from the given range are
+// used when dialing the ORPort.
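+// Each accepted connection is served in its own goroutine; temporary Accept
+// errors are retried, while any other Accept error ends the loop.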
+func acceptLoop(ln net.Listener, orPortSrcAddr *net.IPNet) { + for { + conn, err := ln.Accept() if err != nil { - log.Printf("error in ListenAndServe: %s", err) + if err, ok := err.(net.Error); ok && err.Temporary() { + continue + } + log.Printf("Snowflake accept error: %s", err) + break } - errChan <- err - }) -} - -func startServerTLS(addr *net.TCPAddr, getCertificate func(*tls.ClientHelloInfo) (*tls.Certificate, error)) (*http.Server, error) { - return initServer(addr, getCertificate, func(server *http.Server, errChan chan<- error) { - log.Printf("listening with HTTPS on %s", addr) - err := server.ListenAndServeTLS("", "") - if err != nil { - log.Printf("error in ListenAndServeTLS: %s", err) - } - errChan <- err - }) + go func() { + defer conn.Close() + err := handleConn(conn, orPortSrcAddr) + if err != nil { + log.Printf("handleConn: %v", err) + } + }() + } } func getCertificateCacheDir() (string, error) { @@ -266,14 +132,23 @@ func main() { var acmeHostnamesCommas string var disableTLS bool var logFilename string + var unsafeLogging bool + var versionFlag bool flag.Usage = usage flag.StringVar(&acmeEmail, "acme-email", "", "optional contact email for Let's Encrypt notifications") flag.StringVar(&acmeHostnamesCommas, "acme-hostnames", "", "comma-separated hostnames for TLS certificate") flag.BoolVar(&disableTLS, "disable-tls", false, "don't use HTTPS") flag.StringVar(&logFilename, "log", "", "log file to write to") + flag.BoolVar(&unsafeLogging, "unsafe-logging", false, "prevent logs from being scrubbed") + flag.BoolVar(&versionFlag, "version", false, "display version info to stderr and quit") flag.Parse() + if versionFlag { + fmt.Fprintf(os.Stderr, "snowflake-server %s", version.ConstructResult()) + os.Exit(0) + } + log.SetFlags(log.LstdFlags | log.LUTC) var logOutput io.Writer = os.Stderr @@ -285,8 +160,14 @@ func main() { defer f.Close() logOutput = f } - //We want to send the log output through our scrubber first - log.SetOutput(&safelog.LogScrubber{Output: logOutput}) + if unsafeLogging { + log.SetOutput(logOutput) + } else { + // We want to send the log output through our scrubber first + log.SetOutput(&safelog.LogScrubber{Output: logOutput}) + } + + log.Printf("snowflake-server %s\n", version.GetVersion()) if !disableTLS && acmeHostnamesCommas == "" { log.Fatal("the --acme-hostnames option is required") @@ -299,6 +180,7 @@ func main() { if err != nil { log.Fatalf("error in setup: %s", err) } + pt.ReportVersion("snowflake-server", version.GetVersion()) go statsThread() @@ -329,7 +211,7 @@ func main() { // https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#http-challenge needHTTP01Listener := !disableTLS - servers := make([]*http.Server, 0) + listeners := make([]net.Listener, 0) for _, bindaddr := range ptInfo.Bindaddrs { if bindaddr.MethodName != ptMethodName { pt.SmethodError(bindaddr.MethodName, "no such method") @@ -340,6 +222,7 @@ func main() { addr := *bindaddr.Addr addr.Port = 80 log.Printf("Starting HTTP-01 ACME listener") + var lnHTTP01 *net.TCPListener lnHTTP01, err := net.ListenTCP("tcp", &addr) if err != nil { log.Printf("error opening HTTP-01 ACME listener: %s", err) @@ -353,34 +236,82 @@ func main() { go func() { log.Fatal(server.Serve(lnHTTP01)) }() - servers = append(servers, server) + listeners = append(listeners, lnHTTP01) needHTTP01Listener = false } - var server *http.Server + // We're not capable of listening on port 0 (i.e., an ephemeral port + // unknown in advance). 
The reason is that while the net/http package + // exposes ListenAndServe and ListenAndServeTLS, those functions never + // return, so there's no opportunity to find out what the port number + // is, in between the Listen and Serve steps. + // https://groups.google.com/d/msg/Golang-nuts/3F1VRCCENp8/3hcayZiwYM8J + if bindaddr.Addr.Port == 0 { + err := fmt.Errorf( + "cannot listen on port %d; configure a port using ServerTransportListenAddr", + bindaddr.Addr.Port) + log.Printf("error opening listener: %s", err) + pt.SmethodError(bindaddr.MethodName, err.Error()) + continue + } + + var transport *sf.Transport args := pt.Args{} if disableTLS { args.Add("tls", "no") - server, err = startServer(bindaddr.Addr) + transport = sf.NewSnowflakeServer(nil) } else { args.Add("tls", "yes") for _, hostname := range acmeHostnames { args.Add("hostname", hostname) } - server, err = startServerTLS(bindaddr.Addr, certManager.GetCertificate) + transport = sf.NewSnowflakeServer(certManager.GetCertificate) } + + // Are we requested to use source addresses from a particular + // range when dialing the ORPort for this transport? + var orPortSrcAddr *net.IPNet + if orPortSrcAddrCIDR, ok := bindaddr.Options.Get("orport-srcaddr"); ok { + ipnet, err := parseIPCIDR(orPortSrcAddrCIDR) + if err != nil { + err = fmt.Errorf("parsing srcaddr: %w", err) + log.Println(err) + pt.SmethodError(bindaddr.MethodName, err.Error()) + continue + } + orPortSrcAddr = ipnet + } + + numKCPInstances := 1 + // Are we requested to run a certain number of KCP state + // machines? + if value, ok := bindaddr.Options.Get("num-turbotunnel"); ok { + n, err := strconv.Atoi(value) + if err == nil && n < 1 { + err = fmt.Errorf("cannot be less than 1") + } + if err != nil { + err = fmt.Errorf("parsing num-turbotunnel: %w", err) + log.Println(err) + pt.SmethodError(bindaddr.MethodName, err.Error()) + continue + } + numKCPInstances = n + } + + ln, err := transport.Listen(bindaddr.Addr, numKCPInstances) if err != nil { log.Printf("error opening listener: %s", err) pt.SmethodError(bindaddr.MethodName, err.Error()) continue } + defer ln.Close() + go acceptLoop(ln, orPortSrcAddr) pt.SmethodArgs(bindaddr.MethodName, bindaddr.Addr, args) - servers = append(servers, server) + listeners = append(listeners, ln) } pt.SmethodsDone() - var numHandlers int = 0 - var sig os.Signal sigChan := make(chan os.Signal, 1) signal.Notify(sigChan, syscall.SIGTERM) @@ -388,28 +319,20 @@ func main() { // This environment variable means we should treat EOF on stdin // just like SIGTERM: https://bugs.torproject.org/15435. go func() { - io.Copy(ioutil.Discard, os.Stdin) + if _, err := io.Copy(io.Discard, os.Stdin); err != nil { + log.Printf("error copying os.Stdin to io.Discard: %v", err) + } log.Printf("synthesizing SIGTERM because of stdin close") sigChan <- syscall.SIGTERM }() } - // keep track of handlers and wait for a signal - sig = nil - for sig == nil { - select { - case n := <-handlerChan: - numHandlers += n - case sig = <-sigChan: - } - } + // Wait for a signal. + sig := <-sigChan - // signal received, shut down + // Signal received, shut down. 
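+	// Closing the listeners causes the blocked Accept calls in acceptLoop
+	// to return a non-temporary error, which ends those goroutines.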
log.Printf("caught signal %q, exiting", sig) - for _, server := range servers { - server.Close() - } - for numHandlers > 0 { - numHandlers += <-handlerChan + for _, ln := range listeners { + ln.Close() } } diff --git a/server/server_test.go b/server/server_test.go deleted file mode 100644 index 84ac7ba..0000000 --- a/server/server_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package main - -import ( - "net" - "strconv" - "testing" -) - -func TestClientAddr(t *testing.T) { - // good tests - for _, test := range []struct { - input string - expected net.IP - }{ - {"1.2.3.4", net.ParseIP("1.2.3.4")}, - {"1:2::3:4", net.ParseIP("1:2::3:4")}, - } { - useraddr := clientAddr(test.input) - host, port, err := net.SplitHostPort(useraddr) - if err != nil { - t.Errorf("clientAddr(%q) → SplitHostPort error %v", test.input, err) - continue - } - if !test.expected.Equal(net.ParseIP(host)) { - t.Errorf("clientAddr(%q) → host %q, not %v", test.input, host, test.expected) - } - portNo, err := strconv.Atoi(port) - if err != nil { - t.Errorf("clientAddr(%q) → port %q", test.input, port) - continue - } - if portNo == 0 { - t.Errorf("clientAddr(%q) → port %d", test.input, portNo) - } - } - - // bad tests - for _, input := range []string{ - "", - "abc", - "1.2.3.4.5", - "[12::34]", - } { - useraddr := clientAddr(input) - if useraddr != "" { - t.Errorf("clientAddr(%q) → %q, not %q", input, useraddr, "") - } - } -} diff --git a/server/stats.go b/server/stats.go index 204e587..80e9e49 100644 --- a/server/stats.go +++ b/server/stats.go @@ -1,6 +1,6 @@ package main -// This code handled periodic statistics logging. +// This code handles periodic statistics logging. // // The only thing it keeps track of is how many connections had the client_ip // parameter. Write true to statsChannel to record a connection with client_ip; @@ -27,9 +27,9 @@ func statsThread() { select { case v := <-statsChannel: if v { - numClientIP += 1 + numClientIP++ } - numConnections += 1 + numConnections++ case <-deadline: now := time.Now() log.Printf("in the past %.f s, %d/%d connections had client_ip",