Remove proxy churn measurements from broker.

We've done the analysis we planned to do on these measurements.

A program to analyze the proxy churn and extract hour-by-hour
intersections is available at:
https://github.com/turfed/snowflake-paper/tree/main/figures/proxy-churn

Closes #40280.
David Fifield 2023-09-05 05:42:15 +00:00 committed by Shelikhoo
parent a615e8b1ab
commit 6393af6bab
12 changed files with 0 additions and 362 deletions


@@ -1,55 +0,0 @@
package ipsetsink

import (
    "bytes"
    "crypto/hmac"
    "encoding/binary"
    "hash"

    "github.com/clarkduvall/hyperloglog"
    "golang.org/x/crypto/sha3"
)

func NewIPSetSink(maskingKey string) *IPSetSink {
    // NewPlus can only fail for a precision outside [4, 18], so the error
    // is safe to ignore here.
    countDistinct, _ := hyperloglog.NewPlus(18)
    return &IPSetSink{
        ipMaskingKey:  maskingKey,
        countDistinct: countDistinct,
    }
}

type IPSetSink struct {
    ipMaskingKey  string
    countDistinct *hyperloglog.HyperLogLogPlus
}

// maskIPAddress hashes the address with HMAC-SHA3-256 under the masking key,
// so that raw IP addresses never enter the sketch.
func (s *IPSetSink) maskIPAddress(ipAddress string) []byte {
    hmacIPMasker := hmac.New(func() hash.Hash {
        return sha3.New256()
    }, []byte(s.ipMaskingKey))
    hmacIPMasker.Write([]byte(ipAddress))
    return hmacIPMasker.Sum(nil)
}

func (s *IPSetSink) AddIPToSet(ipAddress string) {
    s.countDistinct.Add(truncatedHash64FromBytes{hashValue(s.maskIPAddress(ipAddress))})
}

func (s *IPSetSink) Dump() ([]byte, error) {
    return s.countDistinct.GobEncode()
}

func (s *IPSetSink) Reset() {
    s.countDistinct.Clear()
}

type hashValue []byte

// truncatedHash64FromBytes adapts a byte-slice hash to the Hash64 interface
// that hyperloglog expects, keeping the first 8 bytes (big-endian).
type truncatedHash64FromBytes struct {
    hashValue
}

func (c truncatedHash64FromBytes) Sum64() uint64 {
    var value uint64
    binary.Read(bytes.NewReader(c.hashValue), binary.BigEndian, &value)
    return value
}
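
For context, a minimal usage sketch of the removed IPSetSink API (the masking key and addresses are made-up examples; the import path follows this repository's module path):

package main

import (
    "fmt"

    "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/ipsetsink"
)

func main() {
    // Addresses are HMAC-masked under the key before being added to the
    // HyperLogLog++ sketch; duplicates do not inflate the estimate.
    sink := ipsetsink.NewIPSetSink("example-masking-key")
    sink.AddIPToSet("192.0.2.1")
    sink.AddIPToSet("192.0.2.2")
    sink.AddIPToSet("192.0.2.1")

    // Dump serializes the sketch (gob encoding) for storage or transfer.
    data, err := sink.Dump()
    if err != nil {
        panic(err)
    }
    fmt.Printf("sketch: %d bytes\n", len(data))
}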


@@ -1,47 +0,0 @@
package ipsetsink

import (
    "fmt"
    "testing"

    "github.com/clarkduvall/hyperloglog"
    . "github.com/smartystreets/goconvey/convey"
)

func TestSinkInit(t *testing.T) {
    Convey("Context", t, func() {
        sink := NewIPSetSink("demo")
        sink.AddIPToSet("test1")
        sink.AddIPToSet("test2")
        data, err := sink.Dump()
        So(err, ShouldBeNil)
        structure, err := hyperloglog.NewPlus(18)
        So(err, ShouldBeNil)
        err = structure.GobDecode(data)
        So(err, ShouldBeNil)
        count := structure.Count()
        So(count, ShouldBeBetweenOrEqual, 1, 3)
    })
}

func TestSinkCounting(t *testing.T) {
    Convey("Context", t, func() {
        for itemCount := 300; itemCount <= 10000; itemCount += 200 {
            sink := NewIPSetSink("demo")
            // Add the same items twice: duplicates must not change the
            // distinct count.
            for i := 0; i <= itemCount; i++ {
                sink.AddIPToSet(fmt.Sprintf("demo%v", i))
            }
            for i := 0; i <= itemCount; i++ {
                sink.AddIPToSet(fmt.Sprintf("demo%v", i))
            }
            data, err := sink.Dump()
            So(err, ShouldBeNil)
            structure, err := hyperloglog.NewPlus(18)
            So(err, ShouldBeNil)
            err = structure.GobDecode(data)
            So(err, ShouldBeNil)
            count := structure.Count()
            So((float64(count)/float64(itemCount))-1.0, ShouldAlmostEqual, 0, 0.01)
        }
    })
}


@@ -1,24 +0,0 @@
package sinkcluster

/* ClusterWriter and (ClusterCounter).Count operate on a streamed IP set journal file that records distinct IP addresses.
Its format is as follows:
The file is newline-delimited JSON (https://jsonlines.org/).
Each line is a JSON object of the form:
{"recordingStart":"2022-05-30T14:38:44.678610091Z","recordingEnd":"2022-05-30T14:39:48.157630926Z","recorded":""}
recordingStart (datetime) is the time at which this chunk of recording started.
recordingEnd (datetime) is the time at which this chunk of recording ended.
recorded is the checkpoint data generated by hyperloglog.
*/

import "time"

type SinkEntry struct {
    RecordingStart time.Time `json:"recordingStart"`
    RecordingEnd   time.Time `json:"recordingEnd"`
    Recorded       []byte    `json:"recorded"`
}
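
A hedged sketch of reading one journal line back into a SinkEntry (a fragment assuming encoding/json and log are imported; the line literal is the example from the comment above, and encoding/json decodes the base64 "recorded" field into the []byte):

var entry sinkcluster.SinkEntry
line := []byte(`{"recordingStart":"2022-05-30T14:38:44.678610091Z","recordingEnd":"2022-05-30T14:39:48.157630926Z","recorded":""}`)
if err := json.Unmarshal(line, &entry); err != nil {
    log.Fatal(err)
}
// entry.Recorded now holds the gob-encoded hyperloglog checkpoint.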


@@ -1,64 +0,0 @@
package sinkcluster

import (
    "bufio"
    "encoding/json"
    "io"
    "time"

    "github.com/clarkduvall/hyperloglog"
)

func NewClusterCounter(from time.Time, to time.Time) *ClusterCounter {
    return &ClusterCounter{from: from, to: to}
}

type ClusterCounter struct {
    from time.Time
    to   time.Time
}

type ClusterCountResult struct {
    Sum           uint64
    ChunkIncluded int64
}

func (c ClusterCounter) Count(reader io.Reader) (*ClusterCountResult, error) {
    result := ClusterCountResult{}
    counter, err := hyperloglog.NewPlus(18)
    if err != nil {
        return nil, err
    }
    inputScanner := bufio.NewScanner(reader)
    for inputScanner.Scan() {
        inputLine := inputScanner.Bytes()
        sinkInfo := SinkEntry{}
        if err := json.Unmarshal(inputLine, &sinkInfo); err != nil {
            return nil, err
        }
        // Skip chunks that are not fully contained in the [from, to] window.
        if (sinkInfo.RecordingStart.Before(c.from) && !sinkInfo.RecordingStart.Equal(c.from)) ||
            sinkInfo.RecordingEnd.After(c.to) {
            continue
        }
        restoredCounter, err := hyperloglog.NewPlus(18)
        if err != nil {
            return nil, err
        }
        err = restoredCounter.GobDecode(sinkInfo.Recorded)
        if err != nil {
            return nil, err
        }
        result.ChunkIncluded++
        err = counter.Merge(restoredCounter)
        if err != nil {
            return nil, err
        }
    }
    err = inputScanner.Err()
    if err != nil {
        return nil, err
    }
    result.Sum = counter.Count()
    return &result, nil
}
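
As a usage sketch (a fragment assuming os, log, and time are imported; the journal file name ipset.jsonl is a hypothetical example), counting distinct IPs seen in the last 24 hours might look like:

f, err := os.Open("ipset.jsonl")
if err != nil {
    log.Fatal(err)
}
defer f.Close()

// Only chunks recorded entirely within [from, to] are merged into the total.
counter := sinkcluster.NewClusterCounter(time.Now().Add(-24*time.Hour), time.Now())
result, err := counter.Count(f)
if err != nil {
    log.Fatal(err)
}
log.Printf("about %d distinct IPs across %d chunks", result.Sum, result.ChunkIncluded)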


@@ -1,68 +0,0 @@
package sinkcluster

import (
    "bytes"
    "encoding/json"
    "io"
    "log"
    "time"

    "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/ipsetsink"
)

func NewClusterWriter(writer WriteSyncer, writeInterval time.Duration, sink *ipsetsink.IPSetSink) *ClusterWriter {
    c := &ClusterWriter{
        writer:        writer,
        lastWriteTime: time.Now(),
        writeInterval: writeInterval,
        current:       sink,
    }
    return c
}

type ClusterWriter struct {
    writer        WriteSyncer
    lastWriteTime time.Time
    writeInterval time.Duration
    current       *ipsetsink.IPSetSink
}

type WriteSyncer interface {
    Sync() error
    io.Writer
}

func (c *ClusterWriter) WriteIPSetToDisk() {
    currentTime := time.Now()
    data, err := c.current.Dump()
    if err != nil {
        log.Println("unable to write ipset to file:", err)
        return
    }
    entry := &SinkEntry{
        RecordingStart: c.lastWriteTime,
        RecordingEnd:   currentTime,
        Recorded:       data,
    }
    jsonData, err := json.Marshal(entry)
    if err != nil {
        log.Println("unable to write ipset to file:", err)
        return
    }
    jsonData = append(jsonData, byte('\n'))
    _, err = io.Copy(c.writer, bytes.NewReader(jsonData))
    if err != nil {
        log.Println("unable to write ipset to file:", err)
        return
    }
    c.writer.Sync()
    c.lastWriteTime = currentTime
    c.current.Reset()
}

func (c *ClusterWriter) AddIPToSet(ipAddress string) {
    if c.lastWriteTime.Add(c.writeInterval).Before(time.Now()) {
        c.WriteIPSetToDisk()
    }
    c.current.AddIPToSet(ipAddress)
}
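
A minimal wiring sketch (the hourly interval and file name are assumptions; *os.File satisfies WriteSyncer because it has both Write and Sync):

f, err := os.OpenFile("ipset.jsonl", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
    log.Fatal(err)
}
sink := ipsetsink.NewIPSetSink("example-masking-key")
w := sinkcluster.NewClusterWriter(f, time.Hour, sink)
// AddIPToSet flushes a journal entry first if the interval has elapsed,
// then adds the (masked) address to the current sketch.
w.AddIPToSet("192.0.2.1")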


@@ -1,33 +0,0 @@
package sinkcluster

import (
    "bytes"
    "io"
    "testing"
    "time"

    . "github.com/smartystreets/goconvey/convey"

    "gitlab.torproject.org/tpo/anti-censorship/pluggable-transports/snowflake/v2/common/ipsetsink"
)

type writerStub struct {
    io.Writer
}

func (w writerStub) Sync() error {
    return nil
}

func TestSinkWriter(t *testing.T) {
    Convey("Context", t, func() {
        buffer := bytes.NewBuffer(nil)
        writerStubInst := &writerStub{buffer}
        sink := ipsetsink.NewIPSetSink("demo")
        clusterWriter := NewClusterWriter(writerStubInst, time.Minute, sink)
        clusterWriter.AddIPToSet("1")
        clusterWriter.WriteIPSetToDisk()
        So(buffer.Bytes(), ShouldNotBeNil)
    })
}