From 518621220d0f4dd9aea2daacafeb5785e84b2f93 Mon Sep 17 00:00:00 2001 From: Edwin Buck Date: Thu, 17 Oct 2024 12:54:56 -0500 Subject: [PATCH 1/2] Implement cache update deduplication per fetch cycle (#5509) Signed-off-by: Edwin Buck --- .../telemetry/server/datastore/event.go | 16 +- .../telemetry/server/datastore/wrapper.go | 24 +- .../server/datastore/wrapper_test.go | 20 +- pkg/server/authorizedentries/cache.go | 6 +- pkg/server/authorizedentries/cache_test.go | 16 +- pkg/server/datastore/datastore.go | 16 +- pkg/server/datastore/sqlstore/sqlstore.go | 36 +- .../datastore/sqlstore/sqlstore_test.go | 40 +- .../endpoints/authorized_entryfetcher.go | 18 +- .../authorized_entryfetcher_attested_nodes.go | 345 +-- ...orized_entryfetcher_attested_nodes_test.go | 1583 ++++++++++++- ...rized_entryfetcher_registration_entries.go | 361 +-- ..._entryfetcher_registration_entries_test.go | 1951 ++++++++++++++++- .../endpoints/authorized_entryfetcher_test.go | 64 +- pkg/server/endpoints/entryfetcher.go | 6 +- pkg/server/endpoints/eventTracker.go | 79 + pkg/server/endpoints/eventTracker_test.go | 247 +++ pkg/server/endpoints/middleware_test.go | 1 - test/fakes/fakedatastore/fakedatastore.go | 16 +- 19 files changed, 4181 insertions(+), 664 deletions(-) create mode 100644 pkg/server/endpoints/eventTracker.go create mode 100644 pkg/server/endpoints/eventTracker_test.go diff --git a/pkg/common/telemetry/server/datastore/event.go b/pkg/common/telemetry/server/datastore/event.go index b1fdc59913..b331ee3c3d 100644 --- a/pkg/common/telemetry/server/datastore/event.go +++ b/pkg/common/telemetry/server/datastore/event.go @@ -4,15 +4,15 @@ import ( "github.com/spiffe/spire/pkg/common/telemetry" ) -// StartListRegistrationEntriesEventsCall return metric +// StartListRegistrationEntryEventsCall return metric // for server's datastore, on listing registration entry events. -func StartListRegistrationEntriesEventsCall(m telemetry.Metrics) *telemetry.CallCounter { +func StartListRegistrationEntryEventsCall(m telemetry.Metrics) *telemetry.CallCounter { return telemetry.StartCall(m, telemetry.Datastore, telemetry.RegistrationEntryEvent, telemetry.List) } -// StartPruneRegistrationEntriesEventsCall return metric +// StartPruneRegistrationEntryEventsCall return metric // for server's datastore, on pruning registration entry events. -func StartPruneRegistrationEntriesEventsCall(m telemetry.Metrics) *telemetry.CallCounter { +func StartPruneRegistrationEntryEventsCall(m telemetry.Metrics) *telemetry.CallCounter { return telemetry.StartCall(m, telemetry.Datastore, telemetry.RegistrationEntryEvent, telemetry.Prune) } @@ -34,15 +34,15 @@ func StartFetchRegistrationEntryEventCall(m telemetry.Metrics) *telemetry.CallCo return telemetry.StartCall(m, telemetry.Datastore, telemetry.RegistrationEntryEvent, telemetry.Fetch) } -// StartListAttestedNodesEventsCall return metric +// StartListAttestedNodeEventsCall return metric // for server's datastore, on listing attested node events. -func StartListAttestedNodesEventsCall(m telemetry.Metrics) *telemetry.CallCounter { +func StartListAttestedNodeEventsCall(m telemetry.Metrics) *telemetry.CallCounter { return telemetry.StartCall(m, telemetry.Datastore, telemetry.NodeEvent, telemetry.List) } -// StartPruneAttestedNodesEventsCall return metric +// StartPruneAttestedNodeEventsCall return metric // for server's datastore, on pruning attested node events. 
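The renamed constructors above are all consumed through the same pattern visible in the wrapper methods later in this patch: start the counter before the datastore operation, then defer `Done(&err)` so the deferred call observes the operation's final error. A minimal sketch of that idiom, assuming a `telemetry.Metrics` value; the surrounding `listEvents` function is hypothetical, and only `StartListAttestedNodeEventsCall` comes from this change:

```go
package example

import (
	"context"

	"github.com/spiffe/spire/pkg/common/telemetry"
	tsdatastore "github.com/spiffe/spire/pkg/common/telemetry/server/datastore"
)

// listEvents is an illustrative caller, not code from this change. The
// deferred Done(&err) records the call's duration and success/failure
// based on whatever error the function ultimately returns.
func listEvents(ctx context.Context, m telemetry.Metrics) (err error) {
	callCounter := tsdatastore.StartListAttestedNodeEventsCall(m)
	defer callCounter.Done(&err)
	// ... perform the ListAttestedNodeEvents datastore call here,
	// assigning its error to err before returning ...
	return err
}
```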
-func StartPruneAttestedNodesEventsCall(m telemetry.Metrics) *telemetry.CallCounter { +func StartPruneAttestedNodeEventsCall(m telemetry.Metrics) *telemetry.CallCounter { return telemetry.StartCall(m, telemetry.Datastore, telemetry.NodeEvent, telemetry.Prune) } diff --git a/pkg/common/telemetry/server/datastore/wrapper.go b/pkg/common/telemetry/server/datastore/wrapper.go index 96f84bd0b0..d9f06e218b 100644 --- a/pkg/common/telemetry/server/datastore/wrapper.go +++ b/pkg/common/telemetry/server/datastore/wrapper.go @@ -179,10 +179,10 @@ func (w metricsWrapper) ListAttestedNodes(ctx context.Context, req *datastore.Li return w.ds.ListAttestedNodes(ctx, req) } -func (w metricsWrapper) ListAttestedNodesEvents(ctx context.Context, req *datastore.ListAttestedNodesEventsRequest) (_ *datastore.ListAttestedNodesEventsResponse, err error) { - callCounter := StartListAttestedNodesEventsCall(w.m) +func (w metricsWrapper) ListAttestedNodeEvents(ctx context.Context, req *datastore.ListAttestedNodeEventsRequest) (_ *datastore.ListAttestedNodeEventsResponse, err error) { + callCounter := StartListAttestedNodeEventsCall(w.m) defer callCounter.Done(&err) - return w.ds.ListAttestedNodesEvents(ctx, req) + return w.ds.ListAttestedNodeEvents(ctx, req) } func (w metricsWrapper) ListBundles(ctx context.Context, req *datastore.ListBundlesRequest) (_ *datastore.ListBundlesResponse, err error) { @@ -203,10 +203,10 @@ func (w metricsWrapper) ListRegistrationEntries(ctx context.Context, req *datast return w.ds.ListRegistrationEntries(ctx, req) } -func (w metricsWrapper) ListRegistrationEntriesEvents(ctx context.Context, req *datastore.ListRegistrationEntriesEventsRequest) (_ *datastore.ListRegistrationEntriesEventsResponse, err error) { - callCounter := StartListRegistrationEntriesEventsCall(w.m) +func (w metricsWrapper) ListRegistrationEntryEvents(ctx context.Context, req *datastore.ListRegistrationEntryEventsRequest) (_ *datastore.ListRegistrationEntryEventsResponse, err error) { + callCounter := StartListRegistrationEntryEventsCall(w.m) defer callCounter.Done(&err) - return w.ds.ListRegistrationEntriesEvents(ctx, req) + return w.ds.ListRegistrationEntryEvents(ctx, req) } func (w metricsWrapper) CountAttestedNodes(ctx context.Context, req *datastore.CountAttestedNodesRequest) (_ int32, err error) { @@ -227,10 +227,10 @@ func (w metricsWrapper) CountRegistrationEntries(ctx context.Context, req *datas return w.ds.CountRegistrationEntries(ctx, req) } -func (w metricsWrapper) PruneAttestedNodesEvents(ctx context.Context, olderThan time.Duration) (err error) { - callCounter := StartPruneAttestedNodesEventsCall(w.m) +func (w metricsWrapper) PruneAttestedNodeEvents(ctx context.Context, olderThan time.Duration) (err error) { + callCounter := StartPruneAttestedNodeEventsCall(w.m) defer callCounter.Done(&err) - return w.ds.PruneAttestedNodesEvents(ctx, olderThan) + return w.ds.PruneAttestedNodeEvents(ctx, olderThan) } func (w metricsWrapper) PruneBundle(ctx context.Context, trustDomainID string, expiresBefore time.Time) (_ bool, err error) { @@ -251,10 +251,10 @@ func (w metricsWrapper) PruneRegistrationEntries(ctx context.Context, expiresBef return w.ds.PruneRegistrationEntries(ctx, expiresBefore) } -func (w metricsWrapper) PruneRegistrationEntriesEvents(ctx context.Context, olderThan time.Duration) (err error) { - callCounter := StartPruneRegistrationEntriesEventsCall(w.m) +func (w metricsWrapper) PruneRegistrationEntryEvents(ctx context.Context, olderThan time.Duration) (err error) { + callCounter := 
StartPruneRegistrationEntryEventsCall(w.m) defer callCounter.Done(&err) - return w.ds.PruneRegistrationEntriesEvents(ctx, olderThan) + return w.ds.PruneRegistrationEntryEvents(ctx, olderThan) } func (w metricsWrapper) SetBundle(ctx context.Context, bundle *common.Bundle) (_ *common.Bundle, err error) { diff --git a/pkg/common/telemetry/server/datastore/wrapper_test.go b/pkg/common/telemetry/server/datastore/wrapper_test.go index a0b44885a2..79c1a87f8e 100644 --- a/pkg/common/telemetry/server/datastore/wrapper_test.go +++ b/pkg/common/telemetry/server/datastore/wrapper_test.go @@ -151,7 +151,7 @@ func TestWithMetrics(t *testing.T) { }, { key: "datastore.node_event.list", - methodName: "ListAttestedNodesEvents", + methodName: "ListAttestedNodeEvents", }, { key: "datastore.bundle.list", @@ -167,7 +167,7 @@ func TestWithMetrics(t *testing.T) { }, { key: "datastore.registration_entry_event.list", - methodName: "ListRegistrationEntriesEvents", + methodName: "ListRegistrationEntryEvents", }, { key: "datastore.federation_relationship.list", @@ -175,7 +175,7 @@ func TestWithMetrics(t *testing.T) { }, { key: "datastore.node_event.prune", - methodName: "PruneAttestedNodesEvents", + methodName: "PruneAttestedNodeEvents", }, { key: "datastore.bundle.prune", @@ -191,7 +191,7 @@ func TestWithMetrics(t *testing.T) { }, { key: "datastore.registration_entry_event.prune", - methodName: "PruneRegistrationEntriesEvents", + methodName: "PruneRegistrationEntryEvents", }, { key: "datastore.bundle.set", @@ -445,8 +445,8 @@ func (ds *fakeDataStore) ListAttestedNodes(context.Context, *datastore.ListAttes return &datastore.ListAttestedNodesResponse{}, ds.err } -func (ds *fakeDataStore) ListAttestedNodesEvents(context.Context, *datastore.ListAttestedNodesEventsRequest) (*datastore.ListAttestedNodesEventsResponse, error) { - return &datastore.ListAttestedNodesEventsResponse{}, ds.err +func (ds *fakeDataStore) ListAttestedNodeEvents(context.Context, *datastore.ListAttestedNodeEventsRequest) (*datastore.ListAttestedNodeEventsResponse, error) { + return &datastore.ListAttestedNodeEventsResponse{}, ds.err } func (ds *fakeDataStore) ListBundles(context.Context, *datastore.ListBundlesRequest) (*datastore.ListBundlesResponse, error) { @@ -461,11 +461,11 @@ func (ds *fakeDataStore) ListRegistrationEntries(context.Context, *datastore.Lis return &datastore.ListRegistrationEntriesResponse{}, ds.err } -func (ds *fakeDataStore) ListRegistrationEntriesEvents(context.Context, *datastore.ListRegistrationEntriesEventsRequest) (*datastore.ListRegistrationEntriesEventsResponse, error) { - return &datastore.ListRegistrationEntriesEventsResponse{}, ds.err +func (ds *fakeDataStore) ListRegistrationEntryEvents(context.Context, *datastore.ListRegistrationEntryEventsRequest) (*datastore.ListRegistrationEntryEventsResponse, error) { + return &datastore.ListRegistrationEntryEventsResponse{}, ds.err } -func (ds *fakeDataStore) PruneAttestedNodesEvents(context.Context, time.Duration) error { +func (ds *fakeDataStore) PruneAttestedNodeEvents(context.Context, time.Duration) error { return ds.err } @@ -481,7 +481,7 @@ func (ds *fakeDataStore) PruneRegistrationEntries(context.Context, time.Time) er return ds.err } -func (ds *fakeDataStore) PruneRegistrationEntriesEvents(context.Context, time.Duration) error { +func (ds *fakeDataStore) PruneRegistrationEntryEvents(context.Context, time.Duration) error { return ds.err } diff --git a/pkg/server/authorizedentries/cache.go b/pkg/server/authorizedentries/cache.go index 77e2d7aaf5..bbf3c464cb 100644 --- 
a/pkg/server/authorizedentries/cache.go +++ b/pkg/server/authorizedentries/cache.go @@ -267,8 +267,8 @@ func (c *Cache) removeEntry(entryID string) { } } -func (c *Cache) Stats() cacheStats { - return cacheStats{ +func (c *Cache) Stats() CacheStats { + return CacheStats{ AgentsByID: c.agentsByID.Len(), AgentsByExpiresAt: c.agentsByExpiresAt.Len(), AliasesByEntryID: c.aliasesByEntryID.Len(), @@ -286,7 +286,7 @@ func isNodeAlias(e *types.Entry) bool { return e.ParentId.Path == idutil.ServerIDPath } -type cacheStats struct { +type CacheStats struct { AgentsByID int AgentsByExpiresAt int AliasesByEntryID int diff --git a/pkg/server/authorizedentries/cache_test.go b/pkg/server/authorizedentries/cache_test.go index 86315bece5..f16c9d8b08 100644 --- a/pkg/server/authorizedentries/cache_test.go +++ b/pkg/server/authorizedentries/cache_test.go @@ -186,19 +186,19 @@ func TestCacheInternalStats(t *testing.T) { cache := NewCache(clk) cache.UpdateEntry(entry1) - require.Equal(t, cacheStats{ + require.Equal(t, CacheStats{ EntriesByEntryID: 1, EntriesByParentID: 1, }, cache.Stats()) cache.UpdateEntry(entry2a) - require.Equal(t, cacheStats{ + require.Equal(t, CacheStats{ EntriesByEntryID: 2, EntriesByParentID: 2, }, cache.Stats()) cache.UpdateEntry(entry2b) - require.Equal(t, cacheStats{ + require.Equal(t, CacheStats{ EntriesByEntryID: 1, EntriesByParentID: 1, AliasesByEntryID: 2, // one for each selector @@ -206,7 +206,7 @@ func TestCacheInternalStats(t *testing.T) { }, cache.Stats()) cache.RemoveEntry(entry1.Id) - require.Equal(t, cacheStats{ + require.Equal(t, CacheStats{ AliasesByEntryID: 2, // one for each selector AliasesBySelector: 2, // one for each selector }, cache.Stats()) @@ -222,25 +222,25 @@ func TestCacheInternalStats(t *testing.T) { t.Run("agents", func(t *testing.T) { cache := NewCache(clk) cache.UpdateAgent(agent1.String(), now.Add(time.Hour), []*types.Selector{sel1}) - require.Equal(t, cacheStats{ + require.Equal(t, CacheStats{ AgentsByID: 1, AgentsByExpiresAt: 1, }, cache.Stats()) cache.UpdateAgent(agent2.String(), now.Add(time.Hour*2), []*types.Selector{sel2}) - require.Equal(t, cacheStats{ + require.Equal(t, CacheStats{ AgentsByID: 2, AgentsByExpiresAt: 2, }, cache.Stats()) cache.UpdateAgent(agent2.String(), now.Add(time.Hour*3), []*types.Selector{sel2}) - require.Equal(t, cacheStats{ + require.Equal(t, CacheStats{ AgentsByID: 2, AgentsByExpiresAt: 2, }, cache.Stats()) cache.RemoveAgent(agent1.String()) - require.Equal(t, cacheStats{ + require.Equal(t, CacheStats{ AgentsByID: 1, AgentsByExpiresAt: 1, }, cache.Stats()) diff --git a/pkg/server/datastore/datastore.go b/pkg/server/datastore/datastore.go index 6cc3cfca5a..1f89841210 100644 --- a/pkg/server/datastore/datastore.go +++ b/pkg/server/datastore/datastore.go @@ -40,8 +40,8 @@ type DataStore interface { UpdateRegistrationEntry(context.Context, *common.RegistrationEntry, *common.RegistrationEntryMask) (*common.RegistrationEntry, error) // Entries Events - ListRegistrationEntriesEvents(ctx context.Context, req *ListRegistrationEntriesEventsRequest) (*ListRegistrationEntriesEventsResponse, error) - PruneRegistrationEntriesEvents(ctx context.Context, olderThan time.Duration) error + ListRegistrationEntryEvents(ctx context.Context, req *ListRegistrationEntryEventsRequest) (*ListRegistrationEntryEventsResponse, error) + PruneRegistrationEntryEvents(ctx context.Context, olderThan time.Duration) error FetchRegistrationEntryEvent(ctx context.Context, eventID uint) (*RegistrationEntryEvent, error) CreateRegistrationEntryEventForTesting(ctx 
context.Context, event *RegistrationEntryEvent) error DeleteRegistrationEntryEventForTesting(ctx context.Context, eventID uint) error @@ -55,8 +55,8 @@ type DataStore interface { UpdateAttestedNode(context.Context, *common.AttestedNode, *common.AttestedNodeMask) (*common.AttestedNode, error) // Nodes Events - ListAttestedNodesEvents(ctx context.Context, req *ListAttestedNodesEventsRequest) (*ListAttestedNodesEventsResponse, error) - PruneAttestedNodesEvents(ctx context.Context, olderThan time.Duration) error + ListAttestedNodeEvents(ctx context.Context, req *ListAttestedNodeEventsRequest) (*ListAttestedNodeEventsResponse, error) + PruneAttestedNodeEvents(ctx context.Context, olderThan time.Duration) error FetchAttestedNodeEvent(ctx context.Context, eventID uint) (*AttestedNodeEvent, error) CreateAttestedNodeEventForTesting(ctx context.Context, event *AttestedNodeEvent) error DeleteAttestedNodeEventForTesting(ctx context.Context, eventID uint) error @@ -169,7 +169,7 @@ type ListAttestedNodesResponse struct { Pagination *Pagination } -type ListAttestedNodesEventsRequest struct { +type ListAttestedNodeEventsRequest struct { GreaterThanEventID uint LessThanEventID uint } @@ -179,7 +179,7 @@ type AttestedNodeEvent struct { SpiffeID string } -type ListAttestedNodesEventsResponse struct { +type ListAttestedNodeEventsResponse struct { Events []AttestedNodeEvent } @@ -223,7 +223,7 @@ type ListRegistrationEntriesResponse struct { Pagination *Pagination } -type ListRegistrationEntriesEventsRequest struct { +type ListRegistrationEntryEventsRequest struct { GreaterThanEventID uint LessThanEventID uint } @@ -233,7 +233,7 @@ type RegistrationEntryEvent struct { EntryID string } -type ListRegistrationEntriesEventsResponse struct { +type ListRegistrationEntryEventsResponse struct { Events []RegistrationEntryEvent } diff --git a/pkg/server/datastore/sqlstore/sqlstore.go b/pkg/server/datastore/sqlstore/sqlstore.go index f1645e7bd4..21ebeda1cb 100644 --- a/pkg/server/datastore/sqlstore/sqlstore.go +++ b/pkg/server/datastore/sqlstore/sqlstore.go @@ -376,10 +376,10 @@ func (ds *Plugin) DeleteAttestedNode(ctx context.Context, spiffeID string) (atte return attestedNode, nil } -// ListAttestedNodesEvents lists all attested node events -func (ds *Plugin) ListAttestedNodesEvents(ctx context.Context, req *datastore.ListAttestedNodesEventsRequest) (resp *datastore.ListAttestedNodesEventsResponse, err error) { +// ListAttestedNodeEvents lists all attested node events +func (ds *Plugin) ListAttestedNodeEvents(ctx context.Context, req *datastore.ListAttestedNodeEventsRequest) (resp *datastore.ListAttestedNodeEventsResponse, err error) { if err = ds.withReadTx(ctx, func(tx *gorm.DB) (err error) { - resp, err = listAttestedNodesEvents(tx, req) + resp, err = listAttestedNodeEvents(tx, req) return err }); err != nil { return nil, err @@ -387,10 +387,10 @@ func (ds *Plugin) ListAttestedNodesEvents(ctx context.Context, req *datastore.Li return resp, nil } -// PruneAttestedNodesEvents deletes all attested node events older than a specified duration (i.e. more than 24 hours old) -func (ds *Plugin) PruneAttestedNodesEvents(ctx context.Context, olderThan time.Duration) (err error) { +// PruneAttestedNodeEvents deletes all attested node events older than a specified duration (i.e. 
more than 24 hours old) +func (ds *Plugin) PruneAttestedNodeEvents(ctx context.Context, olderThan time.Duration) (err error) { return ds.withWriteTx(ctx, func(tx *gorm.DB) (err error) { - err = pruneAttestedNodesEvents(tx, olderThan) + err = pruneAttestedNodeEvents(tx, olderThan) return err }) } @@ -574,10 +574,10 @@ func (ds *Plugin) PruneRegistrationEntries(ctx context.Context, expiresBefore ti }) } -// ListRegistrationEntriesEvents lists all registration entry events -func (ds *Plugin) ListRegistrationEntriesEvents(ctx context.Context, req *datastore.ListRegistrationEntriesEventsRequest) (resp *datastore.ListRegistrationEntriesEventsResponse, err error) { +// ListRegistrationEntryEvents lists all registration entry events +func (ds *Plugin) ListRegistrationEntryEvents(ctx context.Context, req *datastore.ListRegistrationEntryEventsRequest) (resp *datastore.ListRegistrationEntryEventsResponse, err error) { if err = ds.withReadTx(ctx, func(tx *gorm.DB) (err error) { - resp, err = listRegistrationEntriesEvents(tx, req) + resp, err = listRegistrationEntryEvents(tx, req) return err }); err != nil { return nil, err @@ -585,10 +585,10 @@ func (ds *Plugin) ListRegistrationEntriesEvents(ctx context.Context, req *datast return resp, nil } -// PruneRegistrationEntriesEvents deletes all registration entry events older than a specified duration (i.e. more than 24 hours old) -func (ds *Plugin) PruneRegistrationEntriesEvents(ctx context.Context, olderThan time.Duration) (err error) { +// PruneRegistrationEntryEvents deletes all registration entry events older than a specified duration (i.e. more than 24 hours old) +func (ds *Plugin) PruneRegistrationEntryEvents(ctx context.Context, olderThan time.Duration) (err error) { return ds.withWriteTx(ctx, func(tx *gorm.DB) (err error) { - err = pruneRegistrationEntriesEvents(tx, olderThan) + err = pruneRegistrationEntryEvents(tx, olderThan) return err }) } @@ -1701,7 +1701,7 @@ func createAttestedNodeEvent(tx *gorm.DB, event *datastore.AttestedNodeEvent) er return nil } -func listAttestedNodesEvents(tx *gorm.DB, req *datastore.ListAttestedNodesEventsRequest) (*datastore.ListAttestedNodesEventsResponse, error) { +func listAttestedNodeEvents(tx *gorm.DB, req *datastore.ListAttestedNodeEventsRequest) (*datastore.ListAttestedNodeEventsResponse, error) { var events []AttestedNodeEvent if req.GreaterThanEventID != 0 || req.LessThanEventID != 0 { @@ -1719,7 +1719,7 @@ func listAttestedNodesEvents(tx *gorm.DB, req *datastore.ListAttestedNodesEvents } } - resp := &datastore.ListAttestedNodesEventsResponse{ + resp := &datastore.ListAttestedNodeEventsResponse{ Events: make([]datastore.AttestedNodeEvent, len(events)), } for i, event := range events { @@ -1730,7 +1730,7 @@ func listAttestedNodesEvents(tx *gorm.DB, req *datastore.ListAttestedNodesEvents return resp, nil } -func pruneAttestedNodesEvents(tx *gorm.DB, olderThan time.Duration) error { +func pruneAttestedNodeEvents(tx *gorm.DB, olderThan time.Duration) error { if err := tx.Where("created_at < ?", time.Now().Add(-olderThan)).Delete(&AttestedNodeEvent{}).Error; err != nil { return sqlError.Wrap(err) } @@ -4086,7 +4086,7 @@ func deleteRegistrationEntryEvent(tx *gorm.DB, eventID uint) error { return nil } -func listRegistrationEntriesEvents(tx *gorm.DB, req *datastore.ListRegistrationEntriesEventsRequest) (*datastore.ListRegistrationEntriesEventsResponse, error) { +func listRegistrationEntryEvents(tx *gorm.DB, req *datastore.ListRegistrationEntryEventsRequest) (*datastore.ListRegistrationEntryEventsResponse, error) { 
var events []RegisteredEntryEvent if req.GreaterThanEventID != 0 || req.LessThanEventID != 0 { @@ -4104,7 +4104,7 @@ func listRegistrationEntriesEvents(tx *gorm.DB, req *datastore.ListRegistrationE } } - resp := &datastore.ListRegistrationEntriesEventsResponse{ + resp := &datastore.ListRegistrationEntryEventsResponse{ Events: make([]datastore.RegistrationEntryEvent, len(events)), } for i, event := range events { @@ -4115,7 +4115,7 @@ func listRegistrationEntriesEvents(tx *gorm.DB, req *datastore.ListRegistrationE return resp, nil } -func pruneRegistrationEntriesEvents(tx *gorm.DB, olderThan time.Duration) error { +func pruneRegistrationEntryEvents(tx *gorm.DB, olderThan time.Duration) error { if err := tx.Where("created_at < ?", time.Now().Add(-olderThan)).Delete(&RegisteredEntryEvent{}).Error; err != nil { return sqlError.Wrap(err) } diff --git a/pkg/server/datastore/sqlstore/sqlstore_test.go b/pkg/server/datastore/sqlstore/sqlstore_test.go index 979469298c..4f18f0c32c 100644 --- a/pkg/server/datastore/sqlstore/sqlstore_test.go +++ b/pkg/server/datastore/sqlstore/sqlstore_test.go @@ -1498,7 +1498,7 @@ func (s *PluginSuite) TestDeleteAttestedNode() { }) } -func (s *PluginSuite) TestListAttestedNodesEvents() { +func (s *PluginSuite) TestListAttestedNodeEvents() { var expectedEvents []datastore.AttestedNodeEvent // Create an attested node @@ -1601,7 +1601,7 @@ func (s *PluginSuite) TestListAttestedNodesEvents() { } for _, test := range tests { s.T().Run(test.name, func(t *testing.T) { - resp, err := s.ds.ListAttestedNodesEvents(ctx, &datastore.ListAttestedNodesEventsRequest{ + resp, err := s.ds.ListAttestedNodeEvents(ctx, &datastore.ListAttestedNodeEventsRequest{ GreaterThanEventID: test.greaterThanEventID, LessThanEventID: test.lessThanEventID, }) @@ -1620,7 +1620,7 @@ func (s *PluginSuite) TestListAttestedNodesEvents() { } } -func (s *PluginSuite) TestPruneAttestedNodesEvents() { +func (s *PluginSuite) TestPruneAttestedNodeEvents() { node, err := s.ds.CreateAttestedNode(ctx, &common.AttestedNode{ SpiffeId: "foo", AttestationDataType: "aws-tag", @@ -1629,7 +1629,7 @@ func (s *PluginSuite) TestPruneAttestedNodesEvents() { }) s.Require().NoError(err) - resp, err := s.ds.ListAttestedNodesEvents(ctx, &datastore.ListAttestedNodesEventsRequest{}) + resp, err := s.ds.ListAttestedNodeEvents(ctx, &datastore.ListAttestedNodeEventsRequest{}) s.Require().NoError(err) s.Require().Equal(node.SpiffeId, resp.Events[0].SpiffeID) @@ -1656,9 +1656,9 @@ func (s *PluginSuite) TestPruneAttestedNodesEvents() { } { s.T().Run(tt.name, func(t *testing.T) { s.Require().Eventuallyf(func() bool { - err = s.ds.PruneAttestedNodesEvents(ctx, tt.olderThan) + err = s.ds.PruneAttestedNodeEvents(ctx, tt.olderThan) s.Require().NoError(err) - resp, err := s.ds.ListAttestedNodesEvents(ctx, &datastore.ListAttestedNodesEventsRequest{}) + resp, err := s.ds.ListAttestedNodeEvents(ctx, &datastore.ListAttestedNodeEventsRequest{}) s.Require().NoError(err) return reflect.DeepEqual(tt.expectedEvents, resp.Events) }, 10*time.Second, 50*time.Millisecond, "Failed to prune entries correctly") @@ -2111,7 +2111,7 @@ func (s *PluginSuite) TestPruneRegistrationEntries() { } prunedLogMessage := "Pruned an expired registration" - resp, err := s.ds.ListRegistrationEntriesEvents(ctx, &datastore.ListRegistrationEntriesEventsRequest{}) + resp, err := s.ds.ListRegistrationEntryEvents(ctx, &datastore.ListRegistrationEntryEventsRequest{}) s.Require().NoError(err) s.Require().Equal(1, len(resp.Events)) s.Require().Equal(createdRegistrationEntry.EntryId, 
resp.Events[0].EntryID) @@ -2152,7 +2152,7 @@ func (s *PluginSuite) TestPruneRegistrationEntries() { tt := tt s.T().Run(tt.name, func(t *testing.T) { // Get latest event id - resp, err := s.ds.ListRegistrationEntriesEvents(ctx, &datastore.ListRegistrationEntriesEventsRequest{}) + resp, err := s.ds.ListRegistrationEntryEvents(ctx, &datastore.ListRegistrationEntryEventsRequest{}) require.NoError(t, err) require.Greater(t, len(resp.Events), 0) lastEventID := resp.Events[len(resp.Events)-1].EventID @@ -2165,7 +2165,7 @@ func (s *PluginSuite) TestPruneRegistrationEntries() { assert.Equal(t, tt.expectedRegistrationEntry, fetchedRegistrationEntry) // Verify pruning triggers event creation - resp, err = s.ds.ListRegistrationEntriesEvents(ctx, &datastore.ListRegistrationEntriesEventsRequest{ + resp, err = s.ds.ListRegistrationEntryEvents(ctx, &datastore.ListRegistrationEntryEventsRequest{ GreaterThanEventID: lastEventID, }) require.NoError(t, err) @@ -3977,7 +3977,7 @@ func (s *PluginSuite) TestDeleteBundleDissociateRegistrationEntries() { s.Require().Empty(entry.FederatesWith) } -func (s *PluginSuite) TestListRegistrationEntriesEvents() { +func (s *PluginSuite) TestListRegistrationEntryEvents() { var expectedEvents []datastore.RegistrationEntryEvent var expectedEventID uint = 1 @@ -3995,7 +3995,7 @@ func (s *PluginSuite) TestListRegistrationEntriesEvents() { }) expectedEventID++ - resp, err := s.ds.ListRegistrationEntriesEvents(ctx, &datastore.ListRegistrationEntriesEventsRequest{}) + resp, err := s.ds.ListRegistrationEntryEvents(ctx, &datastore.ListRegistrationEntryEventsRequest{}) s.Require().NoError(err) s.Require().Equal(expectedEvents, resp.Events) @@ -4013,7 +4013,7 @@ func (s *PluginSuite) TestListRegistrationEntriesEvents() { }) expectedEventID++ - resp, err = s.ds.ListRegistrationEntriesEvents(ctx, &datastore.ListRegistrationEntriesEventsRequest{}) + resp, err = s.ds.ListRegistrationEntryEvents(ctx, &datastore.ListRegistrationEntryEventsRequest{}) s.Require().NoError(err) s.Require().Equal(expectedEvents, resp.Events) @@ -4026,7 +4026,7 @@ func (s *PluginSuite) TestListRegistrationEntriesEvents() { }) expectedEventID++ - resp, err = s.ds.ListRegistrationEntriesEvents(ctx, &datastore.ListRegistrationEntriesEventsRequest{}) + resp, err = s.ds.ListRegistrationEntryEvents(ctx, &datastore.ListRegistrationEntryEventsRequest{}) s.Require().NoError(err) s.Require().Equal(expectedEvents, resp.Events) @@ -4037,7 +4037,7 @@ func (s *PluginSuite) TestListRegistrationEntriesEvents() { EntryID: entry2.EntryId, }) - resp, err = s.ds.ListRegistrationEntriesEvents(ctx, &datastore.ListRegistrationEntriesEventsRequest{}) + resp, err = s.ds.ListRegistrationEntryEvents(ctx, &datastore.ListRegistrationEntryEventsRequest{}) s.Require().NoError(err) s.Require().Equal(expectedEvents, resp.Events) @@ -4086,7 +4086,7 @@ func (s *PluginSuite) TestListRegistrationEntriesEvents() { } for _, test := range tests { s.T().Run(test.name, func(t *testing.T) { - resp, err = s.ds.ListRegistrationEntriesEvents(ctx, &datastore.ListRegistrationEntriesEventsRequest{ + resp, err = s.ds.ListRegistrationEntryEvents(ctx, &datastore.ListRegistrationEntryEventsRequest{ GreaterThanEventID: test.greaterThanEventID, LessThanEventID: test.lessThanEventID, }) @@ -4105,7 +4105,7 @@ func (s *PluginSuite) TestListRegistrationEntriesEvents() { } } -func (s *PluginSuite) TestPruneRegistrationEntriesEvents() { +func (s *PluginSuite) TestPruneRegistrationEntryEvents() { entry := &common.RegistrationEntry{ Selectors: []*common.Selector{ {Type: 
"Type1", Value: "Value1"}, @@ -4115,7 +4115,7 @@ func (s *PluginSuite) TestPruneRegistrationEntriesEvents() { } createdRegistrationEntry := s.createRegistrationEntry(entry) - resp, err := s.ds.ListRegistrationEntriesEvents(ctx, &datastore.ListRegistrationEntriesEventsRequest{}) + resp, err := s.ds.ListRegistrationEntryEvents(ctx, &datastore.ListRegistrationEntryEventsRequest{}) s.Require().NoError(err) s.Require().Equal(createdRegistrationEntry.EntryId, resp.Events[0].EntryID) @@ -4142,9 +4142,9 @@ func (s *PluginSuite) TestPruneRegistrationEntriesEvents() { } { s.T().Run(tt.name, func(t *testing.T) { s.Require().Eventuallyf(func() bool { - err = s.ds.PruneRegistrationEntriesEvents(ctx, tt.olderThan) + err = s.ds.PruneRegistrationEntryEvents(ctx, tt.olderThan) s.Require().NoError(err) - resp, err := s.ds.ListRegistrationEntriesEvents(ctx, &datastore.ListRegistrationEntriesEventsRequest{}) + resp, err := s.ds.ListRegistrationEntryEvents(ctx, &datastore.ListRegistrationEntryEventsRequest{}) s.Require().NoError(err) return reflect.DeepEqual(tt.expectedEvents, resp.Events) }, 10*time.Second, 50*time.Millisecond, "Failed to prune entries correctly") @@ -5287,7 +5287,7 @@ func (s *PluginSuite) checkAttestedNodeEvents(expectedEvents []datastore.Atteste SpiffeID: spiffeID, }) - resp, err := s.ds.ListAttestedNodesEvents(ctx, &datastore.ListAttestedNodesEventsRequest{}) + resp, err := s.ds.ListAttestedNodeEvents(ctx, &datastore.ListAttestedNodeEventsRequest{}) s.Require().NoError(err) s.Require().Equal(expectedEvents, resp.Events) diff --git a/pkg/server/endpoints/authorized_entryfetcher.go b/pkg/server/endpoints/authorized_entryfetcher.go index 27de3f14f3..0d31853129 100644 --- a/pkg/server/endpoints/authorized_entryfetcher.go +++ b/pkg/server/endpoints/authorized_entryfetcher.go @@ -34,12 +34,11 @@ type AuthorizedEntryFetcherWithEventsBasedCache struct { type eventsBasedCache interface { updateCache(ctx context.Context) error - pruneMissedEvents() } func NewAuthorizedEntryFetcherWithEventsBasedCache(ctx context.Context, log logrus.FieldLogger, metrics telemetry.Metrics, clk clock.Clock, ds datastore.DataStore, cacheReloadInterval, pruneEventsOlderThan, sqlTransactionTimeout time.Duration) (*AuthorizedEntryFetcherWithEventsBasedCache, error) { log.Info("Building event-based in-memory entry cache") - cache, registrationEntries, attestedNodes, err := buildCache(ctx, log, metrics, ds, clk, sqlTransactionTimeout) + cache, registrationEntries, attestedNodes, err := buildCache(ctx, log, metrics, ds, clk, cacheReloadInterval, sqlTransactionTimeout) if err != nil { return nil, err } @@ -96,13 +95,10 @@ func (a *AuthorizedEntryFetcherWithEventsBasedCache) PruneEventsTask(ctx context } func (a *AuthorizedEntryFetcherWithEventsBasedCache) pruneEvents(ctx context.Context, olderThan time.Duration) error { - pruneRegistrationEntriesEventsErr := a.ds.PruneRegistrationEntriesEvents(ctx, olderThan) - pruneAttestedNodesEventsErr := a.ds.PruneAttestedNodesEvents(ctx, olderThan) + pruneRegistrationEntryEventsErr := a.ds.PruneRegistrationEntryEvents(ctx, olderThan) + pruneAttestedNodeEventsErr := a.ds.PruneAttestedNodeEvents(ctx, olderThan) - a.registrationEntries.pruneMissedEvents() - a.attestedNodes.pruneMissedEvents() - - return errors.Join(pruneRegistrationEntriesEventsErr, pruneAttestedNodesEventsErr) + return errors.Join(pruneRegistrationEntryEventsErr, pruneAttestedNodeEventsErr) } func (a *AuthorizedEntryFetcherWithEventsBasedCache) updateCache(ctx context.Context) error { @@ -112,15 +108,15 @@ func (a 
*AuthorizedEntryFetcherWithEventsBasedCache) updateCache(ctx context.Con return errors.Join(updateRegistrationEntriesCacheErr, updateAttestedNodesCacheErr) } -func buildCache(ctx context.Context, log logrus.FieldLogger, metrics telemetry.Metrics, ds datastore.DataStore, clk clock.Clock, sqlTransactionTimeout time.Duration) (*authorizedentries.Cache, *registrationEntries, *attestedNodes, error) { +func buildCache(ctx context.Context, log logrus.FieldLogger, metrics telemetry.Metrics, ds datastore.DataStore, clk clock.Clock, cacheReloadInterval, sqlTransactionTimeout time.Duration) (*authorizedentries.Cache, *registrationEntries, *attestedNodes, error) { cache := authorizedentries.NewCache(clk) - registrationEntries, err := buildRegistrationEntriesCache(ctx, log, metrics, ds, clk, cache, buildCachePageSize, sqlTransactionTimeout) + registrationEntries, err := buildRegistrationEntriesCache(ctx, log, metrics, ds, clk, cache, buildCachePageSize, cacheReloadInterval, sqlTransactionTimeout) if err != nil { return nil, nil, nil, err } - attestedNodes, err := buildAttestedNodesCache(ctx, log, metrics, ds, clk, cache, sqlTransactionTimeout) + attestedNodes, err := buildAttestedNodesCache(ctx, log, metrics, ds, clk, cache, cacheReloadInterval, sqlTransactionTimeout) if err != nil { return nil, nil, nil, err } diff --git a/pkg/server/endpoints/authorized_entryfetcher_attested_nodes.go b/pkg/server/endpoints/authorized_entryfetcher_attested_nodes.go index 6d692eeba7..32b854f213 100644 --- a/pkg/server/endpoints/authorized_entryfetcher_attested_nodes.go +++ b/pkg/server/endpoints/authorized_entryfetcher_attested_nodes.go @@ -3,7 +3,6 @@ package endpoints import ( "context" "fmt" - "sync" "time" "github.com/andres-erbsen/clock" @@ -24,227 +23,235 @@ type attestedNodes struct { ds datastore.DataStore log logrus.FieldLogger metrics telemetry.Metrics - mu sync.RWMutex - - firstEventID uint - firstEventTime time.Time - lastEventID uint - missedEvents map[uint]time.Time - seenMissedStartupEvents map[uint]struct{} - sqlTransactionTimeout time.Duration + + eventsBeforeFirst map[uint]struct{} + + firstEvent uint + firstEventTime time.Time + lastEvent uint + + eventTracker *eventTracker + sqlTransactionTimeout time.Duration + + fetchNodes map[string]struct{} + + // metrics change detection + skippedNodeEvents int + lastCacheStats authorizedentries.CacheStats } -// buildAttestedNodesCache fetches all attested nodes and adds the unexpired ones to the cache. -// It runs once at startup. 
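buildCache now threads cacheReloadInterval through to both sub-caches so each can size its event tracker via PollPeriods, which is defined in the new pkg/server/endpoints/eventTracker.go and not shown in this excerpt. A sketch of what that sizing plausibly computes, assuming a skipped event is polled once per reload cycle for at least one full SQL transaction timeout; the name pollPeriodsSketch and its body are illustrative, not the patch's implementation:

```go
package example

import "time"

// pollPeriodsSketch estimates how many cache-reload cycles fit inside one
// SQL transaction timeout. A skipped event ID only matters while the
// transaction that allocated it could still commit, so tracking it for
// this many polls covers that window. Illustrative only; the real
// PollPeriods lives in pkg/server/endpoints/eventTracker.go.
func pollPeriodsSketch(cacheReloadInterval, sqlTransactionTimeout time.Duration) uint {
	if cacheReloadInterval <= 0 {
		cacheReloadInterval = time.Second // guard against division by zero
	}
	// Round up so the tracked window is never shorter than the timeout.
	return uint(sqlTransactionTimeout/cacheReloadInterval) + 1
}
```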
-func buildAttestedNodesCache(ctx context.Context, log logrus.FieldLogger, metrics telemetry.Metrics, ds datastore.DataStore, clk clock.Clock, cache *authorizedentries.Cache, sqlTransactionTimeout time.Duration) (*attestedNodes, error) {
-	resp, err := ds.ListAttestedNodesEvents(ctx, &datastore.ListAttestedNodesEventsRequest{})
-	if err != nil {
-		return nil, err
+func (a *attestedNodes) captureChangedNodes(ctx context.Context) error {
+	// first, reset what we might fetch
+	a.fetchNodes = make(map[string]struct{})
+
+	if err := a.searchBeforeFirstEvent(ctx); err != nil {
+		return err
+	}
+	a.selectPolledEvents(ctx)
+	if err := a.scanForNewEvents(ctx); err != nil {
+		return err
 	}
 
-	// Gather any events that may have been skipped during restart
-	var firstEventID uint
-	var firstEventTime time.Time
-	var lastEventID uint
-	missedEvents := make(map[uint]time.Time)
-	for _, event := range resp.Events {
-		now := clk.Now()
-		if firstEventTime.IsZero() {
-			firstEventID = event.EventID
-			firstEventTime = now
-		} else {
-			// After getting the first event, search for any gaps in the event stream, from the first event to the last event.
-			// During each cache refresh cycle, we will check if any of these missed events get populated.
-			for i := lastEventID + 1; i < event.EventID; i++ {
-				missedEvents[i] = now
+	return nil
+}
+
+func (a *attestedNodes) searchBeforeFirstEvent(ctx context.Context) error {
+	// First event detected, and startup was less than a transaction timeout away.
+	if !a.firstEventTime.IsZero() && a.clk.Now().Sub(a.firstEventTime) <= a.sqlTransactionTimeout {
+		resp, err := a.ds.ListAttestedNodeEvents(ctx, &datastore.ListAttestedNodeEventsRequest{
+			LessThanEventID: a.firstEvent,
+		})
+		if err != nil {
+			return err
+		}
+		for _, event := range resp.Events {
+			// if we have seen it before, don't reload it.
+ if _, seen := a.eventsBeforeFirst[event.EventID]; !seen { + a.fetchNodes[event.SpiffeID] = struct{}{} + a.eventsBeforeFirst[event.EventID] = struct{}{} } } - lastEventID = event.EventID + return nil } - // Build the cache - nodesResp, err := ds.ListAttestedNodes(ctx, &datastore.ListAttestedNodesRequest{ - FetchSelectors: true, - }) - if err != nil { - return nil, fmt.Errorf("failed to list attested nodes: %w", err) + // zero out unused event tracker + if len(a.eventsBeforeFirst) != 0 { + a.eventsBeforeFirst = make(map[uint]struct{}) } - for _, node := range nodesResp.Nodes { - agentExpiresAt := time.Unix(node.CertNotAfter, 0) - if agentExpiresAt.Before(clk.Now()) { + return nil +} + +func (a *attestedNodes) selectPolledEvents(ctx context.Context) { + // check if the polled events have appeared out-of-order + selectedEvents := a.eventTracker.SelectEvents() + for _, eventID := range selectedEvents { + log := a.log.WithField(telemetry.EventID, eventID) + event, err := a.ds.FetchAttestedNodeEvent(ctx, eventID) + + switch status.Code(err) { + case codes.OK: + case codes.NotFound: + continue + default: + log.WithError(err).Errorf("Failed to fetch info about skipped node event %d", eventID) continue } - cache.UpdateAgent(node.SpiffeId, agentExpiresAt, api.ProtoFromSelectors(node.Selectors)) - } - - return &attestedNodes{ - cache: cache, - clk: clk, - ds: ds, - firstEventID: firstEventID, - firstEventTime: firstEventTime, - log: log, - metrics: metrics, - lastEventID: lastEventID, - missedEvents: missedEvents, - seenMissedStartupEvents: make(map[uint]struct{}), - sqlTransactionTimeout: sqlTransactionTimeout, - }, nil -} -// updateCache Fetches all the events since the last time this function was running and updates -// the cache with all the changes. -func (a *attestedNodes) updateCache(ctx context.Context) error { - // Process events skipped over previously - if err := a.missedStartupEvents(ctx); err != nil { - a.log.WithError(err).Error("Unable to process missed startup events") + a.fetchNodes[event.SpiffeID] = struct{}{} + a.eventTracker.StopTracking(eventID) } - a.replayMissedEvents(ctx) + a.eventTracker.FreeEvents(selectedEvents) +} - req := &datastore.ListAttestedNodesEventsRequest{ - GreaterThanEventID: a.lastEventID, +func (a *attestedNodes) scanForNewEvents(ctx context.Context) error { + // If we haven't seen an event, scan for all events; otherwise, scan from the last event. + var resp *datastore.ListAttestedNodeEventsResponse + var err error + if a.firstEventTime.IsZero() { + resp, err = a.ds.ListAttestedNodeEvents(ctx, &datastore.ListAttestedNodeEventsRequest{}) + } else { + resp, err = a.ds.ListAttestedNodeEvents(ctx, &datastore.ListAttestedNodeEventsRequest{ + GreaterThanEventID: a.lastEvent, + }) } - resp, err := a.ds.ListAttestedNodesEvents(ctx, req) if err != nil { return err } - seenMap := map[string]struct{}{} for _, event := range resp.Events { - // If there is a gap in the event stream, log the missed events for later processing. - // For example if the current event ID is 6 and the previous one was 3, events 4 and 5 - // were skipped over and need to be queued in case they show up later. - // This can happen when a long running transaction allocates an event ID but a shorter transaction - // comes in after, allocates and commits the ID first. If a read comes in at this moment, the event id for - // the longer running transaction will be skipped over. 
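The removed comment above describes how event IDs get skipped; the replacement logic in scanForNewEvents (below) records each gap with eventTracker.StartTracking so later polls can check whether the missing IDs were committed late. A self-contained toy showing just that gap arithmetic; toyTracker stands in for the patch's eventTracker, and only the StartTracking method name matches the real type:

```go
package main

import "fmt"

// toyTracker is a stand-in for the patch's eventTracker; it only records
// which event IDs are being watched for late arrival.
type toyTracker struct{ tracked map[uint]struct{} }

func (t *toyTracker) StartTracking(eventID uint) { t.tracked[eventID] = struct{}{} }

func main() {
	tracker := &toyTracker{tracked: make(map[uint]struct{})}

	// Suppose a poll returns committed event IDs 3, 6, and 10: long-running
	// transactions may still commit 4, 5, 7, 8, or 9 later, so those IDs
	// are tracked for future polls, mirroring the loop in scanForNewEvents.
	lastEvent := uint(3)
	for _, eventID := range []uint{6, 10} {
		for skipped := lastEvent + 1; skipped < eventID; skipped++ {
			tracker.StartTracking(skipped)
		}
		lastEvent = eventID
	}
	fmt.Println(len(tracker.tracked)) // 5 tracked gaps: 4, 5, 7, 8, 9
}
```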
- if !a.firstEventTime.IsZero() { - for i := a.lastEventID + 1; i < event.EventID; i++ { - a.log.WithField(telemetry.EventID, i).Info("Detected skipped attested node event") - a.mu.Lock() - a.missedEvents[i] = a.clk.Now() - a.mu.Unlock() - } - } - - // Skip fetching entries we've already fetched this call - if _, seen := seenMap[event.SpiffeID]; seen { - a.lastEventID = event.EventID + // event time determines if we have seen the first event. + if a.firstEventTime.IsZero() { + a.firstEvent = event.EventID + a.lastEvent = event.EventID + a.fetchNodes[event.SpiffeID] = struct{}{} + a.firstEventTime = a.clk.Now() continue } - seenMap[event.SpiffeID] = struct{}{} - // Update the cache - if err := a.updateCacheEntry(ctx, event.SpiffeID); err != nil { - return err + // track any skipped event ids, should they appear later. + for skipped := a.lastEvent + 1; skipped < event.EventID; skipped++ { + a.eventTracker.StartTracking(skipped) } - if a.firstEventTime.IsZero() { - a.firstEventID = event.EventID - a.firstEventTime = a.clk.Now() - } - a.lastEventID = event.EventID + // every event adds its entry to the entry fetch list. + a.fetchNodes[event.SpiffeID] = struct{}{} + a.lastEvent = event.EventID + } + return nil +} + +func (a *attestedNodes) loadCache(ctx context.Context) error { + // TODO: determine if this needs paging + nodesResp, err := a.ds.ListAttestedNodes(ctx, &datastore.ListAttestedNodesRequest{ + FetchSelectors: true, + }) + if err != nil { + return fmt.Errorf("failed to list attested nodes: %w", err) } - // These two should be the same value but it's valuable to have them both be emitted for incident triage. - server_telemetry.SetAgentsByExpiresAtCacheCountGauge(a.metrics, a.cache.Stats().AgentsByExpiresAt) - server_telemetry.SetAgentsByIDCacheCountGauge(a.metrics, a.cache.Stats().AgentsByID) + for _, node := range nodesResp.Nodes { + agentExpiresAt := time.Unix(node.CertNotAfter, 0) + if agentExpiresAt.Before(a.clk.Now()) { + continue + } + a.cache.UpdateAgent(node.SpiffeId, agentExpiresAt, api.ProtoFromSelectors(node.Selectors)) + } return nil } -// missedStartupEvents will check for any events that arrive with an ID less than the first event ID we receive. -// For example if the first event ID we receive is 3, this function will check for any IDs less than that. -// If event ID 2 comes in later on, due to a long running transaction, this function will update the cache -// with the information from this event. This function will run until time equal to sqlTransactionTimeout has elapsed after startup. -func (a *attestedNodes) missedStartupEvents(ctx context.Context) error { - if a.firstEventTime.IsZero() || a.clk.Now().Sub(a.firstEventTime) > a.sqlTransactionTimeout { - return nil +// buildAttestedNodesCache fetches all attested nodes and adds the unexpired ones to the cache. +// It runs once at startup. +func buildAttestedNodesCache(ctx context.Context, log logrus.FieldLogger, metrics telemetry.Metrics, ds datastore.DataStore, clk clock.Clock, cache *authorizedentries.Cache, cacheReloadInterval, sqlTransactionTimeout time.Duration) (*attestedNodes, error) { + pollPeriods := PollPeriods(cacheReloadInterval, sqlTransactionTimeout) + + attestedNodes := &attestedNodes{ + cache: cache, + clk: clk, + ds: ds, + log: log, + metrics: metrics, + sqlTransactionTimeout: sqlTransactionTimeout, + + eventsBeforeFirst: make(map[uint]struct{}), + fetchNodes: make(map[string]struct{}), + + eventTracker: NewEventTracker(pollPeriods), + + // initialize gauges to nonsense values to force a change. 
+		skippedNodeEvents: -1,
+		lastCacheStats: authorizedentries.CacheStats{
+			AgentsByID:        -1,
+			AgentsByExpiresAt: -1,
+		},
 	}
 
-	req := &datastore.ListAttestedNodesEventsRequest{
-		LessThanEventID: a.firstEventID,
+	if err := attestedNodes.loadCache(ctx); err != nil {
+		return nil, err
 	}
-	resp, err := a.ds.ListAttestedNodesEvents(ctx, req)
-	if err != nil {
-		return err
+	if err := attestedNodes.updateCache(ctx); err != nil {
+		return nil, err
 	}
 
-	for _, event := range resp.Events {
-		if _, seen := a.seenMissedStartupEvents[event.EventID]; !seen {
-			if err := a.updateCacheEntry(ctx, event.SpiffeID); err != nil {
-				a.log.WithError(err).Error("Failed to process missed startup event")
-				continue
-			}
-			a.seenMissedStartupEvents[event.EventID] = struct{}{}
-		}
+	return attestedNodes, nil
+}
+
+// updateCache fetches all the events since the last time this function ran and updates
+// the cache with all the changes.
+func (a *attestedNodes) updateCache(ctx context.Context) error {
+	if err := a.captureChangedNodes(ctx); err != nil {
+		return err
 	}
+	if err := a.updateCachedNodes(ctx); err != nil {
+		return err
+	}
+	a.emitMetrics()
 
 	return nil
 }
 
-// replayMissedEvents Processes events that have been skipped over. Events can come out of order from
-// SQL. This function processes events that came in later than expected.
-func (a *attestedNodes) replayMissedEvents(ctx context.Context) {
-	a.mu.Lock()
-	defer a.mu.Unlock()
-
-	for eventID := range a.missedEvents {
-		log := a.log.WithField(telemetry.EventID, eventID)
+func (a *attestedNodes) updateCachedNodes(ctx context.Context) error {
+	for spiffeID := range a.fetchNodes {
+		node, err := a.ds.FetchAttestedNode(ctx, spiffeID)
+		if err != nil {
+			return err
+		}
 
-		event, err := a.ds.FetchAttestedNodeEvent(ctx, eventID)
-		switch status.Code(err) {
-		case codes.OK:
-		case codes.NotFound:
-			continue
-		default:
-			log.WithError(err).Error("Failed to fetch info about missed Attested Node event")
+		// Node was deleted
+		if node == nil {
+			a.cache.RemoveAgent(spiffeID)
+			delete(a.fetchNodes, spiffeID)
 			continue
 		}
 
-		if err := a.updateCacheEntry(ctx, event.SpiffeID); err != nil {
-			log.WithError(err).Error("Failed to process missed Attested Node event")
-			continue
+		selectors, err := a.ds.GetNodeSelectors(ctx, spiffeID, datastore.RequireCurrent)
+		if err != nil {
+			return err
 		}
+		node.Selectors = selectors
 
-		delete(a.missedEvents, eventID)
+		agentExpiresAt := time.Unix(node.CertNotAfter, 0)
+		a.cache.UpdateAgent(node.SpiffeId, agentExpiresAt, api.ProtoFromSelectors(node.Selectors))
+		delete(a.fetchNodes, spiffeID)
 	}
-	server_telemetry.SetSkippedNodeEventIDsCacheCountGauge(a.metrics, len(a.missedEvents))
+
+	return nil
 }
 
-// updatedCacheEntry update/deletes/creates an individual attested node in the cache.
-func (a *attestedNodes) updateCacheEntry(ctx context.Context, spiffeID string) error {
-	node, err := a.ds.FetchAttestedNode(ctx, spiffeID)
-	if err != nil {
-		return err
+func (a *attestedNodes) emitMetrics() {
+	if a.skippedNodeEvents != int(a.eventTracker.EventCount()) {
+		a.skippedNodeEvents = int(a.eventTracker.EventCount())
+		server_telemetry.SetSkippedNodeEventIDsCacheCountGauge(a.metrics, a.skippedNodeEvents)
 	}
 
-	// Node was deleted
-	if node == nil {
-		a.cache.RemoveAgent(spiffeID)
-		return nil
+	cacheStats := a.cache.Stats()
+	// AgentsByID and AgentsByExpiresAt should be the same.
+ if a.lastCacheStats.AgentsByID != cacheStats.AgentsByID { + a.lastCacheStats.AgentsByID = cacheStats.AgentsByID + server_telemetry.SetAgentsByIDCacheCountGauge(a.metrics, a.lastCacheStats.AgentsByID) } - - selectors, err := a.ds.GetNodeSelectors(ctx, spiffeID, datastore.RequireCurrent) - if err != nil { - return err - } - node.Selectors = selectors - - agentExpiresAt := time.Unix(node.CertNotAfter, 0) - a.cache.UpdateAgent(node.SpiffeId, agentExpiresAt, api.ProtoFromSelectors(node.Selectors)) - - return nil -} - -// prunedMissedEvents delete missed events that are older than the configured SQL transaction timeout time. -func (a *attestedNodes) pruneMissedEvents() { - a.mu.Lock() - defer a.mu.Unlock() - - for eventID, eventTime := range a.missedEvents { - if a.clk.Now().Sub(eventTime) > a.sqlTransactionTimeout { - delete(a.missedEvents, eventID) - } + if a.lastCacheStats.AgentsByExpiresAt != cacheStats.AgentsByExpiresAt { + a.lastCacheStats.AgentsByExpiresAt = cacheStats.AgentsByExpiresAt + server_telemetry.SetAgentsByExpiresAtCacheCountGauge(a.metrics, a.lastCacheStats.AgentsByExpiresAt) } } diff --git a/pkg/server/endpoints/authorized_entryfetcher_attested_nodes_test.go b/pkg/server/endpoints/authorized_entryfetcher_attested_nodes_test.go index d02ebe60dd..ba50386079 100644 --- a/pkg/server/endpoints/authorized_entryfetcher_attested_nodes_test.go +++ b/pkg/server/endpoints/authorized_entryfetcher_attested_nodes_test.go @@ -3,12 +3,16 @@ package endpoints import ( "context" "errors" + "maps" + "reflect" + "slices" + "strings" "testing" "time" "github.com/sirupsen/logrus" "github.com/sirupsen/logrus/hooks/test" - "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/spiffe/spire/pkg/common/telemetry" "github.com/spiffe/spire/pkg/server/authorizedentries" "github.com/spiffe/spire/pkg/server/datastore" @@ -16,147 +20,1528 @@ import ( "github.com/spiffe/spire/test/clock" "github.com/spiffe/spire/test/fakes/fakedatastore" "github.com/spiffe/spire/test/fakes/fakemetrics" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestUpdateAttestedNodesCache(t *testing.T) { +var ( + cachedAgentsByID = []string{telemetry.Node, telemetry.AgentsByIDCache, telemetry.Count} + cachedAgentsByExpiresAt = []string{telemetry.Node, telemetry.AgentsByExpiresAtCache, telemetry.Count} + skippedNodeEventID = []string{telemetry.Node, telemetry.SkippedNodeEventIDs, telemetry.Count} + + // defaults used to setup a small initial load of attested nodes and events. 
+ defaultAttestedNodes = []*common.AttestedNode{ + { + SpiffeId: "spiffe://example.org/test_node_2", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_3", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + } + defaultNodeEventsStartingAt60 = []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 60, + SpiffeID: "spiffe://example.org/test_node_2", + }, + &datastore.AttestedNodeEvent{ + EventID: 61, + SpiffeID: "spiffe://example.org/test_node_3", + }, + } + defaultFirstNodeEvent = uint(60) + defaultLastNodeEvent = uint(61) + + noNodeFetches = []string{} +) + +type expectedGauge struct { + Key []string + Value int +} + +func TestLoadNodeCache(t *testing.T) { for _, tt := range []struct { - name string - errs []error - expectedLastAttestedNodeEventID uint - expectMetrics []fakemetrics.MetricItem + name string + setup *nodeScenarioSetup + + expectedError string + expectedAuthorizedEntries []string + expectedGauges []expectedGauge }{ { - name: "Error Listing Attested Node Events", - errs: []error{errors.New("listing attested node events")}, - expectedLastAttestedNodeEventID: uint(0), - expectMetrics: nil, + name: "initial load returns an error", + setup: &nodeScenarioSetup{ + err: errors.New("any error, doesn't matter"), + }, + expectedError: "any error, doesn't matter", }, { - name: "Error Fetching Attested Node", - errs: []error{nil, errors.New("fetching attested node")}, - expectedLastAttestedNodeEventID: uint(0), - expectMetrics: nil, + name: "initial load loads nothing", }, { - name: "Error Getting Node Selectors", - errs: []error{nil, nil, errors.New("getting node selectors")}, - expectedLastAttestedNodeEventID: uint(0), - expectMetrics: nil, + name: "initial load loads one attested node", + setup: &nodeScenarioSetup{ + attestedNodes: []*common.AttestedNode{ + { + SpiffeId: "spiffe://example.org/test_node_1", + CertNotAfter: time.Now().Add(time.Duration(5) * time.Hour).Unix(), + }, + }, + }, + expectedAuthorizedEntries: []string{ + "spiffe://example.org/test_node_1", + }, + expectedGauges: []expectedGauge{ + expectedGauge{Key: skippedNodeEventID, Value: 0}, + expectedGauge{Key: cachedAgentsByID, Value: 1}, + expectedGauge{Key: cachedAgentsByExpiresAt, Value: 1}, + }, }, { - name: "No Errors", - expectedLastAttestedNodeEventID: uint(1), - expectMetrics: []fakemetrics.MetricItem{ - { - Type: fakemetrics.SetGaugeType, - Key: []string{telemetry.Node, telemetry.AgentsByExpiresAtCache, telemetry.Count}, - Val: 1, - Labels: nil, + name: "initial load loads five attested nodes", + setup: &nodeScenarioSetup{ + attestedNodes: []*common.AttestedNode{ + { + SpiffeId: "spiffe://example.org/test_node_1", + CertNotAfter: time.Now().Add(time.Duration(5) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_2", + CertNotAfter: time.Now().Add(time.Duration(5) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_3", + CertNotAfter: time.Now().Add(time.Duration(5) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_4", + CertNotAfter: time.Now().Add(time.Duration(5) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_5", + CertNotAfter: time.Now().Add(time.Duration(5) * time.Hour).Unix(), + }, }, - { - Type: fakemetrics.SetGaugeType, - Key: []string{telemetry.Node, telemetry.AgentsByIDCache, telemetry.Count}, - Val: 1, - Labels: nil, + }, + expectedAuthorizedEntries: []string{ + 
"spiffe://example.org/test_node_1", + "spiffe://example.org/test_node_2", + "spiffe://example.org/test_node_3", + "spiffe://example.org/test_node_4", + "spiffe://example.org/test_node_5", + }, + }, + { + name: "initial load loads five attested nodes, one expired", + setup: &nodeScenarioSetup{ + attestedNodes: []*common.AttestedNode{ + { + SpiffeId: "spiffe://example.org/test_node_1", + CertNotAfter: time.Now().Add(time.Duration(5) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_2", + CertNotAfter: time.Now().Add(time.Duration(-5) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_3", + CertNotAfter: time.Now().Add(time.Duration(5) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_4", + CertNotAfter: time.Now().Add(time.Duration(5) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_5", + CertNotAfter: time.Now().Add(time.Duration(5) * time.Hour).Unix(), + }, + }, + }, + expectedAuthorizedEntries: []string{ + "spiffe://example.org/test_node_1", + "spiffe://example.org/test_node_3", + "spiffe://example.org/test_node_4", + "spiffe://example.org/test_node_5", + }, + }, + { + name: "initial load loads five attested nodes, all expired", + setup: &nodeScenarioSetup{ + attestedNodes: []*common.AttestedNode{ + { + SpiffeId: "spiffe://example.org/test_node_1", + CertNotAfter: time.Now().Add(time.Duration(-5) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_2", + CertNotAfter: time.Now().Add(time.Duration(-5) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_3", + CertNotAfter: time.Now().Add(time.Duration(-5) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_4", + CertNotAfter: time.Now().Add(time.Duration(-5) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_5", + CertNotAfter: time.Now().Add(time.Duration(-5) * time.Hour).Unix(), + }, }, }, + expectedAuthorizedEntries: []string{}, + }, + } { + t.Run(tt.name, func(t *testing.T) { + scenario := NewNodeScenario(t, tt.setup) + attestedNodes, err := scenario.buildAttestedNodesCache() + if tt.expectedError != "" { + require.ErrorContains(t, err, tt.expectedError) + return + } + require.NoError(t, err) + + cacheStats := attestedNodes.cache.Stats() + require.Equal(t, len(tt.expectedAuthorizedEntries), cacheStats.AgentsByID, "wrong number of agents by ID") + + // for now, the only way to ensure the desired agent ids are present is + // to remove the desired ids and check the count is zero. 
+ for _, expectedAuthorizedId := range tt.expectedAuthorizedEntries { + attestedNodes.cache.RemoveAgent(expectedAuthorizedId) + } + cacheStats = attestedNodes.cache.Stats() + require.Equal(t, 0, cacheStats.AgentsByID, "clearing all expected agent ids didn't clear cache") + + var lastMetrics map[string]int = make(map[string]int) + for _, metricItem := range scenario.metrics.AllMetrics() { + if metricItem.Type == fakemetrics.SetGaugeType { + key := strings.Join(metricItem.Key, " ") + lastMetrics[key] = int(metricItem.Val) + } + } + + for _, expectedGauge := range tt.expectedGauges { + key := strings.Join(expectedGauge.Key, " ") + value, exists := lastMetrics[key] + require.True(t, exists, "No metric value for %q", key) + require.Equal(t, expectedGauge.Value, value, "unexpected final metric value for %q", key) + } + + require.Zero(t, scenario.hook.Entries) + }) + } +} + +func TestSearchBeforeFirstNodeEvent(t *testing.T) { + for _, tt := range []struct { + name string + setup *nodeScenarioSetup + + waitToPoll time.Duration + eventsBeforeFirst []uint + polledEvents []*datastore.AttestedNodeEvent + errors []error + + expectedError string + expectedEventsBeforeFirst []uint + expectedFetches []string + }{ + { + name: "first event not loaded", + + expectedEventsBeforeFirst: []uint{}, + expectedFetches: []string{}, + }, + { + name: "before first event arrived, after transaction timeout", + setup: &nodeScenarioSetup{ + attestedNodes: defaultAttestedNodes, + attestedNodeEvents: defaultNodeEventsStartingAt60, + }, + + waitToPoll: time.Duration(2) * defaultSQLTransactionTimeout, + // even with new before first events, they shouldn't load + polledEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 58, + SpiffeID: "spiffe://example.org/test_node_1", + }, + }, + + expectedEventsBeforeFirst: []uint{}, + expectedFetches: noNodeFetches, + }, + { + name: "no before first events", + + setup: &nodeScenarioSetup{ + attestedNodes: defaultAttestedNodes, + attestedNodeEvents: defaultNodeEventsStartingAt60, + }, + polledEvents: []*datastore.AttestedNodeEvent{}, + + expectedEventsBeforeFirst: []uint{}, + expectedFetches: []string{}, + }, + { + name: "new before first event", + + setup: &nodeScenarioSetup{ + attestedNodes: defaultAttestedNodes, + attestedNodeEvents: defaultNodeEventsStartingAt60, + }, + polledEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 58, + SpiffeID: "spiffe://example.org/test_node_1", + }, + }, + + expectedEventsBeforeFirst: []uint{58}, + expectedFetches: []string{"spiffe://example.org/test_node_1"}, + }, + { + name: "new after last event", + + setup: &nodeScenarioSetup{ + attestedNodes: defaultAttestedNodes, + attestedNodeEvents: defaultNodeEventsStartingAt60, + }, + polledEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 64, + SpiffeID: "spiffe://example.org/test_node_1", + }, + }, + + expectedEventsBeforeFirst: []uint{}, + expectedFetches: []string{}, + }, + { + name: "previously seen before first event", + + setup: &nodeScenarioSetup{ + attestedNodes: defaultAttestedNodes, + attestedNodeEvents: defaultNodeEventsStartingAt60, + }, + eventsBeforeFirst: []uint{58}, + polledEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 58, + SpiffeID: "spiffe://example.org/test_node_1", + }, + }, + + expectedEventsBeforeFirst: []uint{58}, + expectedFetches: []string{}, + }, + { + name: "previously seen before first event and after last event", + + setup: &nodeScenarioSetup{ + 
attestedNodes: defaultAttestedNodes, + attestedNodeEvents: defaultNodeEventsStartingAt60, + }, + eventsBeforeFirst: []uint{58}, + polledEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: defaultFirstNodeEvent - 2, + SpiffeID: "spiffe://example.org/test_node_1", + }, + &datastore.AttestedNodeEvent{ + EventID: defaultLastNodeEvent + 2, + SpiffeID: "spiffe://example.org/test_node_4", + }, + }, + + expectedEventsBeforeFirst: []uint{defaultFirstNodeEvent - 2}, + expectedFetches: []string{}, + }, + { + name: "five new before first events", + + setup: &nodeScenarioSetup{ + attestedNodes: defaultAttestedNodes, + attestedNodeEvents: defaultNodeEventsStartingAt60, + }, + polledEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 48, + SpiffeID: "spiffe://example.org/test_node_10", + }, + &datastore.AttestedNodeEvent{ + EventID: 49, + SpiffeID: "spiffe://example.org/test_node_11", + }, + &datastore.AttestedNodeEvent{ + EventID: 53, + SpiffeID: "spiffe://example.org/test_node_12", + }, + &datastore.AttestedNodeEvent{ + EventID: 56, + SpiffeID: "spiffe://example.org/test_node_13", + }, + &datastore.AttestedNodeEvent{ + EventID: 57, + SpiffeID: "spiffe://example.org/test_node_14", + }, + }, + + expectedEventsBeforeFirst: []uint{48, 49, 53, 56, 57}, + expectedFetches: []string{ + "spiffe://example.org/test_node_10", + "spiffe://example.org/test_node_11", + "spiffe://example.org/test_node_12", + "spiffe://example.org/test_node_13", + "spiffe://example.org/test_node_14", + }, + }, + { + name: "five new before first events, one after last event", + + setup: &nodeScenarioSetup{ + attestedNodes: defaultAttestedNodes, + attestedNodeEvents: defaultNodeEventsStartingAt60, + }, + polledEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 48, + SpiffeID: "spiffe://example.org/test_node_10", + }, + &datastore.AttestedNodeEvent{ + EventID: 49, + SpiffeID: "spiffe://example.org/test_node_11", + }, + &datastore.AttestedNodeEvent{ + EventID: 53, + SpiffeID: "spiffe://example.org/test_node_12", + }, + &datastore.AttestedNodeEvent{ + EventID: 56, + SpiffeID: "spiffe://example.org/test_node_13", + }, + &datastore.AttestedNodeEvent{ + EventID: defaultLastNodeEvent + 1, + SpiffeID: "spiffe://example.org/test_node_14", + }, + }, + + expectedEventsBeforeFirst: []uint{48, 49, 53, 56}, + expectedFetches: []string{ + "spiffe://example.org/test_node_10", + "spiffe://example.org/test_node_11", + "spiffe://example.org/test_node_12", + "spiffe://example.org/test_node_13", + }, + }, + { + name: "five before first events, two previously seen", + setup: &nodeScenarioSetup{ + attestedNodes: defaultAttestedNodes, + attestedNodeEvents: defaultNodeEventsStartingAt60, + }, + + eventsBeforeFirst: []uint{48, 49}, + polledEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 48, + SpiffeID: "spiffe://example.org/test_node_10", + }, + &datastore.AttestedNodeEvent{ + EventID: 49, + SpiffeID: "spiffe://example.org/test_node_11", + }, + &datastore.AttestedNodeEvent{ + EventID: 53, + SpiffeID: "spiffe://example.org/test_node_12", + }, + &datastore.AttestedNodeEvent{ + EventID: 56, + SpiffeID: "spiffe://example.org/test_node_13", + }, + &datastore.AttestedNodeEvent{ + EventID: 57, + SpiffeID: "spiffe://example.org/test_node_14", + }, + }, + + expectedEventsBeforeFirst: []uint{48, 49, 53, 56, 57}, + expectedFetches: []string{ + "spiffe://example.org/test_node_12", + "spiffe://example.org/test_node_13", + 
"spiffe://example.org/test_node_14", + }, + }, + { + name: "five before first events, two previously seen, one after last event", + setup: &nodeScenarioSetup{ + attestedNodes: defaultAttestedNodes, + attestedNodeEvents: defaultNodeEventsStartingAt60, + }, + eventsBeforeFirst: []uint{48, 49}, + polledEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 48, + SpiffeID: "spiffe://example.org/test_node_10", + }, + &datastore.AttestedNodeEvent{ + EventID: 49, + SpiffeID: "spiffe://example.org/test_node_11", + }, + &datastore.AttestedNodeEvent{ + EventID: 53, + SpiffeID: "spiffe://example.org/test_node_12", + }, + &datastore.AttestedNodeEvent{ + EventID: 56, + SpiffeID: "spiffe://example.org/test_node_13", + }, + &datastore.AttestedNodeEvent{ + EventID: defaultLastNodeEvent + 1, + SpiffeID: "spiffe://example.org/test_node_14", + }, + }, + + expectedEventsBeforeFirst: []uint{48, 49, 53, 56}, + expectedFetches: []string{ + "spiffe://example.org/test_node_12", + "spiffe://example.org/test_node_13", + }, + }, + { + name: "five before first events, five previously seen", + setup: &nodeScenarioSetup{ + attestedNodes: defaultAttestedNodes, + attestedNodeEvents: defaultNodeEventsStartingAt60, + }, + + eventsBeforeFirst: []uint{48, 49, 53, 56, 57}, + polledEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 48, + SpiffeID: "spiffe://example.org/test_node_10", + }, + &datastore.AttestedNodeEvent{ + EventID: 49, + SpiffeID: "spiffe://example.org/test_node_11", + }, + &datastore.AttestedNodeEvent{ + EventID: 53, + SpiffeID: "spiffe://example.org/test_node_12", + }, + &datastore.AttestedNodeEvent{ + EventID: 56, + SpiffeID: "spiffe://example.org/test_node_13", + }, + &datastore.AttestedNodeEvent{ + EventID: 57, + SpiffeID: "spiffe://example.org/test_node_14", + }, + }, + + expectedEventsBeforeFirst: []uint{48, 49, 53, 56, 57}, + expectedFetches: []string{}, + }, + { + name: "five before first events, five previously seen, with after last event", + setup: &nodeScenarioSetup{ + attestedNodes: defaultAttestedNodes, + attestedNodeEvents: defaultNodeEventsStartingAt60, + }, + + eventsBeforeFirst: []uint{48, 49, 53, 56, 57}, + polledEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 48, + SpiffeID: "spiffe://example.org/test_node_10", + }, + &datastore.AttestedNodeEvent{ + EventID: 49, + SpiffeID: "spiffe://example.org/test_node_11", + }, + &datastore.AttestedNodeEvent{ + EventID: 53, + SpiffeID: "spiffe://example.org/test_node_12", + }, + &datastore.AttestedNodeEvent{ + EventID: 56, + SpiffeID: "spiffe://example.org/test_node_13", + }, + &datastore.AttestedNodeEvent{ + EventID: 57, + SpiffeID: "spiffe://example.org/test_node_14", + }, + &datastore.AttestedNodeEvent{ + EventID: defaultLastNodeEvent + 1, + SpiffeID: "spiffe://example.org/test_node_28", + }, + }, + + expectedEventsBeforeFirst: []uint{48, 49, 53, 56, 57}, + expectedFetches: []string{}, }, } { tt := tt t.Run(tt.name, func(t *testing.T) { - ctx := context.Background() - log, _ := test.NewNullLogger() - clk := clock.NewMock(t) - ds := fakedatastore.New(t) - cache := authorizedentries.NewCache(clk) - metrics := fakemetrics.New() - - attestedNodes, err := buildAttestedNodesCache(ctx, log, metrics, ds, clk, cache, defaultSQLTransactionTimeout) + scenario := NewNodeScenario(t, tt.setup) + attestedNodes, err := scenario.buildAttestedNodesCache() + if tt.expectedError != "" { + require.ErrorContains(t, err, tt.expectedError) + return + } require.NoError(t, err) - 
require.NotNil(t, attestedNodes)
-	agentID, err := spiffeid.FromString("spiffe://example.org/myagent")
+	if tt.waitToPoll == 0 {
+		scenario.clk.Add(defaultCacheReloadInterval)
+	} else {
+		scenario.clk.Add(tt.waitToPoll)
+	}
+
+	for _, event := range tt.eventsBeforeFirst {
+		attestedNodes.eventsBeforeFirst[event] = struct{}{}
+	}
+
+	for _, event := range tt.polledEvents {
+		err = scenario.ds.CreateAttestedNodeEventForTesting(scenario.ctx, event)
+		require.NoError(t, err, "error while setting up test")
+	}
+
+	err = attestedNodes.searchBeforeFirstEvent(scenario.ctx)
+	require.NoError(t, err, "error while running test")
+
+	require.ElementsMatch(t, tt.expectedEventsBeforeFirst, slices.Collect(maps.Keys(attestedNodes.eventsBeforeFirst)), "expected events before tracking mismatch")
+	require.ElementsMatch(t, tt.expectedFetches, slices.Collect(maps.Keys(attestedNodes.fetchNodes)), "expected fetches mismatch")
+
+	require.Zero(t, scenario.hook.Entries)
+	})
+	}
+}
+
+func TestSelectedPolledNodeEvents(t *testing.T) {
+	for _, tt := range []struct {
+		name  string
+		setup *nodeScenarioSetup
+
+		polling         []uint
+		events          []*datastore.AttestedNodeEvent
+		expectedFetches []string
+	}{
+		// polling is based on the eventTracker, not on events in the database
+		{
+			name:   "nothing to poll, no action taken, no events",
+			events: []*datastore.AttestedNodeEvent{},
+		},
+		{
+			name: "nothing to poll, no action taken, one event",
+			setup: &nodeScenarioSetup{
+				attestedNodeEvents: []*datastore.AttestedNodeEvent{
+					&datastore.AttestedNodeEvent{
+						EventID:  100,
+						SpiffeID: "spiffe://example.org/test_node_1",
+					},
+				},
+			},
+		},
+		{
+			name: "nothing to poll, no action taken, five events",
+			setup: &nodeScenarioSetup{
+				attestedNodeEvents: []*datastore.AttestedNodeEvent{
+					&datastore.AttestedNodeEvent{
+						EventID:  101,
+						SpiffeID: "spiffe://example.org/test_node_1",
+					},
+					&datastore.AttestedNodeEvent{
+						EventID:  102,
+						SpiffeID: "spiffe://example.org/test_node_2",
+					},
+					&datastore.AttestedNodeEvent{
+						EventID:  103,
+						SpiffeID: "spiffe://example.org/test_node_3",
+					},
+					&datastore.AttestedNodeEvent{
+						EventID:  104,
+						SpiffeID: "spiffe://example.org/test_node_4",
+					},
+					&datastore.AttestedNodeEvent{
+						EventID:  105,
+						SpiffeID: "spiffe://example.org/test_node_5",
+					},
+				},
+			},
+		},
+		{
+			name: "polling one item, not found",
+			setup: &nodeScenarioSetup{
+				attestedNodeEvents: []*datastore.AttestedNodeEvent{
+					&datastore.AttestedNodeEvent{
+						EventID:  101,
+						SpiffeID: "spiffe://example.org/test_node_1",
+					},
+					&datastore.AttestedNodeEvent{
+						EventID:  102,
+						SpiffeID: "spiffe://example.org/test_node_2",
+					},
+					&datastore.AttestedNodeEvent{
+						EventID:  104,
+						SpiffeID: "spiffe://example.org/test_node_4",
+					},
+					&datastore.AttestedNodeEvent{
+						EventID:  105,
+						SpiffeID: "spiffe://example.org/test_node_5",
+					},
+				},
+			},
+			polling: []uint{103},
+		},
+		{
+			name: "polling five items, not found",
+			setup: &nodeScenarioSetup{
+				attestedNodeEvents: []*datastore.AttestedNodeEvent{
+					&datastore.AttestedNodeEvent{
+						EventID:  101,
+						SpiffeID: "spiffe://example.org/test_node_1",
+					},
+					&datastore.AttestedNodeEvent{
+						EventID:  107,
+						SpiffeID: "spiffe://example.org/test_node_7",
+					},
+				},
+			},
+			polling: []uint{102, 103, 104, 105, 106},
+		},
+		{
+			name: "polling one item, found",
+			setup: 
&nodeScenarioSetup{ + attestedNodeEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 101, + SpiffeID: "spiffe://example.org/test_node_1", + }, + &datastore.AttestedNodeEvent{ + EventID: 102, + SpiffeID: "spiffe://example.org/test_node_2", + }, + &datastore.AttestedNodeEvent{ + EventID: 103, + SpiffeID: "spiffe://example.org/test_node_3", + }, + }, + }, + polling: []uint{102}, + + expectedFetches: []string{ + "spiffe://example.org/test_node_2", + }, + }, + { + name: "polling five items, two found", + setup: &nodeScenarioSetup{ + attestedNodeEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 101, + SpiffeID: "spiffe://example.org/test_node_1", + }, + &datastore.AttestedNodeEvent{ + EventID: 103, + SpiffeID: "spiffe://example.org/test_node_3", + }, + &datastore.AttestedNodeEvent{ + EventID: 106, + SpiffeID: "spiffe://example.org/test_node_6", + }, + &datastore.AttestedNodeEvent{ + EventID: 107, + SpiffeID: "spiffe://example.org/test_node_7", + }, + }, + }, + polling: []uint{102, 103, 104, 105, 106}, + + expectedFetches: []string{ + "spiffe://example.org/test_node_3", + "spiffe://example.org/test_node_6", + }, + }, + { + name: "polling five items, five found", + setup: &nodeScenarioSetup{ + attestedNodeEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 101, + SpiffeID: "spiffe://example.org/test_node_1", + }, + &datastore.AttestedNodeEvent{ + EventID: 102, + SpiffeID: "spiffe://example.org/test_node_2", + }, + &datastore.AttestedNodeEvent{ + EventID: 103, + SpiffeID: "spiffe://example.org/test_node_3", + }, + &datastore.AttestedNodeEvent{ + EventID: 104, + SpiffeID: "spiffe://example.org/test_node_4", + }, + &datastore.AttestedNodeEvent{ + EventID: 105, + SpiffeID: "spiffe://example.org/test_node_5", + }, + &datastore.AttestedNodeEvent{ + EventID: 106, + SpiffeID: "spiffe://example.org/test_node_6", + }, + &datastore.AttestedNodeEvent{ + EventID: 107, + SpiffeID: "spiffe://example.org/test_node_7", + }, + }, + }, + polling: []uint{102, 103, 104, 105, 106}, + + expectedFetches: []string{ + "spiffe://example.org/test_node_2", + "spiffe://example.org/test_node_3", + "spiffe://example.org/test_node_4", + "spiffe://example.org/test_node_5", + "spiffe://example.org/test_node_6", + }, + }, + } { + tt := tt + t.Run(tt.name, func(t *testing.T) { + scenario := NewNodeScenario(t, tt.setup) + attestedNodes, err := scenario.buildAttestedNodesCache() require.NoError(t, err) - _, err = ds.CreateAttestedNode(ctx, &common.AttestedNode{ - SpiffeId: agentID.String(), - CertNotAfter: time.Now().Add(5 * time.Hour).Unix(), - }) + // initialize the event tracker + for _, event := range tt.polling { + attestedNodes.eventTracker.StartTracking(event) + } + // poll the events + attestedNodes.selectPolledEvents(scenario.ctx) + + require.ElementsMatch(t, tt.expectedFetches, slices.Collect(maps.Keys(attestedNodes.fetchNodes))) + require.Zero(t, scenario.hook.Entries) + }) + } +} + +func TestScanForNewNodeEvents(t *testing.T) { + for _, tt := range []struct { + name string + setup *nodeScenarioSetup + + newEvents []*datastore.AttestedNodeEvent + + expectedTrackedEvents []uint + expectedFetches []string + }{ + { + name: "no new events, no first event", + + expectedTrackedEvents: []uint{}, + expectedFetches: []string{}, + }, + { + name: "no new event, with first event", + setup: &nodeScenarioSetup{ + attestedNodeEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 101, + SpiffeID: 
"spiffe://example.org/test_node_1", + }, + }, + }, + + expectedTrackedEvents: []uint{}, + expectedFetches: []string{}, + }, + { + name: "one new event", + setup: &nodeScenarioSetup{ + attestedNodeEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 101, + SpiffeID: "spiffe://example.org/test_node_1", + }, + }, + }, + newEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 102, + SpiffeID: "spiffe://example.org/test_node_1", + }, + }, + + expectedTrackedEvents: []uint{}, + expectedFetches: []string{ + "spiffe://example.org/test_node_1", + }, + }, + { + name: "one new event, skipping an event", + setup: &nodeScenarioSetup{ + attestedNodeEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 101, + SpiffeID: "spiffe://example.org/test_node_1", + }, + }, + }, + newEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 103, + SpiffeID: "spiffe://example.org/test_node_1", + }, + }, + + expectedTrackedEvents: []uint{102}, + expectedFetches: []string{ + "spiffe://example.org/test_node_1", + }, + }, + { + name: "two new events, same attested node", + setup: &nodeScenarioSetup{ + attestedNodeEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 101, + SpiffeID: "spiffe://example.org/test_node_1", + }, + }, + }, + newEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 102, + SpiffeID: "spiffe://example.org/test_node_1", + }, + &datastore.AttestedNodeEvent{ + EventID: 103, + SpiffeID: "spiffe://example.org/test_node_1", + }, + }, + + expectedTrackedEvents: []uint{}, + expectedFetches: []string{ + "spiffe://example.org/test_node_1", + }, + }, + { + name: "two new events, different attested nodes", + setup: &nodeScenarioSetup{ + attestedNodeEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 101, + SpiffeID: "spiffe://example.org/test_node_1", + }, + }, + }, + newEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 102, + SpiffeID: "spiffe://example.org/test_node_1", + }, + &datastore.AttestedNodeEvent{ + EventID: 103, + SpiffeID: "spiffe://example.org/test_node_2", + }, + }, + + expectedTrackedEvents: []uint{}, + expectedFetches: []string{ + "spiffe://example.org/test_node_1", + "spiffe://example.org/test_node_2", + }, + }, + { + name: "two new events, with a skipped event", + setup: &nodeScenarioSetup{ + attestedNodeEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 101, + SpiffeID: "spiffe://example.org/test_node_1", + }, + }, + }, + newEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 102, + SpiffeID: "spiffe://example.org/test_node_1", + }, + &datastore.AttestedNodeEvent{ + EventID: 104, + SpiffeID: "spiffe://example.org/test_node_2", + }, + }, + + expectedTrackedEvents: []uint{103}, + expectedFetches: []string{ + "spiffe://example.org/test_node_1", + "spiffe://example.org/test_node_2", + }, + }, + { + name: "two new events, with three skipped events", + setup: &nodeScenarioSetup{ + attestedNodeEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 101, + SpiffeID: "spiffe://example.org/test_node_1", + }, + }, + }, + newEvents: []*datastore.AttestedNodeEvent{ + &datastore.AttestedNodeEvent{ + EventID: 102, + SpiffeID: "spiffe://example.org/test_node_1", + }, + &datastore.AttestedNodeEvent{ + EventID: 106, + SpiffeID: "spiffe://example.org/test_node_2", + }, + }, + + 
expectedTrackedEvents: []uint{103, 104, 105},
+			expectedFetches: []string{
+				"spiffe://example.org/test_node_1",
+				"spiffe://example.org/test_node_2",
+			},
+		},
+		{
+			name: "five events, four new events, two skip regions",
+			setup: &nodeScenarioSetup{
+				attestedNodeEvents: []*datastore.AttestedNodeEvent{
+					&datastore.AttestedNodeEvent{
+						EventID:  101,
+						SpiffeID: "spiffe://example.org/test_node_1",
+					},
+					&datastore.AttestedNodeEvent{
+						EventID:  102,
+						SpiffeID: "spiffe://example.org/test_node_2",
+					},
+					&datastore.AttestedNodeEvent{
+						EventID:  103,
+						SpiffeID: "spiffe://example.org/test_node_3",
+					},
+					&datastore.AttestedNodeEvent{
+						EventID:  104,
+						SpiffeID: "spiffe://example.org/test_node_4",
+					},
+					&datastore.AttestedNodeEvent{
+						EventID:  105,
+						SpiffeID: "spiffe://example.org/test_node_5",
+					},
+				},
+			},
+			newEvents: []*datastore.AttestedNodeEvent{
+				&datastore.AttestedNodeEvent{
+					EventID:  108,
+					SpiffeID: "spiffe://example.org/test_node_1",
+				},
+				&datastore.AttestedNodeEvent{
+					EventID:  109,
+					SpiffeID: "spiffe://example.org/test_node_2",
+				},
+				&datastore.AttestedNodeEvent{
+					EventID:  110,
+					SpiffeID: "spiffe://example.org/test_node_2",
+				},
+				&datastore.AttestedNodeEvent{
+					EventID:  112,
+					SpiffeID: "spiffe://example.org/test_node_11",
+				},
+			},
+
+			expectedTrackedEvents: []uint{106, 107, 111},
+			expectedFetches: []string{
+				"spiffe://example.org/test_node_1",
+				"spiffe://example.org/test_node_2",
+				"spiffe://example.org/test_node_11",
+			},
+		},
+	} {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			scenario := NewNodeScenario(t, tt.setup)
+			attestedNodes, err := scenario.buildAttestedNodesCache()
 			require.NoError(t, err)
 
-			for _, err = range tt.errs {
-				ds.AppendNextError(err)
+			for _, newEvent := range tt.newEvents {
+				err = scenario.ds.CreateAttestedNodeEventForTesting(scenario.ctx, newEvent)
+				require.NoError(t, err, "error while setting up test")
 			}
+			err = attestedNodes.scanForNewEvents(scenario.ctx)
+			require.NoError(t, err, "error while running test")
 
-			err = attestedNodes.updateCache(ctx)
-			if len(tt.errs) > 0 {
-				assert.EqualError(t, err, tt.errs[len(tt.errs)-1].Error())
-			} else {
-				assert.NoError(t, err)
+			require.ElementsMatch(t, tt.expectedTrackedEvents, slices.Collect(maps.Keys(attestedNodes.eventTracker.events)))
+			require.ElementsMatch(t, tt.expectedFetches, slices.Collect(maps.Keys(attestedNodes.fetchNodes)))
+			require.Zero(t, scenario.hook.Entries)
+		})
+	}
+}
+
+func TestUpdateAttestedNodesCache(t *testing.T) {
+	for _, tt := range []struct {
+		name                string
+		setup               *nodeScenarioSetup
+		createAttestedNodes []*common.AttestedNode // Nodes created after setup
+		deleteAttestedNodes []string               // Nodes deleted after setup
+		fetchNodes          []string
+
+		expectedAuthorizedEntries []string
+	}{
+		{
+			name:       "empty cache, no fetch nodes",
+			fetchNodes: []string{},
+
+			expectedAuthorizedEntries: []string{},
+		},
+		{
+			name: "empty cache, fetch one node, as a new entry",
+			createAttestedNodes: []*common.AttestedNode{
+				{
+					SpiffeId:     "spiffe://example.org/test_node_3",
+					CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(),
+				},
+			},
+			fetchNodes: []string{
+				"spiffe://example.org/test_node_3",
+			},
+
+			expectedAuthorizedEntries: []string{
+				"spiffe://example.org/test_node_3",
+			},
+		},
+		{
+			name: "empty cache, fetch one node, as a delete",
+			fetchNodes: []string{
+				"spiffe://example.org/test_node_3",
+			},
+		},
+		{
+			name: "empty cache, fetch five nodes, all new entries",
+			createAttestedNodes: []*common.AttestedNode{
+				{
+					SpiffeId: 
"spiffe://example.org/test_node_1", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_2", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_3", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_4", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_5", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + }, + fetchNodes: []string{ + "spiffe://example.org/test_node_1", + "spiffe://example.org/test_node_2", + "spiffe://example.org/test_node_3", + "spiffe://example.org/test_node_4", + "spiffe://example.org/test_node_5", + }, + + expectedAuthorizedEntries: []string{ + "spiffe://example.org/test_node_1", + "spiffe://example.org/test_node_2", + "spiffe://example.org/test_node_3", + "spiffe://example.org/test_node_4", + "spiffe://example.org/test_node_5", + }, + }, + { + name: "empty cache, fetch five nodes, three new and two deletes", + createAttestedNodes: []*common.AttestedNode{ + { + SpiffeId: "spiffe://example.org/test_node_1", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_3", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_4", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + }, + fetchNodes: []string{ + "spiffe://example.org/test_node_1", + "spiffe://example.org/test_node_2", + "spiffe://example.org/test_node_3", + "spiffe://example.org/test_node_4", + "spiffe://example.org/test_node_5", + }, + + expectedAuthorizedEntries: []string{ + "spiffe://example.org/test_node_1", + "spiffe://example.org/test_node_3", + "spiffe://example.org/test_node_4", + }, + }, + { + name: "empty cache, fetch five nodes, all deletes", + fetchNodes: []string{ + "spiffe://example.org/test_node_1", + "spiffe://example.org/test_node_2", + "spiffe://example.org/test_node_3", + "spiffe://example.org/test_node_4", + "spiffe://example.org/test_node_5", + }, + + expectedAuthorizedEntries: []string{}, + }, + { + name: "one node in cache, no fetch nodes", + setup: &nodeScenarioSetup{ + attestedNodes: []*common.AttestedNode{ + { + SpiffeId: "spiffe://example.org/test_node_3", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + }, + }, + + expectedAuthorizedEntries: []string{ + "spiffe://example.org/test_node_3", + }, + }, + { + name: "one node in cache, fetch one node, as new entry", + setup: &nodeScenarioSetup{ + attestedNodes: []*common.AttestedNode{ + { + SpiffeId: "spiffe://example.org/test_node_3", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + }, + }, + createAttestedNodes: []*common.AttestedNode{ + { + SpiffeId: "spiffe://example.org/test_node_4", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + }, + fetchNodes: []string{ + "spiffe://example.org/test_node_4", + }, + + expectedAuthorizedEntries: []string{ + "spiffe://example.org/test_node_3", + "spiffe://example.org/test_node_4", + }, + }, + { + name: "one node in cache, fetch one node, as an update", + setup: &nodeScenarioSetup{ + attestedNodes: []*common.AttestedNode{ + { + SpiffeId: "spiffe://example.org/test_node_3", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + 
}, + }, + }, + fetchNodes: []string{ + "spiffe://example.org/test_node_3", + }, + + expectedAuthorizedEntries: []string{ + "spiffe://example.org/test_node_3", + }, + }, + { + name: "one node in cache, fetch one node, as a delete", + setup: &nodeScenarioSetup{ + attestedNodes: []*common.AttestedNode{ + { + SpiffeId: "spiffe://example.org/test_node_3", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + }, + }, + deleteAttestedNodes: []string{ + "spiffe://example.org/test_node_3", + }, + fetchNodes: []string{ + "spiffe://example.org/test_node_3", + }, + + expectedAuthorizedEntries: []string{}, + }, + { + name: "one node in cache, fetch five nodes, all new entries", + setup: &nodeScenarioSetup{ + attestedNodes: []*common.AttestedNode{ + { + SpiffeId: "spiffe://example.org/test_node_3", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + }, + }, + createAttestedNodes: []*common.AttestedNode{ + { + SpiffeId: "spiffe://example.org/test_node_1", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_2", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_4", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_5", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_6", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + }, + fetchNodes: []string{ + "spiffe://example.org/test_node_1", + "spiffe://example.org/test_node_2", + "spiffe://example.org/test_node_4", + "spiffe://example.org/test_node_5", + "spiffe://example.org/test_node_6", + }, + + expectedAuthorizedEntries: []string{ + "spiffe://example.org/test_node_1", + "spiffe://example.org/test_node_2", + "spiffe://example.org/test_node_3", + "spiffe://example.org/test_node_4", + "spiffe://example.org/test_node_5", + "spiffe://example.org/test_node_6", + }, + }, + { + name: "one node in cache, fetch five nodes, four new entries and one update", + setup: &nodeScenarioSetup{ + attestedNodes: []*common.AttestedNode{ + { + SpiffeId: "spiffe://example.org/test_node_3", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + }, + }, + createAttestedNodes: []*common.AttestedNode{ + { + SpiffeId: "spiffe://example.org/test_node_1", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_2", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_4", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + { + SpiffeId: "spiffe://example.org/test_node_5", + CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(), + }, + }, + fetchNodes: []string{ + "spiffe://example.org/test_node_1", + "spiffe://example.org/test_node_2", + "spiffe://example.org/test_node_3", + "spiffe://example.org/test_node_4", + "spiffe://example.org/test_node_5", + }, + + expectedAuthorizedEntries: []string{ + "spiffe://example.org/test_node_1", + "spiffe://example.org/test_node_2", + "spiffe://example.org/test_node_3", + "spiffe://example.org/test_node_4", + "spiffe://example.org/test_node_5", + }, + }, + { + name: "one node in cache, fetch five nodes, two new and three deletes", + setup: &nodeScenarioSetup{ + attestedNodes: 
[]*common.AttestedNode{
+					{
+						SpiffeId:     "spiffe://example.org/test_node_3",
+						CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(),
+					},
+				},
+			},
+			createAttestedNodes: []*common.AttestedNode{
+				{
+					SpiffeId:     "spiffe://example.org/test_node_1",
+					CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(),
+				},
+				{
+					SpiffeId:     "spiffe://example.org/test_node_2",
+					CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(),
+				},
+			},
+			deleteAttestedNodes: []string{
+				"spiffe://example.org/test_node_3",
+			},
+			fetchNodes: []string{
+				"spiffe://example.org/test_node_1",
+				"spiffe://example.org/test_node_2",
+				"spiffe://example.org/test_node_3",
+				"spiffe://example.org/test_node_4",
+				"spiffe://example.org/test_node_5",
+			},
+
+			expectedAuthorizedEntries: []string{
+				"spiffe://example.org/test_node_1",
+				"spiffe://example.org/test_node_2",
+			},
+		},
+		{
+			name: "one node in cache, fetch five nodes, all deletes",
+			setup: &nodeScenarioSetup{
+				attestedNodes: []*common.AttestedNode{
+					{
+						SpiffeId:     "spiffe://example.org/test_node_3",
+						CertNotAfter: time.Now().Add(time.Duration(240) * time.Hour).Unix(),
+					},
+				},
+			},
+			deleteAttestedNodes: []string{
+				"spiffe://example.org/test_node_3",
+			},
+			fetchNodes: []string{
+				"spiffe://example.org/test_node_1",
+				"spiffe://example.org/test_node_2",
+				"spiffe://example.org/test_node_3",
+				"spiffe://example.org/test_node_4",
+				"spiffe://example.org/test_node_5",
+			},
+
+			expectedAuthorizedEntries: []string{},
+		},
+	} {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			scenario := NewNodeScenario(t, tt.setup)
+			attestedNodes, err := scenario.buildAttestedNodesCache()
+			require.NoError(t, err)
+
+			for _, attestedNode := range tt.createAttestedNodes {
+				_, err = scenario.ds.CreateAttestedNode(scenario.ctx, attestedNode)
+				require.NoError(t, err, "error while setting up test")
 			}
+			for _, attestedNode := range tt.deleteAttestedNodes {
+				_, err = scenario.ds.DeleteAttestedNode(scenario.ctx, attestedNode)
+				require.NoError(t, err, "error while setting up test")
+			}
+			for _, fetchNode := range tt.fetchNodes {
+				attestedNodes.fetchNodes[fetchNode] = struct{}{}
+			}
+			// clear out the events, to prove updates are not event based
+			err = scenario.ds.PruneAttestedNodeEvents(scenario.ctx, time.Duration(-5)*time.Hour)
+			require.NoError(t, err, "error while setting up test")
+
+			err = attestedNodes.updateCachedNodes(scenario.ctx)
+			require.NoError(t, err)
 
-			assert.Equal(t, tt.expectedLastAttestedNodeEventID, attestedNodes.lastEventID)
+			cacheStats := attestedNodes.cache.Stats()
+			require.Equal(t, len(tt.expectedAuthorizedEntries), cacheStats.AgentsByID, "wrong number of agents by ID")
 
-			if tt.expectMetrics != nil {
-				assert.Subset(t, metrics.AllMetrics(), tt.expectMetrics)
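All of the cases above exercise one rule in updateCachedNodes: every SPIFFE ID placed in fetchNodes is looked up in the datastore, and a node that no longer exists is treated as a cache delete while a node that does exist is written back as an update. A minimal, runnable sketch of that rule follows; the plain maps here are stand-ins for the real datastore and authorizedentries cache, not the patch's actual types:

package main

import "fmt"

func main() {
	datastore := map[string]bool{"spiffe://example.org/test_node_1": true} // stand-in for the attested-nodes table
	cache := map[string]bool{}                                             // stand-in for the authorized-entries cache
	fetchNodes := map[string]struct{}{
		"spiffe://example.org/test_node_1": {}, // present in the datastore: cached as an update
		"spiffe://example.org/test_node_2": {}, // absent from the datastore: treated as a delete
	}
	for id := range fetchNodes {
		if !datastore[id] {
			delete(cache, id) // node is gone: drop any cached copy
			continue
		}
		cache[id] = true // node exists: create or refresh the cache entry
	}
	fmt.Println(len(cache)) // 1
}

This is why the test can prune all events first: the update pass is driven purely by the fetchNodes set, not by the event log.

+			// for now, the only way to ensure the desired agent ids are present is
+			// to remove the desired ids and check the count is zero.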
+ for _, expectedAuthorizedId := range tt.expectedAuthorizedEntries { + attestedNodes.cache.RemoveAgent(expectedAuthorizedId) } + cacheStats = attestedNodes.cache.Stats() + require.Equal(t, 0, cacheStats.AgentsByID, "clearing all expected agent ids didn't clear cache") }) } } -func TestAttestedNodesCacheMissedEventNotFound(t *testing.T) { - ctx := context.Background() - log, hook := test.NewNullLogger() - log.SetLevel(logrus.DebugLevel) - clk := clock.NewMock(t) - ds := fakedatastore.New(t) - cache := authorizedentries.NewCache(clk) - metrics := fakemetrics.New() - - attestedNodes, err := buildAttestedNodesCache(ctx, log, metrics, ds, clk, cache, defaultSQLTransactionTimeout) - require.NoError(t, err) - require.NotNil(t, attestedNodes) +// utility functions +type scenario struct { + ctx context.Context + log *logrus.Logger + hook *test.Hook + clk *clock.Mock + cache *authorizedentries.Cache + metrics *fakemetrics.FakeMetrics + ds *fakedatastore.DataStore +} - attestedNodes.missedEvents[1] = clk.Now() - attestedNodes.replayMissedEvents(ctx) - require.Zero(t, hook.Entries) +type nodeScenarioSetup struct { + attestedNodes []*common.AttestedNode + attestedNodeEvents []*datastore.AttestedNodeEvent + err error } -func TestAttestedNodesSavesMissedStartupEvents(t *testing.T) { +func NewNodeScenario(t *testing.T, setup *nodeScenarioSetup) *scenario { + t.Helper() ctx := context.Background() log, hook := test.NewNullLogger() log.SetLevel(logrus.DebugLevel) clk := clock.NewMock(t) - ds := fakedatastore.New(t) cache := authorizedentries.NewCache(clk) metrics := fakemetrics.New() + ds := fakedatastore.New(t) + + if setup == nil { + setup = &nodeScenarioSetup{} + } + + var err error + // initialize the database + for _, attestedNode := range setup.attestedNodes { + _, err = ds.CreateAttestedNode(ctx, attestedNode) + require.NoError(t, err, "error while setting up test") + } + // prune autocreated node events, to test the event logic in more scenarios + // than possible with autocreated node events. 
+ err = ds.PruneAttestedNodeEvents(ctx, time.Duration(-5)*time.Hour) + require.NoError(t, err, "error while setting up test") + // and then add back the specified node events + for _, event := range setup.attestedNodeEvents { + err = ds.CreateAttestedNodeEventForTesting(ctx, event) + require.NoError(t, err, "error while setting up test") + } + // inject db error for buildAttestedNodesCache call + if setup.err != nil { + ds.AppendNextError(setup.err) + } - err := ds.CreateAttestedNodeEventForTesting(ctx, &datastore.AttestedNodeEvent{ - EventID: 3, - SpiffeID: "test", - }) - require.NoError(t, err) - - attestedNodes, err := buildAttestedNodesCache(ctx, log, metrics, ds, clk, cache, defaultSQLTransactionTimeout) - require.NoError(t, err) - require.NotNil(t, attestedNodes) - require.Equal(t, uint(3), attestedNodes.firstEventID) - - err = ds.CreateAttestedNodeEventForTesting(ctx, &datastore.AttestedNodeEvent{ - EventID: 2, - SpiffeID: "test", - }) - require.NoError(t, err) - - err = attestedNodes.missedStartupEvents(ctx) - require.NoError(t, err) - - // Make sure no dupliate calls are made - ds.AppendNextError(nil) - ds.AppendNextError(errors.New("Duplicate call")) - err = attestedNodes.missedStartupEvents(ctx) - require.NoError(t, err) - require.Equal(t, 0, len(hook.AllEntries())) + return &scenario{ + ctx: ctx, + log: log, + hook: hook, + clk: clk, + cache: cache, + metrics: metrics, + ds: ds, + } +} + +func (s *scenario) buildAttestedNodesCache() (*attestedNodes, error) { + attestedNodes, err := buildAttestedNodesCache(s.ctx, s.log, s.metrics, s.ds, s.clk, s.cache, defaultCacheReloadInterval, defaultSQLTransactionTimeout) + if attestedNodes != nil { + // clear out the fetches + for node, _ := range attestedNodes.fetchNodes { + delete(attestedNodes.fetchNodes, node) + } + } + return attestedNodes, err } diff --git a/pkg/server/endpoints/authorized_entryfetcher_registration_entries.go b/pkg/server/endpoints/authorized_entryfetcher_registration_entries.go index 570cbad008..3fd9914c6d 100644 --- a/pkg/server/endpoints/authorized_entryfetcher_registration_entries.go +++ b/pkg/server/endpoints/authorized_entryfetcher_registration_entries.go @@ -3,7 +3,6 @@ package endpoints import ( "context" "fmt" - "sync" "time" "github.com/andres-erbsen/clock" @@ -23,47 +22,129 @@ type registrationEntries struct { ds datastore.DataStore log logrus.FieldLogger metrics telemetry.Metrics - mu sync.RWMutex - - firstEventID uint - firstEventTime time.Time - lastEventID uint - missedEvents map[uint]time.Time - seenMissedStartupEvents map[uint]struct{} - sqlTransactionTimeout time.Duration + + eventsBeforeFirst map[uint]struct{} + + firstEvent uint + firstEventTime time.Time + lastEvent uint + + eventTracker *eventTracker + sqlTransactionTimeout time.Duration + + fetchEntries map[string]struct{} + + // metrics change detection + skippedEntryEvents int + lastCacheStats authorizedentries.CacheStats } -// buildRegistrationEntriesCache Fetches all registration entries and adds them to the cache -func buildRegistrationEntriesCache(ctx context.Context, log logrus.FieldLogger, metrics telemetry.Metrics, ds datastore.DataStore, clk clock.Clock, cache *authorizedentries.Cache, pageSize int32, sqlTransactionTimeout time.Duration) (*registrationEntries, error) { - resp, err := ds.ListRegistrationEntriesEvents(ctx, &datastore.ListRegistrationEntriesEventsRequest{}) +func (a *registrationEntries) captureChangedEntries(ctx context.Context) error { + // first, reset the entries we might fetch. 
+	a.fetchEntries = make(map[string]struct{})
+
+	if err := a.searchBeforeFirstEvent(ctx); err != nil {
+		return err
+	}
+	a.selectPolledEvents(ctx)
+	if err := a.scanForNewEvents(ctx); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *registrationEntries) searchBeforeFirstEvent(ctx context.Context) error {
+	// A first event was detected, and startup was less than a transaction timeout ago.
+	if !a.firstEventTime.IsZero() && a.clk.Now().Sub(a.firstEventTime) <= a.sqlTransactionTimeout {
+		resp, err := a.ds.ListRegistrationEntryEvents(ctx, &datastore.ListRegistrationEntryEventsRequest{
+			LessThanEventID: a.firstEvent,
+		})
+		if err != nil {
+			return err
+		}
+		for _, event := range resp.Events {
+			// if we have seen it before, don't reload it.
+			if _, seen := a.eventsBeforeFirst[event.EventID]; !seen {
+				a.fetchEntries[event.EntryID] = struct{}{}
+				a.eventsBeforeFirst[event.EventID] = struct{}{}
+			}
+		}
+		return nil
+	}
+
+	// the startup window has passed; clear the now-unneeded before-first tracking
+	if len(a.eventsBeforeFirst) != 0 {
+		a.eventsBeforeFirst = make(map[uint]struct{})
+	}
+
+	return nil
+}
+
+func (a *registrationEntries) selectPolledEvents(ctx context.Context) {
+	// check whether any previously skipped events have since appeared
+	selectedEvents := a.eventTracker.SelectEvents()
+	for _, eventID := range selectedEvents {
+		log := a.log.WithField(telemetry.EventID, eventID)
+		event, err := a.ds.FetchRegistrationEntryEvent(ctx, eventID)
+
+		switch status.Code(err) {
+		case codes.OK:
+		case codes.NotFound:
+			continue
+		default:
+			log.WithError(err).Errorf("Failed to fetch info about skipped event %d", eventID)
+			continue
+		}
+
+		a.fetchEntries[event.EntryID] = struct{}{}
+		a.eventTracker.StopTracking(eventID)
+	}
+	a.eventTracker.FreeEvents(selectedEvents)
+}
+
+func (a *registrationEntries) scanForNewEvents(ctx context.Context) error {
+	// If we haven't seen an event, scan for all events; otherwise, scan from the last event.
+	var resp *datastore.ListRegistrationEntryEventsResponse
+	var err error
+	if a.firstEventTime.IsZero() {
+		resp, err = a.ds.ListRegistrationEntryEvents(ctx, &datastore.ListRegistrationEntryEventsRequest{})
+	} else {
+		resp, err = a.ds.ListRegistrationEntryEvents(ctx, &datastore.ListRegistrationEntryEventsRequest{
+			GreaterThanEventID: a.lastEvent,
+		})
+	}
 	if err != nil {
-		return nil, err
+		return err
 	}
 
-	// Gather any events that may have been skipped during restart
-	var firstEventID uint
-	var firstEventTime time.Time
-	var lastEventID uint
-	missedEvents := make(map[uint]time.Time)
 	for _, event := range resp.Events {
-		now := clk.Now()
-		if firstEventTime.IsZero() {
-			firstEventID = event.EventID
-			firstEventTime = now
-		} else {
-			// After getting the first event, search for any gaps in the event stream, from the first event to the last event.
-			// During each cache refresh cycle, we will check if any of these missed events get populated.
-			for i := lastEventID + 1; i < event.EventID; i++ {
-				missedEvents[i] = clk.Now()
-			}
+		// a zero first-event time means this is the first event we have seen.
+		if a.firstEventTime.IsZero() {
+			a.firstEvent = event.EventID
+			a.lastEvent = event.EventID
+			a.fetchEntries[event.EntryID] = struct{}{}
+			a.firstEventTime = a.clk.Now()
+			continue
+		}
+
+		// track any skipped event ids, should they appear later.
+		for skipped := a.lastEvent + 1; skipped < event.EventID; skipped++ {
+			a.eventTracker.StartTracking(skipped)
 		}
-		lastEventID = event.EventID
+
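To make the skipped-ID bookkeeping above concrete: if the last event processed was 102 and the next row returned is 106, IDs 103 through 105 may have been allocated by transactions that had not committed at read time, so they are remembered and polled on later cycles. A runnable sketch of just that bookkeeping, with a toy tracker standing in for this patch's eventTracker:

package main

import "fmt"

// toyTracker stands in for the eventTracker used above, reduced to the one
// behavior sketched here: remembering skipped event IDs for later polling.
type toyTracker struct{ events map[uint]struct{} }

func (t *toyTracker) StartTracking(id uint) { t.events[id] = struct{}{} }

func main() {
	tracker := &toyTracker{events: map[uint]struct{}{}}
	lastEvent, nextEvent := uint(102), uint(106)
	for skipped := lastEvent + 1; skipped < nextEvent; skipped++ {
		tracker.StartTracking(skipped) // tracks 103, 104, 105
	}
	fmt.Println(len(tracker.events)) // 3 skipped IDs to poll on later cycles
}

+		// every event adds its entry to the entry fetch list.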
+ a.fetchEntries[event.EntryID] = struct{}{} + a.lastEvent = event.EventID } + return nil +} +func (a *registrationEntries) loadCache(ctx context.Context, pageSize int32) error { // Build the cache var token string for { - resp, err := ds.ListRegistrationEntries(ctx, &datastore.ListRegistrationEntriesRequest{ + resp, err := a.ds.ListRegistrationEntries(ctx, &datastore.ListRegistrationEntriesRequest{ DataConsistency: datastore.RequireCurrent, // preliminary loading should not be done via read-replicas Pagination: &datastore.Pagination{ Token: token, @@ -71,7 +152,7 @@ func buildRegistrationEntriesCache(ctx context.Context, log logrus.FieldLogger, }, }) if err != nil { - return nil, fmt.Errorf("failed to list registration entries: %w", err) + return fmt.Errorf("failed to list registration entries: %w", err) } token = resp.Pagination.Token @@ -81,183 +162,115 @@ func buildRegistrationEntriesCache(ctx context.Context, log logrus.FieldLogger, entries, err := api.RegistrationEntriesToProto(resp.Entries) if err != nil { - return nil, fmt.Errorf("failed to convert registration entries: %w", err) + return fmt.Errorf("failed to convert registration entries: %w", err) } for _, entry := range entries { - cache.UpdateEntry(entry) + a.cache.UpdateEntry(entry) } } - - return ®istrationEntries{ - cache: cache, - clk: clk, - ds: ds, - firstEventID: firstEventID, - firstEventTime: firstEventTime, - log: log, - metrics: metrics, - lastEventID: lastEventID, - missedEvents: missedEvents, - seenMissedStartupEvents: make(map[uint]struct{}), - sqlTransactionTimeout: sqlTransactionTimeout, - }, nil + return nil } -// updateCache Fetches all the events since the last time this function was running and updates -// the cache with all the changes. -func (a *registrationEntries) updateCache(ctx context.Context) error { - // Process events skipped over previously - if err := a.missedStartupEvents(ctx); err != nil { - a.log.WithError(err).Error("Unable to process missed startup events") +// buildRegistrationEntriesCache Fetches all registration entries and adds them to the cache +func buildRegistrationEntriesCache(ctx context.Context, log logrus.FieldLogger, metrics telemetry.Metrics, ds datastore.DataStore, clk clock.Clock, cache *authorizedentries.Cache, pageSize int32, cacheReloadInterval, sqlTransactionTimeout time.Duration) (*registrationEntries, error) { + pollPeriods := PollPeriods(cacheReloadInterval, sqlTransactionTimeout) + + registrationEntries := ®istrationEntries{ + cache: cache, + clk: clk, + ds: ds, + log: log, + metrics: metrics, + sqlTransactionTimeout: sqlTransactionTimeout, + + eventsBeforeFirst: make(map[uint]struct{}), + fetchEntries: make(map[string]struct{}), + + eventTracker: NewEventTracker(pollPeriods), + + skippedEntryEvents: -1, + lastCacheStats: authorizedentries.CacheStats{ + AliasesByEntryID: -1, + AliasesBySelector: -1, + EntriesByEntryID: -1, + EntriesByParentID: -1, + }, } - a.replayMissedEvents(ctx) - req := &datastore.ListRegistrationEntriesEventsRequest{ - GreaterThanEventID: a.lastEventID, - } - resp, err := a.ds.ListRegistrationEntriesEvents(ctx, req) - if err != nil { - return err + if err := registrationEntries.loadCache(ctx, pageSize); err != nil { + return nil, err } - - seenMap := map[string]struct{}{} - for _, event := range resp.Events { - // If there is a gap in the event stream, log the missed events for later processing. 
- // For example if the current event ID is 6 and the previous one was 3, events 4 and 5 - // were skipped over and need to be queued in case they show up later. - // This can happen when a long running transaction allocates an event ID but a shorter transaction - // comes in after, allocates and commits the ID first. If a read comes in at this moment, the event id for - // the longer running transaction will be skipped over. - if !a.firstEventTime.IsZero() { - for i := a.lastEventID + 1; i < event.EventID; i++ { - a.log.WithField(telemetry.EventID, i).Info("Detected skipped registration entry event") - a.mu.Lock() - a.missedEvents[i] = a.clk.Now() - a.mu.Unlock() - } - } - - // Skip fetching entries we've already fetched this call - if _, seen := seenMap[event.EntryID]; seen { - a.lastEventID = event.EventID - continue - } - seenMap[event.EntryID] = struct{}{} - - // Update the cache - if err := a.updateCacheEntry(ctx, event.EntryID); err != nil { - return err - } - - if a.firstEventTime.IsZero() { - a.firstEventID = event.EventID - a.firstEventTime = a.clk.Now() - } - a.lastEventID = event.EventID + if err := registrationEntries.updateCache(ctx); err != nil { + return nil, err } - // These two should be the same value but it's valuable to have them both be emitted for incident triage. - server_telemetry.SetNodeAliasesByEntryIDCacheCountGauge(a.metrics, a.cache.Stats().AliasesByEntryID) - server_telemetry.SetNodeAliasesBySelectorCacheCountGauge(a.metrics, a.cache.Stats().AliasesBySelector) - - // These two should be the same value but it's valuable to have them both be emitted for incident triage. - server_telemetry.SetEntriesByEntryIDCacheCountGauge(a.metrics, a.cache.Stats().EntriesByEntryID) - server_telemetry.SetEntriesByParentIDCacheCountGauge(a.metrics, a.cache.Stats().EntriesByParentID) - - return nil + return registrationEntries, nil } -// missedStartupEvents will check for any events come in with an ID less than the first event ID we receive. -// For example if the first event ID we receive is 3, this function will check for any IDs less than that. -// If event ID 2 comes in later on, due to a long running transaction, this function will update the cache -// with the information from this event. This function will run until time equal to sqlTransactionTimeout has elapsed after startup. -func (a *registrationEntries) missedStartupEvents(ctx context.Context) error { - if a.firstEventTime.IsZero() || a.clk.Now().Sub(a.firstEventTime) > a.sqlTransactionTimeout { - return nil - } - - req := &datastore.ListRegistrationEntriesEventsRequest{ - LessThanEventID: a.firstEventID, - } - resp, err := a.ds.ListRegistrationEntriesEvents(ctx, req) - if err != nil { +// updateCache Fetches all the events since the last time this function was running and updates +// the cache with all the changes. +func (a *registrationEntries) updateCache(ctx context.Context) error { + if err := a.captureChangedEntries(ctx); err != nil { return err } - - for _, event := range resp.Events { - if _, seen := a.seenMissedStartupEvents[event.EventID]; !seen { - if err := a.updateCacheEntry(ctx, event.EntryID); err != nil { - a.log.WithError(err).Error("Failed to process missed startup event") - continue - } - a.seenMissedStartupEvents[event.EventID] = struct{}{} - } + if err := a.updateCachedEntries(ctx); err != nil { + return err } + a.emitMetrics() return nil } -// replayMissedEvents Processes events that have been skipped over. Events can come out of order from -// SQL. 
This function processes events that came in later than expected.
-func (a *registrationEntries) replayMissedEvents(ctx context.Context) {
-	a.mu.Lock()
-	defer a.mu.Unlock()
-
-	for eventID := range a.missedEvents {
-		log := a.log.WithField(telemetry.EventID, eventID)
+// updateCachedEntries updates, deletes, or creates the cached registration entries named in the fetchEntries set.
+func (a *registrationEntries) updateCachedEntries(ctx context.Context) error {
+	for entryId := range a.fetchEntries {
+		commonEntry, err := a.ds.FetchRegistrationEntry(ctx, entryId)
+		if err != nil {
+			return err
+		}
 
-		event, err := a.ds.FetchRegistrationEntryEvent(ctx, eventID)
-		switch status.Code(err) {
-		case codes.OK:
-		case codes.NotFound:
-			continue
-		default:
-			log.WithError(err).Error("Failed to fetch info about missed event")
+		if commonEntry == nil {
+			a.cache.RemoveEntry(entryId)
+			delete(a.fetchEntries, entryId)
 			continue
 		}
 
-		if err := a.updateCacheEntry(ctx, event.EntryID); err != nil {
-			log.WithError(err).Error("Failed to process missed event")
+		entry, err := api.RegistrationEntryToProto(commonEntry)
+		if err != nil {
+			a.cache.RemoveEntry(entryId)
+			delete(a.fetchEntries, entryId)
+			a.log.WithField(telemetry.RegistrationID, entryId).Warn("Removed malformed registration entry from cache")
 			continue
 		}
 
-		delete(a.missedEvents, eventID)
+		a.cache.UpdateEntry(entry)
+		delete(a.fetchEntries, entryId)
 	}
 
-	server_telemetry.SetSkippedEntryEventIDsCacheCountGauge(a.metrics, len(a.missedEvents))
+	return nil
 }
 
-// updateCacheEntry update/deletes/creates an individual registration entry in the cache.
-func (a *registrationEntries) updateCacheEntry(ctx context.Context, entryID string) error {
-	commonEntry, err := a.ds.FetchRegistrationEntry(ctx, entryID)
-	if err != nil {
-		return err
+func (a *registrationEntries) emitMetrics() {
+	if a.skippedEntryEvents != int(a.eventTracker.EventCount()) {
+		a.skippedEntryEvents = int(a.eventTracker.EventCount())
+		server_telemetry.SetSkippedEntryEventIDsCacheCountGauge(a.metrics, a.skippedEntryEvents)
 	}
 
-	if commonEntry == nil {
-		a.cache.RemoveEntry(entryID)
-		return nil
+	cacheStats := a.cache.Stats()
+	if a.lastCacheStats.AliasesByEntryID != cacheStats.AliasesByEntryID {
+		a.lastCacheStats.AliasesByEntryID = cacheStats.AliasesByEntryID
+		server_telemetry.SetNodeAliasesByEntryIDCacheCountGauge(a.metrics, a.lastCacheStats.AliasesByEntryID)
 	}
-
-	entry, err := api.RegistrationEntryToProto(commonEntry)
-	if err != nil {
-		a.cache.RemoveEntry(entryID)
-		a.log.WithField(telemetry.RegistrationID, entryID).Warn("Removed malformed registration entry from cache")
-		return nil
+	if a.lastCacheStats.AliasesBySelector != cacheStats.AliasesBySelector {
+		a.lastCacheStats.AliasesBySelector = cacheStats.AliasesBySelector
+		server_telemetry.SetNodeAliasesBySelectorCacheCountGauge(a.metrics, a.lastCacheStats.AliasesBySelector)
 	}
-
-	a.cache.UpdateEntry(entry)
-	return nil
-}
-
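emitMetrics, begun above and finished below, applies one pattern to every gauge: remember the last value emitted and skip the setter when the value is unchanged, so steady-state reload cycles do not spam telemetry. A runnable sketch of that change-detection pattern, with a printing closure standing in for the server_telemetry setters:

package main

import "fmt"

type gauge struct {
	last int       // seeded with -1 so the first observation always emits
	emit func(int) // stand-in for a server_telemetry gauge setter
}

func (g *gauge) observe(v int) {
	if g.last == v {
		return // unchanged: suppress the emit
	}
	g.last = v
	g.emit(v)
}

func main() {
	g := &gauge{last: -1, emit: func(v int) { fmt.Println("gauge:", v) }}
	g.observe(5) // emits 5
	g.observe(5) // suppressed
	g.observe(7) // emits 7
}

This is also why lastCacheStats and skippedEntryEvents are initialized to -1 in buildRegistrationEntriesCache: the first reload cycle always reports.

-// prunedMissedEvents delete missed events that are older than the configured SQL transaction timeout time.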
-func (a *registrationEntries) pruneMissedEvents() { - a.mu.Lock() - defer a.mu.Unlock() - - for eventID, eventTime := range a.missedEvents { - if a.clk.Now().Sub(eventTime) > a.sqlTransactionTimeout { - delete(a.missedEvents, eventID) - } + if a.lastCacheStats.EntriesByEntryID != cacheStats.EntriesByEntryID { + a.lastCacheStats.EntriesByEntryID = cacheStats.EntriesByEntryID + server_telemetry.SetEntriesByEntryIDCacheCountGauge(a.metrics, a.lastCacheStats.EntriesByEntryID) + } + if a.lastCacheStats.EntriesByParentID != cacheStats.EntriesByParentID { + a.lastCacheStats.EntriesByParentID = cacheStats.EntriesByParentID + server_telemetry.SetEntriesByParentIDCacheCountGauge(a.metrics, a.lastCacheStats.EntriesByParentID) } } diff --git a/pkg/server/endpoints/authorized_entryfetcher_registration_entries_test.go b/pkg/server/endpoints/authorized_entryfetcher_registration_entries_test.go index 44b0b531ed..2bd21cf98a 100644 --- a/pkg/server/endpoints/authorized_entryfetcher_registration_entries_test.go +++ b/pkg/server/endpoints/authorized_entryfetcher_registration_entries_test.go @@ -3,14 +3,15 @@ package endpoints import ( "context" "errors" - "sort" - "strconv" + "maps" + "slices" + "strings" "testing" + "time" "github.com/sirupsen/logrus" "github.com/sirupsen/logrus/hooks/test" - "github.com/spiffe/go-spiffe/v2/spiffeid" - "github.com/spiffe/spire/pkg/common/idutil" + "github.com/spiffe/spire/pkg/common/telemetry" "github.com/spiffe/spire/pkg/server/authorizedentries" "github.com/spiffe/spire/pkg/server/datastore" "github.com/spiffe/spire/proto/spire/common" @@ -20,137 +21,1899 @@ import ( "github.com/stretchr/testify/require" ) -func TestBuildRegistrationEntriesCache(t *testing.T) { - ctx := context.Background() - log, _ := test.NewNullLogger() - clk := clock.NewMock(t) - ds := fakedatastore.New(t) - - agentID, err := spiffeid.FromString("spiffe://example.org/myagent") - require.NoError(t, err) +var ( + nodeAliasesByEntryID = []string{telemetry.Entry, telemetry.NodeAliasesByEntryIDCache, telemetry.Count} + nodeAliasesBySelector = []string{telemetry.Entry, telemetry.NodeAliasesBySelectorCache, telemetry.Count} + entriesByEntryID = []string{telemetry.Entry, telemetry.EntriesByEntryIDCache, telemetry.Count} + entriesByParentID = []string{telemetry.Entry, telemetry.EntriesByParentIDCache, telemetry.Count} + skippedEntryEventID = []string{telemetry.Entry, telemetry.SkippedEntryEventIDs, telemetry.Count} - // Create registration entries - numEntries := 10 - for i := 0; i < numEntries; i++ { - _, err := ds.CreateRegistrationEntry(ctx, &common.RegistrationEntry{ - SpiffeId: "spiffe://example.org/workload" + strconv.Itoa(i), - ParentId: agentID.String(), + defaultRegistrationEntries = []*common.RegistrationEntry{ + &common.RegistrationEntry{ + EntryId: "47c96201-a4b1-4116-97fe-8aa9c2440aad", + ParentId: "spiffe://example.org/test_node_1", + SpiffeId: "spiffe://example.org/test_job_2", Selectors: []*common.Selector{ - { - Type: "workload", - Value: "one", + {Type: "testjob", Value: "2"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_3", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "3"}, + }, + }, + } + defaultRegistrationEntryEventsStartingAt60 = []*datastore.RegistrationEntryEvent{ + &datastore.RegistrationEntryEvent{ + EventID: 60, + EntryID: "47c96201-a4b1-4116-97fe-8aa9c2440aad", + }, + &datastore.RegistrationEntryEvent{ + EventID: 61, + EntryID: 
"1d78521b-cc92-47c1-85a5-28ce47f121f2", + }, + } + defaultFirstEntryEvent = uint(60) + defaultLastEntryEvent = uint(61) + + NoEntryFetches = []string{} +) + +func TestLoadEntryCache(t *testing.T) { + for _, tt := range []struct { + name string + setup *entryScenarioSetup + + expectedError string + expectedRegistrationEntries []string + expectedGauges []expectedGauge + }{ + { + name: "initial load returns an error", + setup: &entryScenarioSetup{ + err: errors.New("any error, doesn't matter"), + }, + expectedError: "any error, doesn't matter", + }, + { + name: "loading nothing with a page size of zero raises an error", + setup: &entryScenarioSetup{ + pageSize: 0, + }, + expectedError: "cannot paginate with pagesize = 0", + }, + { + name: "initial load loads nothing", + setup: &entryScenarioSetup{ + pageSize: 1000, + }, + }, + { + name: "one registration entry with a page size of zero raises an error", + setup: &entryScenarioSetup{ + pageSize: 0, + registrationEntries: []*common.RegistrationEntry{ + &common.RegistrationEntry{ + EntryId: "6837984a-bc44-462b-9ca6-5cd59be35066", + ParentId: "spiffe://example.org/test_node_1", + SpiffeId: "spiffe://example.org/test_job_1", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "1"}, + }, + }, }, }, + expectedError: "cannot paginate with pagesize = 0", + }, + { + name: "initial load loads one registration entry", + setup: &entryScenarioSetup{ + pageSize: 1000, + registrationEntries: []*common.RegistrationEntry{ + &common.RegistrationEntry{ + EntryId: "6837984a-bc44-462b-9ca6-5cd59be35066", + ParentId: "spiffe://example.org/test_node_1", + SpiffeId: "spiffe://example.org/test_job_1", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "1"}, + }, + }, + }, + }, + expectedRegistrationEntries: []string{ + "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + expectedGauges: []expectedGauge{ + expectedGauge{Key: skippedEntryEventID, Value: 0}, + expectedGauge{Key: nodeAliasesByEntryID, Value: 0}, + expectedGauge{Key: nodeAliasesBySelector, Value: 0}, + expectedGauge{Key: entriesByEntryID, Value: 1}, + expectedGauge{Key: entriesByParentID, Value: 1}, + }, + }, + { + name: "five registration entries with a page size of zero raises an error", + setup: &entryScenarioSetup{ + pageSize: 0, + registrationEntries: []*common.RegistrationEntry{ + &common.RegistrationEntry{ + EntryId: "6837984a-bc44-462b-9ca6-5cd59be35066", + ParentId: "spiffe://example.org/test_node_1", + SpiffeId: "spiffe://example.org/test_job_1", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "1"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "47c96201-a4b1-4116-97fe-8aa9c2440aad", + ParentId: "spiffe://example.org/test_node_1", + SpiffeId: "spiffe://example.org/test_job_2", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "2"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_3", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "3"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "8cbf7d48-9d43-41ae-ab63-77d66891f948", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_4", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "4"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "354c16f4-4e61-4c17-8596-7baa7744d504", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_5", + Selectors: []*common.Selector{ + {Type: "testjob", 
Value: "5"}, + }, + }, + }, + }, + expectedError: "cannot paginate with pagesize = 0", + }, + { + name: "initial load loads five registration entries", + setup: &entryScenarioSetup{ + pageSize: 1000, + registrationEntries: []*common.RegistrationEntry{ + &common.RegistrationEntry{ + EntryId: "6837984a-bc44-462b-9ca6-5cd59be35066", + ParentId: "spiffe://example.org/test_node_1", + SpiffeId: "spiffe://example.org/test_job_1", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "1"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "47c96201-a4b1-4116-97fe-8aa9c2440aad", + ParentId: "spiffe://example.org/test_node_1", + SpiffeId: "spiffe://example.org/test_job_2", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "2"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_3", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "3"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "8cbf7d48-9d43-41ae-ab63-77d66891f948", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_4", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "4"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "354c16f4-4e61-4c17-8596-7baa7744d504", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_5", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "5"}, + }, + }, + }, + }, + expectedRegistrationEntries: []string{ + "6837984a-bc44-462b-9ca6-5cd59be35066", + "47c96201-a4b1-4116-97fe-8aa9c2440aad", + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + "8cbf7d48-9d43-41ae-ab63-77d66891f948", + "354c16f4-4e61-4c17-8596-7baa7744d504", + }, + expectedGauges: []expectedGauge{ + expectedGauge{Key: skippedEntryEventID, Value: 0}, + expectedGauge{Key: nodeAliasesByEntryID, Value: 0}, + expectedGauge{Key: nodeAliasesBySelector, Value: 0}, + expectedGauge{Key: entriesByEntryID, Value: 5}, + expectedGauge{Key: entriesByParentID, Value: 5}, + }, + }, + { + name: "initial load loads five registration entries, in one page exact", + setup: &entryScenarioSetup{ + pageSize: 5, + registrationEntries: []*common.RegistrationEntry{ + &common.RegistrationEntry{ + EntryId: "6837984a-bc44-462b-9ca6-5cd59be35066", + ParentId: "spiffe://example.org/test_node_1", + SpiffeId: "spiffe://example.org/test_job_1", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "1"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "47c96201-a4b1-4116-97fe-8aa9c2440aad", + ParentId: "spiffe://example.org/test_node_1", + SpiffeId: "spiffe://example.org/test_job_2", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "2"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_3", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "3"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "8cbf7d48-9d43-41ae-ab63-77d66891f948", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_4", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "4"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "354c16f4-4e61-4c17-8596-7baa7744d504", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_5", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "5"}, + }, + }, + }, + }, + expectedRegistrationEntries: 
[]string{
+				"6837984a-bc44-462b-9ca6-5cd59be35066",
+				"47c96201-a4b1-4116-97fe-8aa9c2440aad",
+				"1d78521b-cc92-47c1-85a5-28ce47f121f2",
+				"8cbf7d48-9d43-41ae-ab63-77d66891f948",
+				"354c16f4-4e61-4c17-8596-7baa7744d504",
+			},
+			expectedGauges: []expectedGauge{
+				expectedGauge{Key: skippedEntryEventID, Value: 0},
+				expectedGauge{Key: nodeAliasesByEntryID, Value: 0},
+				expectedGauge{Key: nodeAliasesBySelector, Value: 0},
+				expectedGauge{Key: entriesByEntryID, Value: 5},
+				expectedGauge{Key: entriesByParentID, Value: 5},
+			},
+		},
+		{
+			name: "initial load loads five registration entries, in 2 pages",
+			setup: &entryScenarioSetup{
+				pageSize: 3,
+				registrationEntries: []*common.RegistrationEntry{
+					&common.RegistrationEntry{
+						EntryId:  "6837984a-bc44-462b-9ca6-5cd59be35066",
+						ParentId: "spiffe://example.org/test_node_1",
+						SpiffeId: "spiffe://example.org/test_job_1",
+						Selectors: []*common.Selector{
+							{Type: "testjob", Value: "1"},
+						},
+					},
+					&common.RegistrationEntry{
+						EntryId:  "47c96201-a4b1-4116-97fe-8aa9c2440aad",
+						ParentId: "spiffe://example.org/test_node_1",
+						SpiffeId: "spiffe://example.org/test_job_2",
+						Selectors: []*common.Selector{
+							{Type: "testjob", Value: "2"},
+						},
+					},
+					&common.RegistrationEntry{
+						EntryId:  "1d78521b-cc92-47c1-85a5-28ce47f121f2",
+						ParentId: "spiffe://example.org/test_node_2",
+						SpiffeId: "spiffe://example.org/test_job_3",
+						Selectors: []*common.Selector{
+							{Type: "testjob", Value: "3"},
+						},
+					},
+					&common.RegistrationEntry{
+						EntryId:  "8cbf7d48-9d43-41ae-ab63-77d66891f948",
+						ParentId: "spiffe://example.org/test_node_2",
+						SpiffeId: "spiffe://example.org/test_job_4",
+						Selectors: []*common.Selector{
+							{Type: "testjob", Value: "4"},
+						},
+					},
+					&common.RegistrationEntry{
+						EntryId:  "354c16f4-4e61-4c17-8596-7baa7744d504",
+						ParentId: "spiffe://example.org/test_node_2",
+						SpiffeId: "spiffe://example.org/test_job_5",
+						Selectors: []*common.Selector{
+							{Type: "testjob", Value: "5"},
+						},
+					},
+				},
+			},
+			expectedRegistrationEntries: []string{
+				"6837984a-bc44-462b-9ca6-5cd59be35066",
+				"47c96201-a4b1-4116-97fe-8aa9c2440aad",
+				"1d78521b-cc92-47c1-85a5-28ce47f121f2",
+				"8cbf7d48-9d43-41ae-ab63-77d66891f948",
+				"354c16f4-4e61-4c17-8596-7baa7744d504",
+			},
+			expectedGauges: []expectedGauge{
+				expectedGauge{Key: skippedEntryEventID, Value: 0},
+				expectedGauge{Key: nodeAliasesByEntryID, Value: 0},
+				expectedGauge{Key: nodeAliasesBySelector, Value: 0},
+				expectedGauge{Key: entriesByEntryID, Value: 5},
+				expectedGauge{Key: entriesByParentID, Value: 5},
+			},
+		},
+	} {
+		t.Run(tt.name, func(t *testing.T) {
+			scenario := NewEntryScenario(t, tt.setup)
+			registrationEntries, err := scenario.buildRegistrationEntriesCache()
+
+			if tt.expectedError != "" {
+				t.Logf("expecting error: %s\n", tt.expectedError)
+				require.ErrorContains(t, err, tt.expectedError)
+				return
+			}
+			require.NoError(t, err)
+
+			cacheStats := registrationEntries.cache.Stats()
+			t.Logf("%s: cache stats %+v\n", tt.name, cacheStats)
+			require.Equal(t, len(tt.expectedRegistrationEntries), cacheStats.EntriesByEntryID,
+				"wrong number of entries by ID")
+
+			// For now, the only way to ensure the desired entry IDs are present is
+			// to remove the desired IDs and check that the count is zero.
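+			// Removing every expected ID and then asserting an empty cache proves
+			// exact membership, not just a matching count.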
+ for _, expectedRegistrationEntry := range tt.expectedRegistrationEntries { + registrationEntries.cache.RemoveEntry(expectedRegistrationEntry) + } + cacheStats = registrationEntries.cache.Stats() + require.Equal(t, 0, cacheStats.EntriesByEntryID, + "clearing all expected entry ids didn't clear cache") + + var lastMetrics map[string]int = make(map[string]int) + for _, metricItem := range scenario.metrics.AllMetrics() { + if metricItem.Type == fakemetrics.SetGaugeType { + key := strings.Join(metricItem.Key, " ") + lastMetrics[key] = int(metricItem.Val) + t.Logf("metricItem: %+v\n", metricItem) + } + } + + for _, expectedGauge := range tt.expectedGauges { + key := strings.Join(expectedGauge.Key, " ") + value, exists := lastMetrics[key] + require.True(t, exists, "No metric value for %q", key) + require.Equal(t, expectedGauge.Value, value, "unexpected final metric value for %q", key) + } + + require.Zero(t, scenario.hook.Entries) }) - require.NoError(t, err) } +} +func TestSearchBeforeFirstEntryEvent(t *testing.T) { for _, tt := range []struct { - name string - pageSize int32 - err string + name string + setup *entryScenarioSetup + + waitToPoll time.Duration + eventsBeforeFirst []uint + polledEvents []*datastore.RegistrationEntryEvent + errors []error + + expectedError error + expectedEventsBeforeFirst []uint + expectedFetches []string }{ { - name: "Page size of 0", - pageSize: 0, - err: "cannot paginate with pagesize = 0", + name: "first event not loaded", + setup: &entryScenarioSetup{ + pageSize: 1024, + }, + + expectedEventsBeforeFirst: []uint{}, + expectedFetches: []string{}, + }, + { + name: "before first event arrived, after transaction timeout", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntries: defaultRegistrationEntries, + registrationEntryEvents: defaultRegistrationEntryEventsStartingAt60, + }, + + waitToPoll: time.Duration(2) * defaultSQLTransactionTimeout, + // even with new before first events, they shouldn't load + polledEvents: []*datastore.RegistrationEntryEvent{ + &datastore.RegistrationEntryEvent{ + EventID: 58, + EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + }, + + expectedEventsBeforeFirst: []uint{}, + expectedFetches: NoEntryFetches, + }, + { + name: "no before first events", + + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntries: defaultRegistrationEntries, + registrationEntryEvents: defaultRegistrationEntryEventsStartingAt60, + }, + polledEvents: []*datastore.RegistrationEntryEvent{}, + + expectedEventsBeforeFirst: []uint{}, + expectedFetches: []string{}, + }, + { + name: "new before first event", + + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntries: defaultRegistrationEntries, + registrationEntryEvents: defaultRegistrationEntryEventsStartingAt60, + }, + polledEvents: []*datastore.RegistrationEntryEvent{ + &datastore.RegistrationEntryEvent{ + EventID: 58, + EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + }, + + expectedEventsBeforeFirst: []uint{58}, + expectedFetches: []string{ + "6837984a-bc44-462b-9ca6-5cd59be35066", + }, }, { - name: "Page size of half the entries", - pageSize: int32(numEntries / 2), + name: "new after last event", + + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntries: defaultRegistrationEntries, + registrationEntryEvents: defaultRegistrationEntryEventsStartingAt60, + }, + polledEvents: []*datastore.RegistrationEntryEvent{ + &datastore.RegistrationEntryEvent{ + EventID: 64, + EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + }, + + 
expectedEventsBeforeFirst: []uint{}, + expectedFetches: []string{}, }, { - name: "Page size of all the entries", - pageSize: int32(numEntries), + name: "previously seen before first event", + + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntries: defaultRegistrationEntries, + registrationEntryEvents: defaultRegistrationEntryEventsStartingAt60, + }, + eventsBeforeFirst: []uint{58}, + polledEvents: []*datastore.RegistrationEntryEvent{ + &datastore.RegistrationEntryEvent{ + EventID: 58, + EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + }, + + expectedEventsBeforeFirst: []uint{58}, + expectedFetches: []string{}, }, { - name: "Page size of all the entries + 1", - pageSize: int32(numEntries + 1), + name: "previously seen before first event and after last event", + + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntries: defaultRegistrationEntries, + registrationEntryEvents: defaultRegistrationEntryEventsStartingAt60, + }, + eventsBeforeFirst: []uint{58}, + polledEvents: []*datastore.RegistrationEntryEvent{ + &datastore.RegistrationEntryEvent{ + EventID: defaultFirstEntryEvent - 2, + EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + &datastore.RegistrationEntryEvent{ + EventID: defaultLastEntryEvent + 2, + EntryID: "47c96201-a4b1-4116-97fe-8aa9c2440aad", + }, + }, + + expectedEventsBeforeFirst: []uint{defaultFirstEntryEvent - 2}, + expectedFetches: []string{}, + }, + { + name: "five new before first events", + + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntries: defaultRegistrationEntries, + registrationEntryEvents: defaultRegistrationEntryEventsStartingAt60, + }, + polledEvents: []*datastore.RegistrationEntryEvent{ + &datastore.RegistrationEntryEvent{ + EventID: 48, + EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + &datastore.RegistrationEntryEvent{ + EventID: 49, + EntryID: "47c96201-a4b1-4116-97fe-8aa9c2440aad", + }, + &datastore.RegistrationEntryEvent{ + EventID: 53, + EntryID: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + }, + &datastore.RegistrationEntryEvent{ + EventID: 56, + EntryID: "8cbf7d48-9d43-41ae-ab63-77d66891f948", + }, + &datastore.RegistrationEntryEvent{ + EventID: 57, + EntryID: "354c16f4-4e61-4c17-8596-7baa7744d504", + }, + }, + + expectedEventsBeforeFirst: []uint{48, 49, 53, 56, 57}, + expectedFetches: []string{ + "6837984a-bc44-462b-9ca6-5cd59be35066", + "47c96201-a4b1-4116-97fe-8aa9c2440aad", + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + "8cbf7d48-9d43-41ae-ab63-77d66891f948", + "354c16f4-4e61-4c17-8596-7baa7744d504", + }, + }, + { + name: "five new before first events, one after last event", + + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntries: defaultRegistrationEntries, + registrationEntryEvents: defaultRegistrationEntryEventsStartingAt60, + }, + polledEvents: []*datastore.RegistrationEntryEvent{ + &datastore.RegistrationEntryEvent{ + EventID: 48, + EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + &datastore.RegistrationEntryEvent{ + EventID: 49, + EntryID: "47c96201-a4b1-4116-97fe-8aa9c2440aad", + }, + &datastore.RegistrationEntryEvent{ + EventID: 53, + EntryID: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + }, + &datastore.RegistrationEntryEvent{ + EventID: 56, + EntryID: "8cbf7d48-9d43-41ae-ab63-77d66891f948", + }, + &datastore.RegistrationEntryEvent{ + EventID: defaultLastEntryEvent + 1, + EntryID: "354c16f4-4e61-4c17-8596-7baa7744d504", + }, + }, + + expectedEventsBeforeFirst: []uint{48, 49, 53, 56}, + expectedFetches: []string{ + "6837984a-bc44-462b-9ca6-5cd59be35066", + 
"47c96201-a4b1-4116-97fe-8aa9c2440aad", + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + "8cbf7d48-9d43-41ae-ab63-77d66891f948", + }, + }, + { + name: "five before first events, two previously seen", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntries: defaultRegistrationEntries, + registrationEntryEvents: defaultRegistrationEntryEventsStartingAt60, + }, + + eventsBeforeFirst: []uint{48, 49}, + polledEvents: []*datastore.RegistrationEntryEvent{ + &datastore.RegistrationEntryEvent{ + EventID: 48, + EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + &datastore.RegistrationEntryEvent{ + EventID: 49, + EntryID: "47c96201-a4b1-4116-97fe-8aa9c2440aad", + }, + &datastore.RegistrationEntryEvent{ + EventID: 53, + EntryID: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + }, + &datastore.RegistrationEntryEvent{ + EventID: 56, + EntryID: "8cbf7d48-9d43-41ae-ab63-77d66891f948", + }, + &datastore.RegistrationEntryEvent{ + EventID: 57, + EntryID: "354c16f4-4e61-4c17-8596-7baa7744d504", + }, + }, + + expectedEventsBeforeFirst: []uint{48, 49, 53, 56, 57}, + expectedFetches: []string{ + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + "8cbf7d48-9d43-41ae-ab63-77d66891f948", + "354c16f4-4e61-4c17-8596-7baa7744d504", + }, + }, + { + name: "five before first events, two previously seen, one after last event", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntries: defaultRegistrationEntries, + registrationEntryEvents: defaultRegistrationEntryEventsStartingAt60, + }, + eventsBeforeFirst: []uint{48, 49}, + polledEvents: []*datastore.RegistrationEntryEvent{ + &datastore.RegistrationEntryEvent{ + EventID: 48, + EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + &datastore.RegistrationEntryEvent{ + EventID: 49, + EntryID: "47c96201-a4b1-4116-97fe-8aa9c2440aad", + }, + &datastore.RegistrationEntryEvent{ + EventID: 53, + EntryID: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + }, + &datastore.RegistrationEntryEvent{ + EventID: 56, + EntryID: "8cbf7d48-9d43-41ae-ab63-77d66891f948", + }, + &datastore.RegistrationEntryEvent{ + EventID: defaultLastEntryEvent + 1, + EntryID: "354c16f4-4e61-4c17-8596-7baa7744d504", + }, + }, + + expectedEventsBeforeFirst: []uint{48, 49, 53, 56}, + expectedFetches: []string{ + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + "8cbf7d48-9d43-41ae-ab63-77d66891f948", + }, + }, + { + name: "five before first events, five previously seen", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntries: defaultRegistrationEntries, + registrationEntryEvents: defaultRegistrationEntryEventsStartingAt60, + }, + + eventsBeforeFirst: []uint{48, 49, 53, 56, 57}, + polledEvents: []*datastore.RegistrationEntryEvent{ + &datastore.RegistrationEntryEvent{ + EventID: 48, + EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + &datastore.RegistrationEntryEvent{ + EventID: 49, + EntryID: "47c96201-a4b1-4116-97fe-8aa9c2440aad", + }, + &datastore.RegistrationEntryEvent{ + EventID: 53, + EntryID: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + }, + &datastore.RegistrationEntryEvent{ + EventID: 56, + EntryID: "8cbf7d48-9d43-41ae-ab63-77d66891f948", + }, + &datastore.RegistrationEntryEvent{ + EventID: 57, + EntryID: "354c16f4-4e61-4c17-8596-7baa7744d504", + }, + }, + + expectedEventsBeforeFirst: []uint{48, 49, 53, 56, 57}, + expectedFetches: []string{}, + }, + { + name: "five before first events, five previously seen, with after last event", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntries: defaultRegistrationEntries, + registrationEntryEvents: 
defaultRegistrationEntryEventsStartingAt60,
+			},
+
+			eventsBeforeFirst: []uint{48, 49, 53, 56, 57},
+			polledEvents: []*datastore.RegistrationEntryEvent{
+				&datastore.RegistrationEntryEvent{
+					EventID: 48,
+					EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066",
+				},
+				&datastore.RegistrationEntryEvent{
+					EventID: 49,
+					EntryID: "47c96201-a4b1-4116-97fe-8aa9c2440aad",
+				},
+				&datastore.RegistrationEntryEvent{
+					EventID: 53,
+					EntryID: "1d78521b-cc92-47c1-85a5-28ce47f121f2",
+				},
+				&datastore.RegistrationEntryEvent{
+					EventID: 56,
+					EntryID: "8cbf7d48-9d43-41ae-ab63-77d66891f948",
+				},
+				&datastore.RegistrationEntryEvent{
+					EventID: 57,
+					EntryID: "354c16f4-4e61-4c17-8596-7baa7744d504",
+				},
+				&datastore.RegistrationEntryEvent{
+					EventID: defaultLastEntryEvent + 1,
+					EntryID: "aeb603b2-e1d1-4832-8809-60a1d14b42e0",
+				},
+			},
+
+			expectedEventsBeforeFirst: []uint{48, 49, 53, 56, 57},
+			expectedFetches:           []string{},
+		},
+	} {
 		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
-			cache := authorizedentries.NewCache(clk)
-			metrics := fakemetrics.New()
+			scenario := NewEntryScenario(t, tt.setup)
+			registrationEntries, err := scenario.buildRegistrationEntriesCache()
-			registrationEntries, err := buildRegistrationEntriesCache(ctx, log, metrics, ds, clk, cache, tt.pageSize, defaultSQLTransactionTimeout)
-			if tt.err != "" {
-				require.ErrorContains(t, err, tt.err)
-				return
+			require.NoError(t, err)
+
+			if tt.waitToPoll == 0 {
+				scenario.clk.Add(time.Duration(1) * defaultCacheReloadInterval)
+			} else {
+				scenario.clk.Add(tt.waitToPoll)
 			}
-			require.NoError(t, err)
-			require.False(t, registrationEntries.firstEventTime.IsZero())
+			for _, event := range tt.eventsBeforeFirst {
+				registrationEntries.eventsBeforeFirst[event] = struct{}{}
+			}
+
+			for _, event := range tt.polledEvents {
+				err = scenario.ds.CreateRegistrationEntryEventForTesting(scenario.ctx, event)
+				require.NoError(t, err, "error while setting up test")
+			}
+
+			err = registrationEntries.searchBeforeFirstEvent(scenario.ctx)
+			require.NoError(t, err, "error while running the test")
+
+			require.ElementsMatch(t, tt.expectedEventsBeforeFirst, slices.Collect(maps.Keys(registrationEntries.eventsBeforeFirst)), "expected events before tracking mismatch")
+			require.ElementsMatch(t, tt.expectedFetches, slices.Collect(maps.Keys(registrationEntries.fetchEntries)), "expected fetches mismatch")
+
+			require.Zero(t, scenario.hook.Entries)
+		})
+	}
+}
+
+func TestSelectedPolledEntryEvents(t *testing.T) {
+	for _, tt := range []struct {
+		name  string
+		setup *entryScenarioSetup
+
+		polling         []uint
+		events          []*datastore.RegistrationEntryEvent
+		expectedFetches []string
+	}{
+		// polling is based on the eventTracker, not on events in the database
+		{
+			name:   "nothing to poll, no action taken, no events",
+			events: []*datastore.RegistrationEntryEvent{},
+			setup: &entryScenarioSetup{
+				pageSize: 1024,
+			},
+		},
+		{
+			name: "nothing to poll, no action taken, one event",
+			setup: &entryScenarioSetup{
+				pageSize: 1024,
+				registrationEntryEvents: []*datastore.RegistrationEntryEvent{
+					&datastore.RegistrationEntryEvent{
+						EventID: 100,
+						EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066",
+					},
+				},
+			},
+		},
+		{
+			name: "nothing to poll, no action taken, five events",
+			setup: &entryScenarioSetup{
+				pageSize: 1024,
+				registrationEntryEvents: []*datastore.RegistrationEntryEvent{
+					&datastore.RegistrationEntryEvent{
+						EventID: 101,
+						EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066",
+					},
+					&datastore.RegistrationEntryEvent{
+						EventID: 102,
+						EntryID: 
"47c96201-a4b1-4116-97fe-8aa9c2440aad", + }, + &datastore.RegistrationEntryEvent{ + EventID: 103, + EntryID: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + }, + &datastore.RegistrationEntryEvent{ + EventID: 104, + EntryID: "8cbf7d48-9d43-41ae-ab63-77d66891f948", + }, + &datastore.RegistrationEntryEvent{ + EventID: 105, + EntryID: "354c16f4-4e61-4c17-8596-7baa7744d504", + }, + }, + }, + }, + { + name: "polling one item, not found", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntryEvents: []*datastore.RegistrationEntryEvent{ + &datastore.RegistrationEntryEvent{ + EventID: 101, + EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + &datastore.RegistrationEntryEvent{ + EventID: 102, + EntryID: "47c96201-a4b1-4116-97fe-8aa9c2440aad", + }, + &datastore.RegistrationEntryEvent{ + EventID: 104, + EntryID: "8cbf7d48-9d43-41ae-ab63-77d66891f948", + }, + &datastore.RegistrationEntryEvent{ + EventID: 105, + EntryID: "354c16f4-4e61-4c17-8596-7baa7744d504", + }, + }, + }, + polling: []uint{103}, + }, + { + name: "polling five items, not found", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntryEvents: []*datastore.RegistrationEntryEvent{ + &datastore.RegistrationEntryEvent{ + EventID: 101, + EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + &datastore.RegistrationEntryEvent{ + EventID: 107, + EntryID: "c3f4ada0-3f8d-421e-b5d1-83aaee203d94", + }, + }, + }, + polling: []uint{102, 103, 104, 105, 106}, + }, + { + name: "polling one item, found", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntryEvents: []*datastore.RegistrationEntryEvent{ + &datastore.RegistrationEntryEvent{ + EventID: 101, + EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + &datastore.RegistrationEntryEvent{ + EventID: 102, + EntryID: "47c96201-a4b1-4116-97fe-8aa9c2440aad", + }, + &datastore.RegistrationEntryEvent{ + EventID: 103, + EntryID: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + }, + }, + }, + polling: []uint{102}, + + expectedFetches: []string{ + "47c96201-a4b1-4116-97fe-8aa9c2440aad", + }, + }, + { + name: "polling five items, two found", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntryEvents: []*datastore.RegistrationEntryEvent{ + &datastore.RegistrationEntryEvent{ + EventID: 101, + EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + &datastore.RegistrationEntryEvent{ + EventID: 103, + EntryID: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + }, + &datastore.RegistrationEntryEvent{ + EventID: 106, + EntryID: "aeb603b2-e1d1-4832-8809-60a1d14b42e0", + }, + &datastore.RegistrationEntryEvent{ + EventID: 107, + EntryID: "c3f4ada0-3f8d-421e-b5d1-83aaee203d94", + }, + }, + }, + polling: []uint{102, 103, 104, 105, 106}, - entries := cache.GetAuthorizedEntries(agentID) - require.Equal(t, numEntries, len(entries)) + expectedFetches: []string{ + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + "aeb603b2-e1d1-4832-8809-60a1d14b42e0", + }, + }, + { + name: "polling five items, five found", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntryEvents: []*datastore.RegistrationEntryEvent{ + &datastore.RegistrationEntryEvent{ + EventID: 101, + EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + &datastore.RegistrationEntryEvent{ + EventID: 102, + EntryID: "47c96201-a4b1-4116-97fe-8aa9c2440aad", + }, + &datastore.RegistrationEntryEvent{ + EventID: 103, + EntryID: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + }, + &datastore.RegistrationEntryEvent{ + EventID: 104, + EntryID: "8cbf7d48-9d43-41ae-ab63-77d66891f948", + }, + &datastore.RegistrationEntryEvent{ + EventID: 
105, + EntryID: "354c16f4-4e61-4c17-8596-7baa7744d504", + }, + &datastore.RegistrationEntryEvent{ + EventID: 106, + EntryID: "aeb603b2-e1d1-4832-8809-60a1d14b42e0", + }, + &datastore.RegistrationEntryEvent{ + EventID: 107, + EntryID: "c3f4ada0-3f8d-421e-b5d1-83aaee203d94", + }, + }, + }, + polling: []uint{102, 103, 104, 105, 106}, - spiffeIDs := make([]string, 0, numEntries) - for _, entry := range entries { - spiffeID, err := idutil.IDFromProto(entry.SpiffeId) - require.NoError(t, err) - spiffeIDs = append(spiffeIDs, spiffeID.String()) + expectedFetches: []string{ + "47c96201-a4b1-4116-97fe-8aa9c2440aad", + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + "8cbf7d48-9d43-41ae-ab63-77d66891f948", + "354c16f4-4e61-4c17-8596-7baa7744d504", + "aeb603b2-e1d1-4832-8809-60a1d14b42e0", + }, + }, + } { + tt := tt + t.Run(tt.name, func(t *testing.T) { + scenario := NewEntryScenario(t, tt.setup) + registrationEntries, err := scenario.buildRegistrationEntriesCache() + require.NoError(t, err) + + // initialize the event tracker + for _, event := range tt.polling { + registrationEntries.eventTracker.StartTracking(event) } - sort.Strings(spiffeIDs) + // poll the events + registrationEntries.selectPolledEvents(scenario.ctx) - for i, spiffeID := range spiffeIDs { - require.Equal(t, "spiffe://example.org/workload"+strconv.Itoa(i), spiffeID) + require.ElementsMatch(t, tt.expectedFetches, slices.Collect(maps.Keys(registrationEntries.fetchEntries))) + require.Zero(t, scenario.hook.Entries) + }) + } +} + +func TestScanForNewEntryEvents(t *testing.T) { + for _, tt := range []struct { + name string + setup *entryScenarioSetup + + newEvents []*datastore.RegistrationEntryEvent + + expectedTrackedEvents []uint + expectedFetches []string + }{ + { + name: "no new events, no first event", + setup: &entryScenarioSetup{ + pageSize: 1024, + }, + + expectedTrackedEvents: []uint{}, + expectedFetches: []string{}, + }, + { + name: "no new event, with first event", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntryEvents: []*datastore.RegistrationEntryEvent{ + &datastore.RegistrationEntryEvent{ + EventID: 101, + EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + }, + }, + + expectedTrackedEvents: []uint{}, + expectedFetches: []string{}, + }, + { + name: "one new event", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntryEvents: []*datastore.RegistrationEntryEvent{ + &datastore.RegistrationEntryEvent{ + EventID: 101, + EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + }, + }, + newEvents: []*datastore.RegistrationEntryEvent{ + &datastore.RegistrationEntryEvent{ + EventID: 102, + EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + }, + + expectedTrackedEvents: []uint{}, + expectedFetches: []string{ + "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + }, + { + name: "one new event, skipping an event", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntryEvents: []*datastore.RegistrationEntryEvent{ + &datastore.RegistrationEntryEvent{ + EventID: 101, + EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + }, + }, + newEvents: []*datastore.RegistrationEntryEvent{ + &datastore.RegistrationEntryEvent{ + EventID: 103, + EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + }, + + expectedTrackedEvents: []uint{102}, + expectedFetches: []string{ + "6837984a-bc44-462b-9ca6-5cd59be35066", + }, + }, + { + name: "two new events, same registered event", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntryEvents: []*datastore.RegistrationEntryEvent{ + 
&datastore.RegistrationEntryEvent{
+						EventID: 101,
+						EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066",
+					},
+				},
+			},
+			newEvents: []*datastore.RegistrationEntryEvent{
+				&datastore.RegistrationEntryEvent{
+					EventID: 102,
+					EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066",
+				},
+				&datastore.RegistrationEntryEvent{
+					EventID: 103,
+					EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066",
+				},
+			},
+
+			expectedTrackedEvents: []uint{},
+			expectedFetches: []string{
+				"6837984a-bc44-462b-9ca6-5cd59be35066",
+			},
+		},
+		{
+			name: "two new events, different registration entries",
+			setup: &entryScenarioSetup{
+				pageSize: 1024,
+				registrationEntryEvents: []*datastore.RegistrationEntryEvent{
+					&datastore.RegistrationEntryEvent{
+						EventID: 101,
+						EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066",
+					},
+				},
+			},
+			newEvents: []*datastore.RegistrationEntryEvent{
+				&datastore.RegistrationEntryEvent{
+					EventID: 102,
+					EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066",
+				},
+				&datastore.RegistrationEntryEvent{
+					EventID: 103,
+					EntryID: "47c96201-a4b1-4116-97fe-8aa9c2440aad",
+				},
+			},
+
+			expectedTrackedEvents: []uint{},
+			expectedFetches: []string{
+				"6837984a-bc44-462b-9ca6-5cd59be35066",
+				"47c96201-a4b1-4116-97fe-8aa9c2440aad",
+			},
+		},
+		{
+			name: "two new events, with a skipped event",
+			setup: &entryScenarioSetup{
+				pageSize: 1024,
+				registrationEntryEvents: []*datastore.RegistrationEntryEvent{
+					&datastore.RegistrationEntryEvent{
+						EventID: 101,
+						EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066",
+					},
+				},
+			},
+			newEvents: []*datastore.RegistrationEntryEvent{
+				&datastore.RegistrationEntryEvent{
+					EventID: 102,
+					EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066",
+				},
+				&datastore.RegistrationEntryEvent{
+					EventID: 104,
+					EntryID: "47c96201-a4b1-4116-97fe-8aa9c2440aad",
+				},
+			},
+
+			expectedTrackedEvents: []uint{103},
+			expectedFetches: []string{
+				"6837984a-bc44-462b-9ca6-5cd59be35066",
+				"47c96201-a4b1-4116-97fe-8aa9c2440aad",
+			},
+		},
+		{
+			name: "two new events, with three skipped events",
+			setup: &entryScenarioSetup{
+				pageSize: 1024,
+				registrationEntryEvents: []*datastore.RegistrationEntryEvent{
+					&datastore.RegistrationEntryEvent{
+						EventID: 101,
+						EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066",
+					},
+				},
+			},
+			newEvents: []*datastore.RegistrationEntryEvent{
+				&datastore.RegistrationEntryEvent{
+					EventID: 102,
+					EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066",
+				},
+				&datastore.RegistrationEntryEvent{
+					EventID: 106,
+					EntryID: "47c96201-a4b1-4116-97fe-8aa9c2440aad",
+				},
+			},
+
+			expectedTrackedEvents: []uint{103, 104, 105},
+			expectedFetches: []string{
+				"6837984a-bc44-462b-9ca6-5cd59be35066",
+				"47c96201-a4b1-4116-97fe-8aa9c2440aad",
+			},
+		},
+		{
+			name: "five events, four new events, two skip regions",
+			setup: &entryScenarioSetup{
+				pageSize: 1024,
+				registrationEntryEvents: []*datastore.RegistrationEntryEvent{
+					&datastore.RegistrationEntryEvent{
+						EventID: 101,
+						EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066",
+					},
+					&datastore.RegistrationEntryEvent{
+						EventID: 102,
+						EntryID: "47c96201-a4b1-4116-97fe-8aa9c2440aad",
+					},
+					&datastore.RegistrationEntryEvent{
+						EventID: 103,
+						EntryID: "1d78521b-cc92-47c1-85a5-28ce47f121f2",
+					},
+					&datastore.RegistrationEntryEvent{
+						EventID: 104,
+						EntryID: "8cbf7d48-9d43-41ae-ab63-77d66891f948",
+					},
+					&datastore.RegistrationEntryEvent{
+						EventID: 105,
+						EntryID: "354c16f4-4e61-4c17-8596-7baa7744d504",
+					},
+				},
+			},
+			newEvents: []*datastore.RegistrationEntryEvent{
+				&datastore.RegistrationEntryEvent{
+					EventID: 108,
+					EntryID: "6837984a-bc44-462b-9ca6-5cd59be35066",
+				},
+				&datastore.RegistrationEntryEvent{
+					EventID: 109,
+					EntryID: "47c96201-a4b1-4116-97fe-8aa9c2440aad",
+				},
+				&datastore.RegistrationEntryEvent{
+					EventID: 110,
+					EntryID: "47c96201-a4b1-4116-97fe-8aa9c2440aad",
+				},
+				&datastore.RegistrationEntryEvent{
+					EventID: 112,
+					EntryID: "c3f4ada0-3f8d-421e-b5d1-83aaee203d94",
+				},
+			},
+
+			expectedTrackedEvents: []uint{106, 107, 111},
+			expectedFetches: []string{
+				"6837984a-bc44-462b-9ca6-5cd59be35066",
+				"47c96201-a4b1-4116-97fe-8aa9c2440aad",
+				"c3f4ada0-3f8d-421e-b5d1-83aaee203d94",
+			},
+		},
+	} {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			scenario := NewEntryScenario(t, tt.setup)
+			registrationEntries, err := scenario.buildRegistrationEntriesCache()
+			require.NoError(t, err)
+
+			for _, newEvent := range tt.newEvents {
+				err = scenario.ds.CreateRegistrationEntryEventForTesting(scenario.ctx, newEvent)
+				require.NoError(t, err, "error while setting up test")
 			}
+			err = registrationEntries.scanForNewEvents(scenario.ctx)
+			require.NoError(t, err, "error while running the test")
+
+			require.ElementsMatch(t, tt.expectedTrackedEvents, slices.Collect(maps.Keys(registrationEntries.eventTracker.events)))
+			require.ElementsMatch(t, tt.expectedFetches, slices.Collect(maps.Keys(registrationEntries.fetchEntries)))
+			require.Zero(t, scenario.hook.Entries)
 		})
 	}
 }
 
-func TestRegistrationEntriesCacheMissedEventNotFound(t *testing.T) {
-	ctx := context.Background()
-	log, hook := test.NewNullLogger()
-	log.SetLevel(logrus.DebugLevel)
-	clk := clock.NewMock(t)
-	ds := fakedatastore.New(t)
-	cache := authorizedentries.NewCache(clk)
-	metrics := fakemetrics.New()
+func TestUpdateRegistrationEntriesCache(t *testing.T) {
+	for _, tt := range []struct {
+		name                      string
+		setup                     *entryScenarioSetup
+		createRegistrationEntries []*common.RegistrationEntry // Entries created after setup
+		deleteRegistrationEntries []string                    // Entries deleted after setup
+		fetchEntries              []string
+
+		expectedAuthorizedEntries []string
+	}{
+		{
+			name: "empty cache, no fetch entries",
+			setup: &entryScenarioSetup{
+				pageSize: 1024,
+			},
+			fetchEntries: []string{},
+
+			expectedAuthorizedEntries: []string{},
+		},
+		{
+			name: "empty cache, fetch one entry, as a new entry",
+			setup: &entryScenarioSetup{
+				pageSize: 1024,
+			},
+			createRegistrationEntries: []*common.RegistrationEntry{
+				&common.RegistrationEntry{
+					EntryId:  "1d78521b-cc92-47c1-85a5-28ce47f121f2",
+					ParentId: "spiffe://example.org/test_node_2",
+					SpiffeId: "spiffe://example.org/test_job_3",
+					Selectors: []*common.Selector{
+						{Type: "testjob", Value: "3"},
+					},
+				},
+			},
+			fetchEntries: []string{
+				"1d78521b-cc92-47c1-85a5-28ce47f121f2",
+			},
+
+			expectedAuthorizedEntries: []string{
+				"1d78521b-cc92-47c1-85a5-28ce47f121f2",
+			},
+		},
+		{
+			name: "empty cache, fetch one entry, as a delete",
+			setup: &entryScenarioSetup{
+				pageSize: 1024,
+			},
+			fetchEntries: []string{
+				"1d78521b-cc92-47c1-85a5-28ce47f121f2",
+			},
+		},
+		{
+			name: "empty cache, fetch five entries, all new entries",
+			setup: &entryScenarioSetup{
+				pageSize: 1024,
+			},
+			createRegistrationEntries: []*common.RegistrationEntry{
+				&common.RegistrationEntry{
+					EntryId:  "6837984a-bc44-462b-9ca6-5cd59be35066",
+					ParentId: "spiffe://example.org/test_node_1",
+					SpiffeId: "spiffe://example.org/test_job_1",
+					Selectors: []*common.Selector{
+						{Type: "testjob", Value: "1"},
+					},
+				},
+				&common.RegistrationEntry{
+					EntryId:  "47c96201-a4b1-4116-97fe-8aa9c2440aad",
+					ParentId: "spiffe://example.org/test_node_1",
+					SpiffeId: 
"spiffe://example.org/test_job_2", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "2"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_3", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "3"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "8cbf7d48-9d43-41ae-ab63-77d66891f948", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_4", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "4"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "354c16f4-4e61-4c17-8596-7baa7744d504", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_5", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "5"}, + }, + }, + }, + fetchEntries: []string{ + "6837984a-bc44-462b-9ca6-5cd59be35066", + "47c96201-a4b1-4116-97fe-8aa9c2440aad", + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + "8cbf7d48-9d43-41ae-ab63-77d66891f948", + "354c16f4-4e61-4c17-8596-7baa7744d504", + }, - registrationEntries, err := buildRegistrationEntriesCache(ctx, log, metrics, ds, clk, cache, buildCachePageSize, defaultSQLTransactionTimeout) - require.NoError(t, err) - require.NotNil(t, registrationEntries) + expectedAuthorizedEntries: []string{ + "6837984a-bc44-462b-9ca6-5cd59be35066", + "47c96201-a4b1-4116-97fe-8aa9c2440aad", + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + "8cbf7d48-9d43-41ae-ab63-77d66891f948", + "354c16f4-4e61-4c17-8596-7baa7744d504", + }, + }, + { + name: "empty cache, fetch five entries, three new and two deletes", + setup: &entryScenarioSetup{ + pageSize: 1024, + }, + createRegistrationEntries: []*common.RegistrationEntry{ + &common.RegistrationEntry{ + EntryId: "6837984a-bc44-462b-9ca6-5cd59be35066", + ParentId: "spiffe://example.org/test_node_1", + SpiffeId: "spiffe://example.org/test_job_1", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "1"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_3", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "3"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "8cbf7d48-9d43-41ae-ab63-77d66891f948", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_4", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "4"}, + }, + }, + }, + fetchEntries: []string{ + "6837984a-bc44-462b-9ca6-5cd59be35066", + "47c96201-a4b1-4116-97fe-8aa9c2440aad", + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + "8cbf7d48-9d43-41ae-ab63-77d66891f948", + "354c16f4-4e61-4c17-8596-7baa7744d504", + }, + + expectedAuthorizedEntries: []string{ + "6837984a-bc44-462b-9ca6-5cd59be35066", + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + "8cbf7d48-9d43-41ae-ab63-77d66891f948", + }, + }, + { + name: "empty cache, fetch five entries, all deletes", + setup: &entryScenarioSetup{ + pageSize: 1024, + }, + fetchEntries: []string{ + "6837984a-bc44-462b-9ca6-5cd59be35066", + "47c96201-a4b1-4116-97fe-8aa9c2440aad", + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + "8cbf7d48-9d43-41ae-ab63-77d66891f948", + "354c16f4-4e61-4c17-8596-7baa7744d504", + }, + + expectedAuthorizedEntries: []string{}, + }, + { + name: "one entry in cache, no fetch entries", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntries: []*common.RegistrationEntry{ + &common.RegistrationEntry{ + EntryId: 
"1d78521b-cc92-47c1-85a5-28ce47f121f2", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_3", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "3"}, + }, + }, + }, + }, + + expectedAuthorizedEntries: []string{ + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + }, + }, + { + name: "one entry in cache, fetch one entry, as new entry", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntries: []*common.RegistrationEntry{ + &common.RegistrationEntry{ + EntryId: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_3", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "3"}, + }, + }, + }, + }, + createRegistrationEntries: []*common.RegistrationEntry{ + &common.RegistrationEntry{ + EntryId: "8cbf7d48-9d43-41ae-ab63-77d66891f948", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_4", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "4"}, + }, + }, + }, + fetchEntries: []string{ + "8cbf7d48-9d43-41ae-ab63-77d66891f948", + }, + + expectedAuthorizedEntries: []string{ + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + "8cbf7d48-9d43-41ae-ab63-77d66891f948", + }, + }, + { + name: "one entry in cache, fetch one entry, as an update", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntries: []*common.RegistrationEntry{ + &common.RegistrationEntry{ + EntryId: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_3", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "3"}, + }, + }, + }, + }, + fetchEntries: []string{ + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + }, + + expectedAuthorizedEntries: []string{ + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + }, + }, + { + name: "one entry in cache, fetch one entry, as a delete", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntries: []*common.RegistrationEntry{ + &common.RegistrationEntry{ + EntryId: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_3", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "3"}, + }, + }, + }, + }, + deleteRegistrationEntries: []string{ + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + }, + fetchEntries: []string{ + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + }, + + expectedAuthorizedEntries: []string{}, + }, + { + name: "one entry in cache, fetch five entries, all new entries", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntries: []*common.RegistrationEntry{ + &common.RegistrationEntry{ + EntryId: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_3", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "3"}, + }, + }, + }, + }, + createRegistrationEntries: []*common.RegistrationEntry{ + &common.RegistrationEntry{ + EntryId: "6837984a-bc44-462b-9ca6-5cd59be35066", + ParentId: "spiffe://example.org/test_node_1", + SpiffeId: "spiffe://example.org/test_job_1", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "1"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "47c96201-a4b1-4116-97fe-8aa9c2440aad", + ParentId: "spiffe://example.org/test_node_1", + SpiffeId: "spiffe://example.org/test_job_2", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "2"}, + }, + }, + &common.RegistrationEntry{ + EntryId: 
"8cbf7d48-9d43-41ae-ab63-77d66891f948", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_4", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "4"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "354c16f4-4e61-4c17-8596-7baa7744d504", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_5", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "5"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "aeb603b2-e1d1-4832-8809-60a1d14b42e0", + ParentId: "spiffe://example.org/test_node_3", + SpiffeId: "spiffe://example.org/test_job_6", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "6"}, + }, + }, + }, + fetchEntries: []string{ + "6837984a-bc44-462b-9ca6-5cd59be35066", + "47c96201-a4b1-4116-97fe-8aa9c2440aad", + "8cbf7d48-9d43-41ae-ab63-77d66891f948", + "354c16f4-4e61-4c17-8596-7baa7744d504", + "aeb603b2-e1d1-4832-8809-60a1d14b42e0", + }, + + expectedAuthorizedEntries: []string{ + "6837984a-bc44-462b-9ca6-5cd59be35066", + "47c96201-a4b1-4116-97fe-8aa9c2440aad", + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + "8cbf7d48-9d43-41ae-ab63-77d66891f948", + "354c16f4-4e61-4c17-8596-7baa7744d504", + "aeb603b2-e1d1-4832-8809-60a1d14b42e0", + }, + }, + { + name: "one entry in cache, fetch five entries, four new entries and one update", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntries: []*common.RegistrationEntry{ + &common.RegistrationEntry{ + EntryId: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_3", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "3"}, + }, + }, + }, + }, + createRegistrationEntries: []*common.RegistrationEntry{ + &common.RegistrationEntry{ + EntryId: "6837984a-bc44-462b-9ca6-5cd59be35066", + ParentId: "spiffe://example.org/test_node_1", + SpiffeId: "spiffe://example.org/test_job_1", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "1"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "47c96201-a4b1-4116-97fe-8aa9c2440aad", + ParentId: "spiffe://example.org/test_node_1", + SpiffeId: "spiffe://example.org/test_job_2", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "2"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "8cbf7d48-9d43-41ae-ab63-77d66891f948", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_4", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "4"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "354c16f4-4e61-4c17-8596-7baa7744d504", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_5", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "5"}, + }, + }, + }, + fetchEntries: []string{ + "6837984a-bc44-462b-9ca6-5cd59be35066", + "47c96201-a4b1-4116-97fe-8aa9c2440aad", + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + "8cbf7d48-9d43-41ae-ab63-77d66891f948", + "354c16f4-4e61-4c17-8596-7baa7744d504", + }, + + expectedAuthorizedEntries: []string{ + "6837984a-bc44-462b-9ca6-5cd59be35066", + "47c96201-a4b1-4116-97fe-8aa9c2440aad", + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + "8cbf7d48-9d43-41ae-ab63-77d66891f948", + "354c16f4-4e61-4c17-8596-7baa7744d504", + }, + }, + { + name: "one entry in cache, fetch five entries, two new and three deletes", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntries: []*common.RegistrationEntry{ + &common.RegistrationEntry{ + EntryId: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + ParentId: 
"spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_3", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "3"}, + }, + }, + }, + }, + createRegistrationEntries: []*common.RegistrationEntry{ + &common.RegistrationEntry{ + EntryId: "6837984a-bc44-462b-9ca6-5cd59be35066", + ParentId: "spiffe://example.org/test_node_1", + SpiffeId: "spiffe://example.org/test_job_1", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "1"}, + }, + }, + &common.RegistrationEntry{ + EntryId: "47c96201-a4b1-4116-97fe-8aa9c2440aad", + ParentId: "spiffe://example.org/test_node_1", + SpiffeId: "spiffe://example.org/test_job_2", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "2"}, + }, + }, + }, + deleteRegistrationEntries: []string{ + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + }, + fetchEntries: []string{ + "6837984a-bc44-462b-9ca6-5cd59be35066", + "47c96201-a4b1-4116-97fe-8aa9c2440aad", + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + "8cbf7d48-9d43-41ae-ab63-77d66891f948", + "354c16f4-4e61-4c17-8596-7baa7744d504", + }, + + expectedAuthorizedEntries: []string{ + "6837984a-bc44-462b-9ca6-5cd59be35066", + "47c96201-a4b1-4116-97fe-8aa9c2440aad", + }, + }, + { + name: "one entry in cache, fetch five entries, all deletes", + setup: &entryScenarioSetup{ + pageSize: 1024, + registrationEntries: []*common.RegistrationEntry{ + &common.RegistrationEntry{ + EntryId: "1d78521b-cc92-47c1-85a5-28ce47f121f2", + ParentId: "spiffe://example.org/test_node_2", + SpiffeId: "spiffe://example.org/test_job_3", + Selectors: []*common.Selector{ + {Type: "testjob", Value: "3"}, + }, + }, + }, + }, + deleteRegistrationEntries: []string{ + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + }, + fetchEntries: []string{ + "6837984a-bc44-462b-9ca6-5cd59be35066", + "47c96201-a4b1-4116-97fe-8aa9c2440aad", + "1d78521b-cc92-47c1-85a5-28ce47f121f2", + "8cbf7d48-9d43-41ae-ab63-77d66891f948", + "354c16f4-4e61-4c17-8596-7baa7744d504", + }, + + expectedAuthorizedEntries: []string{}, + }, + } { + tt := tt + t.Run(tt.name, func(t *testing.T) { + scenario := NewEntryScenario(t, tt.setup) + registeredEntries, err := scenario.buildRegistrationEntriesCache() + require.NoError(t, err) + for _, registrationEntry := range tt.createRegistrationEntries { + _, err = scenario.ds.CreateRegistrationEntry(scenario.ctx, registrationEntry) + require.NoError(t, err, "error while setting up test") + } + for _, registrationEntry := range tt.deleteRegistrationEntries { + _, err = scenario.ds.DeleteRegistrationEntry(scenario.ctx, registrationEntry) + require.NoError(t, err, "error while setting up test") + } + for _, fetchEntry := range tt.fetchEntries { + registeredEntries.fetchEntries[fetchEntry] = struct{}{} + } + // clear out the events, to prove updates are not event based + err = scenario.ds.PruneRegistrationEntryEvents(scenario.ctx, time.Duration(-5)*time.Hour) + require.NoError(t, err, "error while running the test") + + err = registeredEntries.updateCachedEntries(scenario.ctx) + require.NoError(t, err) + + cacheStats := registeredEntries.cache.Stats() + require.Equal(t, len(tt.expectedAuthorizedEntries), cacheStats.EntriesByEntryID, "wrong number of registered entries by ID") + + // for now, the only way to ensure the desired agent ids are prsent is + // to remove the desired ids and check the count it zero. 
+ for _, expectedAuthorizedId := range tt.expectedAuthorizedEntries { + registeredEntries.cache.RemoveEntry(expectedAuthorizedId) + } + cacheStats = registeredEntries.cache.Stats() + require.Equal(t, 0, cacheStats.EntriesByEntryID, "clearing all expected registered entries didn't clear cache") + }) + } +} + +type entryScenario struct { + ctx context.Context + log *logrus.Logger + hook *test.Hook + clk *clock.Mock + cache *authorizedentries.Cache + metrics *fakemetrics.FakeMetrics + ds *fakedatastore.DataStore + pageSize int32 +} - registrationEntries.missedEvents[1] = clk.Now() - registrationEntries.replayMissedEvents(ctx) - require.Zero(t, len(hook.Entries)) +type entryScenarioSetup struct { + attestedNodes []*common.AttestedNode + attestedNodeEvents []*datastore.AttestedNodeEvent + registrationEntries []*common.RegistrationEntry + registrationEntryEvents []*datastore.RegistrationEntryEvent + err error + pageSize int32 } -func TestRegistrationEntriesSavesMissedStartupEvents(t *testing.T) { +func NewEntryScenario(t *testing.T, setup *entryScenarioSetup) *entryScenario { + t.Helper() ctx := context.Background() log, hook := test.NewNullLogger() log.SetLevel(logrus.DebugLevel) clk := clock.NewMock(t) - ds := fakedatastore.New(t) cache := authorizedentries.NewCache(clk) metrics := fakemetrics.New() + ds := fakedatastore.New(t) + + if setup == nil { + setup = &entryScenarioSetup{} + } - err := ds.CreateRegistrationEntryEventForTesting(ctx, &datastore.RegistrationEntryEvent{ - EventID: 3, - EntryID: "test", - }) - require.NoError(t, err) - - registrationEntries, err := buildRegistrationEntriesCache(ctx, log, metrics, ds, clk, cache, buildCachePageSize, defaultSQLTransactionTimeout) - require.NoError(t, err) - require.NotNil(t, registrationEntries) - require.Equal(t, uint(3), registrationEntries.firstEventID) - - err = ds.CreateRegistrationEntryEventForTesting(ctx, &datastore.RegistrationEntryEvent{ - EventID: 2, - EntryID: "test", - }) - require.NoError(t, err) - - err = registrationEntries.missedStartupEvents(ctx) - require.NoError(t, err) - - // Make sure no dupliate calls are made - ds.AppendNextError(nil) - ds.AppendNextError(errors.New("Duplicate call")) - err = registrationEntries.missedStartupEvents(ctx) - require.NoError(t, err) - require.Equal(t, 0, len(hook.AllEntries())) + var err error + for _, attestedNode := range setup.attestedNodes { + _, err = ds.CreateAttestedNode(ctx, attestedNode) + require.NoError(t, err, "error while setting up test") + } + // prune autocreated node events, to test the event logic in more scenarios + // than possible with autocreated node events. + err = ds.PruneAttestedNodeEvents(ctx, time.Duration(-5)*time.Hour) + require.NoError(t, err, "error while setting up test") + // and then add back the specified node events + for _, event := range setup.attestedNodeEvents { + err = ds.CreateAttestedNodeEventForTesting(ctx, event) + require.NoError(t, err, "error while setting up test") + } + // initialize the database + for _, registrationEntry := range setup.registrationEntries { + _, err = ds.CreateRegistrationEntry(ctx, registrationEntry) + require.NoError(t, err, "error while setting up test") + } + // prune autocreated entry events, to test the event logic in more + // scenarios than possible with autocreated entry events. 
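+	// (A negative olderThan pushes the prune cutoff into the future, so every existing event is removed.)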
+	err = ds.PruneRegistrationEntryEvents(ctx, time.Duration(-5)*time.Hour)
+	require.NoError(t, err, "error while setting up test")
+	// and then add back the specified entry events
+	for _, event := range setup.registrationEntryEvents {
+		err = ds.CreateRegistrationEntryEventForTesting(ctx, event)
+		require.NoError(t, err, "error while setting up test")
+	}
+	// inject db error for buildRegistrationEntriesCache call
+	if setup.err != nil {
+		ds.AppendNextError(setup.err)
+	}
+
+	return &entryScenario{
+		ctx:      ctx,
+		log:      log,
+		hook:     hook,
+		clk:      clk,
+		cache:    cache,
+		metrics:  metrics,
+		ds:       ds,
+		pageSize: setup.pageSize,
+	}
+}
+
+func (s *entryScenario) buildRegistrationEntriesCache() (*registrationEntries, error) {
+	registrationEntries, err := buildRegistrationEntriesCache(s.ctx, s.log, s.metrics, s.ds, s.clk, s.cache, s.pageSize, defaultCacheReloadInterval, defaultSQLTransactionTimeout)
+	if registrationEntries != nil {
+		// clear out the fetches
+		for entry := range registrationEntries.fetchEntries {
+			delete(registrationEntries.fetchEntries, entry)
+		}
+	}
+	return registrationEntries, err
+}
diff --git a/pkg/server/endpoints/authorized_entryfetcher_test.go b/pkg/server/endpoints/authorized_entryfetcher_test.go
index 761ce966ed..e0dac53201 100644
--- a/pkg/server/endpoints/authorized_entryfetcher_test.go
+++ b/pkg/server/endpoints/authorized_entryfetcher_test.go
@@ -31,6 +31,21 @@ func TestNewAuthorizedEntryFetcherWithEventsBasedCache(t *testing.T) {
 	assert.NoError(t, err)
 	assert.NotNil(t, ef)
 
+	buildMetrics := []fakemetrics.MetricItem{
+		agentsByIDMetric(0),
+		agentsByIDExpiresAtMetric(0),
+		nodeAliasesByEntryIDMetric(0),
+		nodeAliasesBySelectorMetric(0),
+		nodeSkippedEventMetric(0),
+
+		entriesByEntryIDMetric(0),
+		entriesByParentIDMetric(0),
+		entriesSkippedEventMetric(0),
+	}
+
+	assert.ElementsMatch(t, buildMetrics, metrics.AllMetrics(), "should emit metrics for node aliases, entries, and agents")
+	metrics.Reset()
+
 	agentID := spiffeid.RequireFromString("spiffe://example.org/myagent")
 
 	_, err = ds.CreateAttestedNode(ctx, &common.AttestedNode{
@@ -106,9 +121,6 @@ func TestNewAuthorizedEntryFetcherWithEventsBasedCache(t *testing.T) {
 		nodeAliasesBySelectorMetric(1),
 		entriesByEntryIDMetric(2),
 		entriesByParentIDMetric(2),
-		// Here we have 2 skipped events, one for nodes, one for entries
-		nodeSkippedEventMetric(0),
-		entriesSkippedEventMetric(0),
 	}
 
 	assert.ElementsMatch(t, expectedMetrics, metrics.AllMetrics(), "should emit metrics for node aliases, entries, and agents")
@@ -133,7 +145,7 @@ func TestNewAuthorizedEntryFetcherWithEventsBasedCacheErrorBuildingCache(t *test
 	assert.ElementsMatch(t, expectedMetrics, metrics.AllMetrics(), "should emit no metrics")
 }
 
-func TestBuildCacheSavesMissedEvents(t *testing.T) {
+func TestBuildCacheSavesSkippedEvents(t *testing.T) {
 	ctx := context.Background()
 	log, _ := test.NewNullLogger()
 	clk := clock.NewMock(t)
@@ -166,20 +178,30 @@ func TestBuildCacheSavesMissedEvents(t *testing.T) {
 	})
 	require.NoError(t, err)
 
-	_, registrationEntries, attestedNodes, err := buildCache(ctx, log, metrics, ds, clk, defaultSQLTransactionTimeout)
+	_, registrationEntries, attestedNodes, err := buildCache(ctx, log, metrics, ds, clk, defaultCacheReloadInterval, defaultSQLTransactionTimeout)
 	require.NoError(t, err)
 	require.NotNil(t, registrationEntries)
 	require.NotNil(t, attestedNodes)
 
-	assert.Contains(t, registrationEntries.missedEvents, uint(2))
-	assert.Equal(t, uint(3), registrationEntries.lastEventID)
+	assert.Contains(t, registrationEntries.eventTracker.events, 
uint(2)) + assert.Equal(t, uint(3), registrationEntries.lastEvent) - assert.Contains(t, attestedNodes.missedEvents, uint(2)) - assert.Contains(t, attestedNodes.missedEvents, uint(3)) - assert.Equal(t, uint(4), attestedNodes.lastEventID) + assert.Contains(t, attestedNodes.eventTracker.events, uint(2)) + assert.Contains(t, attestedNodes.eventTracker.events, uint(3)) + assert.Equal(t, uint(4), attestedNodes.lastEvent) - // Assert metrics since the updateCache() method doesn't get called right at built time. - expectedMetrics := []fakemetrics.MetricItem{} + // Assert zero metrics since the updateCache() method doesn't get called right at built time. + expectedMetrics := []fakemetrics.MetricItem{ + agentsByIDMetric(0), + agentsByIDExpiresAtMetric(0), + nodeAliasesByEntryIDMetric(0), + nodeAliasesBySelectorMetric(0), + nodeSkippedEventMetric(2), + + entriesByEntryIDMetric(0), + entriesByParentIDMetric(0), + entriesSkippedEventMetric(1), + } assert.ElementsMatch(t, expectedMetrics, metrics.AllMetrics(), "should emit no metrics") } @@ -250,7 +272,7 @@ func TestRunUpdateCacheTaskPrunesExpiredAgents(t *testing.T) { require.ErrorIs(t, err, context.Canceled) } -func TestUpdateRegistrationEntriesCacheMissedEvents(t *testing.T) { +func TestUpdateRegistrationEntriesCacheSkippedEvents(t *testing.T) { ctx := context.Background() log, _ := test.NewNullLogger() clk := clock.NewMock(t) @@ -334,7 +356,7 @@ func TestUpdateRegistrationEntriesCacheMissedEvents(t *testing.T) { require.Equal(t, 1, len(entries)) } -func TestUpdateRegistrationEntriesCacheMissedStartupEvents(t *testing.T) { +func TestUpdateRegistrationEntriesCacheSkippedStartupEvents(t *testing.T) { ctx := context.Background() log, _ := test.NewNullLogger() clk := clock.NewMock(t) @@ -356,12 +378,17 @@ func TestUpdateRegistrationEntriesCacheMissedStartupEvents(t *testing.T) { }) require.NoError(t, err) - // Delete the event and entry for now and then add it back later to simulate out of order events + // Delete the create event for the first entry err = ds.DeleteRegistrationEntryEventForTesting(ctx, 1) require.NoError(t, err) + _, err = ds.DeleteRegistrationEntry(ctx, entry1.EntryId) require.NoError(t, err) + // Delete the delete event for the first entry + err = ds.DeleteRegistrationEntryEventForTesting(ctx, 2) + require.NoError(t, err) + // Create Second entry entry2, err := ds.CreateRegistrationEntry(ctx, &common.RegistrationEntry{ SpiffeId: "spiffe://example.org/workload2", @@ -399,6 +426,7 @@ func TestUpdateRegistrationEntriesCacheMissedStartupEvents(t *testing.T) { }, }) require.NoError(t, err) + err = ds.DeleteRegistrationEntryEventForTesting(ctx, 4) require.NoError(t, err) @@ -406,7 +434,7 @@ func TestUpdateRegistrationEntriesCacheMissedStartupEvents(t *testing.T) { err = ef.updateCache(ctx) require.NoError(t, err) - // Still should be 1 entry + // Still should be 1 entry, no event tells us about spiffe://example.org/workload entries, err = ef.FetchAuthorizedEntries(ctx, agentID) require.NoError(t, err) require.Equal(t, 1, len(entries)) @@ -441,7 +469,7 @@ func TestUpdateRegistrationEntriesCacheMissedStartupEvents(t *testing.T) { require.Contains(t, spiffeIDs, entry2.SpiffeId) } -func TestUpdateAttestedNodesCacheMissedEvents(t *testing.T) { +func TestUpdateAttestedNodesCacheSkippedEvents(t *testing.T) { ctx := context.Background() log, _ := test.NewNullLogger() clk := clock.NewMock(t) @@ -558,7 +586,7 @@ func TestUpdateAttestedNodesCacheMissedEvents(t *testing.T) { require.Equal(t, entry.SpiffeId, 
idutil.RequireIDProtoString(entries[0].SpiffeId)) } -func TestUpdateAttestedNodesCacheMissedStartupEvents(t *testing.T) { +func TestUpdateAttestedNodesCacheSkippedStartupEvents(t *testing.T) { ctx := context.Background() log, _ := test.NewNullLogger() clk := clock.NewMock(t) diff --git a/pkg/server/endpoints/entryfetcher.go b/pkg/server/endpoints/entryfetcher.go index cfa9c471d5..19e2e0b6b9 100644 --- a/pkg/server/endpoints/entryfetcher.go +++ b/pkg/server/endpoints/entryfetcher.go @@ -96,8 +96,8 @@ func (a *AuthorizedEntryFetcherWithFullCache) PruneEventsTask(ctx context.Contex } func (a *AuthorizedEntryFetcherWithFullCache) pruneEvents(ctx context.Context, olderThan time.Duration) error { - pruneRegistrationEntriesEventsErr := a.ds.PruneRegistrationEntriesEvents(ctx, olderThan) - pruneAttestedNodesEventsErr := a.ds.PruneAttestedNodesEvents(ctx, olderThan) + pruneRegistrationEntryEventsErr := a.ds.PruneRegistrationEntryEvents(ctx, olderThan) + pruneAttestedNodeEventsErr := a.ds.PruneAttestedNodeEvents(ctx, olderThan) - return errors.Join(pruneRegistrationEntriesEventsErr, pruneAttestedNodesEventsErr) + return errors.Join(pruneRegistrationEntryEventsErr, pruneAttestedNodeEventsErr) } diff --git a/pkg/server/endpoints/eventTracker.go b/pkg/server/endpoints/eventTracker.go new file mode 100644 index 0000000000..7be1913bb1 --- /dev/null +++ b/pkg/server/endpoints/eventTracker.go @@ -0,0 +1,79 @@ +package endpoints + +import ( + "sync" + "time" +) + +type eventTracker struct { + pollPeriods uint + + events map[uint]uint + + pool sync.Pool +} + +func PollPeriods(pollTime time.Duration, trackTime time.Duration) uint { + if pollTime < time.Second { + pollTime = time.Second + } + if trackTime < time.Second { + trackTime = time.Second + } + return uint(1 + (trackTime-1)/pollTime) +} + +func NewEventTracker(pollPeriods uint) *eventTracker { + if pollPeriods < 1 { + pollPeriods = 1 + } + + return &eventTracker{ + pollPeriods: pollPeriods, + events: make(map[uint]uint), + pool: sync.Pool{ + New: func() any { + // See https://staticcheck.dev/docs/checks#SA6002. 
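+				// Pooling a *[]uint rather than a []uint keeps the
+				// Put/Get round trip allocation-free: storing a bare
+				// slice in the pool would copy its header into an
+				// interface value on every Put (see SA6002 above).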
+				return new([]uint)
+			},
+		},
+	}
+}
+
+func (et *eventTracker) PollPeriods() uint {
+	return et.pollPeriods
+}
+
+func (et *eventTracker) Polls() uint {
+	return et.pollPeriods
+}
+
+func (et *eventTracker) StartTracking(event uint) {
+	et.events[event] = 0
+}
+
+func (et *eventTracker) StopTracking(event uint) {
+	delete(et.events, event)
+}
+
+func (et *eventTracker) SelectEvents() []uint {
+	pollList := *et.pool.Get().(*[]uint)
+	for event := range et.events {
+		// drop events that have already been polled the configured number of times
+		if et.events[event] >= et.pollPeriods {
+			et.StopTracking(event)
+			continue
+		}
+		pollList = append(pollList, event)
+		et.events[event]++
+	}
+	return pollList
+}
+
+func (et *eventTracker) FreeEvents(events []uint) {
+	events = events[:0]
+	et.pool.Put(&events)
+}
+
+func (et *eventTracker) EventCount() uint {
+	return uint(len(et.events))
+}
diff --git a/pkg/server/endpoints/eventTracker_test.go b/pkg/server/endpoints/eventTracker_test.go
new file mode 100644
index 0000000000..be86bce4b9
--- /dev/null
+++ b/pkg/server/endpoints/eventTracker_test.go
@@ -0,0 +1,247 @@
+package endpoints_test
+
+import (
+	"testing"
+	"time"
+
+	"github.com/spiffe/spire/pkg/server/endpoints"
+	"github.com/stretchr/testify/require"
+)
+
+func TestPollPeriods(t *testing.T) {
+	for _, tt := range []struct {
+		name         string
+		pollInterval time.Duration
+		pollDuration time.Duration
+
+		expectedPollPeriods uint
+	}{
+		{
+			name:         "polling always polls at least once, even for zero duration",
+			pollInterval: time.Minute,
+			pollDuration: time.Duration(0) * time.Minute,
+
+			expectedPollPeriods: 1,
+		},
+		{
+			name:         "polling always polls at least once, even for negative durations",
+			pollInterval: time.Minute,
+			pollDuration: time.Duration(-10) * time.Minute,
+
+			expectedPollPeriods: 1,
+		},
+		{
+			name:         "minimum poll interval of one second",
+			pollInterval: time.Duration(0) * time.Second,
+			pollDuration: time.Duration(10) * time.Second,
+
+			expectedPollPeriods: 10,
+		},
+		{
+			name:         "minimum poll interval of one second, even for negative intervals",
+			pollInterval: time.Duration(-100) * time.Second,
+			pollDuration: time.Duration(10) * time.Second,
+
+			expectedPollPeriods: 10,
+		},
+		{
+			name:         "polling every minute in two minutes",
+			pollInterval: time.Minute,
+			pollDuration: time.Minute * time.Duration(2),
+
+			expectedPollPeriods: 2,
+		},
+		{
+			name:         "polling every minute of an hour",
+			pollInterval: time.Minute,
+			pollDuration: time.Hour,
+
+			expectedPollPeriods: 60,
+		},
+		{
+			name:         "polling rounds up",
+			pollInterval: time.Minute * time.Duration(3),
+			pollDuration: time.Minute * time.Duration(10),
+
+			expectedPollPeriods: 4,
+		},
+	} {
+		t.Run(tt.name, func(t *testing.T) {
+			pollPeriods := endpoints.PollPeriods(tt.pollInterval, tt.pollDuration)
+
+			require.Equal(t, tt.expectedPollPeriods, pollPeriods, "interval %s, polled over %s yields %d poll periods, not %d poll periods", tt.pollInterval.String(), tt.pollDuration.String(), pollPeriods, tt.expectedPollPeriods)
+		})
+	}
+}
+
+func TestNewEventTracker(t *testing.T) {
+	for _, tt := range []struct {
+		name        string
+		pollPeriods uint
+
+		expectedPollPeriods uint
+		expectedPolls       uint
+	}{
+		{
+			name:        "polling always polls at least once",
+			pollPeriods: 0,
+
+			expectedPollPeriods: 1,
+			expectedPolls:       1,
+		},
+		{
+			name:        "polling once",
+			pollPeriods: 1,
+
+			expectedPollPeriods: 1,
+			expectedPolls:       1,
+		},
+		{
+			name:        "polling twice",
+			pollPeriods: 2,
+
+			expectedPollPeriods: 2,
+			expectedPolls:       2,
+		},
+		{
+			name:        "polling three times",
+			pollPeriods: 3,
+
+			expectedPollPeriods: 3,
+			expectedPolls:       3,
+		},
+		
{
+			name:        "polling 120 times",
+			pollPeriods: 120,
+
+			expectedPollPeriods: 120,
+			expectedPolls:       120,
+		},
+		{
+			name:        "polling 600 times",
+			pollPeriods: 600,
+
+			expectedPollPeriods: 600,
+			expectedPolls:       600,
+		},
+	} {
+		t.Run(tt.name, func(t *testing.T) {
+			eventTracker := endpoints.NewEventTracker(tt.pollPeriods)
+
+			require.Equal(t, tt.expectedPollPeriods, eventTracker.PollPeriods(), "expecting %d poll periods, but %d poll periods reported", tt.expectedPollPeriods, eventTracker.PollPeriods())
+
+			require.Equal(t, tt.expectedPolls, eventTracker.Polls(), "polling each element %d times, when expecting %d times", eventTracker.Polls(), tt.expectedPolls)
+		})
+	}
+}
+
+func TestEventTrackerPolling(t *testing.T) {
+	for _, tt := range []struct {
+		name        string
+		pollPeriods uint
+
+		trackEvents    [][]uint
+		expectedPolls  uint
+		expectedEvents [][]uint
+	}{
+		{
+			name:        "every event is polled at least once, even when zero polling periods",
+			pollPeriods: 0,
+			trackEvents: [][]uint{
+				{5, 11, 12, 15},
+				{6, 7, 8, 9, 10},
+			},
+
+			expectedPolls: 1,
+			expectedEvents: [][]uint{
+				{5, 11, 12, 15},
+				{6, 7, 8, 9, 10},
+				{},
+			},
+		},
+		{
+			name:        "polling each event once, initial period",
+			pollPeriods: 1,
+			trackEvents: [][]uint{
+				{5, 11, 12, 15},
+				{6, 7, 8, 9, 10},
+			},
+
+			expectedPolls: 1,
+			expectedEvents: [][]uint{
+				{5, 11, 12, 15},
+				{6, 7, 8, 9, 10},
+				{},
+			},
+		},
+		{
+			name:        "polling each event twice, initial period",
+			pollPeriods: 2,
+			trackEvents: [][]uint{
+				{5, 11, 12, 15},
+				{6, 7, 8, 9, 10},
+			},
+
+			expectedPolls: 2,
+			expectedEvents: [][]uint{
+				{5, 11, 12, 15},
+				{5, 6, 7, 8, 9, 10, 11, 12, 15},
+				{6, 7, 8, 9, 10},
+				{},
+			},
+		},
+		{
+			name:        "polling each event thrice, initial period",
+			pollPeriods: 3,
+			trackEvents: [][]uint{
+				{5, 11, 12, 15},
+				{6, 7, 8, 9, 10},
+				{1, 2, 3, 4, 13},
+			},
+
+			expectedPolls: 3,
+			expectedEvents: [][]uint{
+				{5, 11, 12, 15},
+				{5, 6, 7, 8, 9, 10, 11, 12, 15},
+				{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15},
+				{1, 2, 3, 4, 6, 7, 8, 9, 10, 13},
+				{1, 2, 3, 4, 13},
+				{},
+			},
+		},
+	} {
+		t.Run(tt.name, func(t *testing.T) {
+			eventTracker := endpoints.NewEventTracker(tt.pollPeriods)
+			require.Equal(t, tt.expectedPolls, eventTracker.Polls(),
+				"expecting %d polls per event, but event tracker reports %d polls per event",
+				tt.expectedPolls, eventTracker.Polls())
+
+			pollCount := make(map[uint]uint)
+
+			// run the simulation over what we expect
+			for index, expectedEvents := range tt.expectedEvents {
+				// if there are new tracking requests, add them
+				if index < len(tt.trackEvents) {
+					for _, event := range tt.trackEvents[index] {
+						eventTracker.StartTracking(event)
+					}
+				}
+				// get the events we should poll
+				events := eventTracker.SelectEvents()
+				// update count for each event
+				for _, event := range events {
+					pollCount[event]++
+				}
+				// see if the results match the expectations
+				require.ElementsMatch(t, expectedEvents, events,
+					"at time step %d, expected set of events %v, received %v",
+					index, expectedEvents, events)
+			}
+			for event, polls := range pollCount {
+				require.Equal(t, tt.expectedPolls, polls,
+					"expecting %d polls for event %d, but received %d polls",
+					tt.expectedPolls, event, polls)
+			}
+		})
+	}
+}
diff --git a/pkg/server/endpoints/middleware_test.go b/pkg/server/endpoints/middleware_test.go
index 84d3862fc9..d3b668e2a1 100644
--- a/pkg/server/endpoints/middleware_test.go
+++ b/pkg/server/endpoints/middleware_test.go
@@ -227,7 +227,6 @@ func TestAgentAuthorizer(t *testing.T) {
 			},
 		},
 	} {
-		tt := tt
 		t.Run(tt.name, func(t 
*testing.T) { log, hook := test.NewNullLogger() ds := fakedatastore.New(t) diff --git a/test/fakes/fakedatastore/fakedatastore.go b/test/fakes/fakedatastore/fakedatastore.go index 283a2b61ee..13728a13ea 100644 --- a/test/fakes/fakedatastore/fakedatastore.go +++ b/test/fakes/fakedatastore/fakedatastore.go @@ -162,18 +162,18 @@ func (s *DataStore) DeleteAttestedNode(ctx context.Context, spiffeID string) (*c return s.ds.DeleteAttestedNode(ctx, spiffeID) } -func (s *DataStore) ListAttestedNodesEvents(ctx context.Context, req *datastore.ListAttestedNodesEventsRequest) (*datastore.ListAttestedNodesEventsResponse, error) { +func (s *DataStore) ListAttestedNodeEvents(ctx context.Context, req *datastore.ListAttestedNodeEventsRequest) (*datastore.ListAttestedNodeEventsResponse, error) { if err := s.getNextError(); err != nil { return nil, err } - return s.ds.ListAttestedNodesEvents(ctx, req) + return s.ds.ListAttestedNodeEvents(ctx, req) } -func (s *DataStore) PruneAttestedNodesEvents(ctx context.Context, olderThan time.Duration) error { +func (s *DataStore) PruneAttestedNodeEvents(ctx context.Context, olderThan time.Duration) error { if err := s.getNextError(); err != nil { return err } - return s.ds.PruneAttestedNodesEvents(ctx, olderThan) + return s.ds.PruneAttestedNodeEvents(ctx, olderThan) } func (s *DataStore) CreateAttestedNodeEventForTesting(ctx context.Context, event *datastore.AttestedNodeEvent) error { @@ -312,18 +312,18 @@ func (s *DataStore) PruneRegistrationEntries(ctx context.Context, expiresBefore return s.ds.PruneRegistrationEntries(ctx, expiresBefore) } -func (s *DataStore) ListRegistrationEntriesEvents(ctx context.Context, req *datastore.ListRegistrationEntriesEventsRequest) (*datastore.ListRegistrationEntriesEventsResponse, error) { +func (s *DataStore) ListRegistrationEntryEvents(ctx context.Context, req *datastore.ListRegistrationEntryEventsRequest) (*datastore.ListRegistrationEntryEventsResponse, error) { if err := s.getNextError(); err != nil { return nil, err } - return s.ds.ListRegistrationEntriesEvents(ctx, req) + return s.ds.ListRegistrationEntryEvents(ctx, req) } -func (s *DataStore) PruneRegistrationEntriesEvents(ctx context.Context, olderThan time.Duration) error { +func (s *DataStore) PruneRegistrationEntryEvents(ctx context.Context, olderThan time.Duration) error { if err := s.getNextError(); err != nil { return err } - return s.ds.PruneRegistrationEntriesEvents(ctx, olderThan) + return s.ds.PruneRegistrationEntryEvents(ctx, olderThan) } func (s *DataStore) CreateRegistrationEntryEventForTesting(ctx context.Context, event *datastore.RegistrationEntryEvent) error { From 7abee0acc3ff9aea658d80a8f1a60cf08821874d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Agust=C3=ADn=20Mart=C3=ADnez=20Fay=C3=B3?= Date: Thu, 17 Oct 2024 15:34:51 -0300 Subject: [PATCH 2/2] Remove `forced_rotation` feature flag (#5586) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Agustín Martínez Fayó --- cmd/spire-server/cli/cli.go | 91 ++++++------------- pkg/common/fflag/fflag.go | 8 +- pkg/server/endpoints/endpoints.go | 8 +- pkg/server/endpoints/endpoints_test.go | 3 - .../05-prepare-jwt-authority | 2 +- .../06-fetch-jwt-svid | 2 +- .../07-activate-jwt-authority | 4 +- .../08-taint-jwt-authority | 4 +- .../09-verify-svid-rotation | 2 +- .../10-revoke-jwt-authority | 4 +- .../11-verify-revoked-jwt-authority | 2 +- .../conf/server/server.conf | 3 - .../10-prepare-authority | 2 +- .../11-activate-x509authority | 4 +- .../12-taint-x509authority | 4 
+- .../14-revoke-x509authority | 4 +- .../15-verify-revoked-x509authority | 2 +- .../root/server/server.conf | 3 - .../06-prepare-x509-authority | 4 +- .../07-activate-x509-authority | 6 +- .../08-taint-upstream-authority | 4 +- .../10-revoke-upstream-authority | 4 +- .../11-verify-revoked-upstream-authority | 2 +- .../conf/server/server.conf | 3 - 24 files changed, 61 insertions(+), 114 deletions(-) diff --git a/cmd/spire-server/cli/cli.go b/cmd/spire-server/cli/cli.go index bccb08ca67..e86c939305 100644 --- a/cmd/spire-server/cli/cli.go +++ b/cmd/spire-server/cli/cli.go @@ -3,8 +3,6 @@ package cli import ( "context" stdlog "log" - "os" - "strings" "github.com/mitchellh/cli" "github.com/spiffe/spire/cmd/spire-server/cli/agent" @@ -21,7 +19,6 @@ import ( "github.com/spiffe/spire/cmd/spire-server/cli/upstreamauthority" "github.com/spiffe/spire/cmd/spire-server/cli/validate" "github.com/spiffe/spire/cmd/spire-server/cli/x509" - "github.com/spiffe/spire/pkg/common/fflag" "github.com/spiffe/spire/pkg/common/log" "github.com/spiffe/spire/pkg/common/version" ) @@ -130,75 +127,47 @@ func (cc *CLI) Run(ctx context.Context, args []string) int { "validate": func() (cli.Command, error) { return validate.NewValidateCommand(), nil }, - } - - // TODO: Remove this when the forced_rotation feature flag is no longer - // needed. Refer to https://github.com/spiffe/spire/issues/5398. - addCommandsEnabledByFFlags(c.Commands) - - exitStatus, err := c.Run() - if err != nil { - stdlog.Println(err) - } - return exitStatus -} - -// addCommandsEnabledByFFlags adds commands that are currently available only -// through a feature flag. -// Feature flags support through the fflag package in SPIRE Server is -// designed to work only with the run command and the config file. -// Since feature flags are intended to be used by developers of a specific -// feature only, exposing them through command line arguments is not -// convenient. Instead, we use the SPIRE_SERVER_FFLAGS environment variable -// to read the configured SPIRE Server feature flags from the environment -// when other commands may be enabled through feature flags. 
-func addCommandsEnabledByFFlags(commands map[string]cli.CommandFactory) { - fflagsEnv := os.Getenv("SPIRE_SERVER_FFLAGS") - fflags := strings.Split(fflagsEnv, " ") - flagForcedRotationFound := false - for _, ff := range fflags { - if ff == string(fflag.FlagForcedRotation) { - flagForcedRotationFound = true - break - } - } - - if flagForcedRotationFound { - commands["localauthority x509 show"] = func() (cli.Command, error) { + "localauthority x509 show": func() (cli.Command, error) { return localauthority_x509.NewX509ShowCommand(), nil - } - commands["localauthority x509 prepare"] = func() (cli.Command, error) { + }, + "localauthority x509 prepare": func() (cli.Command, error) { return localauthority_x509.NewX509PrepareCommand(), nil - } - commands["localauthority x509 activate"] = func() (cli.Command, error) { + }, + "localauthority x509 activate": func() (cli.Command, error) { return localauthority_x509.NewX509ActivateCommand(), nil - } - commands["localauthority x509 taint"] = func() (cli.Command, error) { + }, + "localauthority x509 taint": func() (cli.Command, error) { return localauthority_x509.NewX509TaintCommand(), nil - } - commands["localauthority x509 revoke"] = func() (cli.Command, error) { + }, + "localauthority x509 revoke": func() (cli.Command, error) { return localauthority_x509.NewX509RevokeCommand(), nil - } - commands["localauthority jwt show"] = func() (cli.Command, error) { + }, + "localauthority jwt show": func() (cli.Command, error) { return localauthority_jwt.NewJWTShowCommand(), nil - } - commands["localauthority jwt prepare"] = func() (cli.Command, error) { + }, + "localauthority jwt prepare": func() (cli.Command, error) { return localauthority_jwt.NewJWTPrepareCommand(), nil - } - commands["localauthority jwt activate"] = func() (cli.Command, error) { + }, + "localauthority jwt activate": func() (cli.Command, error) { return localauthority_jwt.NewJWTActivateCommand(), nil - } - commands["localauthority jwt taint"] = func() (cli.Command, error) { + }, + "localauthority jwt taint": func() (cli.Command, error) { return localauthority_jwt.NewJWTTaintCommand(), nil - } - commands["localauthority jwt revoke"] = func() (cli.Command, error) { + }, + "localauthority jwt revoke": func() (cli.Command, error) { return localauthority_jwt.NewJWTRevokeCommand(), nil - } - commands["upstreamauthority taint"] = func() (cli.Command, error) { + }, + "upstreamauthority taint": func() (cli.Command, error) { return upstreamauthority.NewTaintCommand(), nil - } - commands["upstreamauthority revoke"] = func() (cli.Command, error) { + }, + "upstreamauthority revoke": func() (cli.Command, error) { return upstreamauthority.NewRevokeCommand(), nil - } + }, } + + exitStatus, err := c.Run() + if err != nil { + stdlog.Println(err) + } + return exitStatus } diff --git a/pkg/common/fflag/fflag.go b/pkg/common/fflag/fflag.go index caad3283f7..870b47e330 100644 --- a/pkg/common/fflag/fflag.go +++ b/pkg/common/fflag/fflag.go @@ -24,11 +24,6 @@ type RawConfig []string // false, with the only exception being flags that are in the process of being // deprecated. const ( - // FlagForcedRotation controls whether or not the new APIs and - // extensions related to forced rotation and revocation are - // enabled or not. See #1934 for more information. - FlagForcedRotation Flag = "forced_rotation" - // FlagTestFlag is defined purely for testing purposes. 
FlagTestFlag Flag = "i_am_a_test_flag" ) @@ -40,8 +35,7 @@ var ( mtx *sync.RWMutex }{ flags: map[Flag]bool{ - FlagForcedRotation: false, - FlagTestFlag: false, + FlagTestFlag: false, }, loaded: false, mtx: new(sync.RWMutex), diff --git a/pkg/server/endpoints/endpoints.go b/pkg/server/endpoints/endpoints.go index af2db4f908..5ffee7217c 100644 --- a/pkg/server/endpoints/endpoints.go +++ b/pkg/server/endpoints/endpoints.go @@ -30,7 +30,6 @@ import ( svidv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/svid/v1" trustdomainv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/trustdomain/v1" "github.com/spiffe/spire/pkg/common/auth" - "github.com/spiffe/spire/pkg/common/fflag" "github.com/spiffe/spire/pkg/common/peertracker" "github.com/spiffe/spire/pkg/common/telemetry" "github.com/spiffe/spire/pkg/common/util" @@ -202,11 +201,8 @@ func (e *Endpoints) ListenAndServe(ctx context.Context) error { svidv1.RegisterSVIDServer(udsServer, e.APIServers.SVIDServer) trustdomainv1.RegisterTrustDomainServer(tcpServer, e.APIServers.TrustDomainServer) trustdomainv1.RegisterTrustDomainServer(udsServer, e.APIServers.TrustDomainServer) - - if fflag.IsSet(fflag.FlagForcedRotation) { - localauthorityv1.RegisterLocalAuthorityServer(tcpServer, e.APIServers.LocalAUthorityServer) - localauthorityv1.RegisterLocalAuthorityServer(udsServer, e.APIServers.LocalAUthorityServer) - } + localauthorityv1.RegisterLocalAuthorityServer(tcpServer, e.APIServers.LocalAUthorityServer) + localauthorityv1.RegisterLocalAuthorityServer(udsServer, e.APIServers.LocalAUthorityServer) // UDS only loggerv1.RegisterLoggerServer(udsServer, e.APIServers.LoggerServer) diff --git a/pkg/server/endpoints/endpoints_test.go b/pkg/server/endpoints/endpoints_test.go index a6b6823fb1..18b220c2b4 100644 --- a/pkg/server/endpoints/endpoints_test.go +++ b/pkg/server/endpoints/endpoints_test.go @@ -24,7 +24,6 @@ import ( svidv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/svid/v1" trustdomainv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/trustdomain/v1" "github.com/spiffe/spire-api-sdk/proto/spire/api/types" - "github.com/spiffe/spire/pkg/common/fflag" "github.com/spiffe/spire/pkg/common/util" "github.com/spiffe/spire/pkg/server/authpolicy" "github.com/spiffe/spire/pkg/server/ca/manager" @@ -166,8 +165,6 @@ func TestNewErrorCreatingAuthorizedEntryFetcher(t *testing.T) { } func TestListenAndServe(t *testing.T) { - require.NoError(t, fflag.Load(fflag.RawConfig{"forced_rotation"})) - ctx := context.Background() ca := testca.New(t, testTD) federatedCA := testca.New(t, foreignFederatedTD) diff --git a/test/integration/suites/force-rotation-jwt-authority/05-prepare-jwt-authority b/test/integration/suites/force-rotation-jwt-authority/05-prepare-jwt-authority index 8bcc43958f..0cbab9dc7d 100755 --- a/test/integration/suites/force-rotation-jwt-authority/05-prepare-jwt-authority +++ b/test/integration/suites/force-rotation-jwt-authority/05-prepare-jwt-authority @@ -12,7 +12,7 @@ if [[ $amount_authorities -ne 1 ]]; then fi # Prepare authority -prepared_authority_id=$(docker compose exec -T -e SPIRE_SERVER_FFLAGS=forced_rotation spire-server \ +prepared_authority_id=$(docker compose exec -T spire-server \ /opt/spire/bin/spire-server localauthority jwt prepare -output json | jq -r .prepared_authority.authority_id) # Verify that the prepared authority is logged diff --git a/test/integration/suites/force-rotation-jwt-authority/06-fetch-jwt-svid b/test/integration/suites/force-rotation-jwt-authority/06-fetch-jwt-svid index 
42c3a82b13..45424203dc 100755 --- a/test/integration/suites/force-rotation-jwt-authority/06-fetch-jwt-svid +++ b/test/integration/suites/force-rotation-jwt-authority/06-fetch-jwt-svid @@ -1,6 +1,6 @@ #!/bin/bash -prepared_authority=$(docker compose exec -t -e SPIRE_SERVER_FFLAGS=forced_rotation spire-server \ +prepared_authority=$(docker compose exec -t spire-server \ /opt/spire/bin/spire-server \ localauthority jwt show -output json | jq -r .active.authority_id) || fail-now "Failed to fetch prepared JWT authority ID" diff --git a/test/integration/suites/force-rotation-jwt-authority/07-activate-jwt-authority b/test/integration/suites/force-rotation-jwt-authority/07-activate-jwt-authority index 45683d098d..2a546fe94f 100755 --- a/test/integration/suites/force-rotation-jwt-authority/07-activate-jwt-authority +++ b/test/integration/suites/force-rotation-jwt-authority/07-activate-jwt-authority @@ -1,12 +1,12 @@ #!/bin/bash # Fetch the prepared authority ID -prepared_authority=$(docker compose exec -t -e SPIRE_SERVER_FFLAGS=forced_rotation spire-server \ +prepared_authority=$(docker compose exec -t spire-server \ /opt/spire/bin/spire-server \ localauthority jwt show -output json | jq -r .prepared.authority_id) || fail-now "Failed to fetch prepared JWT authority ID" # Activate the authority -activated_authority=$(docker compose exec -t -e SPIRE_SERVER_FFLAGS=forced_rotation spire-server \ +activated_authority=$(docker compose exec -t spire-server \ /opt/spire/bin/spire-server \ localauthority jwt activate -authorityID "${prepared_authority}" \ -output json | jq -r .activated_authority.authority_id) || fail-now "Failed to activate JWT authority" diff --git a/test/integration/suites/force-rotation-jwt-authority/08-taint-jwt-authority b/test/integration/suites/force-rotation-jwt-authority/08-taint-jwt-authority index 24ad360714..9ce538b113 100755 --- a/test/integration/suites/force-rotation-jwt-authority/08-taint-jwt-authority +++ b/test/integration/suites/force-rotation-jwt-authority/08-taint-jwt-authority @@ -9,14 +9,14 @@ check-logs() { } # Fetch old authority ID -old_jwt_authority=$(docker compose exec -T -e SPIRE_SERVER_FFLAGS=forced_rotation spire-server \ +old_jwt_authority=$(docker compose exec -T spire-server \ /opt/spire/bin/spire-server \ localauthority jwt show -output json | jq -r .old.authority_id) || fail-now "Failed to fetch old authority ID" log-debug "Old authority: $old_jwt_authority" # Taint the old authority -docker compose exec -T -e SPIRE_SERVER_FFLAGS=forced_rotation spire-server \ +docker compose exec -T spire-server \ /opt/spire/bin/spire-server \ localauthority jwt taint -authorityID "${old_jwt_authority}" || fail-now "Failed to taint old authority" diff --git a/test/integration/suites/force-rotation-jwt-authority/09-verify-svid-rotation b/test/integration/suites/force-rotation-jwt-authority/09-verify-svid-rotation index 52d895d14e..182972b4b4 100755 --- a/test/integration/suites/force-rotation-jwt-authority/09-verify-svid-rotation +++ b/test/integration/suites/force-rotation-jwt-authority/09-verify-svid-rotation @@ -1,6 +1,6 @@ #!/bin/bash -active_authority=$(docker compose exec -t -e SPIRE_SERVER_FFLAGS=forced_rotation spire-server \ +active_authority=$(docker compose exec -t spire-server \ /opt/spire/bin/spire-server \ localauthority jwt show -output json | jq -r .active.authority_id) || fail-now "Failed to fetch active JWT authority ID" diff --git a/test/integration/suites/force-rotation-jwt-authority/10-revoke-jwt-authority 
b/test/integration/suites/force-rotation-jwt-authority/10-revoke-jwt-authority index 747989a41b..bfbda00568 100755 --- a/test/integration/suites/force-rotation-jwt-authority/10-revoke-jwt-authority +++ b/test/integration/suites/force-rotation-jwt-authority/10-revoke-jwt-authority @@ -1,6 +1,6 @@ #!/bin/bash -old_jwt_authority=$(docker compose exec -T -e SPIRE_SERVER_FFLAGS=forced_rotation spire-server \ +old_jwt_authority=$(docker compose exec -T spire-server \ /opt/spire/bin/spire-server \ localauthority jwt show -output json | jq -r .old.authority_id) || fail-now "Failed to fetch old authority ID" @@ -22,7 +22,7 @@ if [[ -z "$tainted_found" ]]; then fail-now "Tainted JWT authority expected" fi -docker compose exec -T -e SPIRE_SERVER_FFLAGS=forced_rotation spire-server \ +docker compose exec -T spire-server \ /opt/spire/bin/spire-server localauthority jwt \ revoke -authorityID $old_jwt_authority -output json || fail-now "Failed to revoke JWT authority" diff --git a/test/integration/suites/force-rotation-jwt-authority/11-verify-revoked-jwt-authority b/test/integration/suites/force-rotation-jwt-authority/11-verify-revoked-jwt-authority index 2d7ef4ca35..e9c0e5a0e8 100755 --- a/test/integration/suites/force-rotation-jwt-authority/11-verify-revoked-jwt-authority +++ b/test/integration/suites/force-rotation-jwt-authority/11-verify-revoked-jwt-authority @@ -1,7 +1,7 @@ #!/bin/bash for i in {1..20}; do - active_jwt_authority=$(docker compose exec -T -e SPIRE_SERVER_FFLAGS=forced_rotation spire-server \ + active_jwt_authority=$(docker compose exec -T spire-server \ /opt/spire/bin/spire-server \ localauthority jwt show -output json | jq -r .active.authority_id) || fail-now "Failed to fetch old jwt authority ID" diff --git a/test/integration/suites/force-rotation-jwt-authority/conf/server/server.conf b/test/integration/suites/force-rotation-jwt-authority/conf/server/server.conf index 7793db57c1..1749d743bf 100644 --- a/test/integration/suites/force-rotation-jwt-authority/conf/server/server.conf +++ b/test/integration/suites/force-rotation-jwt-authority/conf/server/server.conf @@ -6,9 +6,6 @@ server { log_level = "DEBUG" ca_ttl = "24h" default_jwt_svid_ttl = "8h" - experimental { - feature_flags = ["forced_rotation"] - } } plugins { diff --git a/test/integration/suites/force-rotation-self-signed/10-prepare-authority b/test/integration/suites/force-rotation-self-signed/10-prepare-authority index 9fdc5897b7..20f49b034a 100755 --- a/test/integration/suites/force-rotation-self-signed/10-prepare-authority +++ b/test/integration/suites/force-rotation-self-signed/10-prepare-authority @@ -48,7 +48,7 @@ for server in intermediateA-server intermediateB-server leafA-server leafB-serve done # Prepare authority -prepared_authority_id=$(docker compose exec -T -e SPIRE_SERVER_FFLAGS=forced_rotation root-server \ +prepared_authority_id=$(docker compose exec -T root-server \ /opt/spire/bin/spire-server localauthority x509 prepare -output json | jq -r .prepared_authority.authority_id) # Verify that the prepared authority is logged diff --git a/test/integration/suites/force-rotation-self-signed/11-activate-x509authority b/test/integration/suites/force-rotation-self-signed/11-activate-x509authority index 7659b3ea38..7c9b086144 100755 --- a/test/integration/suites/force-rotation-self-signed/11-activate-x509authority +++ b/test/integration/suites/force-rotation-self-signed/11-activate-x509authority @@ -1,12 +1,12 @@ #!/bin/bash # Fetch the prepared authority ID -prepared_authority=$(docker compose exec -t -e 
SPIRE_SERVER_FFLAGS=forced_rotation root-server \ +prepared_authority=$(docker compose exec -t root-server \ /opt/spire/bin/spire-server \ localauthority x509 show -output json | jq -r .prepared.authority_id) || fail-now "Failed to fetch prepared authority ID" # Activate the authority -activated_authority=$(docker compose exec -t -e SPIRE_SERVER_FFLAGS=forced_rotation root-server \ +activated_authority=$(docker compose exec -t root-server \ /opt/spire/bin/spire-server \ localauthority x509 activate -authorityID "${prepared_authority}" \ -output json | jq -r .activated_authority.authority_id) || fail-now "Failed to activate authority" diff --git a/test/integration/suites/force-rotation-self-signed/12-taint-x509authority b/test/integration/suites/force-rotation-self-signed/12-taint-x509authority index fca9c966c1..639b15f76b 100755 --- a/test/integration/suites/force-rotation-self-signed/12-taint-x509authority +++ b/test/integration/suites/force-rotation-self-signed/12-taint-x509authority @@ -9,12 +9,12 @@ check-logs() { } # Fetch old authority ID -old_authority=$(docker compose exec -T -e SPIRE_SERVER_FFLAGS=forced_rotation root-server \ +old_authority=$(docker compose exec -T root-server \ /opt/spire/bin/spire-server \ localauthority x509 show -output json | jq .old.authority_id -r) || fail-now "Failed to fetch old authority ID" # Taint the old authority -docker compose exec -T -e SPIRE_SERVER_FFLAGS=forced_rotation root-server \ +docker compose exec -T root-server \ /opt/spire/bin/spire-server \ localauthority x509 taint -authorityID "${old_authority}" || fail-now "Failed to taint old authority" diff --git a/test/integration/suites/force-rotation-self-signed/14-revoke-x509authority b/test/integration/suites/force-rotation-self-signed/14-revoke-x509authority index 7d8aeb8d01..2c689dcb87 100755 --- a/test/integration/suites/force-rotation-self-signed/14-revoke-x509authority +++ b/test/integration/suites/force-rotation-self-signed/14-revoke-x509authority @@ -8,7 +8,7 @@ get-x509-authorities-count() { docker compose exec -T $server /opt/spire/bin/spire-server bundle show -output json | jq '.x509_authorities | length' } -old_authority=$(docker compose exec -T -e SPIRE_SERVER_FFLAGS=forced_rotation root-server \ +old_authority=$(docker compose exec -T root-server \ /opt/spire/bin/spire-server localauthority x509 show -output json | jq .old.authority_id -r) || fail-now "Failed to get old authority" log-debug "Old authority: $old_authority" @@ -27,7 +27,7 @@ if [[ -z "$tainted_found" ]]; then fail-now "Tainted authority expected" fi -docker compose exec -T -e SPIRE_SERVER_FFLAGS=forced_rotation root-server \ +docker compose exec -T root-server \ /opt/spire/bin/spire-server localauthority x509 revoke -authorityID $old_authority -output json || fail-now "Failed to revoke authority" check-log-line root-server "X\.509 authority revoked successfully|local_authority_id=$old_authority" diff --git a/test/integration/suites/force-rotation-self-signed/15-verify-revoked-x509authority b/test/integration/suites/force-rotation-self-signed/15-verify-revoked-x509authority index 58aee486c6..85405089ab 100755 --- a/test/integration/suites/force-rotation-self-signed/15-verify-revoked-x509authority +++ b/test/integration/suites/force-rotation-self-signed/15-verify-revoked-x509authority @@ -4,7 +4,7 @@ MAX_RETRIES=10 RETRY_DELAY=2 # seconds between retries fetch-active-authority() { - docker compose exec -T -e SPIRE_SERVER_FFLAGS=forced_rotation root-server \ + docker compose exec -T root-server \ 
/opt/spire/bin/spire-server localauthority x509 show -output json | jq -r .active.authority_id } diff --git a/test/integration/suites/force-rotation-self-signed/root/server/server.conf b/test/integration/suites/force-rotation-self-signed/root/server/server.conf index 4a59b623be..af9998d306 100644 --- a/test/integration/suites/force-rotation-self-signed/root/server/server.conf +++ b/test/integration/suites/force-rotation-self-signed/root/server/server.conf @@ -7,9 +7,6 @@ server { # Set big numbers, to never go into regular rotations ca_ttl = "216h" default_x509_svid_ttl = "36h" - experimental { - feature_flags = ["forced_rotation"] - } } plugins { diff --git a/test/integration/suites/force-rotation-upstream-authority/06-prepare-x509-authority b/test/integration/suites/force-rotation-upstream-authority/06-prepare-x509-authority index ca93138840..caefaf37f0 100755 --- a/test/integration/suites/force-rotation-upstream-authority/06-prepare-x509-authority +++ b/test/integration/suites/force-rotation-upstream-authority/06-prepare-x509-authority @@ -12,7 +12,7 @@ if [[ $amount_bundles -ne 1 ]]; then fi # Prepare authority -prepared_authority_id=$(docker compose exec -T -e SPIRE_SERVER_FFLAGS=forced_rotation spire-server \ +prepared_authority_id=$(docker compose exec -T spire-server \ /opt/spire/bin/spire-server localauthority x509 prepare -output json | jq -r .prepared_authority.authority_id) # Verify that the prepared authority is logged @@ -32,7 +32,7 @@ fi new_dummy_ca_skid=$(openssl x509 -in conf/server/new_upstream_ca.crt -text | grep \ -A 1 'Subject Key Identifier' | tail -n 1 | tr -d ' ' | tr -d ':' | tr '[:upper:]' '[:lower:]') -upstream_authority_id=$(docker compose exec -T -e SPIRE_SERVER_FFLAGS=forced_rotation spire-server \ +upstream_authority_id=$(docker compose exec -T spire-server \ /opt/spire/bin/spire-server \ localauthority x509 show -output json | jq .prepared.upstream_authority_subject_key_id -r) diff --git a/test/integration/suites/force-rotation-upstream-authority/07-activate-x509-authority b/test/integration/suites/force-rotation-upstream-authority/07-activate-x509-authority index 1b103c2cfa..6a28a4fd80 100755 --- a/test/integration/suites/force-rotation-upstream-authority/07-activate-x509-authority +++ b/test/integration/suites/force-rotation-upstream-authority/07-activate-x509-authority @@ -1,15 +1,15 @@ #!/bin/bash # Fetch the prepared authority ID -prepared_authority=$(docker compose exec -t -e SPIRE_SERVER_FFLAGS=forced_rotation spire-server \ +prepared_authority=$(docker compose exec -t spire-server \ /opt/spire/bin/spire-server \ localauthority x509 show -output json | jq -r .prepared.authority_id) || fail-now "Failed to fetch prepared authority ID" -upstream_authority=$(docker compose exec -t -e SPIRE_SERVER_FFLAGS=forced_rotation spire-server \ +upstream_authority=$(docker compose exec -t spire-server \ /opt/spire/bin/spire-server \ localauthority x509 show -output json | jq -r .prepared.upstream_authority_subject_key_id) || fail-now "Failed to fetch prepared authority ID" # Activate the authority -activated_authority=$(docker compose exec -t -e SPIRE_SERVER_FFLAGS=forced_rotation spire-server \ +activated_authority=$(docker compose exec -t spire-server \ /opt/spire/bin/spire-server \ localauthority x509 activate -authorityID "${prepared_authority}" \ -output json | jq -r .activated_authority.authority_id) || fail-now "Failed to activate authority" diff --git a/test/integration/suites/force-rotation-upstream-authority/08-taint-upstream-authority 
b/test/integration/suites/force-rotation-upstream-authority/08-taint-upstream-authority index d365018595..6508f5a308 100755 --- a/test/integration/suites/force-rotation-upstream-authority/08-taint-upstream-authority +++ b/test/integration/suites/force-rotation-upstream-authority/08-taint-upstream-authority @@ -9,14 +9,14 @@ check-logs() { } # Fetch old authority ID -old_upstream_authority=$(docker compose exec -T -e SPIRE_SERVER_FFLAGS=forced_rotation spire-server \ +old_upstream_authority=$(docker compose exec -T spire-server \ /opt/spire/bin/spire-server \ localauthority x509 show -output json | jq -r .old.upstream_authority_subject_key_id) || fail-now "Failed to fetch old upstrem authority ID" log-debug "Old upstream authority: $old_upstream_authority" # Taint the old authority -docker compose exec -T -e SPIRE_SERVER_FFLAGS=forced_rotation spire-server \ +docker compose exec -T spire-server \ /opt/spire/bin/spire-server \ upstreamauthority taint -subjectKeyID "${old_upstream_authority}" || fail-now "Failed to taint old authority" diff --git a/test/integration/suites/force-rotation-upstream-authority/10-revoke-upstream-authority b/test/integration/suites/force-rotation-upstream-authority/10-revoke-upstream-authority index 4262106736..39255cb4c3 100755 --- a/test/integration/suites/force-rotation-upstream-authority/10-revoke-upstream-authority +++ b/test/integration/suites/force-rotation-upstream-authority/10-revoke-upstream-authority @@ -4,7 +4,7 @@ get-x509-authorities-count() { local server=$1 } -old_upstream_authority=$(docker compose exec -T -e SPIRE_SERVER_FFLAGS=forced_rotation spire-server \ +old_upstream_authority=$(docker compose exec -T spire-server \ /opt/spire/bin/spire-server \ localauthority x509 show -output json | jq -r .old.upstream_authority_subject_key_id) || fail-now "Failed to fetch old upstrem authority ID" @@ -27,7 +27,7 @@ if [[ -z "$tainted_found" ]]; then fail-now "Tainted authority expected" fi -docker compose exec -T -e SPIRE_SERVER_FFLAGS=forced_rotation spire-server \ +docker compose exec -T spire-server \ /opt/spire/bin/spire-server upstreamauthority \ revoke -subjectKeyID $old_upstream_authority -output json || fail-now "Failed to revoke upstream authority" diff --git a/test/integration/suites/force-rotation-upstream-authority/11-verify-revoked-upstream-authority b/test/integration/suites/force-rotation-upstream-authority/11-verify-revoked-upstream-authority index 3419ad430f..a31342631e 100755 --- a/test/integration/suites/force-rotation-upstream-authority/11-verify-revoked-upstream-authority +++ b/test/integration/suites/force-rotation-upstream-authority/11-verify-revoked-upstream-authority @@ -43,7 +43,7 @@ check_ski() { fi } -active_upstream_authority=$(docker compose exec -T -e SPIRE_SERVER_FFLAGS=forced_rotation spire-server \ +active_upstream_authority=$(docker compose exec -T spire-server \ /opt/spire/bin/spire-server \ localauthority x509 show -output json | jq -r .active.upstream_authority_subject_key_id) || fail-now "Failed to fetch old upstrem authority ID" diff --git a/test/integration/suites/force-rotation-upstream-authority/conf/server/server.conf b/test/integration/suites/force-rotation-upstream-authority/conf/server/server.conf index 74b4f1cc38..3eab850acf 100644 --- a/test/integration/suites/force-rotation-upstream-authority/conf/server/server.conf +++ b/test/integration/suites/force-rotation-upstream-authority/conf/server/server.conf @@ -6,9 +6,6 @@ server { log_level = "DEBUG" ca_ttl = "24h" default_x509_svid_ttl = "8h" - experimental { 
- feature_flags = ["forced_rotation"] - } } plugins {
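
Reviewer note on the event tracker introduced in the first patch: the sketch
below is a minimal, hypothetical illustration of how PollPeriods,
NewEventTracker, and SelectEvents are meant to compose per fetch cycle. It
assumes the unexported eventTracker API from eventTracker.go is in scope (same
package); the 5s reload interval, the 24h transaction timeout, the event IDs,
and the fetch callback are illustrative assumptions, not values from this
patch.

	package endpoints

	import "time"

	// pollSkippedEvents performs one cache-reload cycle against a tracker
	// sized so that a skipped event is retried for as long as a straggling
	// SQL transaction could still commit it.
	func pollSkippedEvents(fetch func(eventID uint)) {
		// Assumed values: reload every 5s, transactions time out after 24h.
		periods := PollPeriods(5*time.Second, 24*time.Hour)
		tracker := NewEventTracker(periods)

		// Gaps observed in the event ID stream (hypothetical IDs).
		tracker.StartTracking(2)
		tracker.StartTracking(3)

		// Poll every still-tracked event; the tracker stops tracking an
		// event on its own once it has been polled `periods` times.
		events := tracker.SelectEvents()
		for _, event := range events {
			fetch(event) // e.g. re-query the datastore for this event ID
		}
		tracker.FreeEvents(events) // recycle the scratch slice via the pool
	}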