Skip to content

Commit

Permalink
Allow running tests in a docker-compose env & fix failing tests
Browse files Browse the repository at this point in the history
  • Loading branch information
Max Kuznetsov committed Mar 21, 2024
1 parent 35735d5 commit 1771a5c
Show file tree
Hide file tree
Showing 10 changed files with 180 additions and 90 deletions.
15 changes: 15 additions & 0 deletions .github/workflows/tests.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# Run the Nakama test suite on every pull request update.
name: Run tests
on:
  pull_request:
    types: [opened, synchronize]
jobs:
  run_tests:
    name: Run Nakama tests
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Run tests
        # --exit-code-from makes `up` abort when the test container exits and
        # return that container's exit code, so a test failure fails this step.
        # The previous `up ...; down -v` one-liner returned the exit code of
        # `down`, which masked test failures and let CI pass regardless.
        run: docker compose -f ./docker-compose-tests.yml up --build --exit-code-from test

      - name: Tear down
        # Always clean up containers and volumes, even when the tests fail,
        # so state never leaks between runs.
        if: always()
        run: docker compose -f ./docker-compose-tests.yml down -v
3 changes: 3 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,9 @@ The format is based on [keep a changelog](http://keepachangelog.com) and this pr
- Tracker interface cleanup.
- Converted StatusRegistry to an interface.

### Added
- Add the ability to run unit and integration tests in an isolated docker-compose environment.

## [3.21.0] - 2024-03-17
### Added
- Add Fleet Manager API to power session-based multiplayer integrations. See [the documentation](https://heroiclabs.com/docs/nakama/concepts/multiplayer/session-based/) for more details.
Expand Down
11 changes: 11 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -170,6 +170,17 @@ To build the codebase and generate all sources follow these steps.
go build -trimpath -mod=vendor
```

### Testing

To run all the unit and integration tests, run:

```shell
docker-compose -f ./docker-compose-tests.yml up --build --abort-on-container-exit; docker-compose -f ./docker-compose-tests.yml down -v
```

This will create an isolated environment with Nakama and database instances, run
all the tests, and drop the environment afterwards.

### License

This project is licensed under the [Apache-2 License](https://github.com/heroiclabs/nakama/blob/master/LICENSE).
62 changes: 62 additions & 0 deletions docker-compose-tests.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
# Isolated environment for running Nakama unit and integration tests:
# a Postgres instance, a Nakama server built from the local tree, and a
# Go container that runs `go test` against the mounted source.
version: "3.9"
services:
  db:
    container_name: db
    image: postgres:15
    # Tests open many concurrent connections; raise the default limit.
    command: -c 'max_connections=1000'
    environment:
      - POSTGRES_DB=nakama
      - POSTGRES_PASSWORD=localdb
    expose:
      - "5432"
    healthcheck:
      test: ["CMD", "pg_isready", "-U", "postgres", "-d", "nakama"]
      start_period: 10s
      interval: 10s
      timeout: 10s
      retries: 10

  nakama:
    build:
      context: .
      dockerfile: ./build/Dockerfile.local
    # NOTE: fixed typo — was "nakama-intergration-tests".
    image: nakama-integration-tests
    # Apply schema migrations first, then exec the server so it becomes PID 1
    # and receives signals directly.
    entrypoint:
      - "/bin/sh"
      - "-ecx"
      - >
        /nakama/nakama migrate up --database.address postgres:localdb@db:5432/nakama &&
        exec /nakama/nakama --logger.level DEBUG --name nakama --database.address postgres:localdb@db:5432/nakama --session.token_expiry_sec 7200 --socket.port 7350 --console.port 7351
    restart: always
    links:
      - "db:db"
    depends_on:
      db:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "/nakama/nakama", "healthcheck"]
      timeout: 10s
      retries: 10
      start_period: 5s
      interval: 5s

  test:
    image: "golang:1.21"
    command: ["go", "test", "-v", "-race", "./..."]
    working_dir: "/nakama"
    environment:
      # Use the cgo resolver so container DNS (service names like "db") works.
      - "GODEBUG=netdns=cgo"
      # Consumed by the test helpers (see server/api_test.go NewDB) to point
      # tests at the in-network Postgres instead of a local CockroachDB.
      - TEST_DB_URL=postgresql://postgres:localdb@db:5432/nakama?sslmode=disable
    volumes:
      - "./:/nakama"
    links:
      - "db:db"
    depends_on:
      db:
        condition: service_healthy
      nakama:
        condition: service_healthy

networks:
  default:
    driver: bridge
2 changes: 1 addition & 1 deletion internal/gopher-lua/script_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ func sleep(L *LState) int {
}

// countFinalizers pushes the current count of live user datas onto the Lua
// stack. The counter is updated concurrently (by finalizers under -race), so
// it must be read with an atomic load — the scraped diff retained both the
// old non-atomic read and the new atomic one; this is the post-commit form.
func countFinalizers(L *LState) int {
	L.Push(LNumber(atomic.LoadInt32(&numActiveUserDatas)))
	return 1
}

Expand Down
2 changes: 1 addition & 1 deletion main.go
Original file line number Diff line number Diff line change
Expand Up @@ -291,7 +291,7 @@ func newOrLoadCookie(config server.Config) string {
cookie := uuid.FromBytesOrNil(b)
if err != nil || cookie == uuid.Nil {
cookie = uuid.Must(uuid.NewV4())
_ = os.WriteFile(filePath, cookie.Bytes(), 0644)
_ = os.WriteFile(filePath, cookie.Bytes(), 0o644)
}
return cookie.String()
}
16 changes: 11 additions & 5 deletions server/api_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -171,8 +171,13 @@ func NewConsoleLogger(output *os.File, verbose bool) *zap.Logger {
}

func NewDB(t *testing.T) *sql.DB {
db, err := sql.Open("pgx", "postgresql://[email protected]:26257/nakama?sslmode=disable")
// db, err := sql.Open("pgx", "postgresql://[email protected]:5432/nakama?sslmode=disable")
// dbUrl := "postgresql://[email protected]:5432/nakama?sslmode=disable"
dbUrl := "postgresql://[email protected]:26257/nakama?sslmode=disable"
if dbUrlEnv := os.Getenv("TEST_DB_URL"); len(dbUrlEnv) > 0 {
dbUrl = dbUrlEnv
}

db, err := sql.Open("pgx", dbUrl)
if err != nil {
t.Fatal("Error connecting to database", err)
}
Expand Down Expand Up @@ -218,10 +223,11 @@ func WaitForSocket(expected error, cfg *config) {
func NewAPIServer(t *testing.T, runtime *Runtime) (*ApiServer, *Pipeline) {
db := NewDB(t)
router := &DummyMessageRouter{}
tracker := &LocalTracker{}
sessionCache := NewLocalSessionCache(3_600, 7_200)
pipeline := NewPipeline(logger, cfg, db, protojsonMarshaler, protojsonUnmarshaler, nil, nil, nil, nil, nil, tracker, router, runtime)
apiServer := StartApiServer(logger, logger, db, protojsonMarshaler, protojsonUnmarshaler, cfg, "3.0.0", nil, storageIdx, nil, nil, nil, sessionCache, nil, nil, nil, tracker, router, nil, metrics, pipeline, runtime)
sessionRegistry := NewLocalSessionRegistry(metrics)
tracker := &LocalTracker{sessionRegistry: sessionRegistry}
pipeline := NewPipeline(logger, cfg, db, protojsonMarshaler, protojsonUnmarshaler, sessionRegistry, nil, nil, nil, nil, tracker, router, runtime)
apiServer := StartApiServer(logger, logger, db, protojsonMarshaler, protojsonUnmarshaler, cfg, "3.0.0", nil, storageIdx, nil, nil, sessionRegistry, sessionCache, nil, nil, nil, tracker, router, nil, metrics, pipeline, runtime)

WaitForSocket(nil, cfg)
return apiServer, pipeline
Expand Down
51 changes: 18 additions & 33 deletions server/match_common_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,8 +57,7 @@ func createTestMatchRegistry(t fatalable, logger *zap.Logger) (*LocalMatchRegist
mp := NewMatchProvider()

mp.RegisterCreateFn("go",
func(ctx context.Context, logger *zap.Logger, id uuid.UUID, node string, stopped *atomic.Bool,
name string) (RuntimeMatchCore, error) {
func(ctx context.Context, logger *zap.Logger, id uuid.UUID, node string, stopped *atomic.Bool, name string) (RuntimeMatchCore, error) {
match, err := newTestMatch(context.Background(), NewRuntimeGoLogger(logger), nil, nil)
if err != nil {
return nil, err
Expand All @@ -82,13 +81,11 @@ type testMatchState struct {
// testMatch is a minimal implementation of runtime.Match for testing purposes
type testMatch struct{}

func newTestMatch(ctx context.Context, logger runtime.Logger, db *sql.DB,
nk runtime.NakamaModule) (m runtime.Match, err error) {
func newTestMatch(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule) (m runtime.Match, err error) {
return &testMatch{}, nil
}

func (m *testMatch) MatchInit(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule,
params map[string]interface{}) (interface{}, int, string) {
func (m *testMatch) MatchInit(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule, params map[string]interface{}) (interface{}, int, string) {
state := &testMatchState{
presences: make(map[string]runtime.Presence),
}
Expand All @@ -104,33 +101,28 @@ func (m *testMatch) MatchInit(ctx context.Context, logger runtime.Logger, db *sq
return state, tickRate, label
}

func (m *testMatch) MatchJoinAttempt(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule,
dispatcher runtime.MatchDispatcher, tick int64, state interface{}, presence runtime.Presence,
metadata map[string]string) (interface{}, bool, string) {
func (m *testMatch) MatchJoinAttempt(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule, dispatcher runtime.MatchDispatcher, tick int64, state interface{}, presence runtime.Presence, metadata map[string]string) (interface{}, bool, string) {
acceptUser := true
return state, acceptUser, ""
}

func (m *testMatch) MatchJoin(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule,
dispatcher runtime.MatchDispatcher, tick int64, state interface{}, presences []runtime.Presence) interface{} {
func (m *testMatch) MatchJoin(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule, dispatcher runtime.MatchDispatcher, tick int64, state interface{}, presences []runtime.Presence) interface{} {
mState, _ := state.(*testMatchState)
for _, p := range presences {
mState.presences[p.GetUserId()] = p
}
return mState
}

func (m *testMatch) MatchLeave(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule,
dispatcher runtime.MatchDispatcher, tick int64, state interface{}, presences []runtime.Presence) interface{} {
func (m *testMatch) MatchLeave(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule, dispatcher runtime.MatchDispatcher, tick int64, state interface{}, presences []runtime.Presence) interface{} {
mState, _ := state.(*testMatchState)
for _, p := range presences {
delete(mState.presences, p.GetUserId())
}
return mState
}

func (m *testMatch) MatchLoop(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule,
dispatcher runtime.MatchDispatcher, tick int64, state interface{}, messages []runtime.MatchData) interface{} {
func (m *testMatch) MatchLoop(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule, dispatcher runtime.MatchDispatcher, tick int64, state interface{}, messages []runtime.MatchData) interface{} {
mState, _ := state.(*testMatchState)
for _, presence := range mState.presences {
logger.Info("Presence %v named %v", presence.GetUserId(), presence.GetUsername())
Expand All @@ -145,8 +137,7 @@ func (m *testMatch) MatchLoop(ctx context.Context, logger runtime.Logger, db *sq
return mState
}

func (m *testMatch) MatchTerminate(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule,
dispatcher runtime.MatchDispatcher, tick int64, state interface{}, graceSeconds int) interface{} {
func (m *testMatch) MatchTerminate(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule, dispatcher runtime.MatchDispatcher, tick int64, state interface{}, graceSeconds int) interface{} {
message := "Server shutting down in " + strconv.Itoa(graceSeconds) + " seconds."
reliable := true
if err := dispatcher.BroadcastMessage(2, []byte(message), []runtime.Presence{}, nil, reliable); err != nil {
Expand All @@ -155,8 +146,7 @@ func (m *testMatch) MatchTerminate(ctx context.Context, logger runtime.Logger, d
return state
}

func (m *testMatch) MatchSignal(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule,
dispatcher runtime.MatchDispatcher, tick int64, state interface{}, data string) (interface{}, string) {
func (m *testMatch) MatchSignal(ctx context.Context, logger runtime.Logger, db *sql.DB, nk runtime.NakamaModule, dispatcher runtime.MatchDispatcher, tick int64, state interface{}, data string) (interface{}, string) {
return state, "signal received: " + data
}

Expand All @@ -170,6 +160,7 @@ func (s *testMetrics) SnapshotRecvKbSec() float64 { return 0 }
func (s *testMetrics) SnapshotSentKbSec() float64 { return 0 }
func (s *testMetrics) Api(name string, elapsed time.Duration, recvBytes, sentBytes int64, isErr bool) {
}

func (s *testMetrics) ApiRpc(id string, elapsed time.Duration, recvBytes, sentBytes int64, isErr bool) {
}
func (s *testMetrics) ApiBefore(name string, elapsed time.Duration, isErr bool) {}
Expand Down Expand Up @@ -199,8 +190,7 @@ type testMessageRouter struct {
sendToPresence func(presences []*PresenceID, envelope *rtapi.Envelope)
}

func (s *testMessageRouter) SendToPresenceIDs(_ *zap.Logger, presences []*PresenceID,
envelope *rtapi.Envelope, _ bool) {
func (s *testMessageRouter) SendToPresenceIDs(_ *zap.Logger, presences []*PresenceID, envelope *rtapi.Envelope, _ bool) {
if s.sendToPresence != nil {
s.sendToPresence(presences, envelope)
}
Expand All @@ -219,12 +209,11 @@ func (s *testTracker) SetPartyLeaveListener(func(id uuid.UUID, leaves []*Presenc
func (s *testTracker) Stop() {}

// Track returns success true/false, and new presence true/false.
func (s *testTracker) Track(ctx context.Context, sessionID uuid.UUID, stream PresenceStream, userID uuid.UUID,
meta PresenceMeta, allowIfFirstForSession bool) (bool, bool) {
func (s *testTracker) Track(ctx context.Context, sessionID uuid.UUID, stream PresenceStream, userID uuid.UUID, meta PresenceMeta) (bool, bool) {
return true, true
}
func (s *testTracker) TrackMulti(ctx context.Context, sessionID uuid.UUID, ops []*TrackerOp, userID uuid.UUID,
allowIfFirstForSession bool) bool {

func (s *testTracker) TrackMulti(ctx context.Context, sessionID uuid.UUID, ops []*TrackerOp, userID uuid.UUID) bool {
return true
}
func (s *testTracker) Untrack(sessionID uuid.UUID, stream PresenceStream, userID uuid.UUID) {}
Expand All @@ -234,8 +223,7 @@ func (s *testTracker) UntrackAll(sessionID uuid.UUID, reason runtime.PresenceRea

// Update returns success true/false - will only fail if the user has no presence and allowIfFirstForSession is false,
// otherwise is an upsert.
func (s *testTracker) Update(ctx context.Context, sessionID uuid.UUID, stream PresenceStream, userID uuid.UUID,
meta PresenceMeta, allowIfFirstForSession bool) bool {
func (s *testTracker) Update(ctx context.Context, sessionID uuid.UUID, stream PresenceStream, userID uuid.UUID, meta PresenceMeta) bool {
return true
}

Expand Down Expand Up @@ -275,14 +263,12 @@ func (s *testTracker) CountByStreamModeFilter(modes map[uint8]*uint8) map[*Prese
}

// Check if a single presence on the current node exists.
func (s *testTracker) GetLocalBySessionIDStreamUserID(sessionID uuid.UUID, stream PresenceStream,
userID uuid.UUID) *PresenceMeta {
func (s *testTracker) GetLocalBySessionIDStreamUserID(sessionID uuid.UUID, stream PresenceStream, userID uuid.UUID) *PresenceMeta {
return nil
}

// Check if a single presence on any node exists.
func (s *testTracker) GetBySessionIDStreamUserID(node string, sessionID uuid.UUID, stream PresenceStream,
userID uuid.UUID) *PresenceMeta {
func (s *testTracker) GetBySessionIDStreamUserID(node string, sessionID uuid.UUID, stream PresenceStream, userID uuid.UUID) *PresenceMeta {
return nil
}

Expand Down Expand Up @@ -320,8 +306,7 @@ func (s *testSessionRegistry) Add(session Session) {}

func (s *testSessionRegistry) Remove(sessionID uuid.UUID) {}

func (s *testSessionRegistry) Disconnect(ctx context.Context, sessionID uuid.UUID, ban bool,
reason ...runtime.PresenceReason) error {
func (s *testSessionRegistry) Disconnect(ctx context.Context, sessionID uuid.UUID, ban bool, reason ...runtime.PresenceReason) error {
return nil
}

Expand Down
Loading

0 comments on commit 1771a5c

Please sign in to comment.