diff --git a/api/csrf.go b/api/csrf.go index 1ae8c16476f..a1e59851a92 100644 --- a/api/csrf.go +++ b/api/csrf.go @@ -27,7 +27,7 @@ func csrfProtect(config_obj *config_proto.Config, _, _ = hasher.Write([]byte(config_obj.Frontend.PrivateKey)) token := hasher.Sum(nil) - protectionFn := csrf.Protect(token, csrf.Path("/")) + protectionFn := csrf.Protect(token, csrf.Path("/"), csrf.MaxAge(7*24*60*60)) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { protectionFn(parent).ServeHTTP(w, r) diff --git a/artifacts/definitions/Windows/Forensics/Lnk.yaml b/artifacts/definitions/Windows/Forensics/Lnk.yaml index c269e321ce8..c42e50ab415 100644 --- a/artifacts/definitions/Windows/Forensics/Lnk.yaml +++ b/artifacts/definitions/Windows/Forensics/Lnk.yaml @@ -395,9 +395,9 @@ sources: column_types: - name: Mtime type: timestamp - - name: ATime + - name: Atime type: timestamp - - name: CTime + - name: Ctime type: timestamp - name: HeaderCreationTime type: timestamp diff --git a/executor/pool.go b/executor/pool.go index b860f73ab4e..9c46f45fbb2 100644 --- a/executor/pool.go +++ b/executor/pool.go @@ -71,15 +71,18 @@ func (self *PoolClientExecutor) ReadResponse() <-chan *crypto_proto.GrrMessage { // Inspect the request and decide if we will cache it under a query. func getQueryName(message *crypto_proto.GrrMessage) string { + query_name := "" if message.VQLClientAction != nil { for _, query := range message.VQLClientAction.Query { if query.Name != "" { - return query.Name + query_name = query.Name } } + // Cache it under the query name and the + serialized, _ := json.Marshal(message.VQLClientAction.Env) + return fmt.Sprintf("%v: %v", query_name, string(serialized)) } - return "" } diff --git a/file_store/api/queues.go b/file_store/api/queues.go index 989ef91c273..42c8e39998f 100644 --- a/file_store/api/queues.go +++ b/file_store/api/queues.go @@ -10,7 +10,8 @@ import ( // responsible for rotating the queue files as required. type QueueManager interface { PushEventRows(path_manager PathManager, rows []*ordereddict.Dict) error - Watch(queue_name string) (output <-chan *ordereddict.Dict, cancel func()) + Watch(ctx context.Context, queue_name string) ( + output <-chan *ordereddict.Dict, cancel func()) } type ResultSetFileProperties struct { diff --git a/file_store/api/testsuite.go b/file_store/api/testsuite.go index 7b9b5e7c4c0..bb134af32ee 100644 --- a/file_store/api/testsuite.go +++ b/file_store/api/testsuite.go @@ -241,7 +241,8 @@ func (self *QueueManagerTestSuite) TestPush() { ordereddict.NewDict().Set("foo", 1), ordereddict.NewDict().Set("foo", 2)} - output, cancel := self.manager.Watch(artifact_name) + ctx := context.Background() + output, cancel := self.manager.Watch(ctx, artifact_name) defer cancel() err := self.manager.PushEventRows( diff --git a/file_store/directory/buffer.go b/file_store/directory/buffer.go new file mode 100644 index 00000000000..2975589500a --- /dev/null +++ b/file_store/directory/buffer.go @@ -0,0 +1,239 @@ +// A ring buffer to queue messages + +// Similar to the client ring buffer but this one has no limit because +// we never want to block writers. 
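A minimal usage sketch of the buffer defined in this file, assuming config.GetDefaultConfig() supplies enough configuration (as elsewhere in the tree); the payload row is illustrative only:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/Velocidex/ordereddict"
	"www.velocidex.com/golang/velociraptor/config"
	"www.velocidex.com/golang/velociraptor/file_store/directory"
)

func main() {
	// Assumption: a default config is enough for logging here.
	config_obj := config.GetDefaultConfig()

	fd, err := ioutil.TempFile("", "journal")
	if err != nil {
		panic(err)
	}
	defer os.Remove(fd.Name())

	rb, err := directory.NewFileBasedRingBuffer(config_obj, fd)
	if err != nil {
		panic(err)
	}
	defer rb.Close()

	// Writers append length prefixed JSON records to the file.
	_ = rb.Enqueue(ordereddict.NewDict().Set("Foo", "Bar"))

	// Readers lease a batch of records; once the read pointer
	// catches up with the write pointer the file is truncated.
	for _, item := range rb.Lease(100) {
		fmt.Println(item)
	}
}
```

Because the file is never size limited, Enqueue only fails on serialization or I/O errors.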
+ +package directory + +import ( + "encoding/binary" + "encoding/json" + "errors" + "io" + "os" + "sync" + + "github.com/Velocidex/ordereddict" + config_proto "www.velocidex.com/golang/velociraptor/config/proto" + "www.velocidex.com/golang/velociraptor/constants" + logging "www.velocidex.com/golang/velociraptor/logging" +) + +// The below is similar to http_comms.FileBasedRingBuffer except: +// * Size of the file is not limited. +// * Leasing a full number of messages at once (rather than combined size). + +const ( + FileMagic = "VRB\x5e" + FirstRecordOffset = 50 +) + +type Header struct { + ReadPointer int64 // Leasing will start at this file offset. + WritePointer int64 // Enqueue will write at this file position. +} + +func (self *Header) MarshalBinary() ([]byte, error) { + data := make([]byte, FirstRecordOffset) + copy(data, FileMagic) + + binary.LittleEndian.PutUint64(data[4:12], uint64(self.ReadPointer)) + binary.LittleEndian.PutUint64(data[12:20], uint64(self.WritePointer)) + + return data, nil +} + +func (self *Header) UnmarshalBinary(data []byte) error { + if len(data) < FirstRecordOffset { + return errors.New("Invalid header length") + } + + if string(data[:4]) != FileMagic { + return errors.New("Invalid Magic") + } + + self.ReadPointer = int64(binary.LittleEndian.Uint64(data[4:12])) + self.WritePointer = int64(binary.LittleEndian.Uint64(data[12:20])) + + return nil +} + +type FileBasedRingBuffer struct { + config_obj *config_proto.Config + + mu sync.Mutex + + fd *os.File + header *Header + + read_buf []byte + write_buf []byte + + log_ctx *logging.LogContext +} + +// Enqueue the item into the ring buffer and append to the end. +func (self *FileBasedRingBuffer) Enqueue(item interface{}) error { + serialized, err := json.Marshal(item) + if err != nil { + return err + } + + self.mu.Lock() + defer self.mu.Unlock() + + // Write the new message to the end of the file at the WritePointer + binary.LittleEndian.PutUint64(self.write_buf, uint64(len(serialized))) + _, err = self.fd.WriteAt(self.write_buf, int64(self.header.WritePointer)) + if err != nil { + // File is corrupt now, reset it. + self.Reset() + return err + } + + n, err := self.fd.WriteAt(serialized, int64(self.header.WritePointer+8)) + if err != nil { + self.Reset() + return err + } + + self.header.WritePointer += 8 + int64(n) + + // Update the header + serialized, err = self.header.MarshalBinary() + if err != nil { + return err + } + _, err = self.fd.WriteAt(serialized, 0) + if err != nil { + self.Reset() + return err + } + + return nil +} + +// Returns some messages message from the file. +func (self *FileBasedRingBuffer) Lease(count int) []*ordereddict.Dict { + self.mu.Lock() + defer self.mu.Unlock() + + result := make([]*ordereddict.Dict, 0, count) + + // The file contains more data. + for self.header.WritePointer > self.header.ReadPointer { + + // Read the next chunk (length+value) from the current leased pointer. + n, err := self.fd.ReadAt(self.read_buf, self.header.ReadPointer) + if err != nil || n != len(self.read_buf) { + self.log_ctx.Error("Possible corruption detected: file too short.") + self._Truncate() + return nil + } + + length := int64(binary.LittleEndian.Uint64(self.read_buf)) + // File might be corrupt - just reset the + // entire file. + if length > constants.MAX_MEMORY*2 || length <= 0 { + self.log_ctx.Error("Possible corruption detected - item length is too large.") + self._Truncate() + return nil + } + + // Unmarshal one item at a time. 
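+		// Each record is framed as an 8 byte little endian length
+		// followed by the JSON encoded item, so read exactly
+		// `length` bytes after the prefix before decoding.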
+ serialized := make([]byte, length) + n, _ = self.fd.ReadAt(serialized, self.header.ReadPointer+8) + if int64(n) != length { + self.log_ctx.Errorf( + "Possible corruption detected - expected item of length %v received %v.", + length, n) + self._Truncate() + return nil + } + + item := ordereddict.NewDict() + err = item.UnmarshalJSON(serialized) + if err == nil { + result = append(result, item) + } + + self.header.ReadPointer += 8 + int64(n) + // We read up to the write pointer, we may truncate the file now. + if self.header.ReadPointer == self.header.WritePointer { + self._Truncate() + } + + if len(result) >= count { + break + } + } + + return result +} + +// _Truncate returns the file to a virgin state. Assumes +// FileBasedRingBuffer is already under lock. +func (self *FileBasedRingBuffer) _Truncate() { + _ = self.fd.Truncate(0) + self.header.ReadPointer = FirstRecordOffset + self.header.WritePointer = FirstRecordOffset + serialized, _ := self.header.MarshalBinary() + _, _ = self.fd.WriteAt(serialized, 0) +} + +func (self *FileBasedRingBuffer) Reset() { + self.mu.Lock() + defer self.mu.Unlock() + + self._Truncate() +} + +// Closes the underlying file and shut down the readers. +func (self *FileBasedRingBuffer) Close() { + self.fd.Close() +} + +func NewFileBasedRingBuffer( + config_obj *config_proto.Config, fd *os.File) (*FileBasedRingBuffer, error) { + + log_ctx := logging.GetLogger(config_obj, &logging.FrontendComponent) + + header := &Header{ + // Pad the header a bit to allow for extensions. + WritePointer: FirstRecordOffset, + ReadPointer: FirstRecordOffset, + } + data := make([]byte, FirstRecordOffset) + n, err := fd.ReadAt(data, 0) + if n > 0 && n < FirstRecordOffset && err == io.EOF { + log_ctx.Error("Possible corruption detected: file too short.") + err = fd.Truncate(0) + if err != nil { + return nil, err + } + } + + if n > 0 && (err == nil || err == io.EOF) { + err := header.UnmarshalBinary(data[:n]) + // The header is not valid, truncate the file and + // start again. + if err != nil { + log_ctx.Errorf("Possible corruption detected: %v.", err) + err = fd.Truncate(0) + if err != nil { + return nil, err + } + } + } + + result := &FileBasedRingBuffer{ + config_obj: config_obj, + fd: fd, + header: header, + read_buf: make([]byte, 8), + write_buf: make([]byte, 8), + log_ctx: log_ctx, + } + + return result, nil +} diff --git a/file_store/directory/queue.go b/file_store/directory/queue.go index 00fee83806a..2b7628ec623 100644 --- a/file_store/directory/queue.go +++ b/file_store/directory/queue.go @@ -1,21 +1,279 @@ +// A Queue manager that uses files on disk. + +// The queue manager is a broken between writers and readers. Writers +// want to emit a message to a queue with minimumal delay, and have +// the message dispatched to all readers with minimal latency. + +// A memory queue simply pushes the message to all reader's via a +// buffered channel. As long as the channel buffer remains available +// this works well with very minimal latency in broadcasting to +// readers. However, when the channel becomes full the writers may be +// blocked while readers are working their way through the channel. + +// This queue manager uses a combination of a channel and a disk file +// to buffer messages for readers. When a writer writes to the queue +// manager, the manager attempts to write on the channel but if it is +// not available, then writer switches to a ring buffer file on disk. +// A separate go routine drains the disk file into the channel +// periodically. 
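+// The file grows as needed while a reader is slow and is truncated
+// back to its header once the backlog has been drained.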
Therefore, we never block the writer - either the +// message is delivered immediately to the buffered channel, or it is +// written to disk and later delivered. + +// This low latency property is critical because queue managers are +// used to deliver messages in critical code paths and can not be +// delayed. + package directory import ( + "context" + "fmt" + "io/ioutil" + "os" + "sync" + "time" + "github.com/Velocidex/ordereddict" config_proto "www.velocidex.com/golang/velociraptor/config/proto" "www.velocidex.com/golang/velociraptor/file_store/api" - "www.velocidex.com/golang/velociraptor/file_store/memory" "www.velocidex.com/golang/velociraptor/file_store/result_sets" "www.velocidex.com/golang/velociraptor/utils" ) -var ( - pool = memory.NewQueuePool() -) +// A listener wraps a channel that our client will listen on. We send +// the message to each listener that is subscribed to the queue. +type Listener struct { + id int64 + mu sync.Mutex + + // The consumer interested in these events. The consumer may + // block arbitrarily. + output chan *ordereddict.Dict + + // We receive events on this channel - we guarantee this does + // not block for long. + input chan *ordereddict.Dict + + // A backup file to store extra messages. + file_buffer *FileBasedRingBuffer + + // Name of the file_buffer + tmpfile string + cancel func() +} + +// Should not block - very fast. +func (self *Listener) Send(item *ordereddict.Dict) { + self.input <- item +} + +func (self *Listener) Close() { + self.cancel() + self.file_buffer.Close() + + // Close the output channel so our listener will exit. + close(self.output) + + os.Remove(self.tmpfile) // clean up file buffer +} + +func (self *Listener) Debug() *ordereddict.Dict { + result := ordereddict.NewDict().Set("BackingFile", self.tmpfile) + st, _ := os.Stat(self.tmpfile) + result.Set("Size", int64(st.Size())) + + return result +} + +func NewListener(config_obj *config_proto.Config, ctx context.Context, + output chan *ordereddict.Dict) (*Listener, error) { + + tmpfile, err := ioutil.TempFile("", "journal") + if err != nil { + return nil, err + } + + file_buffer, err := NewFileBasedRingBuffer(config_obj, tmpfile) + if err != nil { + return nil, err + } + + subctx, cancel := context.WithCancel(ctx) + self := &Listener{ + id: time.Now().UnixNano(), + cancel: cancel, + input: make(chan *ordereddict.Dict), + output: output, + file_buffer: file_buffer, + tmpfile: tmpfile.Name(), + } + + // Pump messages from input channel and distribute to + // output. If output is busy we divert to the file buffer. + go func() { + defer cancel() + + for { + select { + case <-subctx.Done(): + return + + case item, ok := <-self.input: + if !ok { + return + } + select { + case <-subctx.Done(): + return + + // If we can immediately push + // to the output, do so + case self.output <- item: + + // Otherwise push to the file. + default: + self.file_buffer.Enqueue(item) + } + } + } + + }() + + // Pump messages from the file_buffer to our listeners. + go func() { + for { + // Wait here until the file has some data in it. + select { + case <-subctx.Done(): + return + + case <-time.After(time.Second): + // Get some messages from the file. + for _, item := range self.file_buffer.Lease(100) { + select { + case <-subctx.Done(): + return + case self.output <- item: + } + } + } + } + }() + + return self, nil +} + +// A Queue manages a set of registrations at a specific queue name +// (artifact name). 
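As a hedged sketch (the queue name, the row content and the use of config.GetDefaultConfig() are assumptions for illustration), the pool defined next is meant to be driven roughly like this:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Velocidex/ordereddict"
	"www.velocidex.com/golang/velociraptor/config"
	"www.velocidex.com/golang/velociraptor/file_store/directory"
)

func main() {
	// Assumption: a default config is enough for this sketch.
	config_obj := config.GetDefaultConfig()
	pool := directory.NewQueuePool(config_obj)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Consumers receive an output channel and a callback that
	// removes the registration (and its backing file) again.
	events, closer := pool.Register(ctx, "TestQueue")
	defer closer()

	// Writers never block: each listener either forwards the row
	// to its consumer immediately or spills it to its disk buffer.
	pool.Broadcast("TestQueue", ordereddict.NewDict().Set("Foo", "Bar"))

	fmt.Println(<-events)
}
```

Broadcast returns as soon as each listener has accepted the row on its unbuffered input channel; the per-listener pump goroutine then decides whether to forward it or spill it to disk.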
+type QueuePool struct { + mu sync.Mutex + + config_obj *config_proto.Config + + registrations map[string][]*Listener +} + +func (self *QueuePool) Register( + ctx context.Context, vfs_path string) (<-chan *ordereddict.Dict, func()) { + self.mu.Lock() + defer self.mu.Unlock() + + output_chan := make(chan *ordereddict.Dict) + + registrations := self.registrations[vfs_path] + new_registration, err := NewListener(self.config_obj, ctx, output_chan) + if err != nil { + close(output_chan) + return output_chan, func() {} + } + + registrations = append(registrations, new_registration) + + self.registrations[vfs_path] = registrations + + return output_chan, func() { + self.unregister(vfs_path, new_registration.id) + } +} + +// This holds a lock on the entire pool and it is used when the system +// shuts down so not very often. +func (self *QueuePool) unregister(vfs_path string, id int64) { + self.mu.Lock() + defer self.mu.Unlock() + + registrations, pres := self.registrations[vfs_path] + if pres { + new_registrations := make([]*Listener, 0, len(registrations)) + for _, item := range registrations { + if id == item.id { + item.Close() + } else { + new_registrations = append(new_registrations, + item) + } + } + + self.registrations[vfs_path] = new_registrations + } +} + +// Make a copy of the registrations under lock and then we can take +// our time to send them later. +func (self *QueuePool) getRegistrations(vfs_path string) []*Listener { + self.mu.Lock() + defer self.mu.Unlock() + + registrations, ok := self.registrations[vfs_path] + if ok { + // Make a copy of the registrations for sending this + // message. + return append([]*Listener{}, registrations...) + } + + return nil +} + +func (self *QueuePool) Broadcast(vfs_path string, row *ordereddict.Dict) { + // Ensure we do not hold the lock for very long here. + for _, item := range self.getRegistrations(vfs_path) { + item.Send(row) + } +} + +func (self *QueuePool) Debug() *ordereddict.Dict { + self.mu.Lock() + defer self.mu.Unlock() + + result := ordereddict.NewDict() + for k, v := range self.registrations { + listeners := ordereddict.NewDict() + for idx, l := range v { + listeners.Set(fmt.Sprintf("%v", idx), l.Debug()) + } + result.Set(k, listeners) + } + return result +} + +func NewQueuePool(config_obj *config_proto.Config) *QueuePool { + return &QueuePool{ + config_obj: config_obj, + registrations: make(map[string][]*Listener), + } +} type DirectoryQueueManager struct { - FileStore api.FileStore - Clock utils.Clock + mu sync.Mutex + + queue_pool *QueuePool + FileStore api.FileStore + config_obj *config_proto.Config + Clock utils.Clock +} + +func (self *DirectoryQueueManager) Debug() *ordereddict.Dict { + return self.queue_pool.Debug() } func (self *DirectoryQueueManager) PushEventRows( @@ -32,20 +290,22 @@ func (self *DirectoryQueueManager) PushEventRows( // Set a timestamp per event for easier querying. 
row.Set("_ts", int(self.Clock.Now().Unix())) rs_writer.Write(row) - pool.Broadcast(path_manager.GetQueueName(), row) + self.queue_pool.Broadcast(path_manager.GetQueueName(), row) } return nil } -func (self *DirectoryQueueManager) Watch( +func (self *DirectoryQueueManager) Watch(ctx context.Context, queue_name string) (output <-chan *ordereddict.Dict, cancel func()) { - return pool.Register(queue_name) + return self.queue_pool.Register(ctx, queue_name) } func NewDirectoryQueueManager(config_obj *config_proto.Config, file_store api.FileStore) api.QueueManager { return &DirectoryQueueManager{ - FileStore: file_store, - Clock: utils.RealClock{}, + FileStore: file_store, + config_obj: config_obj, + queue_pool: NewQueuePool(config_obj), + Clock: utils.RealClock{}, } } diff --git a/file_store/directory/queue_test.go b/file_store/directory/queue_test.go index e73a8131ebc..b1f57431786 100644 --- a/file_store/directory/queue_test.go +++ b/file_store/directory/queue_test.go @@ -1,15 +1,34 @@ -package directory +package directory_test import ( + "context" "io/ioutil" "os" "testing" + "time" + "github.com/Velocidex/ordereddict" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "www.velocidex.com/golang/velociraptor/config" + config_proto "www.velocidex.com/golang/velociraptor/config/proto" "www.velocidex.com/golang/velociraptor/file_store/api" + "www.velocidex.com/golang/velociraptor/file_store/directory" "www.velocidex.com/golang/velociraptor/file_store/memory" + "www.velocidex.com/golang/velociraptor/file_store/test_utils" + "www.velocidex.com/golang/velociraptor/paths/artifacts" + "www.velocidex.com/golang/velociraptor/services" + "www.velocidex.com/golang/velociraptor/services/journal" + "www.velocidex.com/golang/velociraptor/services/repository" + "www.velocidex.com/golang/velociraptor/utils" +) + +var ( + monitoringArtifact = ` +name: TestQueue +type: SERVER_EVENT +` ) func TestDirectoryQueueManager(t *testing.T) { @@ -23,6 +42,121 @@ func TestDirectoryQueueManager(t *testing.T) { config_obj.Datastore.FilestoreDirectory = dir config_obj.Datastore.Location = dir - manager := NewDirectoryQueueManager(config_obj, memory.Test_memory_file_store) + manager := directory.NewDirectoryQueueManager(config_obj, memory.Test_memory_file_store) suite.Run(t, api.NewQueueManagerTestSuite(config_obj, manager, memory.Test_memory_file_store)) } + +type TestSuite struct { + suite.Suite + config_obj *config_proto.Config + client_id string + sm *services.Service + dir string +} + +func (self *TestSuite) SetupTest() { + dir, err := ioutil.TempDir("", "file_store_test") + assert.NoError(self.T(), err) + self.dir = dir + + os.Setenv("temp", dir) + + self.config_obj, err = new(config.Loader).WithFileLoader( + "../../http_comms/test_data/server.config.yaml"). + WithRequiredFrontend().WithWriteback(). + LoadAndValidate() + require.NoError(self.T(), err) + + // Start essential services. 
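+	// Only the journal service and the repository manager are
+	// required for this test suite.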
+ ctx, _ := context.WithTimeout(context.Background(), time.Second*60) + self.sm = services.NewServiceManager(ctx, self.config_obj) + + require.NoError(self.T(), self.sm.Start(journal.StartJournalService)) + require.NoError(self.T(), self.sm.Start(repository.StartRepositoryManager)) + + self.client_id = "C.12312" +} + +func (self *TestSuite) TearDownTest() { + self.sm.Close() + test_utils.GetMemoryFileStore(self.T(), self.config_obj).Clear() + test_utils.GetMemoryDataStore(self.T(), self.config_obj).Clear() + os.RemoveAll(self.dir) // clean up + +} + +func (self *TestSuite) TestQueueManager() { + repo_manager, err := services.GetRepositoryManager() + assert.NoError(self.T(), err) + + repository, err := repo_manager.GetGlobalRepository(self.config_obj) + assert.NoError(self.T(), err) + + _, err = repository.LoadYaml(monitoringArtifact, true) + assert.NoError(self.T(), err) + + file_store := test_utils.GetMemoryFileStore(self.T(), self.config_obj) + manager := directory.NewDirectoryQueueManager( + self.config_obj, file_store).(*directory.DirectoryQueueManager) + + // Push some rows to the queue manager + ctx := context.Background() + + reader, cancel := manager.Watch(ctx, "TestQueue") + + path_manager := artifacts.NewMonitoringArtifactLogPathManager(self.config_obj, + "C.123", "TestQueue") + + // Query the state of the manager for testing. + dbg := manager.Debug() + // The initial size is zero + assert.Equal(self.T(), int64(0), utils.GetInt64(dbg, "TestQueue.0.Size")) + + // Push some rows without reading - this should write to the + // file buffer and not block. + for i := 0; i < 10; i++ { + err = manager.PushEventRows(path_manager, []*ordereddict.Dict{ + ordereddict.NewDict(). + Set("Foo", "Bar"), + }) + assert.NoError(self.T(), err) + } + + // The file should contain all the rows now. + dbg = manager.Debug() + + // File size is not accurate due to timestamps + assert.Greater(self.T(), utils.GetInt64(dbg, "TestQueue.0.Size"), int64(300)) + + // Now read all the rows from the file. + count := 0 + for row := range reader { + count++ + assert.Equal(self.T(), "Bar", utils.GetString(row, "Foo")) + + // Break on the 10th row + if count >= 10 { + break + } + } + + // Now check the file - it should be truncated since we read all messages. + dbg = manager.Debug() + assert.Equal(self.T(), int64(50), utils.GetInt64(dbg, "TestQueue.0.Size")) + + // Now cancel the watcher - further reads from the channel + // should not block - the channel is closed. + cancel() + + for range reader { + } + + // Now make sure the tempfile is removed. 
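+	// Cancelling the watcher closes the listener, which deletes its
+	// temporary backing file, so the Stat() call should fail.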
+ tempfile := utils.GetString(dbg, "TestQueue.0.BackingFile") + _, err = os.Stat(tempfile) + assert.Error(self.T(), err) +} + +func TestFileBasedQueueManager(t *testing.T) { + suite.Run(t, &TestSuite{}) +} diff --git a/file_store/memory/queue.go b/file_store/memory/queue.go index 801337acca8..a6f1fa8a08d 100644 --- a/file_store/memory/queue.go +++ b/file_store/memory/queue.go @@ -8,28 +8,46 @@ package memory import ( + "context" "sync" "time" "github.com/Velocidex/ordereddict" config_proto "www.velocidex.com/golang/velociraptor/config/proto" "www.velocidex.com/golang/velociraptor/file_store/api" + "www.velocidex.com/golang/velociraptor/logging" "www.velocidex.com/golang/velociraptor/utils" ) var ( - pool = NewQueuePool() + mu sync.Mutex + pool *QueuePool ) +func GlobalQueuePool(config_obj *config_proto.Config) *QueuePool { + mu.Lock() + defer mu.Unlock() + + if pool != nil { + return pool + } + + pool = NewQueuePool(config_obj) + return pool +} + // A queue pool is an in-process listener for events. type Listener struct { id int64 Channel chan *ordereddict.Dict + name string } type QueuePool struct { mu sync.Mutex + config_obj *config_proto.Config + registrations map[string][]*Listener } @@ -41,6 +59,7 @@ func (self *QueuePool) Register(vfs_path string) (<-chan *ordereddict.Dict, func new_registration := &Listener{ Channel: make(chan *ordereddict.Dict, 1000), id: time.Now().UnixNano(), + name: vfs_path, } registrations = append(registrations, new_registration) @@ -51,6 +70,8 @@ func (self *QueuePool) Register(vfs_path string) (<-chan *ordereddict.Dict, func } } +// This holds a lock on the entire pool and it is used when the system +// shuts down so not very often. func (self *QueuePool) unregister(vfs_path string, id int64) { self.mu.Lock() defer self.mu.Unlock() @@ -71,28 +92,47 @@ func (self *QueuePool) unregister(vfs_path string, id int64) { } } -func (self *QueuePool) Broadcast(vfs_path string, row *ordereddict.Dict) { +// Make a copy of the registrations under lock and then we can take +// our time to send them later. +func (self *QueuePool) getRegistrations(vfs_path string) []*Listener { self.mu.Lock() defer self.mu.Unlock() registrations, ok := self.registrations[vfs_path] if ok { - for _, item := range registrations { - item.Channel <- row + // Make a copy of the registrations for sending this + // message. + return append([]*Listener{}, registrations...) + } + + return nil +} + +func (self *QueuePool) Broadcast(vfs_path string, row *ordereddict.Dict) { + // Ensure we do not hold the lock for very long here. 
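+	// getRegistrations() copies the listener list under the lock, so
+	// the sends below run without it; a listener that stays blocked
+	// for two seconds has its message dropped and logged instead.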
+ for _, item := range self.getRegistrations(vfs_path) { + select { + case item.Channel <- row: + case <-time.After(2 * time.Second): + logger := logging.GetLogger( + self.config_obj, &logging.FrontendComponent) + logger.Error("QueuePool: Dropping message to queue %v", + item.name) } } } -func NewQueuePool() *QueuePool { +func NewQueuePool(config_obj *config_proto.Config) *QueuePool { return &QueuePool{ + config_obj: config_obj, registrations: make(map[string][]*Listener), } } type MemoryQueueManager struct { - FileStore api.FileStore - - Clock utils.Clock + FileStore api.FileStore + config_obj *config_proto.Config + Clock utils.Clock } func (self *MemoryQueueManager) Debug() { @@ -106,7 +146,7 @@ func (self *MemoryQueueManager) PushEventRows( path_manager api.PathManager, dict_rows []*ordereddict.Dict) error { for _, row := range dict_rows { - pool.Broadcast(path_manager.GetQueueName(), + GlobalQueuePool(self.config_obj).Broadcast(path_manager.GetQueueName(), row.Set("_ts", int(self.Clock.Now().Unix()))) } @@ -131,14 +171,15 @@ func (self *MemoryQueueManager) PushEventRows( } func (self *MemoryQueueManager) Watch( - queue_name string) (output <-chan *ordereddict.Dict, cancel func()) { - return pool.Register(queue_name) + ctx context.Context, queue_name string) (output <-chan *ordereddict.Dict, cancel func()) { + return GlobalQueuePool(self.config_obj).Register(queue_name) } func NewMemoryQueueManager(config_obj *config_proto.Config, file_store api.FileStore) api.QueueManager { return &MemoryQueueManager{ - FileStore: file_store, - Clock: utils.RealClock{}, + FileStore: file_store, + config_obj: config_obj, + Clock: utils.RealClock{}, } } diff --git a/file_store/mysql/queue.go b/file_store/mysql/queue.go index 943dc75fdb8..4b935387759 100644 --- a/file_store/mysql/queue.go +++ b/file_store/mysql/queue.go @@ -30,6 +30,7 @@ package mysql import ( + "context" "database/sql" "fmt" "sync" @@ -242,7 +243,8 @@ func (self *MysqlQueueManager) PushEventRows( return err } -func (self *MysqlQueueManager) Watch(queue_name string) (<-chan *ordereddict.Dict, func()) { +func (self *MysqlQueueManager) Watch( + ctx context.Context, queue_name string) (<-chan *ordereddict.Dict, func()) { return pool.Register(queue_name) } diff --git a/file_store/result_sets/events_test.go b/file_store/result_sets/events_test.go index d266990a6a9..8167d0d9221 100644 --- a/file_store/result_sets/events_test.go +++ b/file_store/result_sets/events_test.go @@ -177,10 +177,9 @@ func (self *TimedResultSetTestSuite) TestTimedResultSets() { clock := &utils.MockClock{MockNow: now} // Start off by writing some events on a queue. - qm := &directory.DirectoryQueueManager{ - FileStore: self.file_store, - Clock: clock, - } + qm := directory.NewDirectoryQueueManager( + self.config_obj, self.file_store).(*directory.DirectoryQueueManager) + qm.Clock = clock path_manager := artifacts.NewArtifactPathManager( self.config_obj, diff --git a/flows/artifacts.go b/flows/artifacts.go index 5292afb2c0d..c4305df65ea 100644 --- a/flows/artifacts.go +++ b/flows/artifacts.go @@ -1,5 +1,5 @@ /* - Velociraptor - Hunting Evil +o Velociraptor - Hunting Evil Copyright (C) 2019 Velocidex Innovations. This program is free software: you can redistribute it and/or modify @@ -55,6 +55,8 @@ var ( Name: "uploaded_bytes", Help: "Total bytes of Uploaded Files.", }) + + notModified = errors.New("Not modified") ) // closeContext is called after all messages from the clients are @@ -420,15 +422,21 @@ func IsRequestComplete( // Update any hunts if needed. 
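+	// ModifyHunt callbacks signal whether the hunt must be written
+	// back: returning nil tells the dispatcher to flush the mutated
+	// hunt to disk, while returning notModified leaves it untouched.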
if constants.HuntIdRegex.MatchString(collection_context.Request.Creator) { - err := services.GetHuntDispatcher().ModifyHunt( + dispatcher := services.GetHuntDispatcher() + if dispatcher == nil { + return false, errors.New("Hunt dispatcher not valid") + } + + err := dispatcher.ModifyHunt( collection_context.Request.Creator, func(hunt *api_proto.Hunt) error { if hunt != nil && hunt.Stats != nil { hunt.Stats.TotalClientsWithResults++ + return nil } - return nil + return notModified }) - if err != nil { + if err != nil && err != notModified { return true, err } } diff --git a/flows/foreman.go b/flows/foreman.go index f6a6f090248..7aae9a85861 100644 --- a/flows/foreman.go +++ b/flows/foreman.go @@ -134,6 +134,10 @@ func ForemanProcessMessage( return err } + notifier := services.GetNotifier() + if notifier == nil { + return errors.New("Notifier not configured") + } return services.GetNotifier().NotifyListener(config_obj, client_id) }) } diff --git a/flows/hunts.go b/flows/hunts.go index 9e6f0d0a6e3..006a1c2534e 100644 --- a/flows/hunts.go +++ b/flows/hunts.go @@ -51,6 +51,8 @@ func GetNewHuntId() string { return constants.HUNT_PREFIX + string(result) } +// Backwards compatibility: Figure out the list of collected hunts +// from the hunt object's request func FindCollectedArtifacts( config_obj *config_proto.Config, hunt *api_proto.Hunt) { @@ -59,6 +61,11 @@ func FindCollectedArtifacts( return } + // Hunt already has artifacts list. + if len(hunt.Artifacts) > 0 { + return + } + hunt.Artifacts = hunt.StartRequest.Artifacts hunt.ArtifactSources = []string{} for _, artifact := range hunt.StartRequest.Artifacts { @@ -124,6 +131,17 @@ func CreateHunt( return "", errors.New("Hunt expiry is in the past!") } + // Set the artifacts information in the hunt object itself. + hunt.Artifacts = hunt.StartRequest.Artifacts + hunt.ArtifactSources = []string{} + for _, artifact := range hunt.StartRequest.Artifacts { + for _, source := range GetArtifactSources( + config_obj, artifact) { + hunt.ArtifactSources = append( + hunt.ArtifactSources, path.Join(artifact, source)) + } + } + manager, err := services.GetRepositoryManager() if err != nil { return "", err @@ -194,7 +212,12 @@ func ListHunts(config_obj *config_proto.Config, in *api_proto.ListHuntsRequest) result := &api_proto.ListHuntsResponse{} - err := services.GetHuntDispatcher().ApplyFuncOnHunts( + dispatcher := services.GetHuntDispatcher() + if dispatcher == nil { + return nil, errors.New("Hunt dispatcher not initialized") + } + + err := dispatcher.ApplyFuncOnHunts( func(hunt *api_proto.Hunt) error { if uint64(len(result.Items)) < in.Offset { return nil @@ -205,10 +228,6 @@ func ListHunts(config_obj *config_proto.Config, in *api_proto.ListHuntsRequest) } if in.IncludeArchived || hunt.State != api_proto.Hunt_ARCHIVED { - - // FIXME: Backwards compatibility. - hunt.HuntId = path.Base(hunt.HuntId) - result.Items = append(result.Items, hunt) } return nil @@ -230,23 +249,24 @@ func GetHunt(config_obj *config_proto.Config, in *api_proto.GetHuntRequest) ( var result *api_proto.Hunt - err = services.GetHuntDispatcher().ModifyHunt( - in.HuntId, + dispatcher := services.GetHuntDispatcher() + if dispatcher == nil { + return nil, errors.New("Hunt dispatcher not valid") + } + + err = dispatcher.ModifyHunt(in.HuntId, func(hunt_obj *api_proto.Hunt) error { - // Make a copy + // Make a copy of the hunt result = proto.Clone(hunt_obj).(*api_proto.Hunt) - // HACK: Velociraptor only knows how to - // collect artifacts now. 
Eventually the whole - // concept of a flow will go away but for now - // we need to figure out which artifacts we - // are actually collecting - there are not - // many possibilities since we have reduced - // the number of possible flows significantly. - FindCollectedArtifacts(config_obj, result) - - return nil + // We do not modify the hunt so it is not dirty. + return notModified }) + if err != notModified { + return nil, err + } + + FindCollectedArtifacts(config_obj, result) if result == nil || result.Stats == nil { return result, errors.New("Not found") @@ -254,7 +274,7 @@ func GetHunt(config_obj *config_proto.Config, in *api_proto.GetHuntRequest) ( result.Stats.AvailableDownloads, _ = availableHuntDownloadFiles(config_obj, in.HuntId) - return result, err + return result, nil } // availableHuntDownloadFiles returns the prepared zip downloads available to @@ -275,12 +295,6 @@ func availableHuntDownloadFiles(config_obj *config_proto.Config, // will update the StartTime. // 2. A hunt in the running state can go to the Stop state // 3. A hunt's description can be modified. - -// It is not possible to restart a stopped hunt. This is because the -// hunt manager watches the hunt participation events for all hunts at -// the same time, and just ignores clients that want to participate in -// stopped hunts. It is not possible to go back and re-examine the -// queue. func ModifyHunt( ctx context.Context, config_obj *config_proto.Config, @@ -335,19 +349,9 @@ func ModifyHunt( hunt.State = api_proto.Hunt_STOPPED } - // Write the new hunt object to the datastore. - db, err := datastore.GetDB(config_obj) - if err != nil { - return err - } - - hunt_path_manager := paths.NewHuntPathManager(hunt.HuntId) - err = db.SetSubject( - config_obj, hunt_path_manager.Path(), hunt) - if err != nil { - return err - } - + // Returning nil indicates to the hunt manager + // that the hunt was successfully modified. It + // will then flush it to disk. return nil }) diff --git a/gui/velociraptor/src/components/core/api-service.js b/gui/velociraptor/src/components/core/api-service.js index 606686f16b8..d502e2dd32a 100644 --- a/gui/velociraptor/src/components/core/api-service.js +++ b/gui/velociraptor/src/components/core/api-service.js @@ -38,6 +38,13 @@ const get = function(url, params, cancel_token) { url: api_handlers + url, params: params, cancelToken: cancel_token, + }).then(response=>{ + // Update the csrf token. + let token = response.headers["x-csrf-token"]; + if (token && token.length > 0) { + window.CsrfToken = token; + } + return response; }).catch(handle_error); }; diff --git a/gui/velociraptor/src/components/forms/validated_int.js b/gui/velociraptor/src/components/forms/validated_int.js index 3014dae2a78..69664914fd6 100644 --- a/gui/velociraptor/src/components/forms/validated_int.js +++ b/gui/velociraptor/src/components/forms/validated_int.js @@ -21,9 +21,13 @@ export default class ValidatedInteger extends React.Component { render() { let value = this.props.value; + + // Need to set the initial value to '' to tell React this is a + // controlled component. if (_.isUndefined(value)) { - value = 0; + value = ''; } + return ( <>