Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
# Kind can be one of:
# - breaking-change: a change to previously-documented behavior
# - deprecation: functionality that is being removed in a later release
# - bug-fix: fixes a problem in a previous version
# - enhancement: extends functionality but does not break or fix existing behavior
# - feature: new functionality
# - known-issue: problems that we are aware of in a given version
# - security: impacts on the security of a product or a user's deployment.
# - upgrade: important information for someone upgrading from a prior version
# - other: does not fit into any of the other categories
kind: enhancement

# Change summary; an 80ish characters long description of the change.
summary: Makes file storage size configurable

# Long description; in case the summary is not enough to describe the change
# this field accommodates a description without length limits.
# NOTE: This field will be rendered only for breaking-change and known-issue kinds at the moment.
#description:

# Affected component; a word indicating the component this changeset affects.
component: fleet-server

# PR URL; optional; the PR number that added the changeset.
# If not present, it is automatically filled by the tooling finding the PR where this changelog fragment has been added.
# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number.
# Please provide it if you are adding a fragment for a different PR.
pr: https://github.com/elastic/fleet-server/pull/5478

# Issue URL; optional; the GitHub issue related to this changeset (either closes or is part of).
# If not present, it is automatically filled by the tooling with the issue linked to the PR number.
#issue: https://github.com/owner/repo/1234
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

small nit, if there is a parent issue it would be nice to have it here

2 changes: 1 addition & 1 deletion internal/pkg/api/handleFileDelivery.go
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ func NewFileDeliveryT(cfg *config.Server, bulker bulk.Bulk, chunkClient *elastic
return &FileDeliveryT{
bulker: bulker,
cache: cache,
deliverer: delivery.New(chunkClient, bulker, maxFileSize),
deliverer: delivery.New(chunkClient, bulker, cfg.Limits.MaxFileStorageByteSize),
authAgent: authAgent,
}
}
Expand Down
2 changes: 1 addition & 1 deletion internal/pkg/api/handleFileDelivery_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -405,7 +405,7 @@ func prepareFileDeliveryMock(t *testing.T) (http.Handler, apiServer, *MockTransp
ft: &FileDeliveryT{
bulker: fakebulk,
cache: c,
deliverer: delivery.New(mockES, fakebulk, maxFileSize),
deliverer: delivery.New(mockES, fakebulk, 0),
authAgent: func(r *http.Request, id *string, bulker bulk.Bulk, c cache.Cache) (*model.Agent, error) {
return &model.Agent{
ESDocument: model.ESDocument{
Expand Down
4 changes: 1 addition & 3 deletions internal/pkg/api/handleUpload.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,8 +31,6 @@ import (
)

const (
// TODO: move to a config
maxFileSize = 104857600 // 100 MiB
maxUploadTimer = 24 * time.Hour
)

Expand All @@ -58,7 +56,7 @@ func NewUploadT(cfg *config.Server, bulker bulk.Bulk, chunkClient *elasticsearch
chunkClient: chunkClient,
bulker: bulker,
cache: cache,
uploader: uploader.New(chunkClient, bulker, cache, maxFileSize, maxUploadTimer),
uploader: uploader.New(chunkClient, bulker, cache, cfg.Limits.MaxFileStorageByteSize, maxUploadTimer),
authAgent: authAgent,
authAPIKey: authAPIKey,
}
Expand Down
73 changes: 54 additions & 19 deletions internal/pkg/api/handleUpload_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import (
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
Expand Down Expand Up @@ -71,18 +72,6 @@ func TestUploadBeginValidation(t *testing.T) {
"src": "agent"
}`,
},
{"Oversized file should be rejected", http.StatusBadRequest, "size",
`{
"file": {
"size": ` + strconv.Itoa(maxFileSize+1024) + `,
"name": "foo.png",
"mime_type": "image/png"
},
"agent_id": "foo",
"action_id": "123",
"src": "agent"
}`,
},
{"zero size file should be rejected", http.StatusBadRequest, "size",
`{
"file": {
Expand Down Expand Up @@ -346,6 +335,48 @@ func TestUploadBeginBadRequest(t *testing.T) {
assert.Equal(t, http.StatusBadRequest, rec.Code)
}

func TestUploadBeginFileSize(t *testing.T) {

mockFile := func(size int64) string {
return fmt.Sprintf(`{
"file": {
"size": %d,
"name": "foo.png",
"mime_type": "image/png"
},
"agent_id": "foo",
"action_id": "123",
"src": "agent"
}`, size)
}

// now test various body contents
tests := []struct {
Name string
MaxSize int64
ExpectStatus int
InputSize int64
}{
{"MaxSize 0 allows uploads", 0, http.StatusOK, 1000},
{"MaxSize 0 allows large uploads", 0, http.StatusOK, 1024 * 1024 * 1024 * 2},
{"MaxSize 0 does not allow 0-length files", 0, http.StatusBadRequest, 0},
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This seems a little odd? If I set MaxSize 1, would I allow 0-length files?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nope! This test was just to make sure a size limit of "0" is not interpreted as a literal value, making "0 length files" suddenly succeed, when the limit is 0.

0-length files should always fail, but this makes sure we don't trip on an edge case when the limit is also 0.

{"Sizes larger than MaxSize are denied", 1024, http.StatusBadRequest, 2048},
{"Sizes smaller than MaxSize are allowed", 1024, http.StatusOK, 900},
}

for _, tc := range tests {
t.Run(tc.Name, func(t *testing.T) {

hr, _, _, _ := configureUploaderMock(t, tc.MaxSize)
rec := httptest.NewRecorder()
req := httptest.NewRequest(http.MethodPost, RouteUploadBegin, strings.NewReader(mockFile(tc.InputSize)))
hr.ServeHTTP(rec, req)
assert.Equal(t, tc.ExpectStatus, rec.Code)
})
}

}

/*
Chunk data upload route
*/
Expand Down Expand Up @@ -377,7 +408,7 @@ func TestChunkUploadRouteParams(t *testing.T) {
mockUploadInfoResult(fakebulk, file.Info{
DocID: "bar.foo",
ID: mockUploadID,
ChunkSize: maxFileSize,
ChunkSize: file.MaxChunkSize,
Total: file.MaxChunkSize + 1,
Count: 2, // this is a 2-chunk "file" based on size above
Start: time.Now(),
Expand Down Expand Up @@ -410,7 +441,7 @@ func TestChunkUploadRequiresChunkHashHeader(t *testing.T) {
mockUploadInfoResult(fakebulk, file.Info{
DocID: "bar.foo",
ID: mockUploadID,
ChunkSize: maxFileSize,
ChunkSize: file.MaxChunkSize,
Total: 10,
Count: 1,
Start: time.Now(),
Expand Down Expand Up @@ -458,7 +489,7 @@ func TestChunkUploadStatus(t *testing.T) {
mockUploadInfoResult(fakebulk, file.Info{
DocID: "bar.foo",
ID: mockUploadID,
ChunkSize: maxFileSize,
ChunkSize: file.MaxChunkSize,
Total: 10,
Count: 1,
Start: time.Now(),
Expand Down Expand Up @@ -509,7 +540,7 @@ func TestChunkUploadExpiry(t *testing.T) {
mockUploadInfoResult(fakebulk, file.Info{
DocID: "bar.foo",
ID: mockUploadID,
ChunkSize: maxFileSize,
ChunkSize: file.MaxChunkSize,
Total: 10,
Count: 1,
Start: tc.StartTime,
Expand Down Expand Up @@ -547,7 +578,7 @@ func TestChunkUploadWritesTimestamp(t *testing.T) {
mockUploadInfoResult(fakebulk, file.Info{
DocID: "bar.foo",
ID: mockUploadID,
ChunkSize: maxFileSize,
ChunkSize: file.MaxChunkSize,
Total: 10,
Count: 1,
Start: time.Now(),
Expand Down Expand Up @@ -597,7 +628,7 @@ func TestUploadCompleteRequiresMatchingAuth(t *testing.T) {
mockInfo := file.Info{
DocID: "bar." + tc.AgentInFileRecord,
ID: mockUploadID,
ChunkSize: maxFileSize,
ChunkSize: file.MaxChunkSize,
Total: 10,
Count: 1,
Start: time.Now().Add(-time.Minute),
Expand Down Expand Up @@ -998,6 +1029,10 @@ func TestUploadCompleteBadRequests(t *testing.T) {

// prepareUploaderMock sets up common dependencies and registers upload routes to a returned router
func prepareUploaderMock(t *testing.T) (http.Handler, apiServer, *itesting.MockBulk, *MockTransport) {
return configureUploaderMock(t, 0)
}

func configureUploaderMock(t *testing.T, fileSize int64) (http.Handler, apiServer, *itesting.MockBulk, *MockTransport) {
// chunk index operations skip the bulker in order to send binary docs directly
// so a mock *elasticsearch.Client needs to be be prepared
es, tx := mockESClient(t)
Expand Down Expand Up @@ -1034,7 +1069,7 @@ func prepareUploaderMock(t *testing.T) (http.Handler, apiServer, *itesting.MockB
bulker: fakebulk,
chunkClient: es,
cache: c,
uploader: uploader.New(es, fakebulk, c, maxFileSize, maxUploadTimer),
uploader: uploader.New(es, fakebulk, c, fileSize, maxUploadTimer),
authAgent: func(r *http.Request, id *string, bulker bulk.Bulk, c cache.Cache) (*model.Agent, error) {
return &model.Agent{
ESDocument: model.ESDocument{
Expand Down
12 changes: 8 additions & 4 deletions internal/pkg/config/env_defaults.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,8 @@ const (
defaultStatusMax = 50
defaultStatusMaxBody = 0

defaultFileStorageByteSize = 0 // no limit

defaultUploadStartInterval = time.Second * 2
defaultUploadStartBurst = 5
defaultUploadStartMax = 10
Expand All @@ -70,7 +72,7 @@ const (
defaultUploadChunkInterval = time.Millisecond * 3
defaultUploadChunkBurst = 5
defaultUploadChunkMax = 10
defaultUploadChunkMaxBody = 1024 * 1024 * 4 // this is also enforced in handler, a chunk MAY NOT be larger than 4 MiB
defaultUploadChunkMaxBody = 1024 * 1024 * 4 // this is also enforced in handler, a chunk MUST NOT be larger than 4 MiB

defaultFileDelivInterval = time.Millisecond * 100
defaultFileDelivBurst = 5
Expand Down Expand Up @@ -141,8 +143,9 @@ type limit struct {
}

type serverLimitDefaults struct {
PolicyThrottle time.Duration `config:"policy_throttle"` // deprecated: replaced by policy_limit
MaxConnections int `config:"max_connections"`
PolicyThrottle time.Duration `config:"policy_throttle"` // deprecated: replaced by policy_limit
MaxConnections int `config:"max_connections"`
MaxFileStorageByteSize int64 `config:"max_file_storage_size"`

ActionLimit limit `config:"action_limit"`
PolicyLimit limit `config:"policy_limit"`
Expand All @@ -161,7 +164,8 @@ type serverLimitDefaults struct {

func defaultserverLimitDefaults() *serverLimitDefaults {
return &serverLimitDefaults{
MaxConnections: defaultMaxConnections,
MaxConnections: defaultMaxConnections,
MaxFileStorageByteSize: defaultFileStorageByteSize,
ActionLimit: limit{
Interval: defaultActionInterval,
Burst: defaultActionBurst,
Expand Down
8 changes: 5 additions & 3 deletions internal/pkg/config/limits.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,10 @@ type Limit struct {
}

type ServerLimits struct {
MaxAgents int `config:"max_agents"`
MaxHeaderByteSize int `config:"max_header_byte_size"`
MaxConnections int `config:"max_connections"`
MaxAgents int `config:"max_agents"`
MaxHeaderByteSize int `config:"max_header_byte_size"`
MaxConnections int `config:"max_connections"`
MaxFileStorageByteSize int64 `config:"max_file_storage_size"`
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I wonder, for consistency, if this should have been max_file_storage_byte_size, (even if my preference would have been to have the unit come last, if we include it at all) to align with max_header_byte_size, two lines above, and align with the variable name.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Naming is hard and begs some questions, so please pardon me for asking a question with maybe an obvious answer, but: Why is it not max_file_size (e.g why is storage part of it -- could it be simplified but still describing exactly what it is)? It raises some "storage size vs display size"-connotations to me, which may be completely irrelevant.

Copy link
Member Author

@pzl pzl Sep 16, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I tried to avoid an overly-broad name in case fleet server ever needs to deal with files in another context (e.g. supporting files on the host its on, cache files, or any other situation involving "files" in fleet). I thought of this feature (file transfer to/from an agent and elasticsearch) as "file storage" to give it a narrower namespace. But it can be whatever, no attachment to the name


ActionLimit Limit `config:"action_limit"`
PolicyLimit Limit `config:"policy_limit"`
Expand Down Expand Up @@ -47,6 +48,7 @@ func (c *ServerLimits) LoadLimits(limits *envLimits) {
if c.MaxConnections == 0 {
c.MaxConnections = l.MaxConnections
}
c.MaxFileStorageByteSize = l.MaxFileStorageByteSize

c.ActionLimit = mergeEnvLimit(c.ActionLimit, l.ActionLimit)
c.PolicyLimit = mergeEnvLimit(c.PolicyLimit, l.PolicyLimit)
Expand Down
2 changes: 1 addition & 1 deletion internal/pkg/file/uploader/upload.go
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ func (u *Uploader) Begin(ctx context.Context, namespaces []string, data JSDict)
}

size, _ := data.Int64("file", "size")
if size > u.sizeLimit {
if u.sizeLimit != 0 && size > u.sizeLimit {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I understand the u.sizeLimit != 0 clause here is for allowing uploads of any size. So 0 is the sentinel value for signaling to the code that uploads of any size are allowed.

I'm wondering if we should use 0 to signal that uploads are not allowed and -1 (or any other value < 0) to signal that uploads of any size are allowed?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Disallowing in just this manner would lead to some wacky and hard-to-debug errors in kibana features that rely on the file transfer. Kibana features (especially in Defend) would still appear enabled and allow user actions to take place that would fail when the agent (or endpoint) receives an action and then proceeds to upload.

I do see the arbitrary nature of 0's and negatives as not particularly meaningful to whichever disabled/unlimited mode here. I guess they happen to be flipped here. -1 (or any negative value) will fail to upload, as file size will always be larger than that. Negatives seem to be the disabler here (not by design, just by happenstance). 0 seemed to be a common unset/default/unlimited value elsewhere in these configs (and go) and I went with that by convention. I think the unset/default value of the language probably shouldn't lead to the feature being disabled.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Did we consider making an explicit [something]Enabled: true/false flag (and require the max*Size to unsigned) to differentiate and avoid magic numbers (at least in the user-facing interface)?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm not sure I understand the suggestion here.

change the config's type to uint64, and then:

// with other the consts
var fileSizeLimitDisabled = 0

...

// when setting defaults
defaultFileSizeLimit = fileSizeLimitDisabled

?

That's good with me, I'm happy with that.


Just to restate the general case here: the end goal is to remove the file size cap, that is the specific need. I'd like to leave it configurable in case some user has strange edge cases I cannot predict and wants to bound it for their own reasons. But it is not essential to keep this a config. We can simplify the PR if we just want to remove the cap. The actual protections in place are the rate limits. If having a sentinel value here at 0 is the blocker, should we go the other way?

Copy link
Member

@pchila pchila Sep 16, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I was about to add a comment about the config type to be changed to uint64, then I saw this comment so I'll try to keep the number of open threads to a minimum.

I don't want to hijack @nkvoll comment and will leave it to him to get back here, I would love to see the uint64 type if negative values are not allowed and a constant that will read something like wrote in your comment.

About the sentinel value/explicit enabled/disabled flag I am not sure: if we have such a flag we could leave the default limit to 100MB as today (still configurable using MaxFileStorageByteSize) and just turn off the limit using the FileStorageLimitEnabled: false (name still open to change 😉 ) but I am not 100% sure that this is what @nkvoll envisioned

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks for the general-case explanation @pzl, much appreciated!

What I'm thinking is pretty much what @pchila is saying, but maybe with a slight variant to avoid discussing the naming, by having it be a pointer. If the pointer is nil, there is no max.

MaxFileStorageByteSize *int64 `config:"max_file_storage_size"`

If I understand correctly, we want to default to "no maximum configured", and this being a pointer, usually defaults to nil, and is different from the any other legal value, which intuitively would be compared to the file size on disk?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

in re: negative numbers being nonsensical values here -- do we want this to be *uint64?

Also what should happen if it is configured and set to 0? Feature disabled?

vSpan.End()
return file.Info{}, ErrFileSizeTooLarge
}
Expand Down
17 changes: 9 additions & 8 deletions internal/pkg/file/uploader/upload_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ func TestUploadBeginReturnsCorrectInfo(t *testing.T) {
c, err := cache.New(config.Cache{NumCounters: 100, MaxCost: 100000})
require.NoError(t, err)
u := New(nil, fakeBulk, c, int64(size), time.Hour)
info, err := u.Begin(context.Background(), []string{}, data)
info, err := u.Begin(t.Context(), []string{}, data)
assert.NoError(t, err)

assert.Equal(t, int64(size), info.Total)
Expand Down Expand Up @@ -127,7 +127,7 @@ func TestUploadBeginWritesDocumentFromInputs(t *testing.T) {
c, err := cache.New(config.Cache{NumCounters: 100, MaxCost: 100000})
require.NoError(t, err)
u := New(nil, fakeBulk, c, int64(size), time.Hour)
_, err = u.Begin(context.Background(), []string{}, data)
_, err = u.Begin(t.Context(), []string{}, data)
assert.NoError(t, err)

payload, ok := fakeBulk.Calls[0].Arguments[3].([]byte)
Expand Down Expand Up @@ -171,7 +171,7 @@ func TestUploadBeginCalculatesCorrectChunkCount(t *testing.T) {
data := makeUploadRequestDict(map[string]interface{}{
"file.size": tc.FileSize,
})
info, err := u.Begin(context.Background(), []string{}, data)
info, err := u.Begin(t.Context(), []string{}, data)
assert.NoError(t, err)
assert.Equal(t, tc.ExpectedCount, info.Count)
})
Expand All @@ -185,6 +185,7 @@ func TestUploadBeginMaxFileSize(t *testing.T) {
ShouldError bool
Name string
}{
{0, 1024 * 1024 * 300, false, "0 limit is unlimited"},
{500, 800, true, "800 is too large"},
{800, 500, false, "file within limits"},
{1024, 1023, false, "1-less than limit"},
Expand All @@ -210,7 +211,7 @@ func TestUploadBeginMaxFileSize(t *testing.T) {
data := makeUploadRequestDict(map[string]interface{}{
"file.size": tc.FileSize,
})
_, err := u.Begin(context.Background(), []string{}, data)
_, err := u.Begin(t.Context(), []string{}, data)
if tc.ShouldError {
assert.ErrorIs(t, err, ErrFileSizeTooLarge)
} else {
Expand Down Expand Up @@ -264,7 +265,7 @@ func TestUploadRejectsMissingRequiredFields(t *testing.T) {
}
}

_, err = u.Begin(context.Background(), []string{}, data)
_, err = u.Begin(t.Context(), []string{}, data)
assert.Errorf(t, err, "%s is a required field and should error if not provided", field)
})

Expand Down Expand Up @@ -342,21 +343,21 @@ func TestChunkMarksFinal(t *testing.T) {
"file.size": tc.FileSize,
})

info, err := u.Begin(context.Background(), []string{}, data)
info, err := u.Begin(t.Context(), []string{}, data)
assert.NoError(t, err)

// for anything larger than 1-chunk, check for off-by-ones
if tc.FinalChunk > 0 {
mockUploadInfoResult(fakeBulk, info)
_, prev, err := u.Chunk(context.Background(), info.ID, tc.FinalChunk-1, "")
_, prev, err := u.Chunk(t.Context(), info.ID, tc.FinalChunk-1, "")
assert.NoError(t, err)
assert.Falsef(t, prev.Last, "penultimate chunk number (%d) should not be marked final", tc.FinalChunk-1)
}

mockUploadInfoResult(fakeBulk, info)

// make sure the final chunk is marked as such
_, chunk, err := u.Chunk(context.Background(), info.ID, tc.FinalChunk, "")
_, chunk, err := u.Chunk(t.Context(), info.ID, tc.FinalChunk, "")
assert.NoError(t, err)
assert.Truef(t, chunk.Last, "chunk number %d should be marked as Last", tc.FinalChunk)
})
Expand Down