Reduce data races (#14549)
* Add race conditions into test
* Fix Race in GetManager()
* DataAsync() use error chan
* just log no chan
* finish
This commit is contained in:
parent 0d1444751f
commit 87009ab40a

3 changed files with 18 additions and 6 deletions
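The three diffs below touch the blob reader's DataAsync, the process manager's GetManager, and the manager's tests. For context (not part of the commit): Go's race detector reports unsynchronized concurrent reads and writes of the same variable, and the pattern fixed in DataAsync is a common one, where an err declared in the outer function is assigned inside a goroutine and read again when the function returns. A minimal, self-contained sketch of that kind of race, with illustrative names only:

package main

import (
	"fmt"
	"time"
)

// racyLoad mimics the old DataAsync shape: err is shared between the
// goroutine (writer) and the return statement (reader) with no
// synchronization, so `go run -race` / `go test -race` flags it.
func racyLoad() ([]byte, error) {
	var err error
	go func() {
		err = fmt.Errorf("background failure") // concurrent write
	}()
	time.Sleep(10 * time.Millisecond) // lets the goroutine run, but does NOT synchronize
	return []byte("data"), err        // concurrent read
}

func main() {
	data, err := racyLoad()
	fmt.Println(string(data), err)
}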
@@ -11,6 +11,8 @@ import (
 	"io"
 	"strconv"
 	"strings"
+
+	gitea_log "code.gitea.io/gitea/modules/log"
 )

 // Blob represents a Git object.
@@ -27,13 +29,13 @@ type Blob struct {
 // Calling the Close function on the result will discard all unread output.
 func (b *Blob) DataAsync() (io.ReadCloser, error) {
 	stdoutReader, stdoutWriter := io.Pipe()
-	var err error

 	go func() {
 		stderr := &strings.Builder{}
-		err = NewCommand("cat-file", "--batch").RunInDirFullPipeline(b.repoPath, stdoutWriter, stderr, strings.NewReader(b.ID.String()+"\n"))
+		err := NewCommand("cat-file", "--batch").RunInDirFullPipeline(b.repoPath, stdoutWriter, stderr, strings.NewReader(b.ID.String()+"\n"))
 		if err != nil {
 			err = ConcatenateError(err, stderr.String())
+			gitea_log.Error("Blob.DataAsync Error: %v", err)
 			_ = stdoutWriter.CloseWithError(err)
 		} else {
 			_ = stdoutWriter.Close()
@@ -50,8 +52,8 @@ func (b *Blob) DataAsync() (io.ReadCloser, error) {
 	return &LimitedReaderCloser{
 		R: bufReader,
 		C: stdoutReader,
-		N: int64(size),
-	}, err
+		N: size,
+	}, nil
 }

 // Size returns the uncompressed size of the blob
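In the rewritten DataAsync the error no longer escapes the goroutine: it is declared with :=, logged via gitea_log.Error, and handed to stdoutWriter.CloseWithError, while the function itself now returns nil. Nothing is lost by not returning err, because an io.Pipe writer closed with CloseWithError delivers that error to the reader's next Read. A small standalone illustration of that mechanism (illustrative code, not taken from Gitea):

package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	r, w := io.Pipe()

	go func() {
		// A producer that fails keeps its error local and reports it
		// through the pipe instead of a shared variable.
		err := errors.New("cat-file failed")
		_ = w.CloseWithError(err)
	}()

	// The consumer receives the producer's error from Read once the
	// pipe is closed, with no data race on an err variable.
	_, err := io.ReadAll(r)
	fmt.Println("reader got:", err) // reader got: cat-file failed
}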
@@ -25,6 +25,7 @@ var (
 	// ErrExecTimeout represent a timeout error
 	ErrExecTimeout = errors.New("Process execution timeout")
 	manager *Manager
+	managerInit sync.Once

 	// DefaultContext is the default context to run processing commands in
 	DefaultContext = context.Background()
@@ -48,11 +49,11 @@ type Manager struct {

 // GetManager returns a Manager and initializes one as singleton if there's none yet
 func GetManager() *Manager {
-	if manager == nil {
+	managerInit.Do(func() {
 		manager = &Manager{
 			processes: make(map[int64]*Process),
 		}
-	}
+	})
 	return manager
 }
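The GetManager rewrite replaces a lazy nil check with sync.Once. The old `if manager == nil` has two problems: two goroutines can both observe nil and initialize twice, and even a single unguarded read of manager races with the write that sets it, which is exactly what the race detector reports. managerInit.Do provides mutual exclusion plus a happens-before edge, so every caller sees the same fully constructed Manager. A minimal sketch of the same idiom with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

// registry stands in for the process Manager; the names are illustrative.
type registry struct {
	items map[int64]string
}

var (
	instance     *registry
	instanceInit sync.Once
)

// getRegistry lazily builds the singleton exactly once, even when called
// from many goroutines at the same time.
func getRegistry() *registry {
	instanceInit.Do(func() {
		instance = &registry{items: make(map[int64]string)}
	})
	return instance
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = getRegistry()
		}()
	}
	wg.Wait()
	fmt.Println("initialized:", getRegistry().items != nil)
}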
@@ -12,6 +12,15 @@ import (
 	"github.com/stretchr/testify/assert"
 )

+func TestGetManager(t *testing.T) {
+	go func() {
+		// test race protection
+		_ = GetManager()
+	}()
+	pm := GetManager()
+	assert.NotNil(t, pm)
+}
+
 func TestManager_Add(t *testing.T) {
 	pm := Manager{processes: make(map[int64]*Process)}
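The new TestGetManager mainly gives the race detector something to observe: a second goroutine calls GetManager while the test goroutine does the same, so running the package's tests with the -race flag (go test -race) would have flagged the old nil-check implementation. The background call is fire-and-forget; a variant that also waits for it before the test returns, sketched here and not part of the commit (it assumes the test lives in the same package as GetManager, shown as process, with the existing testify import), could look like:

package process

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestGetManagerConcurrent is an illustrative variant, not Gitea code:
// it waits for the background call so both GetManager invocations are
// guaranteed to have run before the test returns.
func TestGetManagerConcurrent(t *testing.T) {
	done := make(chan struct{})
	go func() {
		defer close(done)
		_ = GetManager() // concurrent call for the race detector to observe
	}()
	pm := GetManager()
	assert.NotNil(t, pm)
	<-done
}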