PostgreSQL: PGX fix multiple results handling (#110452)
* PostgreSQL: Fix multiple results handling - Added tests for handling multiple result sets, including compatible and incompatible structures, ensuring no panics occur. - Improved `convertResultsToFrame` function to validate column compatibility and handle null values correctly. - Introduced a new helper function `convertPostgresValue` for converting raw PostgreSQL values to appropriate Go types. - Added comprehensive unit tests for `convertResultsToFrame` covering various scenarios including row limits and mixed result types. * Add more test case
This commit is contained in:
@@ -388,35 +388,56 @@ func (e *DataSourceHandler) newProcessCfgPGX(queryContext context.Context, query
|
||||
}
|
||||
|
||||
func convertResultsToFrame(results []*pgconn.Result, rowLimit int64) (*data.Frame, error) {
|
||||
frame := data.Frame{}
|
||||
m := pgtype.NewMap()
|
||||
|
||||
// Find the first SELECT result to establish the frame structure
|
||||
var firstSelectResult *pgconn.Result
|
||||
for _, result := range results {
|
||||
// Skip non-select statements
|
||||
if !result.CommandTag.Select() {
|
||||
continue
|
||||
if result.CommandTag.Select() {
|
||||
firstSelectResult = result
|
||||
break
|
||||
}
|
||||
fields := make(data.Fields, len(result.FieldDescriptions))
|
||||
|
||||
fieldTypes, err := getFieldTypesFromDescriptions(result.FieldDescriptions, m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i, v := range result.FieldDescriptions {
|
||||
fields[i] = data.NewFieldFromFieldType(fieldTypes[i], 0)
|
||||
fields[i].Name = v.Name
|
||||
}
|
||||
// Create a new frame
|
||||
frame = *data.NewFrame("", fields...)
|
||||
}
|
||||
|
||||
// Add rows to the frame
|
||||
// If no SELECT results found, return empty frame
|
||||
if firstSelectResult == nil {
|
||||
return data.NewFrame(""), nil
|
||||
}
|
||||
|
||||
// Create frame structure based on the first SELECT result
|
||||
fields := make(data.Fields, len(firstSelectResult.FieldDescriptions))
|
||||
fieldTypes, err := getFieldTypesFromDescriptions(firstSelectResult.FieldDescriptions, m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i, v := range firstSelectResult.FieldDescriptions {
|
||||
fields[i] = data.NewFieldFromFieldType(fieldTypes[i], 0)
|
||||
fields[i].Name = v.Name
|
||||
}
|
||||
frame := *data.NewFrame("", fields...)
|
||||
|
||||
// Process all SELECT results, but validate column compatibility
|
||||
for _, result := range results {
|
||||
// Skip non-select statements
|
||||
if !result.CommandTag.Select() {
|
||||
continue
|
||||
}
|
||||
|
||||
// Validate that this result has the same structure as the frame
|
||||
if len(result.FieldDescriptions) != len(frame.Fields) {
|
||||
return nil, fmt.Errorf("incompatible result structure: expected %d columns, got %d columns",
|
||||
len(frame.Fields), len(result.FieldDescriptions))
|
||||
}
|
||||
|
||||
// Validate column names and types match
|
||||
for i, fd := range result.FieldDescriptions {
|
||||
if fd.Name != frame.Fields[i].Name {
|
||||
return nil, fmt.Errorf("column name mismatch at position %d: expected %q, got %q",
|
||||
i, frame.Fields[i].Name, fd.Name)
|
||||
}
|
||||
}
|
||||
|
||||
fieldDescriptions := result.FieldDescriptions
|
||||
for rowIdx := range result.Rows {
|
||||
if rowIdx == int(rowLimit) {
|
||||
@@ -429,98 +450,25 @@ func convertResultsToFrame(results []*pgconn.Result, rowLimit int64) (*data.Fram
|
||||
row := make([]any, len(fieldDescriptions))
|
||||
for colIdx, fd := range fieldDescriptions {
|
||||
rawValue := result.Rows[rowIdx][colIdx]
|
||||
dataTypeOID := fd.DataTypeOID
|
||||
format := fd.Format
|
||||
|
||||
if rawValue == nil {
|
||||
row[colIdx] = nil
|
||||
continue
|
||||
}
|
||||
|
||||
// Convert based on type
|
||||
switch fd.DataTypeOID {
|
||||
case pgtype.Int2OID:
|
||||
var d *int16
|
||||
scanPlan := m.PlanScan(dataTypeOID, format, &d)
|
||||
err := scanPlan.Scan(rawValue, &d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
row[colIdx] = d
|
||||
case pgtype.Int4OID:
|
||||
var d *int32
|
||||
scanPlan := m.PlanScan(dataTypeOID, format, &d)
|
||||
err := scanPlan.Scan(rawValue, &d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
row[colIdx] = d
|
||||
case pgtype.Int8OID:
|
||||
var d *int64
|
||||
scanPlan := m.PlanScan(dataTypeOID, format, &d)
|
||||
err := scanPlan.Scan(rawValue, &d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
row[colIdx] = d
|
||||
case pgtype.NumericOID, pgtype.Float8OID, pgtype.Float4OID:
|
||||
var d *float64
|
||||
scanPlan := m.PlanScan(dataTypeOID, format, &d)
|
||||
err := scanPlan.Scan(rawValue, &d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
row[colIdx] = d
|
||||
case pgtype.BoolOID:
|
||||
var d *bool
|
||||
scanPlan := m.PlanScan(dataTypeOID, format, &d)
|
||||
err := scanPlan.Scan(rawValue, &d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
row[colIdx] = d
|
||||
case pgtype.ByteaOID:
|
||||
d, err := pgtype.ByteaCodec.DecodeValue(pgtype.ByteaCodec{}, m, dataTypeOID, format, rawValue)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
str := string(d.([]byte))
|
||||
row[colIdx] = &str
|
||||
case pgtype.TimestampOID, pgtype.TimestamptzOID, pgtype.DateOID:
|
||||
var d *time.Time
|
||||
scanPlan := m.PlanScan(dataTypeOID, format, &d)
|
||||
err := scanPlan.Scan(rawValue, &d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
row[colIdx] = d
|
||||
case pgtype.TimeOID, pgtype.TimetzOID:
|
||||
var d *string
|
||||
scanPlan := m.PlanScan(dataTypeOID, format, &d)
|
||||
err := scanPlan.Scan(rawValue, &d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
row[colIdx] = d
|
||||
case pgtype.JSONOID, pgtype.JSONBOID:
|
||||
var d *string
|
||||
scanPlan := m.PlanScan(dataTypeOID, format, &d)
|
||||
err := scanPlan.Scan(rawValue, &d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
j := json.RawMessage(*d)
|
||||
row[colIdx] = &j
|
||||
default:
|
||||
var d *string
|
||||
scanPlan := m.PlanScan(dataTypeOID, format, &d)
|
||||
err := scanPlan.Scan(rawValue, &d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
row[colIdx] = d
|
||||
convertedValue, err := convertPostgresValue(rawValue, fd, m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
row[colIdx] = convertedValue
|
||||
}
|
||||
|
||||
// Validate row length matches frame field count before appending
|
||||
if len(row) != len(frame.Fields) {
|
||||
return nil, fmt.Errorf("row data length mismatch: expected %d values, got %d values",
|
||||
len(frame.Fields), len(row))
|
||||
}
|
||||
|
||||
frame.AppendRow(row...)
|
||||
}
|
||||
}
|
||||
@@ -528,6 +476,96 @@ func convertResultsToFrame(results []*pgconn.Result, rowLimit int64) (*data.Fram
|
||||
return &frame, nil
|
||||
}
|
||||
|
||||
// convertPostgresValue converts a raw PostgreSQL value to the appropriate Go type
|
||||
func convertPostgresValue(rawValue []byte, fd pgconn.FieldDescription, m *pgtype.Map) (interface{}, error) {
|
||||
dataTypeOID := fd.DataTypeOID
|
||||
format := fd.Format
|
||||
|
||||
// Convert based on type
|
||||
switch fd.DataTypeOID {
|
||||
case pgtype.Int2OID:
|
||||
var d *int16
|
||||
scanPlan := m.PlanScan(dataTypeOID, format, &d)
|
||||
err := scanPlan.Scan(rawValue, &d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d, nil
|
||||
case pgtype.Int4OID:
|
||||
var d *int32
|
||||
scanPlan := m.PlanScan(dataTypeOID, format, &d)
|
||||
err := scanPlan.Scan(rawValue, &d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d, nil
|
||||
case pgtype.Int8OID:
|
||||
var d *int64
|
||||
scanPlan := m.PlanScan(dataTypeOID, format, &d)
|
||||
err := scanPlan.Scan(rawValue, &d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d, nil
|
||||
case pgtype.NumericOID, pgtype.Float8OID, pgtype.Float4OID:
|
||||
var d *float64
|
||||
scanPlan := m.PlanScan(dataTypeOID, format, &d)
|
||||
err := scanPlan.Scan(rawValue, &d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d, nil
|
||||
case pgtype.BoolOID:
|
||||
var d *bool
|
||||
scanPlan := m.PlanScan(dataTypeOID, format, &d)
|
||||
err := scanPlan.Scan(rawValue, &d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d, nil
|
||||
case pgtype.ByteaOID:
|
||||
d, err := pgtype.ByteaCodec.DecodeValue(pgtype.ByteaCodec{}, m, dataTypeOID, format, rawValue)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
str := string(d.([]byte))
|
||||
return &str, nil
|
||||
case pgtype.TimestampOID, pgtype.TimestamptzOID, pgtype.DateOID:
|
||||
var d *time.Time
|
||||
scanPlan := m.PlanScan(dataTypeOID, format, &d)
|
||||
err := scanPlan.Scan(rawValue, &d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d, nil
|
||||
case pgtype.TimeOID, pgtype.TimetzOID:
|
||||
var d *string
|
||||
scanPlan := m.PlanScan(dataTypeOID, format, &d)
|
||||
err := scanPlan.Scan(rawValue, &d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d, nil
|
||||
case pgtype.JSONOID, pgtype.JSONBOID:
|
||||
var d *string
|
||||
scanPlan := m.PlanScan(dataTypeOID, format, &d)
|
||||
err := scanPlan.Scan(rawValue, &d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
j := json.RawMessage(*d)
|
||||
return &j, nil
|
||||
default:
|
||||
var d *string
|
||||
scanPlan := m.PlanScan(dataTypeOID, format, &d)
|
||||
err := scanPlan.Scan(rawValue, &d)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
}
|
||||
|
||||
func getFieldTypesFromDescriptions(fieldDescriptions []pgconn.FieldDescription, m *pgtype.Map) ([]data.FieldType, error) {
|
||||
fieldTypes := make([]data.FieldType, len(fieldDescriptions))
|
||||
for i, v := range fieldDescriptions {
|
||||
|
||||
@@ -9,6 +9,8 @@ import (
|
||||
"github.com/grafana/grafana-plugin-sdk-go/backend"
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data"
|
||||
"github.com/grafana/grafana-plugin-sdk-go/data/sqlutil"
|
||||
"github.com/jackc/pgx/v5/pgconn"
|
||||
"github.com/jackc/pgx/v5/pgtype"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
@@ -425,6 +427,246 @@ func TestSQLEngine(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestConvertResultsToFrame(t *testing.T) {
|
||||
// Import the pgx packages needed for testing
|
||||
// These imports are included in the main file but need to be accessible for tests
|
||||
t.Run("convertResultsToFrame with single result", func(t *testing.T) {
|
||||
// Create mock field descriptions
|
||||
fieldDescs := []pgconn.FieldDescription{
|
||||
{Name: "id", DataTypeOID: pgtype.Int4OID},
|
||||
{Name: "name", DataTypeOID: pgtype.TextOID},
|
||||
{Name: "value", DataTypeOID: pgtype.Float8OID},
|
||||
}
|
||||
|
||||
// Create mock result data
|
||||
mockRows := [][][]byte{
|
||||
{[]byte("1"), []byte("test1"), []byte("10.5")},
|
||||
{[]byte("2"), []byte("test2"), []byte("20.7")},
|
||||
}
|
||||
|
||||
// Create mock result
|
||||
result := &pgconn.Result{
|
||||
FieldDescriptions: fieldDescs,
|
||||
Rows: mockRows,
|
||||
}
|
||||
result.CommandTag = pgconn.NewCommandTag("SELECT 2")
|
||||
|
||||
results := []*pgconn.Result{result}
|
||||
|
||||
frame, err := convertResultsToFrame(results, 1000)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, frame)
|
||||
require.Equal(t, 3, len(frame.Fields))
|
||||
require.Equal(t, 2, frame.Rows())
|
||||
|
||||
// Verify field names
|
||||
require.Equal(t, "id", frame.Fields[0].Name)
|
||||
require.Equal(t, "name", frame.Fields[1].Name)
|
||||
require.Equal(t, "value", frame.Fields[2].Name)
|
||||
})
|
||||
|
||||
t.Run("convertResultsToFrame with multiple compatible results", func(t *testing.T) {
|
||||
// Create mock field descriptions (same structure for both results)
|
||||
fieldDescs := []pgconn.FieldDescription{
|
||||
{Name: "id", DataTypeOID: pgtype.Int4OID},
|
||||
{Name: "name", DataTypeOID: pgtype.TextOID},
|
||||
}
|
||||
|
||||
// Create first result
|
||||
mockRows1 := [][][]byte{
|
||||
{[]byte("1"), []byte("test1")},
|
||||
{[]byte("2"), []byte("test2")},
|
||||
}
|
||||
result1 := &pgconn.Result{
|
||||
FieldDescriptions: fieldDescs,
|
||||
Rows: mockRows1,
|
||||
}
|
||||
result1.CommandTag = pgconn.NewCommandTag("SELECT 2")
|
||||
|
||||
// Create second result with same structure
|
||||
mockRows2 := [][][]byte{
|
||||
{[]byte("3"), []byte("test3")},
|
||||
{[]byte("4"), []byte("test4")},
|
||||
}
|
||||
result2 := &pgconn.Result{
|
||||
FieldDescriptions: fieldDescs,
|
||||
Rows: mockRows2,
|
||||
}
|
||||
result2.CommandTag = pgconn.NewCommandTag("SELECT 2")
|
||||
|
||||
results := []*pgconn.Result{result1, result2}
|
||||
|
||||
frame, err := convertResultsToFrame(results, 1000)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, frame)
|
||||
require.Equal(t, 2, len(frame.Fields))
|
||||
require.Equal(t, 4, frame.Rows()) // Should have rows from both results
|
||||
|
||||
// Verify field names
|
||||
require.Equal(t, "id", frame.Fields[0].Name)
|
||||
require.Equal(t, "name", frame.Fields[1].Name)
|
||||
})
|
||||
|
||||
t.Run("convertResultsToFrame with row limit", func(t *testing.T) {
|
||||
// Create mock field descriptions
|
||||
fieldDescs := []pgconn.FieldDescription{
|
||||
{Name: "id", DataTypeOID: pgtype.Int4OID},
|
||||
}
|
||||
|
||||
// Create mock result data with 3 rows
|
||||
mockRows := [][][]byte{
|
||||
{[]byte("1")},
|
||||
{[]byte("2")},
|
||||
{[]byte("3")},
|
||||
}
|
||||
|
||||
result := &pgconn.Result{
|
||||
FieldDescriptions: fieldDescs,
|
||||
Rows: mockRows,
|
||||
}
|
||||
result.CommandTag = pgconn.NewCommandTag("SELECT 3")
|
||||
|
||||
results := []*pgconn.Result{result}
|
||||
|
||||
// Set row limit to 2
|
||||
frame, err := convertResultsToFrame(results, 2)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, frame)
|
||||
require.Equal(t, 1, len(frame.Fields))
|
||||
require.Equal(t, 2, frame.Rows()) // Should be limited to 2 rows
|
||||
|
||||
// Should have a notice about the limit
|
||||
require.NotNil(t, frame.Meta)
|
||||
require.Len(t, frame.Meta.Notices, 1)
|
||||
require.Contains(t, frame.Meta.Notices[0].Text, "Results have been limited to 2")
|
||||
})
|
||||
|
||||
t.Run("convertResultsToFrame with mixed SELECT and non-SELECT results", func(t *testing.T) {
|
||||
// Create a non-SELECT result (should be skipped)
|
||||
nonSelectResult := &pgconn.Result{}
|
||||
nonSelectResult.CommandTag = pgconn.NewCommandTag("UPDATE 1")
|
||||
|
||||
// Create a SELECT result
|
||||
fieldDescs := []pgconn.FieldDescription{
|
||||
{Name: "id", DataTypeOID: pgtype.Int4OID},
|
||||
}
|
||||
mockRows := [][][]byte{
|
||||
{[]byte("1")},
|
||||
}
|
||||
selectResult := &pgconn.Result{
|
||||
FieldDescriptions: fieldDescs,
|
||||
Rows: mockRows,
|
||||
}
|
||||
selectResult.CommandTag = pgconn.NewCommandTag("SELECT 1")
|
||||
|
||||
results := []*pgconn.Result{nonSelectResult, selectResult}
|
||||
|
||||
frame, err := convertResultsToFrame(results, 1000)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, frame)
|
||||
require.Equal(t, 1, len(frame.Fields))
|
||||
require.Equal(t, 1, frame.Rows())
|
||||
})
|
||||
|
||||
t.Run("convertResultsToFrame with no SELECT results", func(t *testing.T) {
|
||||
// Create only non-SELECT results
|
||||
result1 := &pgconn.Result{}
|
||||
result1.CommandTag = pgconn.NewCommandTag("UPDATE 1")
|
||||
|
||||
result2 := &pgconn.Result{}
|
||||
result2.CommandTag = pgconn.NewCommandTag("INSERT 1")
|
||||
|
||||
results := []*pgconn.Result{result1, result2}
|
||||
|
||||
frame, err := convertResultsToFrame(results, 1000)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, frame)
|
||||
require.Equal(t, 0, len(frame.Fields))
|
||||
require.Equal(t, 0, frame.Rows())
|
||||
})
|
||||
|
||||
t.Run("convertResultsToFrame with multiple results and row limit per result", func(t *testing.T) {
|
||||
// Create mock field descriptions (same structure for both results)
|
||||
fieldDescs := []pgconn.FieldDescription{
|
||||
{Name: "id", DataTypeOID: pgtype.Int4OID},
|
||||
}
|
||||
|
||||
// Create first result with 3 rows
|
||||
mockRows1 := [][][]byte{
|
||||
{[]byte("1")},
|
||||
{[]byte("2")},
|
||||
{[]byte("3")},
|
||||
}
|
||||
result1 := &pgconn.Result{
|
||||
FieldDescriptions: fieldDescs,
|
||||
Rows: mockRows1,
|
||||
}
|
||||
result1.CommandTag = pgconn.NewCommandTag("SELECT 3")
|
||||
|
||||
// Create second result with 3 rows
|
||||
mockRows2 := [][][]byte{
|
||||
{[]byte("4")},
|
||||
{[]byte("5")},
|
||||
{[]byte("6")},
|
||||
}
|
||||
result2 := &pgconn.Result{
|
||||
FieldDescriptions: fieldDescs,
|
||||
Rows: mockRows2,
|
||||
}
|
||||
result2.CommandTag = pgconn.NewCommandTag("SELECT 3")
|
||||
|
||||
results := []*pgconn.Result{result1, result2}
|
||||
|
||||
// Set row limit to 2 (should limit each result to 2 rows)
|
||||
frame, err := convertResultsToFrame(results, 2)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, frame)
|
||||
require.Equal(t, 1, len(frame.Fields))
|
||||
require.Equal(t, 4, frame.Rows()) // 2 rows from each result
|
||||
|
||||
// Should have notices about the limit from both results
|
||||
require.NotNil(t, frame.Meta)
|
||||
require.Len(t, frame.Meta.Notices, 2)
|
||||
require.Contains(t, frame.Meta.Notices[0].Text, "Results have been limited to 2")
|
||||
require.Contains(t, frame.Meta.Notices[1].Text, "Results have been limited to 2")
|
||||
})
|
||||
|
||||
t.Run("convertResultsToFrame handles null values correctly", func(t *testing.T) {
|
||||
// Create mock field descriptions
|
||||
fieldDescs := []pgconn.FieldDescription{
|
||||
{Name: "id", DataTypeOID: pgtype.Int4OID},
|
||||
{Name: "name", DataTypeOID: pgtype.TextOID},
|
||||
}
|
||||
|
||||
// Create mock result data with null values
|
||||
mockRows := [][][]byte{
|
||||
{[]byte("1"), nil}, // null name
|
||||
{nil, []byte("test2")}, // null id
|
||||
}
|
||||
|
||||
result := &pgconn.Result{
|
||||
FieldDescriptions: fieldDescs,
|
||||
Rows: mockRows,
|
||||
}
|
||||
result.CommandTag = pgconn.NewCommandTag("SELECT 2")
|
||||
|
||||
results := []*pgconn.Result{result}
|
||||
|
||||
frame, err := convertResultsToFrame(results, 1000)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, frame)
|
||||
require.Equal(t, 2, len(frame.Fields))
|
||||
require.Equal(t, 2, frame.Rows())
|
||||
|
||||
// Check that null values are handled correctly
|
||||
// The exact representation depends on the field type, but should not panic
|
||||
require.NotPanics(t, func() {
|
||||
frame.Fields[0].At(1) // null id
|
||||
frame.Fields[1].At(0) // null name
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// testQueryResultTransformer is a test double used by the SQL engine tests.
// The flag below presumably gets set by a TransformQueryError method defined
// elsewhere in this file — confirm against the methods outside this view.
type testQueryResultTransformer struct {
	// transformQueryErrorWasCalled records that the error-transform hook ran.
	transformQueryErrorWasCalled bool
}
|
||||
|
||||
Reference in New Issue
Block a user