Bring odoo-go to ~70%: read_group, record rules, admin, sessions
Phase 1: read_group/web_read_group with SQL GROUP BY, aggregates (sum/avg/min/max/count/array_agg/sum_currency), date granularity, M2O groupby resolution to [id, display_name]. Phase 2: Record rules with domain_force parsing (Python literal parser), global AND + group OR merging. Domain operators: child_of, parent_of, any, not any compiled to SQL hierarchy/EXISTS queries. Phase 3: Button dispatch via /web/dataset/call_button, method return values interpreted as actions. Payment register wizard (account.payment.register) for sale→invoice→pay flow. Phase 4: ir.filters, ir.default, product fields expanded, SO line product_id onchange, ir_model+ir_model_fields DB seeding. Phase 5: CSV export (/web/export/csv), attachment upload/download via ir.attachment, fields_get with aggregator hints. Admin/System: Session persistence (PostgreSQL-backed), ir.config_parameter with get_param/set_param, ir.cron, ir.logging, res.lang, res.config.settings with company-related fields, Settings form view. Technical menu with Views/Actions/Parameters/Security/Logging sub-menus. User change_password, preferences. Password never exposed in UI/API. Bugfixes: false→nil for varchar/int fields, int32 in toInt64, call_button route with trailing slash, create_invoices returns action, search view always included, get_formview_action, name_create, ir.http stub. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
422
pkg/orm/read_group.go
Normal file
422
pkg/orm/read_group.go
Normal file
@@ -0,0 +1,422 @@
|
||||
package orm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ReadGroupResult holds one group returned by ReadGroup.
// Mirrors: one row from odoo/orm/models.py _read_group() result tuples.
type ReadGroupResult struct {
	// GroupValues maps groupby spec → grouped value (e.g., "state" → "draft").
	// Many2one values are post-processed by ReadGroup into [id, display_name]
	// pairs (false when the FK is NULL); see resolveM2OGroupby.
	GroupValues map[string]interface{}
	// AggValues maps aggregate spec → aggregated value (e.g., "amount_total:sum" → 1234.56)
	AggValues map[string]interface{}
	// Domain is the filter domain that selects records in this group.
	// Built from equality leaves on the raw groupby values; see buildGroupDomain.
	Domain []interface{}
	// Count is the number of records in this group (__count).
	Count int64
}
|
||||
|
||||
// readGroupbyCol describes a parsed groupby column for ReadGroup.
// One instance is built per entry of the caller's groupby list.
type readGroupbyCol struct {
	spec        string // original spec, e.g. "date_order:month"
	fieldName   string // field name, e.g. "date_order"
	granularity string // e.g. "month", "" if none
	sqlExpr     string // SQL expression for SELECT and GROUP BY
	field       *Field // resolved field descriptor from Model.GetField
}
|
||||
|
||||
// ReadGroupOpts configures a ReadGroup call.
// The zero value means: no offset, no limit, default ordering.
type ReadGroupOpts struct {
	// Offset skips the first N groups (SQL OFFSET); ignored when <= 0.
	Offset int
	// Limit caps the number of groups returned (SQL LIMIT); ignored when <= 0.
	Limit int
	// Order is a raw SQL ORDER BY clause. When empty, results are ordered
	// by the groupby expressions.
	Order string
}
|
||||
|
||||
// ReadGroup performs a grouped aggregation query.
|
||||
// Mirrors: odoo/orm/models.py BaseModel._read_group()
|
||||
//
|
||||
// groupby: list of groupby specs, e.g. ["state", "date_order:month", "partner_id"]
|
||||
// aggregates: list of aggregate specs, e.g. ["__count", "amount_total:sum", "id:count_distinct"]
|
||||
func (rs *Recordset) ReadGroup(domain Domain, groupby []string, aggregates []string, opts ...ReadGroupOpts) ([]ReadGroupResult, error) {
|
||||
m := rs.model
|
||||
opt := ReadGroupOpts{}
|
||||
if len(opts) > 0 {
|
||||
opt = opts[0]
|
||||
}
|
||||
|
||||
// Apply record rules
|
||||
domain = ApplyRecordRules(rs.env, m, domain)
|
||||
|
||||
// Compile domain to WHERE clause
|
||||
compiler := &DomainCompiler{model: m, env: rs.env}
|
||||
where, params, err := compiler.Compile(domain)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("orm: read_group %s: %w", m.name, err)
|
||||
}
|
||||
|
||||
// Parse groupby specs
|
||||
var gbCols []readGroupbyCol
|
||||
|
||||
for _, spec := range groupby {
|
||||
fieldName, granularity := parseGroupbySpec(spec)
|
||||
f := m.GetField(fieldName)
|
||||
if f == nil {
|
||||
return nil, fmt.Errorf("orm: read_group: field %q not found on %s", fieldName, m.name)
|
||||
}
|
||||
|
||||
sqlExpr := groupbySQLExpr(m.table, f, granularity)
|
||||
gbCols = append(gbCols, readGroupbyCol{
|
||||
spec: spec,
|
||||
fieldName: fieldName,
|
||||
granularity: granularity,
|
||||
sqlExpr: sqlExpr,
|
||||
field: f,
|
||||
})
|
||||
}
|
||||
|
||||
// Parse aggregate specs
|
||||
type aggCol struct {
|
||||
spec string // original spec, e.g. "amount_total:sum"
|
||||
fieldName string
|
||||
function string // e.g. "sum", "count", "avg"
|
||||
sqlExpr string
|
||||
}
|
||||
var aggCols []aggCol
|
||||
|
||||
for _, spec := range aggregates {
|
||||
if spec == "__count" {
|
||||
aggCols = append(aggCols, aggCol{
|
||||
spec: "__count",
|
||||
sqlExpr: "COUNT(*)",
|
||||
})
|
||||
continue
|
||||
}
|
||||
fieldName, function := parseAggregateSpec(spec)
|
||||
if function == "" {
|
||||
return nil, fmt.Errorf("orm: read_group: aggregate %q missing function (expected field:func)", spec)
|
||||
}
|
||||
f := m.GetField(fieldName)
|
||||
if f == nil {
|
||||
return nil, fmt.Errorf("orm: read_group: field %q not found on %s", fieldName, m.name)
|
||||
}
|
||||
sqlFunc := aggregateSQLFunc(function, fmt.Sprintf("%q.%q", m.table, f.Column()))
|
||||
if sqlFunc == "" {
|
||||
return nil, fmt.Errorf("orm: read_group: unknown aggregate function %q", function)
|
||||
}
|
||||
aggCols = append(aggCols, aggCol{
|
||||
spec: spec,
|
||||
fieldName: fieldName,
|
||||
function: function,
|
||||
sqlExpr: sqlFunc,
|
||||
})
|
||||
}
|
||||
|
||||
// Build SELECT clause
|
||||
var selectParts []string
|
||||
for _, gb := range gbCols {
|
||||
selectParts = append(selectParts, gb.sqlExpr)
|
||||
}
|
||||
for _, agg := range aggCols {
|
||||
selectParts = append(selectParts, agg.sqlExpr)
|
||||
}
|
||||
if len(selectParts) == 0 {
|
||||
selectParts = append(selectParts, "COUNT(*)")
|
||||
}
|
||||
|
||||
// Build GROUP BY clause
|
||||
var groupByParts []string
|
||||
for _, gb := range gbCols {
|
||||
groupByParts = append(groupByParts, gb.sqlExpr)
|
||||
}
|
||||
|
||||
// Build ORDER BY
|
||||
orderSQL := ""
|
||||
if opt.Order != "" {
|
||||
orderSQL = opt.Order
|
||||
} else if len(gbCols) > 0 {
|
||||
// Default: order by groupby columns
|
||||
var orderParts []string
|
||||
for _, gb := range gbCols {
|
||||
orderParts = append(orderParts, gb.sqlExpr)
|
||||
}
|
||||
orderSQL = strings.Join(orderParts, ", ")
|
||||
}
|
||||
|
||||
// Assemble query
|
||||
joinSQL := compiler.JoinSQL()
|
||||
query := fmt.Sprintf("SELECT %s FROM %q%s WHERE %s",
|
||||
strings.Join(selectParts, ", "),
|
||||
m.table,
|
||||
joinSQL,
|
||||
where,
|
||||
)
|
||||
if len(groupByParts) > 0 {
|
||||
query += " GROUP BY " + strings.Join(groupByParts, ", ")
|
||||
}
|
||||
if orderSQL != "" {
|
||||
query += " ORDER BY " + orderSQL
|
||||
}
|
||||
if opt.Limit > 0 {
|
||||
query += fmt.Sprintf(" LIMIT %d", opt.Limit)
|
||||
}
|
||||
if opt.Offset > 0 {
|
||||
query += fmt.Sprintf(" OFFSET %d", opt.Offset)
|
||||
}
|
||||
|
||||
// Execute
|
||||
rows, err := rs.env.tx.Query(rs.env.ctx, query, params...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("orm: read_group %s: %w", m.name, err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
// Scan results
|
||||
totalCols := len(gbCols) + len(aggCols)
|
||||
if totalCols == 0 {
|
||||
totalCols = 1 // COUNT(*) fallback
|
||||
}
|
||||
|
||||
var results []ReadGroupResult
|
||||
for rows.Next() {
|
||||
scanDest := make([]interface{}, totalCols)
|
||||
for i := range scanDest {
|
||||
scanDest[i] = new(interface{})
|
||||
}
|
||||
if err := rows.Scan(scanDest...); err != nil {
|
||||
return nil, fmt.Errorf("orm: read_group scan %s: %w", m.name, err)
|
||||
}
|
||||
|
||||
result := ReadGroupResult{
|
||||
GroupValues: make(map[string]interface{}),
|
||||
AggValues: make(map[string]interface{}),
|
||||
}
|
||||
|
||||
// Extract groupby values
|
||||
for i, gb := range gbCols {
|
||||
val := *(scanDest[i].(*interface{}))
|
||||
result.GroupValues[gb.spec] = val
|
||||
}
|
||||
|
||||
// Extract aggregate values
|
||||
for i, agg := range aggCols {
|
||||
val := *(scanDest[len(gbCols)+i].(*interface{}))
|
||||
if agg.spec == "__count" {
|
||||
result.Count = asInt64(val)
|
||||
result.AggValues["__count"] = result.Count
|
||||
} else {
|
||||
result.AggValues[agg.spec] = val
|
||||
}
|
||||
}
|
||||
|
||||
// If __count not explicitly requested, add it from COUNT(*)
|
||||
if _, hasCount := result.AggValues["__count"]; !hasCount {
|
||||
result.Count = 0
|
||||
}
|
||||
|
||||
// Build domain for this group
|
||||
result.Domain = buildGroupDomain(gbCols, scanDest)
|
||||
|
||||
results = append(results, result)
|
||||
}
|
||||
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, fmt.Errorf("orm: read_group %s: %w", m.name, err)
|
||||
}
|
||||
|
||||
// Post-process: resolve Many2one groupby values to [id, display_name]
|
||||
for _, gb := range gbCols {
|
||||
if gb.field.Type == TypeMany2one && gb.field.Comodel != "" {
|
||||
if err := rs.resolveM2OGroupby(gb.spec, gb.field, results); err != nil {
|
||||
// Non-fatal: log and continue with raw IDs
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// resolveM2OGroupby replaces raw FK IDs in group results with [id, display_name] pairs.
|
||||
func (rs *Recordset) resolveM2OGroupby(spec string, f *Field, results []ReadGroupResult) error {
|
||||
// Collect unique IDs
|
||||
idSet := make(map[int64]bool)
|
||||
for _, r := range results {
|
||||
if id := asInt64(r.GroupValues[spec]); id > 0 {
|
||||
idSet[id] = true
|
||||
}
|
||||
}
|
||||
if len(idSet) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var ids []int64
|
||||
for id := range idSet {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
|
||||
// Fetch display names
|
||||
comodelRS := rs.env.Model(f.Comodel).Browse(ids...)
|
||||
names, err := comodelRS.NameGet()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Replace values
|
||||
for i, r := range results {
|
||||
rawID := asInt64(r.GroupValues[spec])
|
||||
if rawID > 0 {
|
||||
name := names[rawID]
|
||||
results[i].GroupValues[spec] = []interface{}{rawID, name}
|
||||
} else {
|
||||
results[i].GroupValues[spec] = false
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseGroupbySpec splits "field:granularity" into field name and granularity.
// Without a ':' the whole spec is the field name and granularity is empty;
// only the first ':' separates, so "a:b:c" yields ("a", "b:c").
// Mirrors: odoo/orm/models.py parse_read_group_spec() for groupby
func parseGroupbySpec(spec string) (fieldName, granularity string) {
	if sep := strings.IndexByte(spec, ':'); sep >= 0 {
		return spec[:sep], spec[sep+1:]
	}
	return spec, ""
}
|
||||
|
||||
// parseAggregateSpec splits "field:function" into field name and aggregate function.
// Without a ':' the whole spec is the field name and function is empty;
// only the first ':' separates, so "a:b:c" yields ("a", "b:c").
// Mirrors: odoo/orm/models.py parse_read_group_spec() for aggregates
func parseAggregateSpec(spec string) (fieldName, function string) {
	fieldName = spec
	if i := strings.Index(spec, ":"); i != -1 {
		fieldName, function = spec[:i], spec[i+1:]
	}
	return fieldName, function
}
|
||||
|
||||
// groupbySQLExpr returns the SQL expression for a GROUP BY column.
|
||||
// Mirrors: odoo/orm/models.py _read_group_groupby()
|
||||
func groupbySQLExpr(table string, f *Field, granularity string) string {
|
||||
col := fmt.Sprintf("%q.%q", table, f.Column())
|
||||
|
||||
if granularity == "" {
|
||||
// Boolean fields: COALESCE to false (like Python Odoo)
|
||||
if f.Type == TypeBoolean {
|
||||
return fmt.Sprintf("COALESCE(%s, FALSE)", col)
|
||||
}
|
||||
return col
|
||||
}
|
||||
|
||||
// Date/datetime granularity
|
||||
// Mirrors: odoo/orm/models.py _read_group_groupby() date_trunc branch
|
||||
switch granularity {
|
||||
case "day", "month", "quarter", "year":
|
||||
expr := fmt.Sprintf("date_trunc('%s', %s::timestamp)", granularity, col)
|
||||
if f.Type == TypeDate {
|
||||
expr += "::date"
|
||||
}
|
||||
return expr
|
||||
case "week":
|
||||
// ISO week: truncate to Monday
|
||||
expr := fmt.Sprintf("date_trunc('week', %s::timestamp)", col)
|
||||
if f.Type == TypeDate {
|
||||
expr += "::date"
|
||||
}
|
||||
return expr
|
||||
case "year_number":
|
||||
return fmt.Sprintf("EXTRACT(YEAR FROM %s)", col)
|
||||
case "quarter_number":
|
||||
return fmt.Sprintf("EXTRACT(QUARTER FROM %s)", col)
|
||||
case "month_number":
|
||||
return fmt.Sprintf("EXTRACT(MONTH FROM %s)", col)
|
||||
case "iso_week_number":
|
||||
return fmt.Sprintf("EXTRACT(WEEK FROM %s)", col)
|
||||
case "day_of_year":
|
||||
return fmt.Sprintf("EXTRACT(DOY FROM %s)", col)
|
||||
case "day_of_month":
|
||||
return fmt.Sprintf("EXTRACT(DAY FROM %s)", col)
|
||||
case "day_of_week":
|
||||
return fmt.Sprintf("EXTRACT(ISODOW FROM %s)", col)
|
||||
case "hour_number":
|
||||
return fmt.Sprintf("EXTRACT(HOUR FROM %s)", col)
|
||||
case "minute_number":
|
||||
return fmt.Sprintf("EXTRACT(MINUTE FROM %s)", col)
|
||||
case "second_number":
|
||||
return fmt.Sprintf("EXTRACT(SECOND FROM %s)", col)
|
||||
default:
|
||||
// Unknown granularity: fall back to plain column
|
||||
return col
|
||||
}
|
||||
}
|
||||
|
||||
// aggregateSQLFunc returns the SQL aggregate expression for the given
// function name applied to column, or "" for an unknown function.
// Mirrors: odoo/orm/models.py READ_GROUP_AGGREGATE
func aggregateSQLFunc(function, column string) string {
	// Template per supported function; %s receives the (already quoted) column.
	templates := map[string]string{
		"sum":                "SUM(%s)",
		"avg":                "AVG(%s)",
		"max":                "MAX(%s)",
		"min":                "MIN(%s)",
		"count":              "COUNT(%s)",
		"count_distinct":     "COUNT(DISTINCT %s)",
		"bool_and":           "BOOL_AND(%s)",
		"bool_or":            "BOOL_OR(%s)",
		"array_agg":          "ARRAY_AGG(%s)",
		"array_agg_distinct": "ARRAY_AGG(DISTINCT %s)",
		"recordset":          "ARRAY_AGG(%s)",
		// Simplified: SUM without currency conversion (full impl needs exchange rates)
		"sum_currency": "SUM(%s)",
	}
	tpl, ok := templates[function]
	if !ok {
		return ""
	}
	return fmt.Sprintf(tpl, column)
}
|
||||
|
||||
// buildGroupDomain builds a domain that selects all records in this group.
|
||||
func buildGroupDomain(gbCols []readGroupbyCol, scanDest []interface{}) []interface{} {
|
||||
var domain []interface{}
|
||||
for i, gb := range gbCols {
|
||||
val := *(scanDest[i].(*interface{}))
|
||||
if val == nil {
|
||||
domain = append(domain, []interface{}{gb.fieldName, "=", false})
|
||||
} else if gb.granularity != "" && isTimeGranularity(gb.granularity) {
|
||||
// For date grouping, build a range domain
|
||||
// The raw value is the truncated date — client uses __range instead
|
||||
domain = append(domain, []interface{}{gb.fieldName, "=", val})
|
||||
} else {
|
||||
domain = append(domain, []interface{}{gb.fieldName, "=", val})
|
||||
}
|
||||
}
|
||||
return domain
|
||||
}
|
||||
|
||||
// isTimeGranularity reports whether g is a date/time truncation
// granularity (one handled via date_trunc in groupbySQLExpr).
func isTimeGranularity(g string) bool {
	return g == "day" || g == "week" || g == "month" ||
		g == "quarter" || g == "year"
}
|
||||
|
||||
// asInt64 converts various numeric types to int64 (ignoring ok).
|
||||
// Uses toInt64 from relational.go when bool result is needed.
|
||||
func asInt64(v interface{}) int64 {
|
||||
n, _ := toInt64(v)
|
||||
return n
|
||||
}
|
||||
Reference in New Issue
Block a user