feat: Portal, Email Inbound, Discuss + module improvements
- Portal: /my/* routes, signup, password reset, portal user support
- Email Inbound: IMAP polling (go-imap/v2), thread matching
- Discuss: mail.channel, long-polling bus, DM, unread count
- Cron: ir.cron runner (goroutine scheduler)
- Bank Import, CSV/Excel Import
- Automation (ir.actions.server)
- Fetchmail service
- HR Payroll model
- Various fixes across account, sale, stock, purchase, crm, hr, project

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
160
pkg/service/automation.go
Normal file
160
pkg/service/automation.go
Normal file
@@ -0,0 +1,160 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/jackc/pgx/v5"
|
||||
"odoo-go/pkg/orm"
|
||||
"odoo-go/pkg/tools"
|
||||
)
|
||||
|
||||
// RunAutomatedActions checks and executes server actions triggered by Create/Write/Unlink.
|
||||
// Called from the ORM after successful Create/Write/Unlink operations.
|
||||
// Mirrors: odoo/addons/base_automation/models/base_automation.py
|
||||
func RunAutomatedActions(env *orm.Environment, modelName, trigger string, recordIDs []int64) {
|
||||
if len(recordIDs) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Look up the ir_model ID for this model
|
||||
var modelID int64
|
||||
err := env.Tx().QueryRow(env.Ctx(),
|
||||
`SELECT id FROM ir_model WHERE model = $1`, modelName).Scan(&modelID)
|
||||
if err != nil {
|
||||
return // Model not in ir_model — no actions possible
|
||||
}
|
||||
|
||||
// Find matching automated actions
|
||||
rows, err := env.Tx().Query(env.Ctx(),
|
||||
`SELECT id, state, COALESCE(update_field_id, ''), COALESCE(update_value, ''),
|
||||
COALESCE(email_to, ''), COALESCE(email_subject, ''), COALESCE(email_body, ''),
|
||||
COALESCE(filter_domain, '')
|
||||
FROM ir_act_server
|
||||
WHERE model_id = $1
|
||||
AND active = true
|
||||
AND trigger = $2
|
||||
ORDER BY sequence, id`, modelID, trigger)
|
||||
if err != nil {
|
||||
log.Printf("automation: query error for %s/%s: %v", modelName, trigger, err)
|
||||
return
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
type action struct {
|
||||
id int64
|
||||
state string
|
||||
updateField string
|
||||
updateValue string
|
||||
emailTo string
|
||||
emailSubject string
|
||||
emailBody string
|
||||
filterDomain string
|
||||
}
|
||||
|
||||
var actions []action
|
||||
for rows.Next() {
|
||||
var a action
|
||||
if err := rows.Scan(&a.id, &a.state, &a.updateField, &a.updateValue,
|
||||
&a.emailTo, &a.emailSubject, &a.emailBody, &a.filterDomain); err != nil {
|
||||
continue
|
||||
}
|
||||
actions = append(actions, a)
|
||||
}
|
||||
|
||||
if len(actions) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
for _, a := range actions {
|
||||
switch a.state {
|
||||
case "object_write":
|
||||
executeObjectWrite(env, modelName, recordIDs, a.updateField, a.updateValue)
|
||||
case "email":
|
||||
executeEmailAction(env, modelName, recordIDs, a.emailTo, a.emailSubject, a.emailBody)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// executeObjectWrite updates a field on the triggered records.
|
||||
func executeObjectWrite(env *orm.Environment, modelName string, recordIDs []int64, fieldName, value string) {
|
||||
if fieldName == "" {
|
||||
return
|
||||
}
|
||||
tableName := strings.ReplaceAll(modelName, ".", "_")
|
||||
for _, id := range recordIDs {
|
||||
_, err := env.Tx().Exec(env.Ctx(),
|
||||
fmt.Sprintf(`UPDATE %s SET %s = $1 WHERE id = $2`,
|
||||
pgx.Identifier{tableName}.Sanitize(),
|
||||
pgx.Identifier{fieldName}.Sanitize()),
|
||||
value, id)
|
||||
if err != nil {
|
||||
log.Printf("automation: object_write error %s.%s on %d: %v", modelName, fieldName, id, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// executeEmailAction sends an email for each triggered record.
|
||||
func executeEmailAction(env *orm.Environment, modelName string, recordIDs []int64, emailToField, subject, bodyTemplate string) {
|
||||
if emailToField == "" {
|
||||
return
|
||||
}
|
||||
|
||||
cfg := tools.LoadSMTPConfig()
|
||||
if cfg.Host == "" {
|
||||
return
|
||||
}
|
||||
|
||||
tableName := strings.ReplaceAll(modelName, ".", "_")
|
||||
|
||||
for _, id := range recordIDs {
|
||||
// Resolve email address from the record
|
||||
var email string
|
||||
err := env.Tx().QueryRow(env.Ctx(),
|
||||
fmt.Sprintf(`SELECT COALESCE(%s, '') FROM %s WHERE id = $1`,
|
||||
pgx.Identifier{emailToField}.Sanitize(),
|
||||
pgx.Identifier{tableName}.Sanitize()),
|
||||
id).Scan(&email)
|
||||
if err != nil || email == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Simple template: replace {{field}} with record values
|
||||
body := bodyTemplate
|
||||
if strings.Contains(body, "{{") {
|
||||
body = resolveTemplate(env, tableName, id, body)
|
||||
}
|
||||
|
||||
if err := tools.SendEmail(cfg, email, subject, body); err != nil {
|
||||
log.Printf("automation: email error to %s for %s/%d: %v", email, modelName, id, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// resolveTemplate replaces {{field_name}} placeholders with actual record values.
|
||||
func resolveTemplate(env *orm.Environment, tableName string, recordID int64, template string) string {
|
||||
result := template
|
||||
for {
|
||||
start := strings.Index(result, "{{")
|
||||
if start == -1 {
|
||||
break
|
||||
}
|
||||
end := strings.Index(result[start:], "}}")
|
||||
if end == -1 {
|
||||
break
|
||||
}
|
||||
fieldName := strings.TrimSpace(result[start+2 : start+end])
|
||||
var val string
|
||||
err := env.Tx().QueryRow(env.Ctx(),
|
||||
fmt.Sprintf(`SELECT COALESCE(CAST(%s AS TEXT), '') FROM %s WHERE id = $1`,
|
||||
pgx.Identifier{fieldName}.Sanitize(),
|
||||
pgx.Identifier{tableName}.Sanitize()),
|
||||
recordID).Scan(&val)
|
||||
if err != nil {
|
||||
val = ""
|
||||
}
|
||||
result = result[:start] + val + result[start+end+2:]
|
||||
}
|
||||
return result
|
||||
}
|
||||
@@ -2,57 +2,70 @@ package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgx/v5/pgxpool"
|
||||
|
||||
"odoo-go/pkg/orm"
|
||||
)
|
||||
|
||||
// CronJob defines a scheduled task.
|
||||
type CronJob struct {
|
||||
Name string
|
||||
Interval time.Duration
|
||||
Handler func(ctx context.Context, pool *pgxpool.Pool) error
|
||||
running bool
|
||||
const (
	// cronPollInterval is the delay between scans of the ir_cron table.
	cronPollInterval = 60 * time.Second
	// maxFailureCount is the number of consecutive failures after which a
	// cron job is deactivated.
	maxFailureCount = 5
)
|
||||
|
||||
// cronJob holds a single scheduled action loaded from the ir_cron table.
type cronJob struct {
	ID             int64
	Name           string
	ModelName      string    // target ORM model name, e.g. "res.partner"
	MethodName     string    // method looked up in orm.Registry and invoked
	UserID         int64     // user the job runs as (required; see executeJob)
	IntervalNumber int       // interval magnitude, combined with IntervalType
	IntervalType   string    // "minutes", "hours", "days", "weeks" or "months"
	NumberCall     int       // remaining runs; <= 0 means unlimited
	NextCall       time.Time // earliest next execution time
}
|
||||
|
||||
// CronScheduler manages periodic jobs.
|
||||
// CronScheduler polls ir_cron and executes ready jobs.
|
||||
// Mirrors: odoo/addons/base/models/ir_cron.py IrCron._process_jobs()
|
||||
type CronScheduler struct {
|
||||
jobs []*CronJob
|
||||
mu sync.Mutex
|
||||
pool *pgxpool.Pool
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
// NewCronScheduler creates a new scheduler.
|
||||
func NewCronScheduler() *CronScheduler {
|
||||
// NewCronScheduler creates a DB-driven cron scheduler.
|
||||
func NewCronScheduler(pool *pgxpool.Pool) *CronScheduler {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
return &CronScheduler{ctx: ctx, cancel: cancel}
|
||||
return &CronScheduler{pool: pool, ctx: ctx, cancel: cancel}
|
||||
}
|
||||
|
||||
// Register adds a job to the scheduler.
|
||||
func (s *CronScheduler) Register(job *CronJob) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.jobs = append(s.jobs, job)
|
||||
// Start begins the polling loop in a background goroutine.
|
||||
func (s *CronScheduler) Start() {
|
||||
s.wg.Add(1)
|
||||
go s.pollLoop()
|
||||
log.Println("cron: scheduler started")
|
||||
}
|
||||
|
||||
// Start begins running all registered jobs.
|
||||
func (s *CronScheduler) Start(pool *pgxpool.Pool) {
|
||||
for _, job := range s.jobs {
|
||||
go s.runJob(job, pool)
|
||||
}
|
||||
log.Printf("cron: started %d jobs", len(s.jobs))
|
||||
}
|
||||
|
||||
// Stop cancels the polling loop and waits for it to finish before returning.
func (s *CronScheduler) Stop() {
	s.cancel()
	s.wg.Wait()
	log.Println("cron: scheduler stopped")
}
|
||||
|
||||
func (s *CronScheduler) runJob(job *CronJob, pool *pgxpool.Pool) {
|
||||
ticker := time.NewTicker(job.Interval)
|
||||
func (s *CronScheduler) pollLoop() {
|
||||
defer s.wg.Done()
|
||||
|
||||
// Run once immediately, then on ticker
|
||||
s.processJobs()
|
||||
|
||||
ticker := time.NewTicker(cronPollInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
@@ -60,9 +73,200 @@ func (s *CronScheduler) runJob(job *CronJob, pool *pgxpool.Pool) {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
if err := job.Handler(s.ctx, pool); err != nil {
|
||||
log.Printf("cron: %s error: %v", job.Name, err)
|
||||
}
|
||||
s.processJobs()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// processJobs queries all ready cron jobs and processes them one by one.
|
||||
func (s *CronScheduler) processJobs() {
|
||||
rows, err := s.pool.Query(s.ctx, `
|
||||
SELECT id, name, model_name, method_name, user_id,
|
||||
interval_number, interval_type, numbercall, nextcall
|
||||
FROM ir_cron
|
||||
WHERE active = true AND nextcall <= now()
|
||||
ORDER BY priority, id
|
||||
`)
|
||||
if err != nil {
|
||||
log.Printf("cron: query error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
var jobs []cronJob
|
||||
for rows.Next() {
|
||||
var j cronJob
|
||||
var modelName, methodName *string // nullable
|
||||
if err := rows.Scan(&j.ID, &j.Name, &modelName, &methodName, &j.UserID,
|
||||
&j.IntervalNumber, &j.IntervalType, &j.NumberCall, &j.NextCall); err != nil {
|
||||
log.Printf("cron: scan error: %v", err)
|
||||
continue
|
||||
}
|
||||
if modelName != nil {
|
||||
j.ModelName = *modelName
|
||||
}
|
||||
if methodName != nil {
|
||||
j.MethodName = *methodName
|
||||
}
|
||||
jobs = append(jobs, j)
|
||||
}
|
||||
rows.Close()
|
||||
|
||||
for _, job := range jobs {
|
||||
s.processOneJob(job)
|
||||
}
|
||||
}
|
||||
|
||||
// processOneJob acquires a row-level lock and executes a single cron job.
//
// Locking uses SELECT ... FOR NO KEY UPDATE SKIP LOCKED so several scheduler
// processes can poll the same database: whichever acquires the row runs the
// job, everyone else skips silently. Scheduling bookkeeping (lastcall,
// nextcall, failure_count, numbercall) happens in this transaction, while the
// job's own work commits separately inside executeJob. A job is deactivated
// after maxFailureCount consecutive failures, or once a finite numbercall
// runs out.
func (s *CronScheduler) processOneJob(job cronJob) {
	tx, err := s.pool.Begin(s.ctx)
	if err != nil {
		return
	}
	// Harmless no-op after the Commit at the bottom succeeds.
	defer tx.Rollback(s.ctx)

	// Try to acquire the job with FOR NO KEY UPDATE SKIP LOCKED
	var lockedID int64
	err = tx.QueryRow(s.ctx, `
		SELECT id FROM ir_cron
		WHERE id = $1 AND active = true AND nextcall <= now()
		FOR NO KEY UPDATE SKIP LOCKED
	`, job.ID).Scan(&lockedID)
	if err != nil {
		// Job already taken by another worker or not ready
		return
	}

	log.Printf("cron: executing %q (id=%d)", job.Name, job.ID)

	execErr := s.executeJob(job)

	// Reschedule relative to completion time, not the stored nextcall.
	now := time.Now()
	nextCall := calculateNextCall(now, job.IntervalNumber, job.IntervalType)

	if execErr != nil {
		log.Printf("cron: %q failed: %v", job.Name, execErr)

		// Update failure count, set first_failure_date if not already set
		if _, err := tx.Exec(s.ctx, `
			UPDATE ir_cron SET
				failure_count = failure_count + 1,
				first_failure_date = COALESCE(first_failure_date, $1),
				lastcall = $1,
				nextcall = $2
			WHERE id = $3
		`, now, nextCall, job.ID); err != nil {
			log.Printf("cron: failed to update failure count for %q: %v", job.Name, err)
		}

		// Deactivate if too many consecutive failures
		if _, err := tx.Exec(s.ctx, `
			UPDATE ir_cron SET active = false
			WHERE id = $1 AND failure_count >= $2
		`, job.ID, maxFailureCount); err != nil {
			log.Printf("cron: failed to deactivate %q: %v", job.Name, err)
		}
	} else {
		log.Printf("cron: %q completed successfully", job.Name)

		if job.NumberCall > 0 {
			// Finite run count: decrement
			newNumberCall := job.NumberCall - 1
			if newNumberCall <= 0 {
				// Last permitted run: archive the job, reset failure state.
				if _, err := tx.Exec(s.ctx, `
					UPDATE ir_cron SET active = false, lastcall = $1, nextcall = $2,
						failure_count = 0, first_failure_date = NULL, numbercall = 0
					WHERE id = $3
				`, now, nextCall, job.ID); err != nil {
					log.Printf("cron: failed to update job %q: %v", job.Name, err)
				}
			} else {
				// More runs remain: persist the decremented counter.
				if _, err := tx.Exec(s.ctx, `
					UPDATE ir_cron SET lastcall = $1, nextcall = $2,
						failure_count = 0, first_failure_date = NULL, numbercall = $3
					WHERE id = $4
				`, now, nextCall, newNumberCall, job.ID); err != nil {
					log.Printf("cron: failed to update job %q: %v", job.Name, err)
				}
			}
		} else {
			// numbercall <= 0 means infinite runs
			if _, err := tx.Exec(s.ctx, `
				UPDATE ir_cron SET lastcall = $1, nextcall = $2,
					failure_count = 0, first_failure_date = NULL
				WHERE id = $3
			`, now, nextCall, job.ID); err != nil {
				log.Printf("cron: failed to update job %q: %v", job.Name, err)
			}
		}
	}

	if err := tx.Commit(s.ctx); err != nil {
		log.Printf("cron: commit error for %q: %v", job.Name, err)
	}
}
|
||||
|
||||
// executeJob looks up the target method in orm.Registry and calls it.
|
||||
func (s *CronScheduler) executeJob(job cronJob) error {
|
||||
if job.ModelName == "" || job.MethodName == "" {
|
||||
return fmt.Errorf("cron %q: model_name or method_name not set", job.Name)
|
||||
}
|
||||
|
||||
model := orm.Registry.Get(job.ModelName)
|
||||
if model == nil {
|
||||
return fmt.Errorf("cron %q: model %q not found", job.Name, job.ModelName)
|
||||
}
|
||||
if model.Methods == nil {
|
||||
return fmt.Errorf("cron %q: model %q has no methods", job.Name, job.ModelName)
|
||||
}
|
||||
method, ok := model.Methods[job.MethodName]
|
||||
if !ok {
|
||||
return fmt.Errorf("cron %q: method %q not found on %q", job.Name, job.MethodName, job.ModelName)
|
||||
}
|
||||
|
||||
// Create ORM environment for job execution
|
||||
uid := job.UserID
|
||||
if uid == 0 {
|
||||
return fmt.Errorf("cron %q: user_id not set, refusing to run as admin", job.Name)
|
||||
}
|
||||
|
||||
env, err := orm.NewEnvironment(s.ctx, orm.EnvConfig{
|
||||
Pool: s.pool,
|
||||
UID: uid,
|
||||
Context: map[string]interface{}{
|
||||
"lastcall": job.NextCall,
|
||||
"cron_id": job.ID,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("cron %q: env error: %w", job.Name, err)
|
||||
}
|
||||
defer env.Close()
|
||||
|
||||
// Call the method on an empty recordset of the target model
|
||||
_, err = method(env.Model(job.ModelName))
|
||||
if err != nil {
|
||||
env.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
return env.Commit()
|
||||
}
|
||||
|
||||
// calculateNextCall computes the next execution time based on interval.
|
||||
// Mirrors: odoo/addons/base/models/ir_cron.py _intervalTypes
|
||||
func calculateNextCall(from time.Time, number int, intervalType string) time.Time {
|
||||
switch intervalType {
|
||||
case "minutes":
|
||||
return from.Add(time.Duration(number) * time.Minute)
|
||||
case "hours":
|
||||
return from.Add(time.Duration(number) * time.Hour)
|
||||
case "days":
|
||||
return from.AddDate(0, 0, number)
|
||||
case "weeks":
|
||||
return from.AddDate(0, 0, number*7)
|
||||
case "months":
|
||||
return from.AddDate(0, number, 0)
|
||||
default:
|
||||
return from.Add(time.Duration(number) * time.Hour)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -330,6 +330,8 @@ func SeedWithSetup(ctx context.Context, pool *pgxpool.Pool, cfg SetupConfig) err
|
||||
VALUES (1, 1, true, 1, 1) ON CONFLICT (id) DO NOTHING`)
|
||||
})
|
||||
|
||||
safeExec(ctx, tx, "base_groups", func() { seedBaseGroups(ctx, tx) })
|
||||
safeExec(ctx, tx, "acl_rules", func() { seedACLRules(ctx, tx) })
|
||||
safeExec(ctx, tx, "system_params", func() { seedSystemParams(ctx, tx) })
|
||||
safeExec(ctx, tx, "languages", func() { seedLanguages(ctx, tx) })
|
||||
safeExec(ctx, tx, "translations", func() { seedTranslations(ctx, tx) })
|
||||
@@ -1676,3 +1678,136 @@ func generateUUID() string {
|
||||
b[0:4], b[4:6], b[6:8], b[8:10], b[10:16])
|
||||
}
|
||||
|
||||
// seedBaseGroups creates the base security groups and their XML IDs.
|
||||
// Mirrors: odoo/addons/base/security/base_groups.xml
|
||||
func seedBaseGroups(ctx context.Context, tx pgx.Tx) {
|
||||
log.Println("db: seeding base security groups...")
|
||||
|
||||
type groupDef struct {
|
||||
id int64
|
||||
name string
|
||||
xmlID string
|
||||
}
|
||||
groups := []groupDef{
|
||||
{1, "Internal User", "group_user"},
|
||||
{2, "Settings", "group_system"},
|
||||
{3, "Access Rights", "group_erp_manager"},
|
||||
{4, "Allow Export", "group_allow_export"},
|
||||
{5, "Portal", "group_portal"},
|
||||
{6, "Public", "group_public"},
|
||||
}
|
||||
|
||||
for _, g := range groups {
|
||||
tx.Exec(ctx, `INSERT INTO res_groups (id, name)
|
||||
VALUES ($1, $2) ON CONFLICT (id) DO NOTHING`, g.id, g.name)
|
||||
tx.Exec(ctx, `INSERT INTO ir_model_data (module, name, model, res_id)
|
||||
VALUES ('base', $1, 'res.groups', $2) ON CONFLICT DO NOTHING`, g.xmlID, g.id)
|
||||
}
|
||||
|
||||
// Add admin user (uid=1) to all groups
|
||||
for _, g := range groups {
|
||||
tx.Exec(ctx, `INSERT INTO res_groups_res_users_rel (res_groups_id, res_users_id)
|
||||
VALUES ($1, 1) ON CONFLICT DO NOTHING`, g.id)
|
||||
}
|
||||
}
|
||||
|
||||
// seedACLRules creates access control entries for ALL registered models.
// Categorizes models into security tiers and assigns appropriate permissions.
// Mirrors: odoo/addons/base/security/ir.model.access.csv + per-module CSVs
//
// Tiers: (1) system-only, (2) admin-only, (3) reference data read-only for
// users, (4) everything else — full access for regular users. Inserts rely on
// ON CONFLICT DO NOTHING, so reseeding is idempotent.
func seedACLRules(ctx context.Context, tx pgx.Tx) {
	log.Println("db: seeding ACL rules for all models...")

	// Resolve group IDs via their base-module XML IDs; without them no ACL
	// can be attached, so bail out early.
	var groupSystem, groupUser int64
	err := tx.QueryRow(ctx,
		`SELECT g.id FROM res_groups g
		 JOIN ir_model_data imd ON imd.res_id = g.id AND imd.model = 'res.groups'
		 WHERE imd.module = 'base' AND imd.name = 'group_system'`).Scan(&groupSystem)
	if err != nil {
		log.Printf("db: cannot find group_system, skipping ACL seeding: %v", err)
		return
	}
	err = tx.QueryRow(ctx,
		`SELECT g.id FROM res_groups g
		 JOIN ir_model_data imd ON imd.res_id = g.id AND imd.model = 'res.groups'
		 WHERE imd.module = 'base' AND imd.name = 'group_user'`).Scan(&groupUser)
	if err != nil {
		log.Printf("db: cannot find group_user, skipping ACL seeding: %v", err)
		return
	}

	// ── Security Tiers ──────────────────────────────────────────────
	// Tier 1: System-only — only group_system gets full access
	systemOnly := map[string]bool{
		"ir.cron": true, "ir.rule": true, "ir.model.access": true,
	}

	// Tier 2: Admin-only — group_user=read, group_system=full
	adminOnly := map[string]bool{
		"ir.model": true, "ir.model.fields": true, "ir.model.data": true,
		"ir.module.category": true, "ir.actions.server": true, "ir.sequence": true,
		"ir.logging": true, "ir.config_parameter": true, "ir.default": true,
		"ir.translation": true, "ir.actions.report": true, "report.paperformat": true,
		"res.config.settings": true,
	}

	// Tier 3: Read-only for users — group_user=read, group_system=full
	// NOTE(review): tiers 2 and 3 currently grant identical permissions;
	// presumably kept separate so they can diverge later — confirm intent.
	readOnly := map[string]bool{
		"res.currency": true, "res.currency.rate": true,
		"res.country": true, "res.country.state": true, "res.country.group": true,
		"res.lang": true, "uom.category": true, "uom.uom": true,
		"product.category": true, "product.removal": true,
		"account.account.tag": true, "account.group": true,
		"account.tax.group": true, "account.tax.repartition.line": true,
	}

	// Everything else → Tier 4: Standard user (group_user=full, group_system=full)

	// Helper to insert an ACL rule. Exec errors are intentionally ignored:
	// seeding is best-effort and ON CONFLICT covers reruns.
	insertACL := func(modelID int64, modelName string, groupID int64, suffix string, read, write, create, unlink bool) {
		aclName := "access_" + strings.ReplaceAll(modelName, ".", "_") + "_" + suffix
		tx.Exec(ctx, `
			INSERT INTO ir_model_access (name, model_id, group_id, perm_read, perm_write, perm_create, perm_unlink, active)
			VALUES ($1, $2, $3, $4, $5, $6, $7, true)
			ON CONFLICT DO NOTHING`,
			aclName, modelID, groupID, read, write, create, unlink)
	}

	// Iterate ALL registered models
	allModels := orm.Registry.Models()
	seeded := 0
	for _, m := range allModels {
		modelName := m.Name()
		if m.IsAbstract() {
			continue // Abstract models have no table → no ACL needed
		}

		// Look up ir_model ID
		var modelID int64
		err := tx.QueryRow(ctx,
			"SELECT id FROM ir_model WHERE model = $1", modelName).Scan(&modelID)
		if err != nil {
			continue // Not yet in ir_model — will be seeded on next restart
		}

		if systemOnly[modelName] {
			// Tier 1: only group_system full access
			insertACL(modelID, modelName, groupSystem, "system", true, true, true, true)
		} else if adminOnly[modelName] {
			// Tier 2: group_user=read, group_system=full
			insertACL(modelID, modelName, groupUser, "user_read", true, false, false, false)
			insertACL(modelID, modelName, groupSystem, "system", true, true, true, true)
		} else if readOnly[modelName] {
			// Tier 3: group_user=read, group_system=full
			insertACL(modelID, modelName, groupUser, "user_read", true, false, false, false)
			insertACL(modelID, modelName, groupSystem, "system", true, true, true, true)
		} else {
			// Tier 4: group_user=full, group_system=full
			insertACL(modelID, modelName, groupUser, "user", true, true, true, true)
			insertACL(modelID, modelName, groupSystem, "system", true, true, true, true)
		}
		seeded++
	}
	log.Printf("db: seeded ACL rules for %d models", seeded)
}
|
||||
|
||||
|
||||
255
pkg/service/fetchmail.go
Normal file
255
pkg/service/fetchmail.go
Normal file
@@ -0,0 +1,255 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/emersion/go-imap/v2"
|
||||
"github.com/emersion/go-imap/v2/imapclient"
|
||||
gomessage "github.com/emersion/go-message"
|
||||
_ "github.com/emersion/go-message/charset"
|
||||
"github.com/jackc/pgx/v5/pgxpool"
|
||||
)
|
||||
|
||||
// FetchmailConfig holds IMAP server configuration.
type FetchmailConfig struct {
	Host     string
	Port     int
	User     string
	Password string
	UseTLS   bool
	Folder   string
}

// LoadFetchmailConfig loads IMAP settings from environment variables.
//
// Recognized variables: IMAP_HOST, IMAP_USER, IMAP_PASSWORD, IMAP_FOLDER
// (default "INBOX") and IMAP_TLS — the literal value "false" disables TLS
// and, while the port is still the implicit TLS default 993, switches to the
// plaintext IMAP port 143.
func LoadFetchmailConfig() *FetchmailConfig {
	cfg := &FetchmailConfig{
		Port:     993,
		UseTLS:   true,
		Folder:   "INBOX",
		Host:     os.Getenv("IMAP_HOST"),
		User:     os.Getenv("IMAP_USER"),
		Password: os.Getenv("IMAP_PASSWORD"),
	}

	if folder := os.Getenv("IMAP_FOLDER"); folder != "" {
		cfg.Folder = folder
	}

	if os.Getenv("IMAP_TLS") == "false" {
		cfg.UseTLS = false
		if cfg.Port == 993 {
			cfg.Port = 143
		}
	}
	return cfg
}
|
||||
|
||||
// FetchAndProcessEmails connects to IMAP, fetches unseen emails, and creates
|
||||
// mail.message records in the database. Matches emails to existing threads
|
||||
// via In-Reply-To/References headers.
|
||||
// Mirrors: odoo/addons/fetchmail/models/fetchmail.py fetch_mail()
|
||||
func FetchAndProcessEmails(ctx context.Context, pool *pgxpool.Pool) error {
|
||||
cfg := LoadFetchmailConfig()
|
||||
if cfg.Host == "" {
|
||||
return nil // IMAP not configured
|
||||
}
|
||||
|
||||
addr := fmt.Sprintf("%s:%d", cfg.Host, cfg.Port)
|
||||
|
||||
var c *imapclient.Client
|
||||
var err error
|
||||
if cfg.UseTLS {
|
||||
c, err = imapclient.DialTLS(addr, nil)
|
||||
} else {
|
||||
c, err = imapclient.DialInsecure(addr, nil)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetchmail: connect to %s: %w", addr, err)
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
if err := c.Login(cfg.User, cfg.Password).Wait(); err != nil {
|
||||
return fmt.Errorf("fetchmail: login as %s: %w", cfg.User, err)
|
||||
}
|
||||
|
||||
if _, err := c.Select(cfg.Folder, nil).Wait(); err != nil {
|
||||
return fmt.Errorf("fetchmail: select %s: %w", cfg.Folder, err)
|
||||
}
|
||||
|
||||
// Search unseen
|
||||
criteria := &imap.SearchCriteria{
|
||||
NotFlag: []imap.Flag{imap.FlagSeen},
|
||||
}
|
||||
searchData, err := c.Search(criteria, nil).Wait()
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetchmail: search: %w", err)
|
||||
}
|
||||
|
||||
seqSet := searchData.All
|
||||
if seqSet == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fetch envelope + body
|
||||
fetchOpts := &imap.FetchOptions{
|
||||
Envelope: true,
|
||||
BodySection: []*imap.FetchItemBodySection{{}},
|
||||
}
|
||||
msgs, err := c.Fetch(seqSet, fetchOpts).Collect()
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetchmail: fetch: %w", err)
|
||||
}
|
||||
|
||||
var processed int
|
||||
for _, msg := range msgs {
|
||||
if err := processOneEmail(ctx, pool, msg); err != nil {
|
||||
log.Printf("fetchmail: process error: %v", err)
|
||||
continue
|
||||
}
|
||||
processed++
|
||||
}
|
||||
|
||||
// Mark as seen
|
||||
if processed > 0 {
|
||||
storeFlags := &imap.StoreFlags{
|
||||
Op: imap.StoreFlagsAdd,
|
||||
Flags: []imap.Flag{imap.FlagSeen},
|
||||
}
|
||||
if _, err := c.Store(seqSet, storeFlags, nil).Collect(); err != nil {
|
||||
log.Printf("fetchmail: mark seen error: %v", err)
|
||||
}
|
||||
log.Printf("fetchmail: processed %d new emails", processed)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func processOneEmail(ctx context.Context, pool *pgxpool.Pool, buf *imapclient.FetchMessageBuffer) error {
|
||||
env := buf.Envelope
|
||||
if env == nil {
|
||||
return fmt.Errorf("no envelope")
|
||||
}
|
||||
|
||||
subject := env.Subject
|
||||
messageID := env.MessageID
|
||||
inReplyTo := env.InReplyTo
|
||||
date := env.Date
|
||||
|
||||
var fromEmail, fromName string
|
||||
if len(env.From) > 0 {
|
||||
fromEmail = fmt.Sprintf("%s@%s", env.From[0].Mailbox, env.From[0].Host)
|
||||
fromName = env.From[0].Name
|
||||
}
|
||||
|
||||
// Extract body from body section
|
||||
var bodyText string
|
||||
bodyBytes := buf.FindBodySection(&imap.FetchItemBodySection{})
|
||||
if bodyBytes != nil {
|
||||
bodyText = parseEmailBody(bodyBytes)
|
||||
}
|
||||
if bodyText == "" {
|
||||
bodyText = "(no body)"
|
||||
}
|
||||
|
||||
// Find author partner by email
|
||||
var authorID int64
|
||||
pool.QueryRow(ctx,
|
||||
`SELECT id FROM res_partner WHERE LOWER(email) = LOWER($1) LIMIT 1`, fromEmail,
|
||||
).Scan(&authorID)
|
||||
|
||||
// Thread matching via In-Reply-To
|
||||
var parentModel string
|
||||
var parentResID int64
|
||||
if len(inReplyTo) > 0 && inReplyTo[0] != "" {
|
||||
pool.QueryRow(ctx,
|
||||
`SELECT model, res_id FROM mail_message
|
||||
WHERE message_id = $1 AND model IS NOT NULL AND res_id IS NOT NULL
|
||||
LIMIT 1`, inReplyTo[0],
|
||||
).Scan(&parentModel, &parentResID)
|
||||
}
|
||||
|
||||
// Fallback: match by subject
|
||||
if parentModel == "" && subject != "" {
|
||||
clean := subject
|
||||
for _, prefix := range []string{"Re: ", "RE: ", "Fwd: ", "FW: ", "AW: "} {
|
||||
clean = strings.TrimPrefix(clean, prefix)
|
||||
}
|
||||
pool.QueryRow(ctx,
|
||||
`SELECT model, res_id FROM mail_message
|
||||
WHERE subject = $1 AND model IS NOT NULL AND res_id IS NOT NULL
|
||||
ORDER BY id DESC LIMIT 1`, clean,
|
||||
).Scan(&parentModel, &parentResID)
|
||||
}
|
||||
|
||||
_, err := pool.Exec(ctx,
|
||||
`INSERT INTO mail_message
|
||||
(subject, body, message_type, email_from, author_id, model, res_id,
|
||||
date, message_id, create_uid, write_uid, create_date, write_date)
|
||||
VALUES ($1, $2, 'email', $3, $4, $5, $6, $7, $8, 1, 1, NOW(), NOW())`,
|
||||
subject, bodyText,
|
||||
fmt.Sprintf("%s <%s>", fromName, fromEmail),
|
||||
nilIfZero(authorID),
|
||||
nilIfEmpty(parentModel),
|
||||
nilIfZero(parentResID),
|
||||
date,
|
||||
messageID,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
func parseEmailBody(raw []byte) string {
|
||||
entity, err := gomessage.Read(strings.NewReader(string(raw)))
|
||||
if err != nil {
|
||||
return string(raw) // fallback: raw text
|
||||
}
|
||||
|
||||
if mr := entity.MultipartReader(); mr != nil {
|
||||
var htmlBody, textBody string
|
||||
for {
|
||||
part, err := mr.NextPart()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
ct, _, _ := part.Header.ContentType()
|
||||
body, _ := io.ReadAll(part.Body)
|
||||
switch {
|
||||
case strings.HasPrefix(ct, "text/html"):
|
||||
htmlBody = string(body)
|
||||
case strings.HasPrefix(ct, "text/plain"):
|
||||
textBody = string(body)
|
||||
}
|
||||
}
|
||||
if htmlBody != "" {
|
||||
return htmlBody
|
||||
}
|
||||
return textBody
|
||||
}
|
||||
|
||||
// Single part
|
||||
body, _ := io.ReadAll(entity.Body)
|
||||
return string(body)
|
||||
}
|
||||
|
||||
// nilIfZero maps a zero int64 to nil so it binds as SQL NULL; any other
// value passes through unchanged.
func nilIfZero(v int64) interface{} {
	if v != 0 {
		return v
	}
	return nil
}
|
||||
|
||||
// nilIfEmpty maps an empty string to nil so it binds as SQL NULL; any other
// value passes through unchanged.
func nilIfEmpty(v string) interface{} {
	if v != "" {
		return v
	}
	return nil
}
|
||||
|
||||
// RegisterFetchmailCron ensures the message_id column exists for thread matching.
|
||||
func RegisterFetchmailCron(ctx context.Context, pool *pgxpool.Pool) {
|
||||
pool.Exec(ctx, `ALTER TABLE mail_message ADD COLUMN IF NOT EXISTS message_id VARCHAR(255)`)
|
||||
pool.Exec(ctx, `CREATE INDEX IF NOT EXISTS idx_mail_message_message_id ON mail_message(message_id)`)
|
||||
log.Println("fetchmail: ready (IMAP config via IMAP_HOST/IMAP_USER/IMAP_PASSWORD env vars)")
|
||||
}
|
||||
Reference in New Issue
Block a user