This commit is contained in:
anhduy-tech
2026-03-19 11:57:52 +07:00
parent 7fc8138f70
commit f76cd880e1
26 changed files with 1248 additions and 2719 deletions

Binary file not shown.

Binary file not shown.

View File

@@ -14,10 +14,9 @@ Including another URLconf
2. Add a URL to urlpatterns: re_path('blog/', include('blog.urls'))
"""
from django.urls import re_path
from app import views, cob, payment, cleardata, email, backup, server,api_workflow, importdata
from app import views, cob, payment, cleardata, email, backup, server, importdata
urlpatterns = [
re_path("workflow/execute/$", api_workflow.execute_workflow),
# Existing Endpoints
re_path('get-model/$', views.get_model),
@@ -49,7 +48,6 @@ urlpatterns = [
re_path('set-token-expiry/', views.set_token_expiry),
re_path('download-contract/(?P<name>.+)', views.download_contract),
re_path('execute-command/$', server.execute_command),
re_path('excel-import/$', views.ExcelImportAPIView.as_view()),
re_path('generate-document/$',views.generate_document),
re_path('model-fields/(?P<name>.+)/', importdata.model_fields),
re_path('read-excel/', importdata.read_excel),

View File

@@ -1,28 +0,0 @@
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status
from app.workflow_engine import run_workflow
from datetime import datetime # Thêm import
@api_view(["POST"])
def execute_workflow(request):
    """
    POST endpoint that executes a named workflow.

    Body: {"workflow_code": str, "trigger": str, ...extra context keys...}
    Returns {"success": True, "result": ...} on success, or
    {"error": ...} with HTTP 400 on validation or execution failure.
    """
    try:
        workflow_code = request.data.get("workflow_code")
        trigger = request.data.get("trigger")
        # Validate required parameters before doing any further work.
        if not workflow_code or not trigger:
            return Response({"error": "workflow_code & trigger are required"},
                            status=status.HTTP_400_BAD_REQUEST)
        # The request payload doubles as the workflow's initial context.
        context = dict(request.data)
        # System variable: today's date, so serializers can fill a 'date' field.
        context["current_date"] = datetime.now().strftime("%Y-%m-%d")
        result = run_workflow(workflow_code, trigger, context)
        return Response({"success": True, "result": result})
    except Exception as e:
        # Surface the failure reason to the caller.
        return Response({"error": str(e)}, status=status.HTTP_400_BAD_REQUEST)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,31 @@
# Generated by Django 5.1.7 on 2026-03-19 04:54
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the dealer permission tables: Dealer_Rights and Dealer_Setting."""

    dependencies = [
        ('app', '0001_initial'),
    ]

    operations = [
        # Drop the composite uniqueness constraint first so the FK columns
        # below can be removed cleanly.
        migrations.AlterUniqueTogether(
            name='dealer_rights',
            unique_together=None,
        ),
        # Remove both foreign keys off Dealer_Rights before deleting the models.
        migrations.RemoveField(
            model_name='dealer_rights',
            name='setting',
        ),
        migrations.RemoveField(
            model_name='dealer_rights',
            name='user',
        ),
        # Dealer_Setting can go first because the 'setting' FK was removed above.
        migrations.DeleteModel(
            name='Dealer_Setting',
        ),
        migrations.DeleteModel(
            name='Dealer_Rights',
        ),
    ]

File diff suppressed because it is too large Load Diff

View File

@@ -4,12 +4,8 @@ from django.apps import apps
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
from django.db import transaction
# Import hàm get_serializer đã có
from .views import get_serializer
# Danh sách các model không muốn theo dõi để tránh "nhiễu"
# Ví dụ: các model của admin, session, hoặc các model log không cần real-time
MODELS_TO_IGNORE = ['LogEntry', 'Session', 'ContentType', 'AdminLog', 'Permission', 'Group', 'Token', 'Phone_Otp']
def send_model_update(instance, change_type):
@@ -19,35 +15,29 @@ def send_model_update(instance, change_type):
model_class = instance.__class__
model_name = model_class._meta.model_name
# Bỏ qua các model trong danh sách ignore
if model_class.__name__ in MODELS_TO_IGNORE:
return
# Lấy serializer một cách linh động
_model, serializer_class = get_serializer(model_name)
if not serializer_class:
print(f"Warning: No serializer found for model {model_name}. Cannot send update.")
return
# Serialize instance đã thay đổi
# Đối với 'delete', instance vẫn còn tồn tại trong bộ nhớ tại thời điểm này
serializer = serializer_class(instance)
# Chuẩn bị payload để gửi đi
payload = {
"name": model_name,
"change_type": change_type,
"record": serializer.data
}
# Gửi tin nhắn đến group tương ứng
channel_layer = get_channel_layer()
group_name = f"model_{model_name}_updates"
async_to_sync(channel_layer.group_send)(
group_name,
{
"type": "realtime.update", # Khớp với tên phương thức trong DataConsumer
"type": "realtime.update",
"payload": payload
}
)
@@ -59,14 +49,10 @@ def generic_post_save_handler(sender, instance, created, **kwargs):
def send_update_after_commit():
change_type = "created" if created else "updated"
try:
# Re-fetch the instance to ensure we have the committed data
refreshed_instance = sender.objects.get(pk=instance.pk)
send_model_update(refreshed_instance, change_type)
except sender.DoesNotExist:
# Object đã bị xóa (ví dụ: delete_entry vừa xóa Internal_Entry)
# Bỏ qua việc gửi update, hoặc gửi thông báo "deleted" nếu cần
print(f"Object {sender.__name__} {instance.pk} đã bị xóa, bỏ qua gửi update.")
# Optional: vẫn gửi "deleted" để frontend biết object không còn
send_model_update(instance, "deleted")
except Exception as exc:
print(f"Lỗi trong send_update_after_commit: {exc}")
@@ -77,9 +63,7 @@ def generic_post_delete_handler(sender, instance, **kwargs):
"""
Hàm xử lý chung cho tín hiệu post_delete từ BẤT KỲ model nào.
"""
# For delete, the action happens immediately, so on_commit is not strictly necessary
# unless the delete is part of a larger transaction that could be rolled back.
# It's safer to use it anyway.
def send_delete_after_commit():
send_model_update(instance, "deleted")
@@ -95,4 +79,3 @@ def connect_signals():
print("Connected generic signals for real-time updates.")
# File apps.py của bạn đã gọi hàm connect_signals() này rồi, nên mọi thứ sẽ tự động hoạt động.

View File

@@ -1042,277 +1042,6 @@ def set_token_expiry(request):
return Response(status = status.HTTP_200_OK)
#=============================================================================
class ExcelImportAPIView(APIView):
    """
    Bulk-import rows from an uploaded Excel/CSV file into a dynamically
    chosen model of the 'app' application.

    Expects multipart/form-data with:
      - file:   the .xlsx/.xls/.csv upload
      - config: a JSON string with model_name, column->field mappings,
                import_mode ('insert_only' | 'overwrite' | 'upsert'),
                header_row_index (1-based) and unique_fields (for upsert).
    """
    # Accept multipart uploads (file + JSON config string).
    parser_classes = (MultiPartParser, FormParser)

    def post(self, request, format=None):
        # ---- 1. Validate the upload and parse the JSON configuration ----
        excel_file = request.FILES.get('file')
        if not excel_file:
            return Response({'error': 'No Excel file provided (key "file" not found)'}, status=status.HTTP_400_BAD_REQUEST)
        config_str = request.data.get('config')
        if not config_str:
            return Response({'error': 'No configuration provided (key "config" not found)'}, status=status.HTTP_400_BAD_REQUEST)
        try:
            config = json.loads(config_str)
        except json.JSONDecodeError:
            return Response({'error': 'Invalid JSON configuration'}, status=status.HTTP_400_BAD_REQUEST)
        model_name = config.get('model_name')
        mappings = config.get('mappings', [])
        import_mode = config.get('import_mode', 'insert_only')
        # header_row_index is 1-based in the config; pandas wants 0-based.
        header_row_excel = config.get('header_row_index', 1)
        header_index = max(0, header_row_excel - 1)
        # Resolve the unique-key field(s) used by upsert mode.
        unique_fields_config = config.get('unique_fields', 'code')
        if isinstance(unique_fields_config, str):
            UNIQUE_KEY_FIELDS = [unique_fields_config]
        elif isinstance(unique_fields_config, list):
            UNIQUE_KEY_FIELDS = unique_fields_config
        else:
            return Response({'error': 'Invalid format for unique_fields. Must be a string or a list of strings.'}, status=status.HTTP_400_BAD_REQUEST)
        if not model_name or not mappings:
            return Response({'error': 'model_name or mappings missing in configuration'}, status=status.HTTP_400_BAD_REQUEST)
        try:
            TargetModel = apps.get_model('app', model_name)
        except LookupError:
            return Response({'error': f'Model "{model_name}" not found in app'}, status=status.HTTP_400_BAD_REQUEST)
        # ---- 2. Resolve every related model referenced by a foreign_key mapping ----
        related_models_cache = {}
        for mapping in mappings:
            if 'foreign_key' in mapping:
                fk_config = mapping['foreign_key']
                related_model_name = fk_config.get('model_name')
                if related_model_name:
                    try:
                        related_models_cache[related_model_name] = apps.get_model('app', related_model_name)
                    except LookupError:
                        return Response({'error': f"Related model '{related_model_name}' not found for mapping '{mapping.get('excel_column')}'"}, status=status.HTTP_400_BAD_REQUEST)
        # ---- 3. Read the spreadsheet into a DataFrame ----
        try:
            file_stream = io.BytesIO(excel_file.read())
            if excel_file.name.lower().endswith(('.xlsx', '.xls')):
                df = pd.read_excel(file_stream, header=header_index)
            else:
                df = pd.read_csv(file_stream, header=header_index)
        except Exception as e:
            return Response({'error': f'Error reading file: {str(e)}'}, status=status.HTTP_400_BAD_REQUEST)
        # Normalize headers: strip whitespace/newlines, drop "(...)" suffixes,
        # and collapse internal runs of spaces.
        cleaned_columns = []
        for col in df.columns:
            col_str = str(col).strip()
            col_str = col_str.replace('\n', ' ').strip()
            col_str = re.sub(r'\s*\([^)]*\)', '', col_str).strip()
            col_str = ' '.join(col_str.split())
            cleaned_columns.append(col_str)
        df.columns = cleaned_columns
        df.reset_index(drop=True, inplace=True)
        # ---- 4. Pre-load related objects keyed by their (lowercased) lookup value ----
        related_obj_cache = {}
        for related_name, RelatedModel in related_models_cache.items():
            lookup_field = next((m['foreign_key']['lookup_field'] for m in mappings if 'foreign_key' in m and m['foreign_key']['model_name'] == related_name), None)
            if lookup_field:
                try:
                    related_obj_cache[related_name] = {
                        str(getattr(obj, lookup_field)).strip().lower(): obj
                        for obj in RelatedModel.objects.all()
                    }
                    # NOTE(review): this checks for the literal string 'pk' among
                    # the cached lookup values, which is almost never present, so
                    # the pk-keyed fallback entries are effectively always added
                    # (and re-query the table) — confirm whether that was intended.
                    if 'pk' not in related_obj_cache[related_name]:
                        related_obj_cache[related_name].update({
                            str(obj.pk): obj for obj in RelatedModel.objects.all()
                        })
                except Exception as e:
                    return Response({'error': f"Error caching related model {related_name}: {e}"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        # ---- 5. Build model instances row by row, collecting per-row errors ----
        objects_to_create = []
        errors = []
        for index, row in df.iterrows():
            instance_data = {}
            row_errors = []
            is_valid_for_db = True
            for mapping in mappings:
                excel_column = mapping.get('excel_column')
                model_field = mapping.get('model_field')
                default_value = mapping.get('default_value')
                excel_value = None
                is_static_default = False
                # Section 1: determine the value source (static default vs Excel cell).
                if not excel_column and default_value is not None:
                    # No Excel column -> always use the static default value.
                    excel_value = default_value
                    is_static_default = True
                elif excel_column and excel_column in row:
                    excel_value = row[excel_column]
                    is_static_default = False
                    # Fall back to default_value when the cell is empty.
                    if (pd.isna(excel_value) or (isinstance(excel_value, str) and str(excel_value).strip() == '')) and default_value is not None:
                        excel_value = default_value
                        # Treat as static so Section 2 (NULL checks) is bypassed.
                        is_static_default = True
                elif excel_column and excel_column not in row:
                    row_errors.append(f"Excel column '{excel_column}' not found (Header index: {header_row_excel})")
                    is_valid_for_db = False
                    continue
                elif excel_column is None and default_value is None:
                    continue
                else:
                    row_errors.append(f"Invalid mapping entry: {mapping} - requires excel_column or default_value")
                    is_valid_for_db = False
                    continue
                # Section 2: handle NULL/empty cells (only for real Excel values).
                if not is_static_default and (pd.isna(excel_value) or (isinstance(excel_value, str) and str(excel_value).strip() == '')):
                    try:
                        field_obj = TargetModel._meta.get_field(model_field)
                    except FieldDoesNotExist:
                        row_errors.append(f"Model field '{model_field}' not found in model '{model_name}'")
                        is_valid_for_db = False
                        continue
                    # Nullable field: store None.
                    if field_obj.null:
                        instance_data[model_field] = None
                        continue
                    # Field with a model-level default: use it.
                    elif field_obj.default is not models_fields.NOT_PROVIDED:
                        instance_data[model_field] = field_obj.default
                        continue
                    # Non-nullable field with no default.
                    else:
                        allow_empty_non_nullable = mapping.get('allow_empty_excel_non_nullable', False)
                        # Only Char/Text fields can satisfy NOT NULL with "".
                        if allow_empty_non_nullable and isinstance(field_obj, (CharField, TextField)):
                            instance_data[model_field] = ""
                            continue
                        row_errors.append(f"Non-nullable field '{model_field}' has empty value in row {index + 1}")
                        is_valid_for_db = False
                        instance_data[model_field] = "" if isinstance(field_obj, (CharField, TextField)) else None
                        continue
                # Section 3: resolve foreign keys through the pre-loaded cache.
                if 'foreign_key' in mapping:
                    fk_config = mapping['foreign_key']
                    related_model_name = fk_config.get('model_name')
                    key_to_lookup = str(excel_value).strip().lower()
                    RelatedModelCache = related_obj_cache.get(related_model_name, {})
                    related_obj = RelatedModelCache.get(key_to_lookup)
                    # Fallback: static numeric defaults may reference the pk directly.
                    if not related_obj and is_static_default and str(excel_value).isdigit():
                        related_obj = RelatedModelCache.get(str(excel_value))
                    if related_obj:
                        instance_data[model_field] = related_obj
                    else:
                        # Empty/zero lookup on a nullable FK -> store None silently.
                        if (pd.isna(excel_value) or str(excel_value).strip() == '' or str(excel_value).strip() == '0') and TargetModel._meta.get_field(model_field).null:
                            instance_data[model_field] = None
                            continue
                        # Missing FK target: record the error; it only invalidates
                        # the row when the field is non-nullable.
                        row_errors.append(f"Related object for '{model_field}' with value '{excel_value}' not found in model '{related_model_name}' (row {index + 1})")
                        if not TargetModel._meta.get_field(model_field).null:
                            is_valid_for_db = False
                        instance_data[model_field] = None
                        continue
                else:
                    instance_data[model_field] = excel_value
            if row_errors:
                errors.append({'row': index + 1, 'messages': row_errors})
            if is_valid_for_db:
                try:
                    objects_to_create.append(TargetModel(**instance_data))
                except Exception as e:
                    errors.append({'row': index + 1, 'messages': [f"Critical error creating model instance: {str(e)}"]})
        successful_row_count = len(objects_to_create)
        # ---- 6. Persist according to the requested import mode ----
        try:
            with transaction.atomic():
                if import_mode == 'overwrite':
                    # Wipe the table, then insert everything that validated.
                    TargetModel.objects.all().delete()
                    TargetModel.objects.bulk_create(objects_to_create)
                    message = f'{successful_row_count} records imported successfully after full **overwrite**.'
                elif import_mode == 'upsert':
                    # Verify the unique-key fields exist on the target model.
                    for field in UNIQUE_KEY_FIELDS:
                        try:
                            TargetModel._meta.get_field(field)
                        except FieldDoesNotExist:
                            return Response({'error': f"Unique field '{field}' not found in model '{model_name}'. Cannot perform upsert."}, status=status.HTTP_400_BAD_REQUEST)
                    # Map existing rows by their unique-key tuple.
                    existing_objects_query = TargetModel.objects.only('pk', *UNIQUE_KEY_FIELDS)
                    existing_map = {}
                    for obj in existing_objects_query:
                        key_tuple = tuple(getattr(obj, field) for field in UNIQUE_KEY_FIELDS)
                        existing_map[key_tuple] = obj
                    # Split new instances into updates (key exists) and inserts.
                    to_update = []
                    to_insert = []
                    for new_instance in objects_to_create:
                        try:
                            lookup_key = tuple(getattr(new_instance, field) for field in UNIQUE_KEY_FIELDS)
                        except AttributeError:
                            continue
                        if lookup_key in existing_map:
                            # Adopt the existing pk so bulk_update overwrites in place.
                            new_instance.pk = existing_map[lookup_key].pk
                            to_update.append(new_instance)
                        else:
                            to_insert.append(new_instance)
                    # Update every mapped field except pk and the key fields themselves.
                    update_fields = [
                        m['model_field']
                        for m in mappings
                        if m['model_field'] not in ['pk'] and m['model_field'] not in UNIQUE_KEY_FIELDS
                    ]
                    TargetModel.objects.bulk_update(to_update, update_fields)
                    TargetModel.objects.bulk_create(to_insert)
                    message = f'{len(to_insert)} records inserted, {len(to_update)} records updated successfully (Upsert mode).'
                elif import_mode == 'insert_only':
                    TargetModel.objects.bulk_create(objects_to_create)
                    message = f'{successful_row_count} records imported successfully (Insert Only mode).'
                else:
                    return Response({'error': f"Invalid import_mode specified: {import_mode}"}, status=status.HTTP_400_BAD_REQUEST)
        except Exception as e:
            return Response({'error': f'Database error during bulk operation (Rollback occurred): {str(e)}', 'rows_attempted': successful_row_count}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        # ---- 7. Report outcome (207 Multi-Status when some rows were skipped) ----
        if errors:
            return Response({'status': 'partial_success', 'message': f'{message} Invalid rows were skipped.', 'errors': errors}, status=status.HTTP_207_MULTI_STATUS)
        return Response({'status': 'success', 'message': message}, status=status.HTTP_201_CREATED)
#=============================================================================
executor = ThreadPoolExecutor(max_workers=10)
def background_generate(doc_code, context_pks, output_filename, uid):
start_time = datetime.now()
@@ -1545,29 +1274,6 @@ class EmailTemplatePreview:
@api_view(['POST'])
def preview_email_template(request):
"""
API để preview email template - trả về nội dung đã thay thế placeholders
POST /api/email/preview/
Body: {
"template_id": 1,
"context_pks": {
"contract_id": 456,
"customer_id": 789
}
}
Response: {
"subject": "Thông báo hợp đồng #HD-001",
"content": "<div>Nội dung y nguyên đã thay thế...</div>",
"recipient_email": "customer@example.com",
"replacements": {
"[contract.code]": "HD-001",
"[customer.name]": "Nguyễn Văn A",
...
}
}
"""
try:
# Validate input
template_id = request.data.get('template_id')

View File

@@ -1,375 +0,0 @@
from django.test import Client
from app.workflow_registry import register_action
from app.workflow_utils import resolve_value
from app.document_generator import DocumentGenerator
from app.jobemail import EmailJobRunner
from app.payment import account_entry_api
from django.apps import apps
import re
import datetime
client = Client()
# ============================
# Logic xử lý Map Expression ($map)
# ============================
def handle_map_expression(expression, context):
    """
    Transform a list from the context using a $map expression.

    Syntax: $map(data.installments, {amount: amount, due_date: $add_days(created_at, gap)})

    Returns a list of dicts built from the template, one per source item;
    returns [] when the expression or the source data is malformed.
    """
    # Split the expression into the data source path and the row template.
    match = re.match(r"^\$map\(([^,]+),\s*\{(.*)\}\)$", expression.strip())
    if not match:
        return []
    source_path = match.group(1).strip()
    template_content = match.group(2).strip()
    # Fetch the source list from the context (e.g. data.installments).
    source_data = resolve_value(source_path, context)
    if not isinstance(source_data, list):
        return []
    # Extract key: value pairs from the template; values may be nested
    # function calls such as $add_days(...).
    pairs = re.findall(r"(\w+):\s*(\$add_days\([^)]+\)|[^{},]+)", template_content)
    results = []
    for index, item in enumerate(source_data):
        # Per-item context so the template can reference the current row.
        item_context = {**context, "item": item, "index": index}
        processed_row = {}
        for key, val_expr in pairs:
            val_expr = val_expr.strip()
            # FIX: flattened the original nested else-block into one elif chain
            # and removed its duplicate "$index" branch, which was unreachable
            # because the first branch below already handles it.
            if val_expr == "$index":
                # Current position in the source list.
                processed_row[key] = index
            elif "$add_days" in val_expr:
                # $add_days(base, days): shift a date forward by N days.
                m = re.search(r"\$add_days\(([^,]+),\s*([^)]+)\)", val_expr)
                if m:
                    base_key = m.group(1).strip()
                    days_key = m.group(2).strip()
                    # Prefer keys on the current item, else resolve from context.
                    base_date = item.get(base_key) if base_key in item else resolve_value(base_key, item_context)
                    days = item.get(days_key) if days_key in item else resolve_value(days_key, item_context)
                    try:
                        # Accept ISO-formatted strings ("YYYY-MM-DD...").
                        if isinstance(base_date, str):
                            base_date = datetime.datetime.strptime(base_date[:10], "%Y-%m-%d").date()
                        new_date = base_date + datetime.timedelta(days=int(days or 0))
                        processed_row[key] = new_date.isoformat()
                    except Exception:
                        # On any parse/arithmetic failure keep the original value as text.
                        processed_row[key] = str(base_date)
            elif val_expr == "$item":
                # The whole source item.
                processed_row[key] = item
            elif val_expr in item:
                processed_row[key] = item[val_expr]
            else:
                processed_row[key] = resolve_value(val_expr, item_context)
        results.append(processed_row)
    return results
# ============================
# CRUD thông qua API có sẵn
# ============================
def deep_resolve_values(data, context):
    """Recursively resolve placeholders inside nested dicts, lists and strings."""
    if isinstance(data, str):
        # A string that is exactly one "{path}" placeholder is resolved by its
        # inner path (so non-string results survive); any other string — mixed
        # text like "/prefix/{path}/" or a plain literal — goes to the resolver
        # as-is.
        whole = re.fullmatch(r"\{([^}]+)\}", data)
        return resolve_value(whole.group(1) if whole else data, context)
    if isinstance(data, dict):
        return {key: deep_resolve_values(value, context) for key, value in data.items()}
    if isinstance(data, list):
        return [deep_resolve_values(element, context) for element in data]
    # Non-container, non-string values pass through untouched.
    return data
@register_action("API_CALL", schema={"required": ["method", "url"]})
def api_call_action(params, context):
    """Invoke an internal endpoint via the Django test client and return its payload."""
    http_method = params["method"].upper()
    target_url = resolve_value(params["url"], context)
    context_key = params.get("save_as")

    # Resolve the request body: $map expressions expand a list template,
    # dicts are resolved recursively, anything else goes through the resolver.
    raw_body = params.get("body")
    if isinstance(raw_body, str) and raw_body.startswith("$map"):
        payload = handle_map_expression(raw_body, context)
    elif isinstance(raw_body, dict):
        payload = deep_resolve_values(raw_body, context)
    elif raw_body is None:
        payload = None
    else:
        payload = resolve_value(raw_body, context)

    print(f" [API_CALL] {http_method} {target_url}")
    print(f" [API_CALL] Resolved Body: {payload}")

    # Dispatch to the matching client verb; anything unrecognized is a GET.
    if http_method == "POST":
        resp = client.post(target_url, payload, content_type="application/json")
    elif http_method == "PATCH":
        resp = client.patch(target_url, payload, content_type="application/json")
    elif http_method == "PUT":
        resp = client.put(target_url, payload, content_type="application/json")
    elif http_method == "DELETE":
        resp = client.delete(target_url)
    else:
        resp = client.get(target_url)

    print(f" [API_CALL] Status Code: {resp.status_code}")

    # Any 4xx/5xx aborts the workflow with the raw response text.
    if resp.status_code >= 400:
        error_content = resp.content.decode("utf-8") if resp.content else ""
        print(f" [API_CALL] Error: {error_content}")
        raise Exception(f"API Call failed: {error_content}")

    if resp.status_code == 204 or not resp.content:
        # DELETE / No-Content responses carry no JSON body.
        outcome = {"deleted": True}
    else:
        try:
            outcome = resp.json()
        except ValueError:
            # Non-JSON responses fall back to their decoded text.
            outcome = resp.content.decode("utf-8")

    print(f" [API_CALL] Result: {outcome}")
    if context_key:
        context[context_key] = outcome
    return outcome
# ============================
# Gọi Utility / API bên ngoài
# ============================
@register_action("CALL_UTILITY", schema={"required": ["utility_code"]})
def call_utility_action(params, context):
    """Load the integration function configured on a Utility row and invoke it."""
    Utility = apps.get_model("app", "Utility")
    utility_row = Utility.objects.get(code=params["utility_code"])
    if not utility_row.integration_module:
        return {"error": "utility has no module"}
    from django.utils.module_loading import import_string
    target = import_string(utility_row.integration_module)
    # Resolve each configured parameter against the workflow context.
    kwargs = {
        name: resolve_value(raw, context)
        for name, raw in params.get("params", {}).items()
    }
    return target(**kwargs)
@register_action("SEND_EMAIL", schema={"required": ["template"]})
def send_email_action(params, context):
    """Send an email built from a named Email_Template via EmailJobRunner."""
    template_name = params["template"]
    # Resolve each context pk expression against the workflow context.
    resolved_pks = {
        key: resolve_value(raw, context)
        for key, raw in params.get("context_pks", {}).items()
    }
    Email_Template = apps.get_model("app", "Email_Template")
    try:
        template_obj = Email_Template.objects.get(name=template_name)
    except Email_Template.DoesNotExist:
        raise Exception(f"Email template '{template_name}' not found")
    sent_ok = EmailJobRunner(template=template_obj, context_pks=resolved_pks).run()
    return {"sent": sent_ok}
# ============================
# Tạo Document
# ============================
@register_action("GENERATE_DOCUMENT", schema={"required": ["document_code"]})
def generate_document_action(params, context):
    """Render a document by code and optionally stash its descriptor in the context."""
    doc_code = resolve_value(params["document_code"], context)
    # Context pks are coerced to strings, as DocumentGenerator expects.
    resolved_pks = {
        key: str(resolve_value(raw, context))
        for key, raw in params.get("context_pks", {}).items()
    }
    context_key = params.get("save_as")
    print(f" [GEN_DOC] Generating for code: {doc_code}")
    generated = DocumentGenerator(document_code=doc_code, context_pks=resolved_pks).generate()
    # Wrap the output in a single-element list of file descriptors.
    descriptor = [{
        "pdf": generated.get("pdf"),
        "file": generated.get("file"),
        "name": generated.get("name"),
        "code": doc_code,
    }]
    if context_key:
        context[context_key] = descriptor
        print(f" [GEN_DOC] Success: Saved to context as '{context_key}'")
    return descriptor
# ============================
# Hạch toán
# ============================
@register_action("ACCOUNT_ENTRY", schema={"required": ["amount", "category_code"]})
def account_entry_action(params, context):
    """Book an accounting entry through the shared account_entry_api helper."""
    resolved_amount = resolve_value(params["amount"], context)
    resolved_userid = resolve_value(params.get("userid"), context)
    Entry_Category = apps.get_model("app", "Entry_Category")
    category_id = Entry_Category.objects.get(code=params["category_code"]).id
    return account_entry_api(
        # Defaults mirror the standard internal account / credit entry type.
        code=params.get("internal_account", "HOAC02VND"),
        amount=resolved_amount,
        content=params.get("content", ""),
        type=params.get("type", "CR"),
        category=category_id,
        userid=resolved_userid,
        ref=params.get("ref"),
    )
# ============================
# Tìm bản ghi
# ============================
@register_action("LOOKUP_DATA", schema={"required": ["model_name", "lookup_field", "lookup_value"]})
def lookup_data_action(params, context):
    """Find the first record matching lookup_field == lookup_value and return its id."""
    model_name = params["model_name"]
    field_name = params["lookup_field"]
    context_key = params.get("save_as")
    # Resolve the actual search value (e.g. "reserved") from the context.
    needle = resolve_value(params["lookup_value"], context)
    print(f" [LOOKUP] Searching {model_name} where {field_name} = '{needle}'")
    try:
        Model = apps.get_model("app", model_name)
        record = Model.objects.filter(**{field_name: needle}).first()
        if not record:
            print(f" [LOOKUP] ERROR: Not found!")
            raise Exception(f"Lookup failed: {model_name} with {field_name}={needle} not found.")
        if context_key:
            context[context_key] = record
            print(f" [LOOKUP] Success: Found ID {record.id}, saved to context as '{context_key}'")
        return record.id
    except Exception as e:
        print(f" [LOOKUP] EXCEPTION: {str(e)}")
        raise e
# ============================
# Quét và phân bổ toàn bộ bút toán còn phần dư
# ============================
@register_action("ALLOCATE_ALL_PENDING", schema={})
def allocate_all_pending_action(params, context):
    """
    Scan every CR-type Internal_Entry with allocation_remain > 0,
    group them by product_id, and run the allocation routines for each
    product until nothing is left to allocate.

    Returns {"total_products": N, "results": [per-product outcome dicts]}.
    """
    from app.payment import allocate_payment_to_schedules, allocate_penalty_reduction
    from decimal import Decimal
    Internal_Entry = apps.get_model("app", "Internal_Entry")
    Payment_Schedule = apps.get_model("app", "Payment_Schedule")
    Product_Booked = apps.get_model("app", "Product_Booked")
    Transaction_Current = apps.get_model("app", "Transaction_Current")
    Transaction_Detail = apps.get_model("app", "Transaction_Detail")
    # ---------- Collect every product_id that still has unallocated CR entries ----------
    product_ids = list(
        Internal_Entry.objects.filter(
            type__code="CR",
            allocation_remain__gt=0,
            product__isnull=False
        )
        .values_list("product_id", flat=True)
        .distinct()
    )
    print(f" [ALLOCATE_ALL] Tìm được {len(product_ids)} product có entry còn phần dư")
    if not product_ids:
        return {"total_products": 0, "results": []}
    # ---------- DEBUG: dump each product's state BEFORE allocation ----------
    for pid in product_ids:
        print(f"\n [DEBUG] ===== Product {pid} — trạng thái TRƯỚC phân bổ =====")
        # Unallocated CR entries, oldest first.
        entries = Internal_Entry.objects.filter(
            product_id=pid, type__code="CR", allocation_remain__gt=0
        ).order_by("date", "create_time")
        for e in entries:
            print(f" Entry id={e.id} | account_id={e.account_id} | amount={e.amount} | allocation_remain={e.allocation_remain} | date={e.date}")
        # Locate the product's transaction detail: the "current" detail first,
        # falling back to the newest Transaction_Detail on record.
        booked = Product_Booked.objects.filter(product_id=pid).first()
        if not booked or not booked.transaction:
            print(f" !! Không có Product_Booked / Transaction")
            continue
        txn = booked.transaction
        txn_detail = None
        try:
            current = Transaction_Current.objects.get(transaction=txn)
            txn_detail = current.detail
        except Exception:
            txn_detail = Transaction_Detail.objects.filter(transaction=txn).order_by("-create_time").first()
        if not txn_detail:
            print(f" !! Không có Transaction_Detail")
            continue
        # Payment schedules attached to that detail; status id 1 marks unpaid ones.
        all_schedules = Payment_Schedule.objects.filter(txn_detail=txn_detail).order_by("cycle", "from_date")
        unpaid = all_schedules.filter(status__id=1)
        print(f" Tổng schedule: {all_schedules.count()} | Chưa thanh toán (status=1): {unpaid.count()}")
        for s in all_schedules:
            print(f" Schedule id={s.id} | cycle={s.cycle} | status_id={s.status_id} | amount_remain={s.amount_remain} | penalty_remain={s.penalty_remain} | remain_amount={s.remain_amount}")
    # ---------- Run the allocation routines per product ----------
    results = []
    for product_id in product_ids:
        try:
            normal = allocate_payment_to_schedules(product_id)
            reduction = allocate_penalty_reduction(product_id)
            results.append({"product_id": product_id, "normal": normal, "reduction": reduction})
            print(f" [ALLOCATE_ALL] Product {product_id}: OK — normal={normal}")
        except Exception as e:
            # A failing product is recorded but does not stop the others.
            print(f" [ALLOCATE_ALL] Product {product_id}: ERROR - {str(e)}")
            results.append({"product_id": product_id, "error": str(e)})
    return {"total_products": len(product_ids), "results": results}

View File

@@ -1,84 +0,0 @@
from django.db import transaction
from app.models import Workflow, StepAction, Rule
from app.workflow_registry import ACTION_REGISTRY, validate_action_schema
from app.workflow_utils import resolve_value
@transaction.atomic
def execute_step(step: StepAction, context: dict):
    """Run one workflow step: check its rules, then execute each registered action."""
    # A step is skipped entirely when any active rule evaluates to False.
    for rule in step.rules.filter(is_active=True):
        if not evaluate_rule(rule, context):
            return {"step": step.step_code, "skipped": True, "reason": "rule_failed"}

    outcomes = []
    # step.actions is expected to be a JSON list of {"type", "params"} dicts.
    configured_actions = step.actions if isinstance(step.actions, list) else []
    for entry in configured_actions:
        kind = entry.get("type")
        if kind not in ACTION_REGISTRY:
            # Unknown action types are silently skipped rather than failing the step.
            continue
        arguments = entry.get("params", {})
        try:
            validate_action_schema(kind, arguments)
            produced = ACTION_REGISTRY[kind](arguments, context)
        except Exception as e:
            # Propagate so the surrounding transaction.atomic rolls back.
            raise e
        outcomes.append({"action": kind, "result": produced})
        # Expose the most recent action output to subsequent actions/steps.
        context["last_result"] = produced
    return {"step": step.step_code, "executed": True, "results": outcomes}
def evaluate_rule(rule: Rule, context: dict):
    """Return True only when every condition on the rule holds for the context."""
    for cond in (rule.conditions or []):
        lhs = resolve_value(cond.get("left"), context)
        rhs = resolve_value(cond.get("right"), context)
        operator = cond.get("operator", "==")
        # A single failed condition fails the whole rule (AND semantics).
        failed = (
            (operator == "IN" and lhs not in rhs)
            or (operator == "==" and lhs != rhs)
            or (operator == "!=" and lhs == rhs)
            or (operator == ">" and not (lhs > rhs))
            or (operator == "<" and not (lhs < rhs))
        )
        if failed:
            return False
    return True
def run_workflow(workflow_code: str, trigger: str, context: dict):
    """Execute every active step of the workflow bound to ``trigger``, in order."""
    workflow = Workflow.objects.filter(code=workflow_code, is_active=True).first()
    if workflow is None:
        # Inactive and missing workflows are treated the same way.
        raise Exception(f"Workflow '{workflow_code}' not found")
    ordered_steps = workflow.steps.filter(trigger_event=trigger, is_active=True).order_by("order")
    # Each step receives (and may mutate) the shared context.
    return [execute_step(step, context) for step in ordered_steps]

View File

@@ -1,21 +0,0 @@
from typing import Callable, Dict

# Global registries: action name -> handler, and action name -> param schema.
ACTION_REGISTRY: Dict[str, Callable] = {}
ACTION_SCHEMAS: Dict[str, dict] = {}


def register_action(name: str, schema=None):
    """Decorator factory that registers the wrapped function under ``name``."""
    def decorator(func):
        ACTION_REGISTRY[name] = func
        ACTION_SCHEMAS[name] = schema or {}
        return func
    return decorator


def validate_action_schema(action_name, params):
    """Raise when ``params`` lacks a key the action's schema marks as required."""
    required_keys = ACTION_SCHEMAS.get(action_name, {}).get("required", [])
    for key in required_keys:
        if key not in params:
            raise Exception(f"Action '{action_name}' missing required param: {key}")
    return True

View File

@@ -1,652 +0,0 @@
import re
import math
from datetime import datetime, date, timedelta
from decimal import Decimal
from django.db import models
from django.apps import apps
# =============================================
# CORE RESOLVER
# =============================================
def resolve_value(expr, context):
    """
    Universal expression resolver with support for:
    - Literals (int, float, bool, string)
    - Template strings: {key}, "text {key} text"
    - Dotted paths: customer.address.city
    - Math functions: $add, $sub, $multiply, $divide, $mod, $power, $round, $abs, $min, $max
    - Date functions: $now, $today, $date_diff, $date_add, $date_format, $date_parse
    - String functions: $concat, $upper, $lower, $trim, $replace, $substring, $split, $length
    - Logic functions: $if, $switch, $and, $or, $not
    - List functions: $append, $agg, $filter, $map, $first, $last, $count, $sum
    - Lookup functions: $vlookup, $lookup, $get
    - Nested functions support

    Non-None, non-string values are returned unchanged.  String expressions
    are matched against each branch in order; the first match returns.  A
    string matching no branch falls through and is returned verbatim.
    NOTE(review): branch order is significant — do not reorder sections.
    """
    if expr is None:
        return None
    # Direct literal types pass straight through.
    if isinstance(expr, (int, float, bool, Decimal)):
        return expr
    if not isinstance(expr, str):
        return expr
    expr = expr.strip()
    # =============================================
    # 1. SYSTEM VARIABLES
    # =============================================
    # "now"/"today" may be pre-seeded in the context (e.g. for testing);
    # the real clock is only consulted as a fallback.
    if expr == "$now":
        return context.get("now", datetime.now())
    if expr == "$today":
        if "today" in context:
            return context["today"]
        now_in_context = context.get("now")
        if isinstance(now_in_context, datetime):
            return now_in_context.date()
        return date.today()
    if expr == "$today_str":
        now_val = context.get("now", datetime.now())
        if isinstance(now_val, datetime):
            return now_val.date().isoformat()
        elif isinstance(now_val, date):
            return now_val.isoformat()
        return date.today().isoformat()
    if expr == "$now_iso":
        return datetime.now().isoformat(timespec='seconds')
    if expr == "$timestamp":
        return int(datetime.now().timestamp())
    # =============================================
    # 2. MATH FUNCTIONS (Support Nested)
    # =============================================
    # Two-argument operations; operands are recursively resolved then
    # coerced to float via to_number.
    math_functions = {
        'add': lambda a, b: a + b,
        'sub': lambda a, b: a - b,
        'subtract': lambda a, b: a - b,
        'multiply': lambda a, b: a * b,
        'mul': lambda a, b: a * b,
        'divide': lambda a, b: a / b if b != 0 else 0,  # divide-by-zero yields 0, never raises
        'div': lambda a, b: a / b if b != 0 else 0,
        'mod': lambda a, b: a % b if b != 0 else 0,
        'power': lambda a, b: a ** b,
        'pow': lambda a, b: a ** b,
    }
    for func_name, func in math_functions.items():
        pattern = rf'^\${func_name}\((.*)\)$'
        match = re.match(pattern, expr, re.IGNORECASE)
        if match:
            args = split_args(match.group(1))
            if len(args) == 2:
                a = to_number(resolve_value(args[0], context))
                b = to_number(resolve_value(args[1], context))
                return func(a, b)
    # Single-argument math functions ($round also accepts a decimals arg).
    single_math = {
        'round': lambda x, d=0: round(x, int(d)),
        'abs': lambda x: abs(x),
        'ceil': lambda x: math.ceil(x),
        'floor': lambda x: math.floor(x),
        'sqrt': lambda x: math.sqrt(x) if x >= 0 else 0,  # negative input yields 0
    }
    for func_name, func in single_math.items():
        pattern = rf'^\${func_name}\((.*)\)$'
        match = re.match(pattern, expr, re.IGNORECASE)
        if match:
            args = split_args(match.group(1))
            if len(args) >= 1:
                val = to_number(resolve_value(args[0], context))
                if len(args) == 2 and func_name == 'round':
                    decimals = to_number(resolve_value(args[1], context))
                    return func(val, decimals)
                return func(val)
    # Multi-argument math: $min(...) / $max(...) over any number of args.
    if re.match(r'^\$(min|max)\(', expr, re.IGNORECASE):
        match = re.match(r'^\$(min|max)\((.*)\)$', expr, re.IGNORECASE)
        if match:
            func_name = match.group(1).lower()
            args = split_args(match.group(2))
            values = [to_number(resolve_value(arg, context)) for arg in args]
            return min(values) if func_name == 'min' else max(values)
    # =============================================
    # 3. DATE FUNCTIONS
    # =============================================
    # $date_diff(date1, date2, unit?) -> signed (date1 - date2), time-of-day ignored.
    if re.match(r'^\$date_diff\(', expr, re.IGNORECASE):
        match = re.match(r'^\$date_diff\((.*)\)$', expr, re.IGNORECASE)
        if match:
            args = split_args(match.group(1))
            if len(args) >= 2:
                raw_d1 = resolve_value(args[0], context)
                raw_d2 = resolve_value(args[1], context)
                d1 = to_date(raw_d1)
                d2 = to_date(raw_d2)
                unit = resolve_value(args[2], context).lower() if len(args) > 2 else 'days'
                if not (d1 and d2):
                    # Unparseable dates degrade to 0 rather than raising.
                    return 0
                # Ensure we are comparing date objects, ignoring time
                d1_date_only = d1.date()
                d2_date_only = d2.date()
                if unit == 'days':
                    delta_days = (d1_date_only - d2_date_only).days
                    return delta_days
                elif unit == 'months':
                    # Calendar-month difference, not 30-day buckets.
                    delta_months = (d1_date_only.year - d2_date_only.year) * 12 + d1_date_only.month - d2_date_only.month
                    return delta_months
                elif unit == 'years':
                    delta_years = d1_date_only.year - d2_date_only.year
                    return delta_years
                return 0
    # $date_add(date, amount, unit?) -> ISO string of the shifted date.
    if re.match(r'^\$date_add\(', expr, re.IGNORECASE):
        match = re.match(r'^\$date_add\((.*)\)$', expr, re.IGNORECASE)
        if match:
            args = split_args(match.group(1))
            if len(args) >= 2:
                base_date = to_date(resolve_value(args[0], context))
                amount = to_number(resolve_value(args[1], context))
                unit = resolve_value(args[2], context).lower() if len(args) > 2 else 'days'
                if base_date:
                    # Ensure base_date is datetime
                    if isinstance(base_date, date) and not isinstance(base_date, datetime):
                        base_date = datetime.combine(base_date, datetime.min.time())
                    if unit == 'days':
                        result = base_date + timedelta(days=int(amount))
                    elif unit == 'months':
                        # Manual month arithmetic; NOTE(review): replace() raises
                        # for out-of-range days (e.g. Jan 31 + 1 month) — confirm intended.
                        month = base_date.month + int(amount)
                        year = base_date.year + (month - 1) // 12
                        month = ((month - 1) % 12) + 1
                        result = base_date.replace(year=year, month=month)
                    elif unit == 'years':
                        result = base_date.replace(year=base_date.year + int(amount))
                    elif unit == 'hours':
                        result = base_date + timedelta(hours=int(amount))
                    else:
                        # Unknown unit falls back to days.
                        result = base_date + timedelta(days=int(amount))
                    return result.isoformat() if isinstance(result, datetime) else result.strftime("%Y-%m-%d")
    # $date_format(date, format) -> strftime-formatted string.
    if re.match(r'^\$date_format\(', expr, re.IGNORECASE):
        match = re.match(r'^\$date_format\((.*)\)$', expr, re.IGNORECASE)
        if match:
            args = split_args(match.group(1))
            if len(args) == 2:
                dt = to_date(resolve_value(args[0], context))
                fmt = resolve_value(args[1], context).strip('\'"')
                if dt:
                    return dt.strftime(fmt)
    # $date_parse(string, format?) -> "YYYY-MM-DD" string, or None on failure.
    if re.match(r'^\$date_parse\(', expr, re.IGNORECASE):
        match = re.match(r'^\$date_parse\((.*)\)$', expr, re.IGNORECASE)
        if match:
            args = split_args(match.group(1))
            if len(args) >= 1:
                date_str = str(resolve_value(args[0], context))
                fmt = resolve_value(args[1], context).strip('\'"') if len(args) > 1 else "%Y-%m-%d"
                try:
                    return datetime.strptime(date_str, fmt).strftime("%Y-%m-%d")
                except:
                    return None
    # =============================================
    # 4. STRING FUNCTIONS
    # =============================================
    # $concat(str1, str2, ...): None resolves to the empty string.
    if re.match(r'^\$concat\(', expr, re.IGNORECASE):
        match = re.match(r'^\$concat\((.*)\)$', expr, re.IGNORECASE)
        if match:
            args = split_args(match.group(1))
            return ''.join(str(resolve_value(arg, context) or '') for arg in args)
    # $upper, $lower, $trim, $length — single-argument string transforms.
    string_single = {
        'upper': lambda s: str(s).upper(),
        'lower': lambda s: str(s).lower(),
        'trim': lambda s: str(s).strip(),
        'length': lambda s: len(str(s)),
    }
    for func_name, func in string_single.items():
        pattern = rf'^\${func_name}\((.*)\)$'
        match = re.match(pattern, expr, re.IGNORECASE)
        if match:
            arg = resolve_value(match.group(1).strip(), context)
            return func(arg)
    # $replace(text, old, new)
    if re.match(r'^\$replace\(', expr, re.IGNORECASE):
        match = re.match(r'^\$replace\((.*)\)$', expr, re.IGNORECASE)
        if match:
            args = split_args(match.group(1))
            if len(args) == 3:
                text = str(resolve_value(args[0], context))
                old = str(resolve_value(args[1], context)).strip('\'"')
                new = str(resolve_value(args[2], context)).strip('\'"')
                return text.replace(old, new)
    # $substring(text, start, length?)
    if re.match(r'^\$substring\(', expr, re.IGNORECASE):
        match = re.match(r'^\$substring\((.*)\)$', expr, re.IGNORECASE)
        if match:
            args = split_args(match.group(1))
            if len(args) >= 2:
                text = str(resolve_value(args[0], context))
                start = int(to_number(resolve_value(args[1], context)))
                length = int(to_number(resolve_value(args[2], context))) if len(args) > 2 else None
                # NOTE(review): length == 0 is falsy, so it behaves like "to end".
                return text[start:start+length] if length else text[start:]
    # $split(text, delimiter) -> list of substrings.
    if re.match(r'^\$split\(', expr, re.IGNORECASE):
        match = re.match(r'^\$split\((.*)\)$', expr, re.IGNORECASE)
        if match:
            args = split_args(match.group(1))
            if len(args) == 2:
                text = str(resolve_value(args[0], context))
                delimiter = str(resolve_value(args[1], context)).strip('\'"')
                return text.split(delimiter)
    # =============================================
    # 5. LOGIC FUNCTIONS
    # =============================================
    # $if(condition, true_value, false_value) — both branches are expressions,
    # but only the selected one is resolved.
    if re.match(r'^\$if\(', expr, re.IGNORECASE):
        match = re.match(r'^\$if\((.*)\)$', expr, re.IGNORECASE)
        if match:
            args = split_args(match.group(1))
            if len(args) == 3:
                condition = resolve_value(args[0], context)
                return resolve_value(args[1], context) if condition else resolve_value(args[2], context)
    # $switch(value, case1, result1, case2, result2, ..., default)
    if re.match(r'^\$switch\(', expr, re.IGNORECASE):
        match = re.match(r'^\$switch\((.*)\)$', expr, re.IGNORECASE)
        if match:
            args = split_args(match.group(1))
            if len(args) >= 2:
                value = resolve_value(args[0], context)
                for i in range(1, len(args) - 1, 2):
                    if i + 1 < len(args):
                        case = resolve_value(args[i], context)
                        if value == case:
                            return resolve_value(args[i + 1], context)
                # Default value is last arg if odd number of args
                if len(args) % 2 == 0:
                    return resolve_value(args[-1], context)
    # $and, $or, $not — short-circuiting boolean combinators.
    if re.match(r'^\$and\(', expr, re.IGNORECASE):
        match = re.match(r'^\$and\((.*)\)$', expr, re.IGNORECASE)
        if match:
            args = split_args(match.group(1))
            return all(resolve_value(arg, context) for arg in args)
    if re.match(r'^\$or\(', expr, re.IGNORECASE):
        match = re.match(r'^\$or\((.*)\)$', expr, re.IGNORECASE)
        if match:
            args = split_args(match.group(1))
            return any(resolve_value(arg, context) for arg in args)
    if re.match(r'^\$not\(', expr, re.IGNORECASE):
        match = re.match(r'^\$not\((.*)\)$', expr, re.IGNORECASE)
        if match:
            arg = resolve_value(match.group(1).strip(), context)
            return not arg
    # =============================================
    # 6. LIST/ARRAY FUNCTIONS
    # =============================================
    # $append(list, element) -> NEW list with the element appended.
    if re.match(r'^\$append\(', expr, re.IGNORECASE):
        match = re.match(r"^\$append\(([^,]+),\s*(.+)\)$", expr, re.DOTALL)
        if match:
            list_expr = match.group(1).strip()
            element_expr = match.group(2).strip()
            # 1. Resolve the list
            target_list = resolve_value(list_expr, context)
            if target_list is None:
                target_list = []
            # Ensure it's a copy so we don't modify the original context variable directly
            target_list = list(target_list)
            # 2. Resolve the element
            resolved_element = resolve_value(element_expr, context)
            if isinstance(resolved_element, str):
                # A string element that parses as JSON is appended as the
                # parsed structure, otherwise as the raw string.
                try:
                    import json
                    element_to_append = json.loads(resolved_element)
                except json.JSONDecodeError:
                    element_to_append = resolved_element
            else:
                element_to_append = resolved_element
            target_list.append(element_to_append)
            return target_list
    # $first(list), $last(list) — None when the list is empty or not a list.
    if re.match(r'^\$(first|last)\(', expr, re.IGNORECASE):
        match = re.match(r'^\$(first|last)\((.*)\)$', expr, re.IGNORECASE)
        if match:
            func_name = match.group(1).lower()
            lst = resolve_value(match.group(2).strip(), context)
            if isinstance(lst, list) and len(lst) > 0:
                return lst[0] if func_name == 'first' else lst[-1]
    # $count(list) — 0 for anything that is not a list.
    if re.match(r'^\$count\(', expr, re.IGNORECASE):
        match = re.match(r'^\$count\((.*)\)$', expr, re.IGNORECASE)
        if match:
            lst = resolve_value(match.group(1).strip(), context)
            return len(lst) if isinstance(lst, list) else 0
    # $agg(list, operation, field?) — count/sum/min/max/avg over a list of
    # scalars, dicts, or objects (field accessed via .get()/getattr).
    if re.match(r'^\$agg\(', expr, re.IGNORECASE):
        match = re.match(r'^\$agg\(([^,]+),\s*[\'"]([^\'\"]+)[\'"](?:,\s*[\'"]?([^\'\")]+)[\'"]?)?\)$', expr)
        if match:
            list_expr = match.group(1).strip()
            operation = match.group(2).strip()
            field_expr = match.group(3).strip() if match.group(3) else None
            target_list = resolve_value(list_expr, context)
            if not isinstance(target_list, list):
                return 0
            if operation == 'count':
                return len(target_list)
            if operation == 'sum':
                if not field_expr:
                    return sum(to_number(item) for item in target_list)
                total = 0
                for item in target_list:
                    value = item.get(field_expr) if isinstance(item, dict) else getattr(item, field_expr, 0)
                    total += to_number(value)
                return total
            if operation in ['min', 'max', 'avg']:
                values = []
                for item in target_list:
                    if field_expr:
                        value = item.get(field_expr) if isinstance(item, dict) else getattr(item, field_expr, 0)
                    else:
                        value = item
                    values.append(to_number(value))
                if not values:
                    return 0
                if operation == 'min':
                    return min(values)
                elif operation == 'max':
                    return max(values)
                elif operation == 'avg':
                    return sum(values) / len(values)
    # =============================================
    # 7. LOOKUP FUNCTIONS (Django ORM backed)
    # =============================================
    # $vlookup(lookup_value, model_name, lookup_field, return_field)
    if re.match(r'^\$vlookup\(', expr, re.IGNORECASE):
        match = re.match(r'^\$vlookup\((.*)\)$', expr, re.IGNORECASE)
        if match:
            args = split_args(match.group(1))
            if len(args) == 4:
                lookup_value = resolve_value(args[0], context)
                model_name = resolve_value(args[1], context).strip('\'"')
                lookup_field = resolve_value(args[2], context).strip('\'"')
                return_field = resolve_value(args[3], context).strip('\'"')
                # NOTE(review): bare except silently hides bad model/field names.
                try:
                    Model = apps.get_model('app', model_name)
                    obj = Model.objects.filter(**{lookup_field: lookup_value}).first()
                    if obj:
                        return getattr(obj, return_field, None)
                except:
                    pass
    # $lookup(model_name, field, value) -> first matching model instance.
    if re.match(r'^\$lookup\(', expr, re.IGNORECASE):
        match = re.match(r'^\$lookup\((.*)\)$', expr, re.IGNORECASE)
        if match:
            args = split_args(match.group(1))
            if len(args) == 3:
                model_name = resolve_value(args[0], context).strip('\'"')
                field = resolve_value(args[1], context).strip('\'"')
                value = resolve_value(args[2], context)
                try:
                    Model = apps.get_model('app', model_name)
                    return Model.objects.filter(**{field: value}).first()
                except:
                    pass
    # $get(dict_or_object, key, default?)
    if re.match(r'^\$get\(', expr, re.IGNORECASE):
        match = re.match(r'^\$get\((.*)\)$', expr, re.IGNORECASE)
        if match:
            args = split_args(match.group(1))
            if len(args) >= 2:
                obj = resolve_value(args[0], context)
                key = resolve_value(args[1], context)
                default = resolve_value(args[2], context) if len(args) > 2 else None
                if isinstance(obj, dict):
                    return obj.get(key, default)
                else:
                    return getattr(obj, key, default)
    # =============================================
    # 8. COMPARISON OPERATORS
    # =============================================
    # $eq, $ne, $gt, $gte, $lt, $lte, $in, $contains — two-arg predicates.
    comparisons = {
        'eq': lambda a, b: a == b,
        'ne': lambda a, b: a != b,
        'gt': lambda a, b: a > b,
        'gte': lambda a, b: a >= b,
        'lt': lambda a, b: a < b,
        'lte': lambda a, b: a <= b,
        'in': lambda a, b: a in b,
        'contains': lambda a, b: b in a,
    }
    for op_name, op_func in comparisons.items():
        pattern = rf'^\${op_name}\('
        if re.match(pattern, expr, re.IGNORECASE):
            match = re.match(rf'^\${op_name}\((.*)\)$', expr, re.IGNORECASE)
            if match:
                args = split_args(match.group(1))
                if len(args) == 2:
                    a = resolve_value(args[0], context)
                    b = resolve_value(args[1], context)
                    return op_func(a, b)
    # =============================================
    # 9. HELPER: Get context value (dotted path)
    # =============================================
    def get_context_value(key_path):
        # Closure over `context`: resolves "a.b[0].c"-style paths against it,
        # converting Decimal results to float for downstream math.
        if not key_path:
            return None
        # Check if numeric literal
        if re.match(r"^-?\d+(\.\d+)?$", key_path):
            return float(key_path)
        # Simple key
        if "." not in key_path:
            val = context.get(key_path)
            if isinstance(val, Decimal):
                return float(val)
            return val
        # Dotted path
        root, *rest = key_path.split(".")
        val = context.get(root)
        for r in rest:
            if val is None:
                return None
            # Array notation: field[0]
            array_match = re.match(r"(\w+)\[(\d+)\]", r)
            if array_match:
                attr_name = array_match.group(1)
                index = int(array_match.group(2))
                val = getattr(val, attr_name, None) if not isinstance(val, dict) else val.get(attr_name)
                try:
                    val = val[index]
                except:
                    return None
            else:
                if isinstance(val, dict):
                    val = val.get(r)
                else:
                    val = getattr(val, r, None)
            # Auto-fetch first() for QuerySet
            if hasattr(val, 'all') and not isinstance(val, models.Model):
                val = val.first()
        if isinstance(val, Decimal):
            return float(val)
        return val
    # =============================================
    # 10. TEMPLATE STRING PROCESSING
    # =============================================
    # "{a.b}" alone returns the raw value; embedded placeholders are
    # string-substituted (None becomes the empty string).
    pattern = re.compile(r"\{(\w+(\.\w+)*)\}")
    if pattern.search(expr):
        single_match = pattern.fullmatch(expr)
        if single_match:
            return get_context_value(single_match.group(1))
        def replace_match(match):
            val = get_context_value(match.group(1))
            return str(val) if val is not None else ""
        return pattern.sub(replace_match, expr)
    # =============================================
    # 11. SUPPORT $last_result
    # =============================================
    # "$last_result" or "$last_result.field" reads context["last_result"].
    if expr.startswith("$last_result"):
        _, _, field = expr.partition(".")
        last_res = context.get("last_result")
        if not field:
            return last_res
        if last_res is None:
            return None
        return getattr(last_res, field, None) if not isinstance(last_res, dict) else last_res.get(field)
    # =============================================
    # 12. DOTTED PATH OR DIRECT CONTEXT KEY
    # =============================================
    if re.match(r"^-?\d+(\.\d+)?$", expr):
        return float(expr)
    if "." in expr or expr in context:
        return get_context_value(expr)
    # Nothing matched: the expression is treated as a plain string literal.
    return expr
# =============================================
# HELPER FUNCTIONS
# =============================================
def split_args(content):
    """
    Split a comma-separated argument string into top-level pieces,
    honouring nested parentheses and quoted sections.

    Example: "a, $add(b, c), 'd'" -> ["a", "$add(b, c)", "'d'"]
    """
    pieces = []
    buf = []
    paren_depth = 0
    quote = None  # the active quote character, or None when outside quotes
    for ch in content:
        if quote:
            # Inside a quoted run: copy everything, closing on the same quote.
            buf.append(ch)
            if ch == quote:
                quote = None
            continue
        if ch in ('"', "'"):
            quote = ch
            buf.append(ch)
            continue
        if ch == ',' and paren_depth == 0:
            # Top-level comma terminates the current argument.
            pieces.append(''.join(buf).strip())
            buf = []
            continue
        if ch == '(':
            paren_depth += 1
        elif ch == ')':
            paren_depth -= 1
        buf.append(ch)
    if buf:
        pieces.append(''.join(buf).strip())
    return pieces
def to_number(value, default=0):
    """Best-effort float conversion.

    None, the empty string, and unconvertible values all yield *default*.
    """
    if value in (None, ''):
        return default
    try:
        result = float(value)
    except (ValueError, TypeError):
        result = default
    return result
def to_date(value):
    """Normalise *value* to a datetime.

    datetime passes through; date is promoted to midnight; strings are tried
    against a fixed list of formats (fractional seconds stripped first).
    Returns None when nothing matches.
    """
    # datetime must be checked before date (datetime is a date subclass).
    if isinstance(value, datetime):
        return value
    if isinstance(value, date):
        return datetime.combine(value, datetime.min.time())
    if isinstance(value, str):
        head = value.split('.')[0]  # drop fractional seconds, if present
        for fmt in ("%Y-%m-%d %H:%M:%S", "%Y-%m-%d", "%d/%m/%Y", "%Y-%m-%dT%H:%M:%S"):
            try:
                return datetime.strptime(head, fmt)
            except ValueError:
                pass
    return None

17
prefect-ui.log Normal file
View File

@@ -0,0 +1,17 @@
___ ___ ___ ___ ___ ___ _____
| _ \ _ \ __| __| __/ __|_ _|
| _/ / _|| _|| _| (__ | |
|_| |_|_\___|_| |___\___| |_|
Configure Prefect to communicate with the server with:
prefect config set PREFECT_API_URL=http://127.0.0.1:4200/api
View the API reference documentation at http://127.0.0.1:4200/docs
Check out the dashboard at http://127.0.0.1:4200
Server stopped!

View File

@@ -20,5 +20,6 @@ num2words
mammoth
paramiko
channels
prefect
croniter
uvicorn[standard]

View File

@@ -1,5 +1,24 @@
#!/usr/bin/env bash
# Start the Prefect UI in the background on port 4200 (skipped if already up).
if ! lsof -i:4200 > /dev/null 2>&1; then
    echo "Port 4200 trống → Khởi động Prefect server background..."
    nohup prefect server start --host 127.0.0.1 --port 4200 > prefect-ui.log 2>&1 &
    sleep 3  # wait 3 seconds for the server to settle
    echo "Prefect UI đã khởi động (truy cập: http://localhost:4200)"
    echo "Logs: tail -f prefect-ui.log"
else
    echo "Port 4200 đã có Prefect server chạy rồi → bỏ qua"
fi
# ========================
# Run the Django API (gunicorn + uvicorn)
# ========================
python3 envdev.py
# Free port 8000 before starting; errors are ignored if nothing is listening.
sudo kill -9 $(lsof -i:8000 -t) 2> /dev/null
echo "Khởi động Gunicorn..."
gunicorn api.asgi:application \
-k uvicorn.workers.UvicornWorker \
-w 3 \