env REDIS_HOST;
env REDIS_PORT;
env REDIS_URL;
env RESOLVER;
env BACKEND_ENDPOINT_OVERRIDE;
env OPENSSL_VERIFY;

daemon off;
master_process on;
worker_processes 12;
pcre_jit on;

pid nginx.pid;

error_log /dev/null warn;

events {
  worker_connections 16192;
  multi_accept on;
}

http {
  sendfile on;
  sendfile_max_chunk 512k;
  tcp_nopush on;
  tcp_nodelay on;

  server_tokens off;

  proxy_cache_path /tmp/cache levels=1:2 keys_zone=apicast_cache:10m;

  # Enabling the Lua code cache is strongly encouraged for production use.
  # Disabling it should only be done for testing and development purposes.
  lua_code_cache on;
  lua_max_running_timers 2048;
  lua_socket_pool_size 512;

  server_names_hash_bucket_size 128;

  access_log off;

  lua_package_path "./src/?.lua;/opt/app-root/src/src/?.ljbc;/opt/app-root/src/src/?.lua;/opt/app-root/src/src/?/init.ljbc;/opt/app-root/src/src/?/init.lua;/usr/local/openresty/site/lualib/?.ljbc;/usr/local/openresty/site/lualib/?/init.ljbc;/usr/local/openresty/lualib/?.ljbc;/usr/local/openresty/lualib/?/init.ljbc;/usr/local/openresty/site/lualib/?.lua;/usr/local/openresty/site/lualib/?/init.lua;/usr/local/openresty/lualib/?.lua;/usr/local/openresty/lualib/?/init.lua;/opt/app-root/src/src/?.lua;/usr/lib64/lua/5.1/?.lua;/usr/local/share/lua/5.1/?.lua;/usr/local/share/lua/5.1/*/?.lua;/usr/local/openresty/lualib/?.lua;;";
  lua_package_cpath "/opt/app-root/src/src/?.so;/usr/local/openresty/site/lualib/?.so;/usr/local/openresty/lualib/?.so;/opt/app-root/src/lib/?.so;./?.so;/usr/lib64/lua/5.1/?.so;/usr/lib64/lua/5.1/loadall.so;/usr/local/lib64/lua/5.1/?.so;";

  ignore_invalid_headers off;

  resolver 10.0.2.3:53 192.168.100.1:53 [fe80::1%2]:53;

  ## include /opt/app-root/src/http.d/core.conf
  client_max_body_size 0;
  ## end /opt/app-root/src/http.d/core.conf

  ## include /opt/app-root/src/http.d/init.conf
  init_by_lua_block {
    -- require("jit.v").start("dump.txt")
    -- require('jit.p').start('vl')
    -- require('jit.dump').start('bsx', 'jit.log')

    local log = require('resty.log.log')
    log:patch_ngx_log_on_debug()

    if os.getenv('CI') == 'true' then
      pcall(require, 'luacov.runner')
    end

    local luacov = package.loaded['luacov.runner']

    if luacov then
      local pwd = os.getenv('PWD') .. package.config:sub(1, 1)

      local pid = require("ngx.process").get_master_pid()
      if not pid then
        do
          local ffi = require("ffi")
          ffi.cdef[[int getpid(void);]]
          pid = ffi.C.getpid()
        end
      end

      local config = { }

      for _, option in ipairs({"statsfile", "reportfile"}) do
        -- properly expand the current working dir, workaround for https://github.com/openresty/resty-cli/issues/35
        config[option] = pwd .. luacov.defaults[option]
        luacov.defaults[option] = pwd
      end

      luacov.defaults.savestepsize = 3
      jit.off()
      luacov.init()

      for option, value in pairs(config) do
        luacov.configuration[option] = value
      end
    end

    require("resty.core")

    local resty_env = require('resty.env')

    -- WARNING, WARNING, WARNING: this is an insane hack and should not be touched.
    -- This file is not templated by Liquid, because it is also used by non-blackbox integration tests (search for TEST_NGINX_HTTP_CONFIG).
    -- So it cannot rely on Liquid templating to lift env variables and persist them in the configuration.
    -- This is a workaround so we can store the environment at config build time into the config itself.
    -- {{ ENV }} is both a valid Liquid template and valid Lua code.
    -- In Lua it is a table with another empty table inside. In Liquid it prints the variable ENV.
    -- This ENV variable is defined in the main nginx.conf.liquid and injected when including this partial.
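    --
    -- For illustration only (not part of the rendered output): in the Liquid source the loop below is
    -- written roughly as `for k,v in pairs({{ ENV }}) do ... end`. Read as plain Lua, `{{ ENV }}` is a
    -- table holding a single empty table, so the loop sets nothing; once Liquid renders it, the
    -- placeholder is replaced by the key/value pairs captured at build time, as seen below.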
    -- The content of the ENV variable is a Lua table, so once rendered it can actually be iterated with pairs().
    for k,v in pairs({
      [ [[APICAST_POLICY_LOAD_PATH]] ] = [[/opt/app-root/src/policies]],
      [ [[APICAST_CONFIGURATION_LOADER]] ] = [[lazy]],
      [ [[THREESCALE_PORTAL_ENDPOINT]] ] = [[http://apicastsecret@10.0.2.2:3008/master/api/proxy/configs]],
      [ [[THREESCALE_DEPLOYMENT_ENV]] ] = [[staging]],
      [ [[APICAST_LOADED_ENVIRONMENTS]] ] = [[/opt/app-root/src/config/staging.lua]],
      [ [[APICAST_DIR]] ] = [[/opt/app-root/src]],
      [ [[APICAST_BUILTIN_POLICY_LOAD_PATH]] ] = [[/opt/app-root/src/src/apicast/policy]],
    }) do
      if type(k) == 'string' and not resty_env.value(k) then
        resty_env.set(k,v)
      end
    end

    require('resty.resolver').init()
    require('apicast.loader')

    local env = require('apicast.cli.environment').load()
    local context = env:context()

    for k,v in pairs(context.env or {}) do
      resty_env.set(k,v)
    end

    local module = require('apicast.executor')

    if not module then
      ngx.log(ngx.EMERG, 'fatal error when loading the root module')
      os.exit(1)
    end

    if context.policy_chain then
      module = module.new(context.policy_chain)
      package.loaded['apicast.executor'] = module
    end

    module:init()
    collectgarbage("collect")
  }

  init_worker_by_lua_block {
    local luacov = package.loaded['luacov.runner']

    if luacov then
      luacov.configuration.statsfile = luacov.defaults.statsfile .. 'luacov_stats.' .. ngx.worker.pid() .. '.out'
      luacov.resume()

      ngx.timer.every(100, function(premature)
        if premature then
          luacov.save_stats()
          luacov.tick = true
        end
      end)
    end

    require('apicast.executor'):init_worker()
  }

  lua_shared_dict init 256k;
  ## end /opt/app-root/src/http.d/init.conf

  ## include /opt/app-root/src/http.d/shdict.conf
  lua_shared_dict api_keys 30m;
  lua_shared_dict rate_limit_headers 20m;
  lua_shared_dict configuration 10m;
  lua_shared_dict locks 1m;
  ## end /opt/app-root/src/http.d/shdict.conf

  ## include /opt/app-root/src/http.d/upstream.conf
  upstream upstream {
    server 0.0.0.1:1;
    balancer_by_lua_block { require('apicast.executor'):balancer() }
    keepalive 1024;
    keepalive_pool $upstream_keepalive_key;
  }

  upstream http_client {
    server 0.0.0.1:1;
    balancer_by_lua_block { require('resty.http_ng.backend.ngx'):balancer() }
    keepalive 1024;
  }
  ## end /opt/app-root/src/http.d/upstream.conf

  ## include /opt/app-root/src/http.d/lua_capture_error_log.conf
  # Needed to be able to use the ngx.errlog methods that we call from the Metrics policy.
  # Ref: https://github.com/openresty/lua-nginx-module#lua_capture_error_log
  lua_capture_error_log 4k;
  ## end /opt/app-root/src/http.d/lua_capture_error_log.conf

  ## include /opt/app-root/src/http.d/ssl.conf
  ## Customize this file to set up proper ssl validation.
  ## Openresty/Nginx can't use system certificates:
  ## https://groups.google.com/forum/#!topic/openresty-en/SuqORBK9ys0
  ## So you have to point it to some ca-bundle, which makes it
  ## really hard to have a working cross-platform configuration.
  #
  # https://www.openssl.org/docs/manmaster/man3/SSL_CTX_set_verify.html
  # The default depth limit is 100, allowing for the peer certificate,
  # at most 100 intermediate CA certificates and a final trust anchor certificate.
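  #
  # For example (a sketch, not part of this template; the path is distro-specific and only assumed here),
  # an image that ships a system bundle could point the trust directives below at it instead:
  #
  #   lua_ssl_trusted_certificate "/etc/pki/tls/certs/ca-bundle.crt";
  #   proxy_ssl_trusted_certificate "/etc/pki/tls/certs/ca-bundle.crt";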
  lua_ssl_verify_depth 100;
  lua_ssl_trusted_certificate "/opt/app-root/src/conf/ca-bundle.crt";

  proxy_ssl_server_name on;
  proxy_ssl_name $http_host;
  proxy_ssl_verify_depth 100;
  proxy_ssl_trusted_certificate "/opt/app-root/src/conf/ca-bundle.crt";
  ## end /opt/app-root/src/http.d/ssl.conf

  log_format time '[$time_local] $target_host:$server_port $remote_addr:$remote_port "$request" $status $body_bytes_sent ($request_time) $post_action_impact';

  # Use maps as variables because some logs can be raised out of the server context,
  # where variables cannot be set; this allows us to avoid a warning.
  map "" $extended_access_log { default ''; }
  map "" $access_logs_enabled { default '1'; }
  map "" $extended_access_logs_enabled { default '0'; }
  map "" $post_action_impact { default ''; }
  map "" $target_host { default '$host'; }

  log_format extended escape=none '$extended_access_log';

  server {
    listen 8090;
    server_name management _;

    location / {
      content_by_lua_block { require('apicast.management').call() }
    }
  }

  server {
    listen 8081;
    server_name backend;

    location /transactions/authrep.xml {
      access_by_lua_block {
        local delay = tonumber(ngx.var.arg_delay) or 0
        if delay > 0 then
          ngx.sleep(delay)
        end
      }
      echo "transactions authrep!";
    }
  }

  upstream echo {
    server 127.0.0.1:8081;
    keepalive 1024;
  }

  server {
    listen 8081 default_server;
    server_name echo _;

    location / {
      log_by_lua_block {
        if ngx.config.debug then
          ngx.log(ngx.DEBUG, "[echo]:\n", ngx.req.raw_header())
        end
      }

      echo_foreach_split '\r\n' $echo_client_request_headers;
        echo $echo_it;
      echo_end;

      echo_read_request_body;

      if ($http_content_length) {
        echo_after_body "\n$echo_request_body";
      }

      access_by_lua_block {
        local delay = tonumber(ngx.var.arg_delay) or 0
        if delay > 0 then
          ngx.sleep(delay)
        end
      }
    }

    location /config/ {
      echo "{}";
    }
  }

  server {
    access_log /dev/stdout time if=$access_logs_enabled;
    access_log /dev/stdout extended if=$extended_access_logs_enabled;

    listen 8080;
    server_name _;

    ## Customize this file to set up proper ssl validation.
    ## Openresty/Nginx can't use system certificates:
    ## https://groups.google.com/forum/#!topic/openresty-en/SuqORBK9ys0
    ## So you have to point it to some ca-bundle, which makes it
    ## really hard to have a working cross-platform configuration.
    #
    # https://www.openssl.org/docs/manmaster/man3/SSL_CTX_set_verify.html
    # The default depth limit is 100, allowing for the peer certificate,
    # at most 100 intermediate CA certificates and a final trust anchor certificate.
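    #
    # Note: this server block disables upstream certificate checks a few lines below with
    # 'proxy_ssl_verify off;'. A minimal sketch for enforcing validation instead (assuming the
    # ca-bundle configured here really contains the upstream CAs) would be:
    #
    #   proxy_ssl_verify on;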
    lua_ssl_verify_depth 100;
    lua_ssl_trusted_certificate "/opt/app-root/src/conf/ca-bundle.crt";

    proxy_ssl_server_name on;
    proxy_ssl_name $http_host;
    proxy_ssl_verify_depth 100;
    proxy_ssl_trusted_certificate "/opt/app-root/src/conf/ca-bundle.crt";
    proxy_ssl_verify off;

    set_by_lua $user_agent 'return require("apicast.user_agent")()';
    set_by_lua_block $deployment { return require('apicast.user_agent').deployment() }

    # TODO: enable in the future when we support SSL
    # ssl_certificate_by_lua_block { require('apicast.executor').call() }
    # ssl_session_fetch_by_lua_block { require('apicast.executor').call() }
    # ssl_session_store_by_lua_block { require('apicast.executor').call() }

    location = /___http_call {
      internal;

      set $url '';
      set $proxy_pass '';
      set $host_header '';
      set $connection_header 'close';
      set $options '';
      set $grant_type '';

      proxy_pass $proxy_pass;
      proxy_pass_request_headers off;
      proxy_pass_request_body on;
      proxy_ssl_name $host_header;
      proxy_http_version 1.1;

      proxy_set_header Host $host_header;
      proxy_set_header Connection $connection_header;
      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
      proxy_set_header X-3scale-User-Agent $deployment;
      proxy_set_header X-3scale-Version $version;
      proxy_set_header User-Agent $user_agent;
      proxy_set_header X-3scale-OAuth2-Grant-Type $grant_type;
      proxy_set_header 3scale-options $options;

      # Hack for having a valid openresty config and valid liquid templating
      #
      #
      #
      #
      #
      #
      proxy_set_header uber-trace-id $http_uber_trace_id;
      #
      #

      rewrite_by_lua_block { require('resty.http_ng.backend.ngx'):resolver() }
    }

    location @out_of_band_authrep_action {
      internal;

      proxy_pass_request_headers off;

      set_by_lua $original_request_time 'return ngx.var.request_time';

      content_by_lua_block {
        require('resty.ctx').apply()
        require('apicast.executor'):post_action()
      }

      log_by_lua_block {
        ngx.var.post_action_impact = ngx.var.request_time - ngx.var.original_request_time
        require('apicast.executor'):log()
      }
    }

    location @upstream {
      internal;

      rewrite_by_lua_block { require('resty.ctx').apply() }

      #
      #
      proxy_cache $cache_zone;
      proxy_cache_key $scheme$request_method$proxy_host$request_uri$service_id;
      proxy_no_cache $cache_request;
      proxy_cache_valid 200 302 1m;
      #
      #

      proxy_pass $proxy_pass;
      proxy_http_version 1.1;

      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header Host $http_host;
      proxy_set_header X-3scale-proxy-secret-token $secret_token;
      proxy_set_header X-3scale-debug "";
      proxy_set_header Connection $upstream_connection_header;
      proxy_set_header Upgrade $upstream_upgrade_header;

      # This is a bit tricky. It uses Liquid to set an SSL client certificate. In
      # NGINX none of this is executed, because it is commented out with '#'. In
      # Liquid, however, all of it is evaluated. As a result, the following directives
      # are set optionally: proxy_ssl_certificate, proxy_ssl_certificate_key,
      # proxy_ssl_session_reuse, and proxy_ssl_password_file.
      #
      # When 'upstream_retry_cases' is empty, apply the same default as NGINX.
      # If the proxy_next_upstream directive is not declared, the retry policy
      # will never retry.
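      #
      # For illustration (hypothetical value, not produced by this render): if 'upstream_retry_cases'
      # were set to, say, "error timeout http_503", Liquid would render the directive below as
      #
      #   proxy_next_upstream error timeout http_503;
      #
      # instead of the NGINX default that follows.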
      #
      #
      proxy_next_upstream error timeout;
      #
      # These are duplicated so that when the request is redirected here, those phases are executed.
      post_action @out_of_band_authrep_action;
      body_filter_by_lua_block { require('apicast.executor'):body_filter() }
      header_filter_by_lua_block { require('apicast.executor'):header_filter() }
    }

    location / {
      set $cached_key '';
      set $credentials '';
      set $usage '';
      set $service_id '';
      set $proxy_pass '';
      set $secret_token '';
      set $backend_host 'backend';
      set $backend_authentication_type '';
      set $backend_authentication_value '';
      set $version '';
      set $real_url '';
      set $ctx_ref -1;
      set $original_request_id $request_id;
      set $original_request_uri '$scheme://$host$request_uri';

      # Variables needed by the Websocket policy
      set $upstream_connection_header '';
      set $upstream_upgrade_header $http_upgrade;

      # Variables to enable/disable the content cache
      set $cache_request 'true';
      set $cache_zone 'off';

      set $target_host $host;
      set $upstream_keepalive_key "";

      # proxy_ignore_client_abort on;

      rewrite_by_lua_block {
        require('resty.ctx').stash()
        require('apicast.executor'):rewrite()
      }

      access_by_lua_block { require('apicast.executor'):access() }
      content_by_lua_block { require('apicast.executor'):content() }

      # These are duplicated so that those phases are executed when no internal redirect happens.
      post_action @out_of_band_authrep_action;
      body_filter_by_lua_block { require('apicast.executor'):body_filter() }
      header_filter_by_lua_block { require('apicast.executor'):header_filter() }

      include ../apicast.d/location.d/*.conf;
    }

    # This exposes a health check for Kubernetes environments.
    # This is so the health check on our Google HTTP load balancers works.
    location /_threescale/healthz {
      return 200;
      access_log off;
    }

    location @grpc_upstream {
      internal;

      rewrite_by_lua_block { require('resty.ctx').apply() }

      grpc_pass grpcs://upstream;

      grpc_set_header X-Real-IP $remote_addr;
      grpc_set_header Host $http_host;
      grpc_set_header X-3scale-grpc-secret-token $secret_token;
      grpc_set_header X-3scale-debug "";
      grpc_set_header Connection "";

      # This is a bit tricky. It uses Liquid to set an SSL client certificate. In
      # NGINX none of this is executed, because it is commented out with '#'. In
      # Liquid, however, all of it is evaluated. As a result, the following directives
      # are set optionally: grpc_ssl_certificate, grpc_ssl_certificate_key,
      # grpc_ssl_session_reuse, and grpc_ssl_password_file.
      #
      # When 'upstream_retry_cases' is empty, apply the same default as NGINX.
      # If the proxy_next_upstream directive is not declared, the retry policy
      # will never retry.
      #
      #
      grpc_next_upstream error timeout;
      #
      # These are duplicated so that when the request is redirected here, those phases are executed.
      post_action @out_of_band_authrep_action;
      body_filter_by_lua_block { require('apicast.executor'):body_filter() }
      header_filter_by_lua_block { require('apicast.executor'):header_filter() }
    }
  }

  lua_shared_dict prometheus_metrics 16M;

  server {
    access_log off;
    listen 9421;
    server_name metrics prometheus _;

    location /metrics {
      content_by_lua_block { require('apicast.executor'):metrics() }
    }

    location /nginx_status {
      internal;
      stub_status;
    }
  }

  lua_shared_dict limiter 1m;

  # These shared dictionaries are only used in the 3scale batcher policy.
  # This is not ideal, but they'll need to be here until we allow policies to
  # modify this template.
  lua_shared_dict cached_auths 20m;
  lua_shared_dict batched_reports 20m;
  lua_shared_dict batched_reports_locks 1m;
}
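
# A quick local smoke test against this rendered configuration (a sketch; it assumes the gateway is
# already running with this config and that curl is available, neither of which this file provides):
#
#   curl -i http://127.0.0.1:8080/_threescale/healthz   # health check location defined above
#   curl -i http://127.0.0.1:9421/metrics               # Prometheus metrics endpoint defined above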