diff --git a/.clangd b/.clangd new file mode 100644 index 000000000..cd9c5e1db --- /dev/null +++ b/.clangd @@ -0,0 +1,9 @@ +CompileFlags: + Add: + - "-std=c++17" + - "-I../ext" + - "-I../ext/prometheus-cpp-lite-1.0/core/include" + - "-I../ext/prometheus-cpp-lite-1.0/simpleapi/include" + - "-I./ext" + - "-I./ext/prometheus-cpp-lite-1.0/core/include" + - "-I./ext/prometheus-cpp-lite-1.0/simpleapi/include" diff --git a/.drone.yml b/.drone.yml deleted file mode 100644 index 955fbbdea..000000000 --- a/.drone.yml +++ /dev/null @@ -1,267 +0,0 @@ ---- -kind: pipeline -type: docker -name: build 386 - -clone: - depth: 1 - -steps: - - name: build 386 - image: registry.sean.farm/honda-builder - commands: - - ./ci/scripts/build.sh linux 386 $${DRONE_COMMIT_SHA} - - name: notify-mattermost - pull: always - image: registry.sean.farm/mattermost-notify - environment: - token: - from_secret: mattermost-token - host: - from_secret: mattermost-host - channel: - from_secret: mattermost-channel - maxRetry: 3 - when: - status: - - failure - - success - -image_pull_secrets: - - dockerconfigjson - ---- -kind: pipeline -type: docker -name: build amd64 - -clone: - depth: 1 - -steps: - - name: build amd64 - image: registry.sean.farm/honda-builder - commands: - - ./ci/scripts/build.sh linux amd64 $${DRONE_COMMIT_SHA} - - name: notify-mattermost - pull: always - image: registry.sean.farm/mattermost-notify - environment: - token: - from_secret: mattermost-token - host: - from_secret: mattermost-host - channel: - from_secret: mattermost-channel - maxRetry: 3 - when: - status: - - failure - - success - -image_pull_secrets: - - dockerconfigjson - ---- -kind: pipeline -type: docker -name: build arm64 - -clone: - depth: 1 - -steps: - - name: build arm64 - image: registry.sean.farm/honda-builder - commands: - - ./ci/scripts/build.sh linux arm64 $${DRONE_COMMIT_SHA} - - name: notify-mattermost - pull: always - image: registry.sean.farm/mattermost-notify - environment: - token: - from_secret: mattermost-token - host: - from_secret: mattermost-host - channel: - from_secret: mattermost-channel - maxRetry: 3 - when: - status: - - failure - - success - -platform: - os: linux - arch: arm64 - -image_pull_secrets: - - dockerconfigjson - ---- -kind: pipeline -type: docker -name: build armv7 - -platform: - os: linux - arch: arm64 - -clone: - depth: 1 - -steps: - - name: build armv7 - image: registry.sean.farm/honda-builder - commands: - - ./ci/scripts/build.sh linux armv7 $${DRONE_COMMIT_SHA} - - name: notify-mattermost - pull: always - image: registry.sean.farm/mattermost-notify - environment: - token: - from_secret: mattermost-token - host: - from_secret: mattermost-host - channel: - from_secret: mattermost-channel - maxRetry: 3 - when: - status: - - failure - - success - -platform: - os: linux - arch: arm64 - -image_pull_secrets: - - dockerconfigjson - ---- -kind: pipeline -type: docker -name: build riscv64 - -clone: - depth: 1 - -steps: - - name: build riscv64 - image: registry.sean.farm/honda-builder - commands: - - ./ci/scripts/build.sh linux riscv64 $${DRONE_COMMIT_SHA} - - name: notify-mattermost - pull: always - image: registry.sean.farm/mattermost-notify - environment: - token: - from_secret: mattermost-token - host: - from_secret: mattermost-host - channel: - from_secret: mattermost-channel - maxRetry: 3 - when: - status: - - failure - - success - -image_pull_secrets: - - dockerconfigjson - ---- -kind: pipeline -type: docker -name: build mips64le - -clone: - depth: 1 - -steps: - - name: build mips64le - image: 
registry.sean.farm/honda-builder - commands: - - ./ci/scripts/build.sh linux mips64le $${DRONE_COMMIT_SHA} - - name: notify-mattermost - pull: always - image: registry.sean.farm/mattermost-notify - environment: - token: - from_secret: mattermost-token - host: - from_secret: mattermost-host - channel: - from_secret: mattermost-channel - maxRetry: 3 - when: - status: - - failure - - success - -image_pull_secrets: - - dockerconfigjson - ---- -kind: pipeline -type: docker -name: build ppc64le - -clone: - depth: 1 - -steps: - - name: build ppc64le - image: registry.sean.farm/honda-builder - commands: - - ./ci/scripts/build.sh linux ppc64le $${DRONE_COMMIT_SHA} - - name: notify-mattermost - pull: always - image: registry.sean.farm/mattermost-notify - environment: - token: - from_secret: mattermost-token - host: - from_secret: mattermost-host - channel: - from_secret: mattermost-channel - maxRetry: 3 - when: - status: - - failure - - success - -image_pull_secrets: - - dockerconfigjson - ---- -kind: pipeline -type: docker -name: build s390x - -clone: - depth: 1 - -steps: - - name: build s390x - image: registry.sean.farm/honda-builder - commands: - - ./ci/scripts/build.sh linux s390x $${DRONE_COMMIT_SHA} - - name: notify-mattermost - pull: always - image: registry.sean.farm/mattermost-notify - environment: - token: - from_secret: mattermost-token - host: - from_secret: mattermost-host - channel: - from_secret: mattermost-channel - maxRetry: 3 - when: - status: - - failure - - success - -image_pull_secrets: - - dockerconfigjson diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 000000000..fee6dd338 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,126 @@ +on: + pull_request: + push: + workflow_dispatch: + +jobs: + build_ubuntu: + runs-on: ubuntu-latest + steps: + - name: gitconfig + run: | + git config --global core.autocrlf input + # git config --global core.eol lf + - name: checkout + uses: actions/checkout@v4 + - name: Install Rust + uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + targets: x86_64-unknown-linux-gnu + components: rustfmt, clippy + + - name: Set up cargo cache + uses: Swatinem/rust-cache@v2 + continue-on-error: false + with: + key: ${{ runner.os }}-cargo-${{ hashFiles('rustybits//Cargo.lock') }} + shared-key: ${{ runner.os }}-cargo- + workspaces: | + rustybits/ + + - name: make + run: make + - name: selftest + run: | + make selftest + ./zerotier-selftest + - name: 'Tar files' # keeps permissions (execute) + run: tar -cvf zerotier-one.tar zerotier-one + - name: Archive production artifacts + uses: actions/upload-artifact@v4 + with: + name: zerotier-one-ubuntu-x64 + path: zerotier-one.tar + retention-days: 7 + + build_macos: + runs-on: macos-latest + steps: + - name: gitconfig + run: | + git config --global core.autocrlf input + # git config --global core.eol lf + - name: checkout + uses: actions/checkout@v4 + - name: Install Rust aarch64 + uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + target: aarch64-apple-darwin + components: rustfmt, clippy + - name: Install Rust x86_64 + uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + target: x86_64-apple-darwin + components: rustfmt, clippy + - name: Set up cargo cache + uses: Swatinem/rust-cache@v2 + continue-on-error: false + with: + key: ${{ runner.os }}-cargo-${{ hashFiles('rustybits//Cargo.lock') }} + shared-key: ${{ runner.os }}-cargo- + workspaces: | + rustybits/ + - name: make + run: make + - name: selftest + run: | + make 
selftest + ./zerotier-selftest + - name: 'Tar files' # keeps permissions (execute) + run: tar -cvf zerotier-one.tar zerotier-one + - name: Archive production artifacts + uses: actions/upload-artifact@v4 + with: + name: zerotier-one-mac + path: zerotier-one.tar + retention-days: 7 + + + build_windows: + runs-on: windows-latest + steps: + - name: gitconfig + run: | + git config --global core.autocrlf true + # git config --global core.eol lf + - name: checkout + uses: actions/checkout@v4 + - name: Install Rust + uses: dtolnay/rust-toolchain@stable + with: + toolchain: stable + target: aarch64-apple-darwin + components: rustfmt, clippy + - name: Set up cargo cache + uses: Swatinem/rust-cache@v2 + continue-on-error: false + with: + key: ${{ runner.os }}-cargo-${{ hashFiles('rustybits//Cargo.lock') }} + shared-key: ${{ runner.os }}-cargo- + workspaces: | + rustybits/ + + - name: setup msbuild + uses: microsoft/setup-msbuild@v2 + - name: msbuild + run: | + msbuild windows\ZeroTierOne.sln /m /p:Configuration=Release /property:Platform=x64 /t:ZeroTierOne + - name: Archive production artifacts + uses: actions/upload-artifact@v4 + with: + name: zerotier-one-windows + path: windows/Build + retention-days: 7 diff --git a/.github/workflows/validate-linux.sh b/.github/workflows/validate-linux.sh new file mode 100755 index 000000000..a661f6f6c --- /dev/null +++ b/.github/workflows/validate-linux.sh @@ -0,0 +1,497 @@ +#!/bin/bash + +# This test script joins Earth and pokes some stuff + +TEST_NETWORK=8056c2e21c000001 +RUN_LENGTH=30 +TEST_FINISHED=false +ZTO_VER=$(git describe --tags $(git rev-list --tags --max-count=1)) +ZTO_COMMIT=$(git rev-parse HEAD) +ZTO_COMMIT_SHORT=$(git rev-parse --short HEAD) +TEST_DIR_PREFIX="$ZTO_VER-$ZTO_COMMIT_SHORT-test-results" + +TEST_OK=0 +TEST_FAIL=1 + +echo "Performing test on: $ZTO_VER-$ZTO_COMMIT_SHORT" +TEST_FILEPATH_PREFIX="$TEST_DIR_PREFIX/$ZTO_COMMIT_SHORT" +mkdir $TEST_DIR_PREFIX + +# How long we will wait for ZT to come online before considering it a failure +MAX_WAIT_SECS=30 + +ZT_PORT_NODE_1=9996 +ZT_PORT_NODE_2=9997 + +################################################################################ +# Multi-node connectivity and performance test # +################################################################################ + +test() { + + echo -e "\nPerforming pre-flight checks" + + check_exit_on_invalid_identity + + echo -e "\nRunning test for $RUN_LENGTH seconds" + + export NS1="ip netns exec ns1" + export NS2="ip netns exec ns2" + + export ZT1="$NS1 ./zerotier-cli -p9996 -D$(pwd)/node1" + # Specify custom port on one node to ensure that feature works + export ZT2="$NS2 ./zerotier-cli -p9997 -D$(pwd)/node2" + + echo -e "\nSetting up network namespaces..." 
+ echo "Setting up ns1" + + ip netns add ns1 + $NS1 ip link set dev lo up + ip link add veth0 type veth peer name veth1 + ip link set veth1 netns ns1 + ip addr add 192.168.0.1/24 dev veth0 + ip link set dev veth0 up + + $NS1 ip addr add 192.168.0.2/24 dev veth1 + $NS1 ip link set dev veth1 up + + # Add default route + $NS1 ip route add default via 192.168.0.1 + + iptables -t nat -A POSTROUTING -s 192.168.0.0/255.255.255.0 \ + -o eth0 -j MASQUERADE + iptables -A FORWARD -i eth0 -o veth0 -j ACCEPT + iptables -A FORWARD -o eth0 -i veth0 -j ACCEPT + + echo "Setting up ns2" + ip netns add ns2 + $NS2 ip link set dev lo up + ip link add veth2 type veth peer name veth3 + ip link set veth3 netns ns2 + ip addr add 192.168.1.1/24 dev veth2 + ip link set dev veth2 up + + $NS2 ip addr add 192.168.1.2/24 dev veth3 + $NS2 ip link set dev veth3 up + $NS2 ip route add default via 192.168.1.1 + + iptables -t nat -A POSTROUTING -s 192.168.1.0/255.255.255.0 \ + -o eth0 -j MASQUERADE + iptables -A FORWARD -i eth0 -o veth2 -j ACCEPT + iptables -A FORWARD -o eth0 -i veth2 -j ACCEPT + + # Allow forwarding + sysctl -w net.ipv4.ip_forward=1 + + ################################################################################ + # Memory Leak Check # + ################################################################################ + + export FILENAME_MEMORY_LOG="$TEST_FILEPATH_PREFIX-memory.log" + + echo -e "\nStarting a ZeroTier instance in each namespace..." + + export time_test_start=$(date +%s) + + # Spam the CLI as ZeroTier is starting + spam_cli 100 + + echo "Starting memory leak check" + $NS1 sudo valgrind --demangle=yes --exit-on-first-error=yes \ + --error-exitcode=1 \ + --xml=yes \ + --xml-file=$FILENAME_MEMORY_LOG \ + --leak-check=full \ + ./zerotier-one node1 -p$ZT_PORT_NODE_1 -U >>node_1.log 2>&1 & + + # Second instance, not run in memory profiler + # Don't set up internet access until _after_ zerotier is running + # This has been a source of stuckness in the past. + $NS2 ip addr del 192.168.1.2/24 dev veth3 + $NS2 sudo ./zerotier-one node2 -U -p$ZT_PORT_NODE_2 >>node_2.log 2>&1 & + + sleep 10; # New HTTP control plane is a bit sluggish, so we delay here + + check_bind_to_correct_ports $ZT_PORT_NODE_1 + check_bind_to_correct_ports $ZT_PORT_NODE_2 + + $NS2 ip addr add 192.168.1.2/24 dev veth3 + $NS2 ip route add default via 192.168.1.1 + + echo -e "\nPing from host to namespaces" + + ping -c 3 192.168.0.1 + ping -c 3 192.168.1.1 + + echo -e "\nPing from namespace to host" + + $NS1 ping -c 3 192.168.0.1 + $NS1 ping -c 3 192.168.0.1 + $NS2 ping -c 3 192.168.0.2 + $NS2 ping -c 3 192.168.0.2 + + echo -e "\nPing from ns1 to ns2" + + $NS1 ping -c 3 192.168.0.1 + + echo -e "\nPing from ns2 to ns1" + + $NS2 ping -c 3 192.168.0.1 + + ################################################################################ + # Online Check # + ################################################################################ + + echo "Waiting for ZeroTier to come online before attempting test..." 
+ node1_online=false + node2_online=false + both_instances_online=false + time_zt_node1_start=$(date +%s) + time_zt_node2_start=$(date +%s) + + for ((s = 0; s <= $MAX_WAIT_SECS; s++)); do + node1_online="$($ZT1 -j info | jq '.online' 2>/dev/null)" + node2_online="$($ZT2 -j info | jq '.online' 2>/dev/null)" + echo "Checking for online status: try #$s, node1:$node1_online, node2:$node2_online" + if [[ "$node2_online" == "true" && "$node1_online" == "true" ]]; then + export both_instances_online=true + export time_to_both_nodes_online=$(date +%s) + break + fi + sleep 1 + done + + echo -e "\n\nContents of ZeroTier home paths:" + + ls -lga node1 + tree node1 + ls -lga node2 + tree node2 + + echo -e "\n\nRunning ZeroTier processes:" + echo -e "\nNode 1:\n" + $NS1 ps aux | grep zerotier-one + echo -e "\nNode 2:\n" + $NS2 ps aux | grep zerotier-one + + echo -e "\n\nStatus of each instance:" + + echo -e "\n\nNode 1:\n" + $ZT1 status + echo -e "\n\nNode 2:\n" + $ZT2 status + + if [[ "$both_instances_online" != "true" ]]; then + exit_test_and_generate_report $TEST_FAIL "one or more nodes failed to come online" + fi + + echo -e "\nJoining networks" + + $ZT1 join $TEST_NETWORK + $ZT2 join $TEST_NETWORK + + sleep 10 + + node1_ip4=$($ZT1 get $TEST_NETWORK ip4) + node2_ip4=$($ZT2 get $TEST_NETWORK ip4) + + echo "node1_ip4=$node1_ip4" + echo "node2_ip4=$node2_ip4" + + echo -e "\nPinging each node" + + PING12_FILENAME="$TEST_FILEPATH_PREFIX-ping-1-to-2.txt" + PING21_FILENAME="$TEST_FILEPATH_PREFIX-ping-2-to-1.txt" + + $NS1 ping -c 16 $node2_ip4 >$PING12_FILENAME + $NS2 ping -c 16 $node1_ip4 >$PING21_FILENAME + + ping_loss_percent_1_to_2=$(cat $PING12_FILENAME | + grep "packet loss" | awk '{print $6}' | sed 's/%//') + ping_loss_percent_2_to_1=$(cat $PING21_FILENAME | + grep "packet loss" | awk '{print $6}' | sed 's/%//') + + # Normalize loss value + export ping_loss_percent_1_to_2=$(echo "scale=2; $ping_loss_percent_1_to_2/100.0" | bc) + export ping_loss_percent_2_to_1=$(echo "scale=2; $ping_loss_percent_2_to_1/100.0" | bc) + + ################################################################################ + # CLI Check # + ################################################################################ + + echo "Testing basic CLI functionality..." 
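+  # spam_cli issues rapid status/join/leave calls; after that, every read-only
+  # subcommand is run in plain and JSON form, each documented `get` field is
+  # queried, and one deliberately invalid field name exercises the error path.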
+ + spam_cli 10 + + $ZT1 join $TEST_NETWORK + + $ZT1 -h + $ZT1 -v + $ZT1 status + $ZT1 info + $ZT1 listnetworks + $ZT1 peers + $ZT1 listpeers + + $ZT1 -j status + $ZT1 -j info + $ZT1 -j listnetworks + $ZT1 -j peers + $ZT1 -j listpeers + + $ZT1 dump + + $ZT1 get $TEST_NETWORK allowDNS + $ZT1 get $TEST_NETWORK allowDefault + $ZT1 get $TEST_NETWORK allowGlobal + $ZT1 get $TEST_NETWORK allowManaged + $ZT1 get $TEST_NETWORK bridge + $ZT1 get $TEST_NETWORK broadcastEnabled + $ZT1 get $TEST_NETWORK dhcp + $ZT1 get $TEST_NETWORK id + $ZT1 get $TEST_NETWORK mac + $ZT1 get $TEST_NETWORK mtu + $ZT1 get $TEST_NETWORK name + $ZT1 get $TEST_NETWORK netconfRevision + $ZT1 get $TEST_NETWORK nwid + $ZT1 get $TEST_NETWORK portDeviceName + $ZT1 get $TEST_NETWORK portError + $ZT1 get $TEST_NETWORK status + $ZT1 get $TEST_NETWORK type + + # Test an invalid command + $ZT1 get $TEST_NETWORK derpderp + + # TODO: Validate JSON + + # Performance Test + + export FILENAME_PERF_JSON="$TEST_FILEPATH_PREFIX-iperf.json" + + echo -e "\nBeginning performance test:" + + echo -e "\nStarting server:" + + echo "$NS1 iperf3 -s &" + sleep 1 + + echo -e "\nStarting client:" + sleep 1 + + echo "$NS2 iperf3 --json -c $node1_ip4 > $FILENAME_PERF_JSON" + + cat $FILENAME_PERF_JSON + + # Let ZeroTier idle long enough for various timers + + echo -e "\nIdling ZeroTier for $RUN_LENGTH seconds..." + sleep $RUN_LENGTH + + echo -e "\nLeaving networks" + + $ZT1 leave $TEST_NETWORK + $ZT2 leave $TEST_NETWORK + + sleep 5 + + exit_test_and_generate_report $TEST_OK "completed test" +} + +################################################################################ +# Generate report # +################################################################################ + +exit_test_and_generate_report() { + + echo -e "\nStopping memory check..." + sudo pkill -15 -f valgrind + sleep 10 + + time_test_end=$(date +%s) + + echo "Exiting test with reason: $2 ($1)" + + # Collect ZeroTier dump files + + echo -e "\nCollecting ZeroTier dump files" + + node1_id=$($ZT1 -j status | jq -r .address) + node2_id=$($ZT2 -j status | jq -r .address) + + $ZT1 dump + mv zerotier_dump.txt "$TEST_FILEPATH_PREFIX-node-dump-$node1_id.txt" + + $ZT2 dump + mv zerotier_dump.txt "$TEST_FILEPATH_PREFIX-node-dump-$node2_id.txt" + + # Copy ZeroTier stdout/stderr logs + + cp node_1.log "$TEST_FILEPATH_PREFIX-node-log-$node1_id.txt" + cp node_2.log "$TEST_FILEPATH_PREFIX-node-log-$node2_id.txt" + + # Generate report + + cat $FILENAME_MEMORY_LOG + + DEFINITELY_LOST=$(xmlstarlet sel -t -v '/valgrindoutput/error/xwhat' \ + $FILENAME_MEMORY_LOG | grep "definitely" | awk '{print $1;}') + POSSIBLY_LOST=$(xmlstarlet sel -t -v '/valgrindoutput/error/xwhat' \ + $FILENAME_MEMORY_LOG | grep "possibly" | awk '{print $1;}') + + # Generate coverage report artifact and summary + + FILENAME_COVERAGE_JSON="$TEST_FILEPATH_PREFIX-coverage.json" + FILENAME_COVERAGE_HTML="$TEST_FILEPATH_PREFIX-coverage.html" + + echo -e "\nGenerating coverage test report..." + + gcovr -r . 
--exclude ext --json-summary $FILENAME_COVERAGE_JSON \ + --html >$FILENAME_COVERAGE_HTML + + cat $FILENAME_COVERAGE_JSON + + COVERAGE_LINE_COVERED=$(cat $FILENAME_COVERAGE_JSON | jq .line_covered) + COVERAGE_LINE_TOTAL=$(cat $FILENAME_COVERAGE_JSON | jq .line_total) + COVERAGE_LINE_PERCENT=$(cat $FILENAME_COVERAGE_JSON | jq .line_percent) + + COVERAGE_LINE_COVERED="${COVERAGE_LINE_COVERED:-0}" + COVERAGE_LINE_TOTAL="${COVERAGE_LINE_TOTAL:-0}" + COVERAGE_LINE_PERCENT="${COVERAGE_LINE_PERCENT:-0}" + + # Default values + + DEFINITELY_LOST="${DEFINITELY_LOST:-0}" + POSSIBLY_LOST="${POSSIBLY_LOST:-0}" + ping_loss_percent_1_to_2="${ping_loss_percent_1_to_2:-100.0}" + ping_loss_percent_2_to_1="${ping_loss_percent_2_to_1:-100.0}" + time_to_both_nodes_online="${time_to_both_nodes_online:--1}" + + # Summarize and emit json for trend reporting + + FILENAME_SUMMARY="$TEST_FILEPATH_PREFIX-summary.json" + + time_length_test=$((time_test_end - time_test_start)) + if [[ $time_to_both_nodes_online != -1 ]]; + then + time_to_both_nodes_online=$((time_to_both_nodes_online - time_test_start)) + fi + #time_length_zt_join=$((time_zt_join_end-time_zt_join_start)) + #time_length_zt_leave=$((time_zt_leave_end-time_zt_leave_start)) + #time_length_zt_can_still_ping=$((time_zt_can_still_ping-time_zt_leave_start)) + + summary=$( + cat <$FILENAME_SUMMARY + cat $FILENAME_SUMMARY + + exit 0 +} + +################################################################################ +# CLI Check # +################################################################################ + +spam_cli() { + echo "Spamming CLI..." + # Rapidly spam the CLI with joins/leaves + + MAX_TRIES="${1:-10}" + + for ((s = 0; s <= MAX_TRIES; s++)); do + $ZT1 status + $ZT2 status + sleep 0.1 + done + + SPAM_TRIES=128 + + for ((s = 0; s <= SPAM_TRIES; s++)); do + $ZT1 join $TEST_NETWORK + done + + for ((s = 0; s <= SPAM_TRIES; s++)); do + $ZT1 leave $TEST_NETWORK + done + + for ((s = 0; s <= SPAM_TRIES; s++)); do + $ZT1 leave $TEST_NETWORK + $ZT1 join $TEST_NETWORK + done +} + +################################################################################ +# Check for proper exit on load of invalid identity # +################################################################################ + +check_exit_on_invalid_identity() { + echo "Checking ZeroTier exits on invalid identity..." + mkdir -p $(pwd)/exit_test + ZT1="sudo ./zerotier-one -p9999 $(pwd)/exit_test" + echo "asdfasdfasdfasdf" > $(pwd)/exit_test/identity.secret + echo "asdfasdfasdfasdf" > $(pwd)/exit_test/authtoken.secret + + echo "Launch ZeroTier with an invalid identity" + $ZT1 & + my_pid=$! + + echo "Waiting 5 seconds" + sleep 5 + + # check if process is running + kill -0 $my_pid + if [ $? 
-eq 0 ]; then + exit_test_and_generate_report $TEST_FAIL "Exit test FAILED: Process still running after being fed an invalid identity" + fi +} + +################################################################################ +# Check that we're binding to the primary port for TCP/TCP6/UDP # +################################################################################ + +check_bind_to_correct_ports() { + PORT_NUMBER=$1 + echo "Checking bound ports:" + sudo netstat -anp | grep "$PORT_NUMBER" | grep "zerotier" + if [[ $(sudo netstat -anp | grep "$PORT_NUMBER" | grep "zerotier" | grep "tcp") ]]; + then + : + else + exit_test_and_generate_report $TEST_FAIL "ZeroTier did not bind to tcp/$1" + fi + if [[ $(sudo netstat -anp | grep "$PORT_NUMBER" | grep "zerotier" | grep "tcp6") ]]; + then + : + else + exit_test_and_generate_report $TEST_FAIL "ZeroTier did not bind to tcp6/$1" + fi + if [[ $(sudo netstat -anp | grep "$PORT_NUMBER" | grep "zerotier" | grep "udp") ]]; + then + : + else + exit_test_and_generate_report $TEST_FAIL "ZeroTier did not bind to udp/$1" + fi +} + +test "$@" diff --git a/.github/workflows/validate-report.sh b/.github/workflows/validate-report.sh new file mode 100755 index 000000000..3ae4e1a16 --- /dev/null +++ b/.github/workflows/validate-report.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +################################################################################ +# Set exit code depending on tool reports # +################################################################################ + +DEFINITELY_LOST=$(cat *test-results/*summary.json | jq .num_definite_bytes_lost) +EXIT_CODE=$(cat *test-results/*summary.json | jq .exit_code) +EXIT_REASON=$(cat *test-results/*summary.json | jq .exit_reason) + +cat *test-results/*summary.json + +echo -e "\nBytes of memory definitely lost: $DEFINITELY_LOST" + +if [[ "$DEFINITELY_LOST" -gt 0 ]]; then + exit 1 +fi + +# Catch-all for other non-zero exit codes + +if [[ "$EXIT_CODE" -gt 0 ]]; then + echo "Test failed: $EXIT_REASON" + exit 1 +fi \ No newline at end of file diff --git a/.github/workflows/validate.yml b/.github/workflows/validate.yml new file mode 100644 index 000000000..bbc107660 --- /dev/null +++ b/.github/workflows/validate.yml @@ -0,0 +1,57 @@ +on: + pull_request: + push: + workflow_dispatch: + +jobs: + build_ubuntu: + runs-on: ubuntu-latest + steps: + - name: gitconfig + run: | + git config --global core.autocrlf input + + - name: checkout + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Install Rust + uses: actions-rs/toolchain@v1 + with: + toolchain: stable + target: x86_64-unknown-linux-gnu + override: true + components: rustfmt, clippy + + - name: Set up cargo cache + uses: Swatinem/rust-cache@v2 + continue-on-error: false + with: + key: ${{ runner.os }}-cargo-${{ hashFiles('zeroidc//Cargo.lock') }} + shared-key: ${{ runner.os }}-cargo- + workspaces: | + zeroidc/ + + - name: validate-1m-linux + env: + CC: 'gcc' + CXX: 'g++' + BRANCH: ${{ github.ref_name }} + run: | + sudo apt update && sudo apt install -y valgrind xmlstarlet gcovr iperf3 tree + make one ZT_COVERAGE=1 ZT_TRACE=1 + sudo chmod +x ./.github/workflows/validate-linux.sh + sudo ./.github/workflows/validate-linux.sh + + - name: Archive test results + uses: actions/upload-artifact@v4 + with: + name: ${{github.sha}}-test-results + path: "*test-results*" + + - name: final-report + run: | + sudo chmod +x ./.github/workflows/validate-report.sh + sudo ./.github/workflows/validate-report.sh + diff --git a/.gitignore b/.gitignore index 
98b77490f..04a7d1eac 100755 --- a/.gitignore +++ b/.gitignore @@ -62,7 +62,6 @@ zt1-src.tar.gz *.opensdf *.user *.cache -*.obj *.tlog *.pid *.pkg @@ -107,7 +106,6 @@ windows/ZeroTierOne/Debug/ *.swp *~.nib DerivedData/ -build/ *.pbxuser *.mode1v3 *.mode2v3 @@ -118,7 +116,6 @@ build/ !default.perspectivev3 *.xccheckout xcuserdata/ -ext/librethinkdbxx/build .vscode __pycache__ *~ @@ -127,7 +124,7 @@ attic/world/mkworld workspace/ workspace2/ zeroidc/target/ -tmp/ +tcp-proxy/target #snapcraft specifics /parts/ @@ -140,4 +137,7 @@ tmp/ __pycache__ *.pyc *_source.tar.bz2 -snap/.snapcraft \ No newline at end of file +snap/.snapcraft +tcp-proxy/tcp-proxy +rustybits/target +ext/installfiles/windows/*.back*.aip diff --git a/AUTHORS.md b/AUTHORS.md deleted file mode 100644 index 84bb86316..000000000 --- a/AUTHORS.md +++ /dev/null @@ -1,75 +0,0 @@ -# Authors and Third Party Code Licensing Information - -## Primary Authors - - * ZeroTier Core and ZeroTier One virtual networking service
- Adam Ierymenko / adam.ierymenko@zerotier.com - Joseph Henry / joseph.henry@zerotier.com (QoS and multipath) - - * Java JNI Interface to enable Android application development, and Android app itself (code for that is elsewhere)
- Grant Limberg / glimberg@gmail.com - - * ZeroTier SDK (formerly known as Network Containers)
- Joseph Henry / joseph.henry@zerotier.com - -## Third Party Contributors - - * A number of fixes and improvements to the new controller, other stuff.
- Kees Bos / https://github.com/keesbos/ - - * Debugging and testing, OpenWRT support fixes.
- Moritz Warning / moritzwarning@web.de - - * Debian GNU/Linux packaging, manual pages, and license compliance edits.
- Ben Finney - - * Several others made smaller contributions, which GitHub tracks here:
- https://github.com/zerotier/ZeroTierOne/graphs/contributors/ - -## Third-Party Code - -ZeroTier includes the following third party code, either in ext/ or incorporated into the ZeroTier core. This third party code remains licensed under its original license and is not subject to ZeroTier's BSL license. - - * LZ4 compression algorithm by Yann Collet - - * Files: node/Packet.cpp (bundled within anonymous namespace) - * Home page: http://code.google.com/p/lz4/ - * License grant: BSD 2-clause - - * http-parser by Joyent, Inc. (many authors) - - * Files: ext/http-parser/* - * Home page: https://github.com/joyent/http-parser/ - * License grant: MIT/Expat - - * C++11 json (nlohmann/json) by Niels Lohmann - - * Files: ext/json/* - * Home page: https://github.com/nlohmann/json - * License grant: MIT - - * tap-windows6 by the OpenVPN project - - * Files: windows/TapDriver6/* - * Home page: https://github.com/OpenVPN/tap-windows6/ - * License grant: GNU GPL v2 - * ZeroTier Modifications: change name of driver to ZeroTier, add ioctl() to get L2 multicast memberships (source is in ext/ and modifications inherit GPL) - - * Salsa20 stream cipher, Curve25519 elliptic curve cipher, Ed25519 digital signature algorithm, and Poly1305 MAC algorithm, all by Daniel J. Bernstein - - * Files: node/Salsa20.* node/C25519.* node/Poly1305.* - * Home page: http://cr.yp.to/ - * License grant: public domain - * ZeroTier Modifications: slight cryptographically-irrelevant modifications for inclusion into ZeroTier core - - * MiniUPNPC and libnatpmp by Thomas Bernard - - * Files: ext/libnatpmp/* ext/miniupnpc/* - * Home page: http://miniupnp.free.fr/ - * License grant: BSD attribution no-endorsement - - * cpp-httplib by yhirose - - * Files: ext/cpp-httplib/* - * Home page: https://github.com/yhirose/cpp-httplib - * License grant: MIT diff --git a/COPYING b/COPYING index d07f35240..7f0801e20 100644 --- a/COPYING +++ b/COPYING @@ -1,7 +1,7 @@ ZeroTier One, an endpoint server for the ZeroTier virtual network layer. Copyright © 2011–2019 ZeroTier, Inc. -ZeroTier is released under the terms of the BSL version 1.1. See the +ZeroTier is released under the terms of the BUSL version 1.1. See the file LICENSE.txt for details. .. diff --git a/Dockerfile.release b/Dockerfile.release index a676226a3..2a289cead 100644 --- a/Dockerfile.release +++ b/Dockerfile.release @@ -1,22 +1,17 @@ # vim: ft=dockerfile -FROM debian:buster as stage +FROM debian:bookworm -ARG PACKAGE_BASEURL=https://download.zerotier.com/debian/buster/pool/main/z/zerotier-one/ -ARG ARCH=amd64 ARG VERSION -RUN apt-get update -qq && apt-get install curl -y -RUN curl -sSL -o zerotier-one.deb "${PACKAGE_BASEURL}/zerotier-one_${VERSION}_${ARCH}.deb" +RUN apt-get update -qq && apt-get install curl gpg -y +RUN mkdir -p /usr/share/zerotier && \ + curl -o /usr/share/zerotier/tmp.asc "https://download.zerotier.com/contact%40zerotier.com.gpg" && \ + gpg --no-default-keyring --keyring /usr/share/zerotier/zerotier.gpg --import /usr/share/zerotier/tmp.asc && \ + rm -f /usr/share/zerotier/tmp.asc && \ + echo "deb [signed-by=/usr/share/zerotier/zerotier.gpg] http://download.zerotier.com/debian/bookworm bookworm main" > /etc/apt/sources.list.d/zerotier.list -FROM debian:buster - -RUN apt-get update -qq && apt-get install openssl libssl1.1 -y - -COPY --from=stage zerotier-one.deb . 
- -RUN dpkg -i zerotier-one.deb && rm -f zerotier-one.deb -RUN echo "${VERSION}" >/etc/zerotier-version +RUN apt-get update -qq && apt-get install zerotier-one=${VERSION} curl iproute2 net-tools iputils-ping openssl libssl3 -y RUN rm -rf /var/lib/zerotier-one COPY entrypoint.sh.release /entrypoint.sh diff --git a/Jenkinsfile b/Jenkinsfile deleted file mode 100644 index 757729e33..000000000 --- a/Jenkinsfile +++ /dev/null @@ -1,365 +0,0 @@ -pipeline { - options { - disableConcurrentBuilds() - preserveStashes(buildCount: 10) - timestamps() - } - parameters { - booleanParam(name: "BUILD_ALL", defaultValue: false, description: "Build all supported platform/architecture combos. Defaults to x86/x64 only") - } - - agent none - - stages { - stage ("Build") { - steps { - script { - def tasks = [:] - tasks << buildStaticBinaries() - tasks << buildDebianNative() - tasks << buildCentosNative() - - parallel tasks - } - } - } - stage ("Package Static") { - steps { - script { - parallel packageStatic() - } - } - } - } -} - -def buildStaticBinaries() { - def tasks = [:] - def dist = ["alpine"] - def archs = [] - if (params.BUILD_ALL == true) { - archs = ["arm64", "amd64", "i386", "armhf", "armel", "ppc64le", "s390x"] - } else { - archs = ["amd64", "i386"] - } - - tasks << getTasks(dist, archs, { distro, platform -> - def myNode = { - node ('linux-build') { - dir ("build") { - checkout scm - } - sh "echo ${distro}-${platform}" - def runtime = docker.image("ztbuild/${distro}-${platform}:latest") - runtime.inside { - dir("build") { - sh 'make -j8 ZT_STATIC=1 all' - sh "file ./zerotier-one" - sh "mv zerotier-one zerotier-one-static-${platform}" - stash includes: 'zerotier-one-static-*', name: "static-${platform}" - } - cleanWs deleteDirs: true, disableDeferredWipeout: true, notFailBuild: true - } - } - } - return myNode - }) - - return tasks -} - -def getTasks(axisDistro, axisPlatform, task) { - def tasks = [:] - for(int i=0; i< axisDistro.size(); i++) { - def axisDistroValue = axisDistro[i] - for(int j=0; j< axisPlatform.size(); j++) { - def axisPlatformValue = axisPlatform[j] - tasks["${axisDistroValue}/${axisPlatformValue}"] = task(axisDistroValue, axisPlatformValue) - } - } - return tasks -} - -def packageStatic() { - def tasks = [:] - - def centos6 = ["centos6"] - def centos6Arch = ["i386", "amd64"] - tasks << getTasks(centos6, centos6Arch, { distro, arch -> - def myNode = { - node ('linux-build') { - dir ("build") { - checkout scm - } - def runtime = docker.image("ztbuild/${distro}-${arch}:latest") - runtime.inside { - dir("build") { - unstash "static-${arch}" - sh "mv zerotier-one-static-${arch} zerotier-one && chmod +x zerotier-one" - sh "make redhat" - sh "mkdir -p ${distro}" - sh "cp -av `find ~/rpmbuild/ -type f -name \"*.rpm\"` ${distro}/" - archiveArtifacts artifacts: "${distro}/*.rpm", onlyIfSuccessful: true - } - } - cleanWs deleteDirs: true, disableDeferredWipeout: true, notFailBuild: true - } - } - return myNode - }) - - def centos7 = ["centos7"] - def centos7Arch = ["i386"] - tasks << getTasks(centos7, centos7Arch, { distro, arch -> - def myNode = { - node ('linux-build') { - dir ("build") { - checkout scm - } - def runtime = docker.image("ztbuild/${distro}-${arch}:latest") - runtime.inside { - dir("build") { - unstash "static-${arch}" - sh "mv zerotier-one-static-${arch} zerotier-one && chmod +x zerotier-one" - sh "make redhat" - sh "mkdir -p ${distro}" - sh "cp -av `find ~/rpmbuild/ -type f -name \"*.rpm\"` ${distro}/" - archiveArtifacts artifacts: "${distro}/*.rpm", onlyIfSuccessful: true 
- } - } - cleanWs deleteDirs: true, disableDeferredWipeout: true, notFailBuild: true - } - } - return myNode - }) - - if (params.BUILD_ALL == true) { - def clefos7 = ["clefos"] - def clefos7Arch = ["s390x"] - tasks << getTasks(clefos7, clefos7Arch, { distro, arch -> - def myNode = { - node ('linux-build') { - dir ("build") { - checkout scm - } - def runtime = docker.image("ztbuild/${distro}-${arch}:latest") - runtime.inside { - dir("build/") { - unstash "static-${arch}" - sh "mv zerotier-one-static-${arch} zerotier-one && chmod +x zerotier-one" - sh "make redhat" - sh "mkdir -p ${distro}" - sh "cp -av `find ~/rpmbuild/ -type f -name \"*.rpm\"` ${distro}/" - archiveArtifacts artifacts: "${distro}/*.rpm", onlyIfSuccessful: true - } - } - cleanWs deleteDirs: true, disableDeferredWipeout: true, notFailBuild: true - } - } - return myNode - }) - } - - def debianJessie = ["debian-jessie"] - def debianJessieArchs = [] - if (params.BUILD_ALL == true) { - debianJessieArch = ["armhf", "armel", "amd64", "i386"] - } else { - debianJessieArch = ["amd64", "i386"] - } - tasks << getTasks(debianJessie, debianJessieArch, { distro, arch -> - def myNode = { - node ('linux-build') { - dir ("build") { - checkout scm - } - def runtime = docker.image("ztbuild/${distro}-${arch}:latest") - runtime.inside { - sh "ls -la ." - dir('build/') { - sh "ls -la ." - unstash "static-${arch}" - sh "pwd" - sh "mv zerotier-one-static-${arch} zerotier-one && chmod +x zerotier-one && file ./zerotier-one" - sh "mv -f debian/rules.static debian/rules" - sh "make debian" - } - sh "mkdir -p ${distro}" - sh "mv *.deb ${distro}" - archiveArtifacts artifacts: "${distro}/*.deb", onlyIfSuccessful: true - } - cleanWs deleteDirs: true, disableDeferredWipeout: true, notFailBuild: true - } - } - return myNode - }) - - def ubuntuTrusty = ["ubuntu-trusty"] - def ubuntuTrustyArch = [] - if (params.BUILD_ALL == true) { - ubuntuTrustyArch = ["i386", "amd64", "armhf", "arm64", "ppc64le"] - } else { - ubuntuTrustyArch = ["i386", "amd64"] - } - tasks << getTasks(ubuntuTrusty, ubuntuTrustyArch, { distro, arch -> - def myNode = { - node ('linux-build') { - dir ("build") { - checkout scm - } - def runtime = docker.image("ztbuild/${distro}-${arch}:latest") - runtime.inside { - sh "ls -la ." - dir('build/') { - sh "ls -la ." 
- unstash "static-${arch}" - sh "pwd" - sh "mv zerotier-one-static-${arch} zerotier-one && chmod +x zerotier-one && file ./zerotier-one" - sh "mv -f debian/rules.static debian/rules" - sh "make debian" - } - sh "mkdir -p ${distro}" - sh "mv *.deb ${distro}" - archiveArtifacts artifacts: "${distro}/*.deb", onlyIfSuccessful: true - } - cleanWs deleteDirs: true, disableDeferredWipeout: true, notFailBuild: true - } - } - return myNode - }) - - def debianWheezy = ["debian-wheezy"] - def debianWheezyArchs = [] - if (params.BUILD_ALL == true) { - debianWheezyArchs = ["armhf", "armel", "amd64", "i386"] - } else { - debianWheezyArchs = ["amd64", "i386"] - } - tasks << getTasks(debianJessie, debianJessieArch, { distro, arch -> - def myNode = { - node ('linux-build') { - dir ("build") { - checkout scm - } - def runtime = docker.image("ztbuild/${distro}-${arch}:latest") - runtime.inside { - dir('build/') { - unstash "static-${arch}" - sh "mv zerotier-one-static-${arch} zerotier-one && chmod +x zerotier-one && file ./zerotier-one" - sh "mv -f debian/rules.wheezy.static debian/rules" - sh "mv -f debian/control.wheezy debian/control" - sh "make debian" - } - sh "mkdir -p ${distro}" - sh "mv *.deb ${distro}" - archiveArtifacts artifacts: "${distro}/*.deb", onlyIfSuccessful: true - } - cleanWs deleteDirs: true, disableDeferredWipeout: true, notFailBuild: true - } - } - return myNode - }) - - return tasks -} - -def buildDebianNative() { - def tasks = [:] - def buster = ["debian-buster", "debian-stretch", "debian-bullseye", "debian-sid"] - def busterArchs = [] - if (params.BUILD_ALL) { - busterArchs = ["s390x", "ppc64le", "i386", "armhf", "armel", "arm64", "amd64"] - } else { - busterArchs = ["amd64", "i386"] - } - - def build = { distro, arch -> - def myNode = { - node ('linux-build') { - dir ("build") { - checkout scm - } - def runtime = docker.image("ztbuild/${distro}-${arch}:latest") - runtime.inside { - dir("build") { - sh 'make debian' - } - sh "mkdir -p ${distro}" - sh "mv *.deb ${distro}" - archiveArtifacts artifacts: "${distro}/*.deb", onlyIfSuccessful: true - cleanWs deleteDirs: true, disableDeferredWipeout: true, notFailBuild: true - } - } - } - return myNode - } - - tasks << getTasks(buster, busterArchs, build) - - // bash is broken when running under QEMU-s390x on Xenial - def xenial = ["ubuntu-xenial"] - def xenialArchs = [] - if (params.BUILD_ALL == true) { - xenialArchs = ["i386", "amd64", "armhf", "arm64", "ppc64le"] - } else { - xenialArchs = ["i386", "amd64"] - } - tasks << getTasks(xenial, xenialArchs, build) - - def ubuntu = ["ubuntu-bionic", "ubuntu-eoan"] - def ubuntuArchs = [] - if (params.BUILD_ALL == true) { - ubuntuArchs = ["i386", "amd64", "armhf", "arm64", "ppc64le", "s390x"] - } else { - ubuntuArchs = ["i386", "amd64"] - } - tasks << getTasks(ubuntu, ubuntuArchs, build) - - def kali = ["kali-rolling"] - def kaliArchs = ["amd64"] - tasks << getTasks(kali, kaliArchs, build) - - return tasks -} - -def buildCentosNative() { - def tasks = [:] - - def build = { distro, arch -> - def myNode = { - node ('linux-build') { - dir ("build") { - checkout scm - } - def runtime = docker.image("ztbuild/${distro}-${arch}:latest") - runtime.inside { - dir("build") { - sh 'make -j4' - sh 'make redhat' - sh "mkdir -p ${distro}" - sh "cp -av `find ~/rpmbuild/ -type f -name \"*.rpm\"` ${distro}/" - archiveArtifacts artifacts: "${distro}/*.rpm", onlyIfSuccessful: true - } - - cleanWs deleteDirs: true, disableDeferredWipeout: true, notFailBuild: true - } - } - } - return myNode - } - - def centos8 = 
["centos8"] - def centos8Archs = [] - if (params.BUILD_ALL == true) { - centos8Archs = ["amd64", "arm64", "ppc64le"] - } else { - centos8Archs = ["amd64"] - } - tasks << getTasks(centos8, centos8Archs, build) - - def centos7 = ["centos7"] - def centos7Archs = ["amd64"] - tasks << getTasks(centos7, centos7Archs, build) - - return tasks -} diff --git a/LICENSE.txt b/LICENSE.txt index 13fa014a3..06a3fad64 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -26,7 +26,7 @@ Additional Use Grant: You may make use of the Licensed Work, provided you ZeroTier behind the scenes to operate a service not related to ZeroTier network administration. - * Create Non-Open-Source Commercial Derviative Works + * Create Non-Open-Source Commercial Derivative Works (2) Link or directly include the Licensed Work in a commercial or for-profit application or other product @@ -47,7 +47,7 @@ Additional Use Grant: You may make use of the Licensed Work, provided you services, social welfare, senior care, child care, and the care of persons with disabilities. -Change Date: 2025-01-01 +Change Date: 2026-01-01 Change License: Apache License version 2.0 as published by the Apache Software Foundation diff --git a/Makefile b/Makefile index 39dabafdd..f77767fb8 100644 --- a/Makefile +++ b/Makefile @@ -26,3 +26,11 @@ endif ifeq ($(OSTYPE),NetBSD) include make-netbsd.mk endif + +drone: + @echo "rendering .drone.yaml from .drone.jsonnet" + drone jsonnet --format --stream + drone sign zerotier/ZeroTierOne --save + +clang-format: + find node osdep service tcp-proxy controller -iname '*.cpp' -o -iname '*.hpp' | xargs clang-format -i diff --git a/OFFICIAL-RELEASE-STEPS.md b/OFFICIAL-RELEASE-STEPS.md index ba7b556db..0c198bd68 100644 --- a/OFFICIAL-RELEASE-STEPS.md +++ b/OFFICIAL-RELEASE-STEPS.md @@ -14,6 +14,7 @@ The version must be incremented in all of the following files: /debian/changelog /ext/installfiles/mac/ZeroTier One.pkgproj /ext/installfiles/windows/ZeroTier One.aip + ../DesktopUI/mac-app-template/ZeroTier.app/Contents/Info.plist The final .AIP file can only be edited on Windows with [Advanced Installer Enterprise](http://www.advancedinstaller.com/). In addition to incrementing the version be sure that a new product code is generated. (The "upgrade code" GUID on the other hand must never change.) @@ -29,4 +30,4 @@ You will need [Packages](http://s.sudre.free.fr/Software/Packages/about.html) an ## Windows -First load the Visual Studio solution and rebuild the UI and ZeroTier One in both x64 and i386 `Release` mode. Then load [Advanced Installer Enterprise](http://www.advancedinstaller.com/), check that the version is correct, and build. The build will fail if any build artifacts are missing, and Windows must have our product singing key (from DigiCert) available to sign the resulting MSI file. The MSI must then be tested on at least a few different CLEAN Windows VMs to ensure that the installer is valid and properly signed. +First load the Visual Studio solution and rebuild the UI and ZeroTier One in both x64, i386, and arm64 `Release` mode. Then load [Advanced Installer Enterprise](http://www.advancedinstaller.com/), check that the version is correct, and build. The build will fail if any build artifacts are missing, and Windows must have our product singing key (from DigiCert) available to sign the resulting MSI file. The MSI must then be tested on at least a few different CLEAN Windows VMs to ensure that the installer is valid and properly signed. 
diff --git a/README.docker.md b/README.docker.md index 17e3cf4b8..5b046e320 100644 --- a/README.docker.md +++ b/README.docker.md @@ -60,9 +60,11 @@ To ensure you have a network available before trying to listen on it. Without pr You can control a few settings including the identity used and the authtoken used to interact with the control socket (which you can forward and access through `localhost:9993`). +- `ZEROTIER_JOIN_NETWORKS`: additional way to set networks to join. - `ZEROTIER_API_SECRET`: replaces the `authtoken.secret` before booting and allows you to manage the control socket's authentication key. - `ZEROTIER_IDENTITY_PUBLIC`: the `identity.public` file for zerotier-one. Use `zerotier-idtool` to generate one of these for you. - `ZEROTIER_IDENTITY_SECRET`: the `identity.secret` file for zerotier-one. Use `zerotier-idtool` to generate one of these for you. +- `ZEROTIER_LOCAL_CONF`: Sets the the `local.conf` file content for zerotier-one ### Tips diff --git a/README.md b/README.md index a53dca476..e881ce810 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ ZeroTier - Global Area Networking ====== -This document is written for a software developer audience. For information on using ZeroTier, see the: [Website](https://www.zerotier.com), [Documentation Site](https://docs.zerotier.com), and [Discussion Forum](https://discuss.zerotier.com) + +*This document is written for a software developer audience. For information on using ZeroTier, see the: [Website](https://www.zerotier.com), [Documentation Site](https://docs.zerotier.com), and [Discussion Forum](https://discuss.zerotier.com).* ZeroTier is a smart programmable Ethernet switch for planet Earth. It allows all networked devices, VMs, containers, and applications to communicate as if they all reside in the same physical data center or cloud region. @@ -36,30 +37,41 @@ The base path contains the ZeroTier One service main entry point (`one.cpp`), se - `ext/`: third party libraries, binaries that we ship for convenience on some platforms (Mac and Windows), and installation support files. - `include/`: include files for the ZeroTier core. - `java/`: a JNI wrapper used with our Android mobile app. (The whole Android app is not open source but may be made so in the future.) - - `macui/`: a Macintosh menu-bar app for controlling ZeroTier One, written in Objective C. - `node/`: the ZeroTier virtual Ethernet switch core, which is designed to be entirely separate from the rest of the code and able to be built as a stand-alone OS-independent library. Note to developers: do not use C++11 features in here, since we want this to build on old embedded platforms that lack C++11 support. C++11 can be used elsewhere. - `osdep/`: code to support and integrate with OSes, including platform-specific stuff only built for certain targets. - `rule-compiler/`: JavaScript rules language compiler for defining network-level rules. - `service/`: the ZeroTier One service, which wraps the ZeroTier core and provides VPN-like connectivity to virtual networks for desktops, laptops, servers, VMs, and containers. - `windows/`: Visual Studio solution files, Windows service code, and the Windows task bar app UI. + - `zeroidc/`: OIDC implementation used by ZeroTier service to log into SSO-enabled networks. (This part is written in Rust, and more Rust will be appearing in this repository in the future.) + +### Contributing + +Please do pull requests off of the `dev` branch. + +Releases are done by merging `dev` into `main` and then tagging and doing builds. 
### Build and Platform Notes To build on Mac and Linux just type `make`. On FreeBSD and OpenBSD `gmake` (GNU make) is required and can be installed from packages or ports. For Windows there is a Visual Studio solution in `windows/`. - **Mac** - - Xcode command line tools for OSX 10.8 or newer are required. + - Xcode command line tools for macOS 10.13 or newer are required. + - Rust for x86_64 and ARM64 targets *if SSO is enabled in the build*. - **Linux** - - The minimum compiler versions required are GCC/G++ 4.9.3 or CLANG/CLANG++ 3.4.2. (Install `clang` on CentOS 7 as G++ is too old.) + - The minimum compiler versions required are GCC/G++ 8.x or CLANG/CLANG++ 5.x. - Linux makefiles automatically detect and prefer clang/clang++ if present as it produces smaller and slightly faster binaries in most cases. You can override by supplying CC and CXX variables on the make command line. + - Rust for x86_64 and ARM64 targets *if SSO is enabled in the build*. - **Windows** - - Windows 7 or newer is supported. This *may* work on Vista but isn't officially supported there. It will not work on Windows XP. - - We build with Visual Studio 2017. Older versions may not work. Clang or MinGW will also probably work but may require some makefile hacking. + - Visual Studio 2022 on Windows 10 or newer. + - Rust for x86_64 and ARM64 targets *if SSO is enabled in the build*. - **FreeBSD** - GNU make is required. Type `gmake` to build. + - `binutils` is required. Type `pkg install binutils` to install. + - Rust for x86_64 and ARM64 targets *if SSO is enabled in the build*. - **OpenBSD** - There is a limit of four network memberships on OpenBSD as there are only four tap devices (`/dev/tap0` through `/dev/tap3`). - GNU make is required. Type `gmake` to build. + - Rust for x86_64 and ARM64 targets *if SSO is enabled in the build*. Typing `make selftest` will build a *zerotier-selftest* binary which unit tests various internals and reports on a few aspects of the build environment. It's a good idea to try this on novel platforms or architectures. @@ -75,14 +87,14 @@ On most distributions, macOS, and Windows, the installer will start the service A home folder for your system will automatically be created. -The service is controlled via the JSON API, which by default is available at 127.0.0.1 port 9993. We include a *zerotier-cli* command line utility to make API calls for standard things like joining and leaving networks. The *authtoken.secret* file in the home folder contains the secret token for accessing this API. See [service/README.md](service/README.md) for API documentation. +The service is controlled via the JSON API, which by default is available at `127.0.0.1:9993`. It also listens on `0.0.0.0:9993` which is only usable if `allowManagementFrom` is properly configured in `local.conf`. We include a *zerotier-cli* command line utility to make API calls for standard things like joining and leaving networks. The *authtoken.secret* file in the home folder contains the secret token for accessing this API. See [service/README.md](service/README.md) for API documentation. Here's where home folders live (by default) on each OS: * **Linux**: `/var/lib/zerotier-one` * **FreeBSD** / **OpenBSD**: `/var/db/zerotier-one` * **Mac**: `/Library/Application Support/ZeroTier/One` - * **Windows**: `\ProgramData\ZeroTier\One` (That's for Windows 7. The base 'shared app data' folder might be different on different Windows versions.) + * **Windows**: `\ProgramData\ZeroTier\One` (That's the default. 
The base 'shared app data' folder might be different if Windows is installed with a non-standard drive letter assignment or layout.) ### Basic Troubleshooting @@ -98,8 +110,88 @@ On CentOS check `/etc/sysconfig/iptables` for IPTables rules. For other distribu ZeroTier One peers will automatically locate each other and communicate directly over a local wired LAN *if UDP port 9993 inbound is open*. If that port is filtered, they won't be able to see each others' LAN announcement packets. If you're experiencing poor performance between devices on the same physical network, check their firewall settings. Without LAN auto-location peers must attempt "loopback" NAT traversal, which sometimes fails and in any case requires that every packet traverse your external router twice. -Users behind certain types of firewalls and "symmetric" NAT devices may not able able to connect to external peers directly at all. ZeroTier has limited support for port prediction and will *attempt* to traverse symmetric NATs, but this doesn't always work. If P2P connectivity fails you'll be bouncing UDP packets off our relay servers resulting in slower performance. Some NAT router(s) have a configurable NAT mode, and setting this to "full cone" will eliminate this problem. If you do this you may also see a magical improvement for things like VoIP phones, Skype, BitTorrent, WebRTC, certain games, etc., since all of these use NAT traversal techniques similar to ours. +Users behind certain types of firewalls and "symmetric" NAT devices may not be able to connect to external peers directly at all. ZeroTier has limited support for port prediction and will *attempt* to traverse symmetric NATs, but this doesn't always work. If P2P connectivity fails you'll be bouncing UDP packets off our relay servers resulting in slower performance. Some NAT router(s) have a configurable NAT mode, and setting this to "full cone" will eliminate this problem. If you do this you may also see a magical improvement for things like VoIP phones, Skype, BitTorrent, WebRTC, certain games, etc., since all of these use NAT traversal techniques similar to ours. If a firewall between you and the Internet blocks ZeroTier's UDP traffic, you will fall back to last-resort TCP tunneling to rootservers over port 443 (https impersonation). This will work almost anywhere but is *very slow* compared to UDP or direct peer to peer connectivity. Additional help can be found in our [knowledge base](https://zerotier.atlassian.net/wiki/spaces/SD/overview). + +### Prometheus Metrics + +Prometheus Metrics are available at the `/metrics` API endpoint. This endpoint is protected by an API key stored in `metricstoken.secret` to prevent unwanted information leakage. Information that could be gleaned from the metrics include joined networks and peers your instance is talking to. + +Access control is via the ZeroTier control interface itself and `metricstoken.secret`. This can be sent as a bearer auth token, via the `X-ZT1-Auth` HTTP header field, or appended to the URL as `?auth=`. 
You can see the current metrics via `cURL` with the following command: + + // Linux + curl -H "X-ZT1-Auth: $(sudo cat /var/lib/zerotier-one/metricstoken.secret)" http://localhost:9993/metrics + + // macOS + curl -H "X-ZT1-Auth: $(sudo cat /Library/Application\ Support/ZeroTier/One/metricstoken.secret)" http://localhost:9993/metrics + + // Windows PowerShell (Admin) + Invoke-RestMethod -Headers @{'X-ZT1-Auth' = "$(Get-Content C:\ProgramData\ZeroTier\One\metricstoken.secret)"; } -Uri http://localhost:9993/metrics + +To configure a scrape job in Prometheus on the machine ZeroTier is running on, add this to your Prometheus `scrape_config`: + + - job_name: zerotier-one + honor_labels: true + scrape_interval: 15s + metrics_path: /metrics + static_configs: + - targets: + - 127.0.0.1:9993 + labels: + group: zerotier-one + node_id: $YOUR_10_CHARACTER_NODE_ID + authorization: + credentials: $YOUR_METRICS_TOKEN_SECRET + +If neither of these methods are desirable, it is probably possible to distribute metrics via [Prometheus Proxy](https://github.com/pambrose/prometheus-proxy) or some other tool. Note: We have not tested this internally, but will probably work with the correct configuration. + +Metrics are also available on disk in ZeroTier's working directory: + + // Linux + /var/lib/zerotier-one/metrics.prom + + // macOS + /Library/Application\ Support/ZeroTier/One/metrics.prom + + //Windows + C:\ProgramData\ZeroTier\One\metrics.prom + +#### Available Metrics + +| Metric Name | Labels | Metric Type | Description | +| --- | --- | --- | --- | +| zt_packet | packet_type, direction | Counter | ZeroTier packet type counts | +| zt_packet_error | error_type, direction | Counter | ZeroTier packet errors| +| zt_data | protocol, direction | Counter | number of bytes ZeroTier has transmitted or received | +| zt_num_networks | | Gauge | number of networks this instance is joined to | +| zt_network_multicast_groups_subscribed | network_id | Gauge | number of multicast groups networks are subscribed to | +| zt_network_packets | network_id, direction | Counter | number of incoming/outgoing packets per network | +| zt_peer_latency | node_id | Histogram | peer latency (ms) | +| zt_peer_path_count | node_id, status | Gauge | number of paths to peer | +| zt_peer_packets | node_id, direction | Counter | number of packets to/from a peer | +| zt_peer_packet_errors | node_id | Counter | number of incoming packet errors from a peer | + +If there are other metrics you'd like to see tracked, ask us in an Issue or send us a Pull Request! + +### HTTP / App server + +There is a static http file server suitable for hosting Single Page Apps at http://localhost:9993/app/ + +Use `zerotier-cli info -j` to find your zerotier-one service's homeDir + +``` sh +cd $ZT_HOME +sudo mkdir -p app/app1 +sudo mkdir -p app/appB +echo 'appA
hello world A' | sudo tee app/appA/index.html +echo 'app2
hello world 2' | sudo tee app/app2/index.html +curl -sL http://localhost:9993/app/appA http://localhost:9993/app/app2 +``` + +Then visit [http://localhost:9993/app/app1/](http://localhost:9993/app/app1/) and [http://localhost:9993/app/appB/](http://localhost:9993/app/appB/) + +Requests to paths don't exist return the app root index.html, as is customary for SPAs. +If you want, you can write some javascript that talks to the service or controller [api](https://docs.zerotier.com/service/v1). diff --git a/RELEASE-NOTES.md b/RELEASE-NOTES.md index fdcd35c31..e6582207e 100644 --- a/RELEASE-NOTES.md +++ b/RELEASE-NOTES.md @@ -1,10 +1,127 @@ ZeroTier Release Notes ====== +# 2024-10-23 -- Version 1.14.2 + + * Fix for missing entitlement on macOS Sequoia. + * Fix for a problem correctly parsing local.conf to enable low bandwidth mode. + * Increment versions of some dependent libraries. + * Other fixes. + +# 2024-09-12 -- Version 1.14.1 + + * Multithreaded packet I/O support! Currently this is just for Linux and must + be enabled in local.conf. It will likely make the largest difference on small + multi-core devices where CPU is a bottleneck and high throughput is desired. + It may be enabled by default in the future but we want it to be thoroughly + tested. It's a little harder than it seems at first glance due to the need + to keep packets in sequence and balance load. + * Several multipath bug fixes. + * Updated the versions on a number of libraries related to OIDC support and HTTP. + * MacOS .app now shows the correct version in its Info.plist manifest. + * Sanitize MAC addresses in JSON format rules parser. + * Some basic information about the platform (OS, CPU architecture) is now reported + to network controllers when networks are joined so it can be displayed to + network admins and in the future used in policy checking and inventory operations. + +# 2024-05-02 -- Version 1.14.0 + + * Linux I/O performance improvements under heavy load + * Improvements to multipath + * Fix for port rebinding "coma" bug after periods offline (some laptop users) + * Fixed a rules engine quirk/ambiguity (GitHub Issue #2200) + * Controller API enhancements: node names and other node meta-data + * Other bug fixes + +# 2023-09-12 -- Version 1.12.2 + + * More improvements to macOS full tunnel mode. + * Faster recovery after changes to physical network settings. + +# 2023-08-25 -- Version 1.12.1 + + * Minor release to fix a port binding issue in Linux. + * Update Debian dependencies. + * No changes for other platforms. + +# 2023-08-23 -- Version 1.12.0 + + * Experimental Windows ARM64 support + * Fix numerous sleep/wake issues on macOS and other platforms + * Faster recovery after changes to physical network settings + * Prometheus compatible metrics support! + * Fix full tunnel mode on recent macOS versions + * Numerous macOS DNS fixes + * 10-30% speed improvement on Linux + +# 2023-03-23 -- Version 1.10.6 + + * Prevent binding temporary ipv6 addresses on macos (#1910) + * Prevent path-learning loops (#1914) + * Prevent infinite loop of UAC prompts in tray app + +# 2023-03-10 -- Version 1.10.5 + + * Fix for high CPU usage bug on Windows + +# 2023-03-07 -- Version 1.10.4 + + * SECURITY FIX (Windows): this version fixes a file permission problem on + Windows that could allow non-privileged users on a Windows system to read + privileged files in the ZeroTier service's working directory. 
This could + allow an unprivileged local Windows user to administrate the local ZeroTier + instance without appropriate local permissions. This issue is not remotely + exploitable unless a remote user can read arbitrary local files, and does + not impact other operating systems. + + * Fix a bug in the handling of multiple IP address assignments to virtual + interfaces on macOS. + +# 2023-02-15 -- Version 1.10.3 + + * Fix for duplicate paths in client. Could cause connectivity issues. Affects all platforms. + * Fix for Ethernet Tap MTU setting, would not properly apply on Linux. + * Fix default route bugs (macOS.) + * Enable Ping automatically for ZeroTier Adapters (Windows.) + * SSO updates and minor bugfixes. + * Add low-bandwidth mode. + * Add forceTcpRelay mode (optionally enabled.) + * Fix bug that prevented setting of custom TCP relay address. + * Build script improvements and bug fixes. + +# 2022-11-01 -- Version 1.10.2 + + * Fix another SSO "stuck client" issue in zeroidc. + * Expose root-reported external IP/port information via the local JSON API for better diagnostics. + * Multipath: CLI output improvement for inspecting bonds + * Multipath: balance-aware mode + * Multipath: Custom policies + * Multipath: Link quality measurement improvements + +Note that releases are coming few and far between because most of our dev effort is going into version 2. + +# 2022-06-27 -- Version 1.10.1 + + * Fix an issue that could cause SSO clients to get "stuck" on stale auth URLs. + * A few other SSO related bug fixes. + +# 2022-06-07 -- Version 1.10.0 + + * Fix formatting problem in `zerotier-cli` when using SSO networks. + * Fix a few other minor bugs in SSO signin to prepare for general availability. + * Remove requirement for webview in desktop UI and instead just make everything available via the tray pulldown/menu. Use [libui-ng](https://github.com/libui-ng/libui-ng) for minor prompt dialogs. Saves space and eliminates installation headaches on Windows. + * Fix SSO "spam" bug in desktop UI. + * Use system default browser for SSO login so all your plugins, MFA devices, password managers, etc. will work as you have them configured. + * Minor fix for bonding/multipath. + +# 2022-05-10 -- Version 1.8.10 + + * Fixed a bug preventing SSO sign-on on Windows. + # 2022-04-25 -- Version 1.8.9 - * Fixed a long-standing and strange bug that was causing sporadic "phantom" packet authentication failures. Not a security problem but could be behind spordaic reports of link failures under some conditions. - * Fized a memory leak in SSO/OIDC support. + * Fixed a long-standing and strange bug that was causing sporadic "phantom" packet authentication failures. Not a security problem but could be behind sporadic reports of link failures under some conditions. + * Fixed a memory leak in SSO/OIDC support. * Fixed SSO/OIDC display error on CLI. * Fixed a bug causing nodes to sometimes fail to push certs to each other (primarily affects SSO/OIDC use cases). * Fixed a deadlock bug on leaving SSO/OIDC managed networks. @@ -255,7 +372,7 @@ We're trying to fix all these issues before the 1.6.0 release. Stay tuned. # 2017-04-20 -- Version 1.2.4 * Managed routes are now only bifurcated for the default route. This is a change in behavior, though few people will probably notice. Bifurcating all managed routes was causing more trouble than it was worth for most users. 
- * Up to 2X crypto speedup on x86-64 (except Windows, which will take some porting) and 32-bit ARM platforms due to integration of fast assembly language implementations of Salsa20/12 from the [supercop](http://bench.cr.yp.to/supercop.html) code base. These were written by Daniel J. Bernstein and are in the public domain. My Macbook Pro (Core i5 2.8ghz) now does almost 1.5GiB/sec Salsa20/12 per core and a Raspberry Pi got a 2X boost. 64-bit ARM support and Windows support will take some work but should not be too hard. + * Up to 2X crypto speedup on x86-64 (except Windows, which will take some porting) and 32-bit ARM platforms due to integration of fast assembly language implementations of Salsa20/12 from the [supercop](http://bench.cr.yp.to/supercop.html) code base. These were written by Daniel J. Bernstein and are in the public domain. My MacBook Pro (Core i5 2.8ghz) now does almost 1.5GiB/sec Salsa20/12 per core and a Raspberry Pi got a 2X boost. 64-bit ARM support and Windows support will take some work but should not be too hard. * Refactored code that manages credentials to greatly reduce memory use in most cases. This may also result in a small performance improvement. * Reworked and simplified path selection and priority logic to fix path instability and dead path persistence edge cases. There have been some sporadic reports of persistent path instabilities and dead paths hanging around that take minutes to resolve. These have proven difficult to reproduce in house, but hopefully this will fix them. In any case it seems to speed up path establishment in our tests and it makes the code simpler and more readable. * Eliminated some unused cruft from the code around path management and in the peer class. diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 000000000..c539ed6a2 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,93 @@ +# Security + +ZeroTier takes the security of our software products and services seriously, which +includes all source code repositories managed through our GitHub organization. + +## Supported Versions + +The following versions of ZeroTier One receive security updates + +| Version | Supported | +| -------- | ------------------ | +| 1.14.x | :white_check_mark: | +| 1.12.x | :white_check_mark: | +| < 1.12.0 | :x: | + +## Reporting a Vulnerability + +**Please do not report security issues through public GitHub issues** + +Instead, please report vulnerabilities via email to security@zerotier.com. If possible, +please encrypt with our PGP key (see below). + +Please include the following information, or as much as you can provide to help us +understand the nature and scope of the issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) + * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +## Preferred Languages + +We prefer all communications to be in English. 
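If you want to encrypt your report, one possible workflow looks like this (a sketch; it assumes `gpg` is installed and that you have saved the public key from the next section to a local file, here called `zerotier-security.asc`):

``` sh
# zerotier-security.asc is an assumed filename for the public key block below.
gpg --import zerotier-security.asc
# Encrypt (and ASCII-armor) your write-up for security@zerotier.com; gpg may ask you
# to confirm use of a key that is not yet marked as trusted.
gpg --armor --encrypt --recipient security@zerotier.com report.txt
# Attach or paste the resulting report.txt.asc in your email.
```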
+ +## security@zerotier.com PGP key + +``` +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBGQGOVIBEACalXTnNqaiSOVLFEiqHpDMg8N/OI5D5850Xy1ZEvx3B3rz7cbn +k30ozHtJKbh+vqpyItE7DjyQAuF19gP5Q64Yh0Y+MmLHq60q/GwOwAYz7cI+UzA3 +5x8YqcmTp32LAM1xJn+iMlMLBuAmJl4kULKmOXPlpqPiyTFs5saizvm7fgRmfgJJ +HpsnIrTkaDFJhAR+jvMJohVYwmhuydeI0DsHu7KGpG1ddcHDrUjOPNqXnnAPSPwx +llw4yfKlQb8GYErsv/G5QVyzd5+SxEuiI4MARRnrk8LlMQ33CR6pzIQ/Bk5AAmye +mHqfEAknkiOf++urYhRs9BL3Kz3MdV0cg92zr9EFOg0u56jxf5OnAiTOhGUUA0hn +dS7peVGl46R9Oy2JYIazNDGi+4NIsYDFXsnsss9xOQVygPyeQd71zFHfix0jct9w +j3o/kj7Egsnm9nc13354bYT6bbalqXiRWwGH1eAFpjueNWiVFwZS6NZUP3WeNDiY +BlPo1LodvolbXiJcTILTCyEkERJPCK2zoE2nTdVfvTLWsuehw1M6Yd2/q74TVYy/ +RY+KjHkrChEBQ9PqXsXRHj6opKbT8JLfZkvU5k+3IiqqxOpB+QXFI/whj493CxWW +so7QAmzOCyJq8GDVPxzkwUac22YIkXdiOmb8i/HWq+kLY/HjQE259Gx6KwARAQAB +tClaZXJvVGllciBTZWN1cml0eSA8c2VjdXJpdHlAemVyb3RpZXIuY29tPokCTAQT +AQoANhYhBH1HQGb+4jzl6mnFqf09m6uqADkABQJkBjlSAhsDBAsJCAcEFQoJCAUW +AgMBAAIeAQIXgAAKCRD9PZurqgA5ACqPD/sFt6SG6Tu0HwTY2ofJtYsa2GBLL0pf +dYlX4cWSs1PVB5+m5Oj18y+GB2umA9GnsVtmvaSfp3XEngt2zNWX27uUsVfL35b2 +/5TVVe8RjzOedqMN+lQWMvO+f/C1zmWYXjjpC+iGjgMMaRRrofkkn+7uL4N9y6gY +rcXtpACT1rYFC+i1AKnZfUO8Vr5ji7odq0f7bDkN/N38rB0kRRwEmO8wqdpQK6gK +nxf9vgJl5ggimDk5Xtz1sfd3y28bf5N4hdOCkXUbd10nUFY3wDNTM4VxozxTGJeG +imdcc19Wuw/1fGUZ5SIjgPanCdPLGYwSTr+M6Fuern9uTtlC1GOby3BUtmVGP6EU +1pSAJSRpmoBPHKKOYtSMwV8PCboXru9P1ab8y8STKM3SKyghUJrl17gdc0LaksZa +E54pJudGPIQMFRqZjMdV6jgMuaLTozjZ4mW8EThf4mkX4xDkO8l7cOn0225ZYJZC +lZKpdnwzk9owkJA80u4KBNJxTtB4ZAPzjBsD5hFzCZQTLNQp/psU3EjZsau28eXT +E/C1QjEQHgy4ohkgQlCm1H1+clKssCWcdmsVGXuS1u8gh4K6X9b0Z6LeCGRaQvH2 ++DB8oTAdqp9nUZv9rP4pbo+sR4fF67CFLriVuxjedAiFkbM4uHMFcL4tc/X9+DRo +YN5X7oEkZvO507kCDQRkBjlSARAAz58UMF7K1qKyQjzKTcutaYZ5SaIGky9lCLZn +/2vjpFCoBogkxS/6IKQcwZk8b4S9QstaaQZDFEkxqNeKC0GiFTAMAb6SmYcK495h +EZnHl0NA5Nc2dBlZk5E/ENzTCz2bXaxCcVESc2z+xCzu07brbhGrqvliKiwOUzt9 +JzqEsar6I95OutBcZvkFCs44/Uf9bS1qf1w4klE8w3vdMtGH23umrET4tFZ+sh6o +ZFtQx0u2eKjsRdn/RMtsxLNaJlcE1DdIAqBpQrcmuwMC8v5wUGfCGZjhClzmyQlq +akUkayir7UtbHbFT/mgO+YI77YGXWk5QrwPscqqT2l8KB/YMujNDmaWa/0KV1lIY +zr5s4dzVeiwqFLR9ANFIhzFwzf3JLi6XSx123Qix0TxZoYPZCHl7yoi9qi6qybz5 +0Od2LSz3jbApeKYymZ+zjE+YV5y9DI6Wzy1j2M1FogNvTO9fMk+6dLt4HhTdSNvH +cKya462YCcy+tnZTkhmh+FTebbJlV6D4wG7skE5KCdBhjm53xLwp6XW9L6n2CrkL +W1IDBcCz0oPd1sMkXbO3wnxdXprV2XurCfsg/R2nszSNzvdJ8/xj3cr9hpoJ714R +qqyoEDRZ1Ss9kGL166o5MpN5qb/EewdkqGgWP7YFXbhsdHQiW7Z7dAqzjoaybD4O +nakkwyUAEQEAAYkCNgQYAQoAIBYhBH1HQGb+4jzl6mnFqf09m6uqADkABQJkBjlS +AhsMAAoJEP09m6uqADkAax0P/Rh8EZYRqW6dPYTl1YQusAK10rAcRNq3ekjofXGk +oXK1S7HWGoFgl5++5nfSfNgFJ5VLcgIM56wtIf49zFjWe5oC6fw8k+ghh4d2chMP +hdDILx6e0c30Iq1+EvovGR9hWa0wJ4cKTdzlwhY9ZC09q0ia+bl2mwpie1JQDR0c +zXCjt+PldLeeK9z1/XT0Q7KowYC+U18oR+KFm+EaRV4QT85JVequnIeGkmaHJrHB +lH4T5A5ib7y8edon1c0Zx3GsaxJUojkEJ0SX7ffVDu6ztUZfkHfCVpMW4VzUeGA/ +m+CtFO9ciLRGZEkRa+zhIGoBvwEXU0GiwiF4nZ0F2C8UioeW0YIEV9zl3nXJctYE +ZKc2whSENQRTGgaYHVoVZhznt71LKWgFLshwBo81UCXVkzwAjMW1ActDnmPw5M7q +xR5Qp5G49Z1GmfSozazha0HVFPKNV5i3RlTzs4yLUnZyH0yC9IvtOefMHcLjG96L +N5miEV97gvJJjrn8rhRvpUwAWgmT/9IuYjBNQTtNN40arto5HxezR76WCjdKYxdL +p3dM1iiBDShHNm7LdyZlLFhTOMU0tNBxJJ7B09ar5gakeZjD+2aB1ODX9VuFtozL +onBjI2gIkry0UIkuznHfFw05lZAZAiqHEVgVi/WTk4C/bklDZNgE0lx+IWzEz2iS +L455 +=lheL +-----END PGP PUBLIC KEY BLOCK----- +``` diff --git a/attic/WinUI/APIHandler.cs b/attic/WinUI/APIHandler.cs deleted file mode 100644 index 7192f3f2f..000000000 --- a/attic/WinUI/APIHandler.cs +++ /dev/null @@ -1,459 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using System.Threading.Tasks; -using 
System.Net; -using System.IO; -using System.Windows; -using Newtonsoft.Json; -using System.Diagnostics; -using System.Windows.Threading; - -namespace WinUI -{ - - - public class APIHandler - { - private string authtoken; - - private string url = null; - - private static volatile APIHandler instance; - private static object syncRoot = new Object(); - - public delegate void NetworkListCallback(List networks); - public delegate void StatusCallback(ZeroTierStatus status); - - private string ZeroTierAddress = ""; - - public static APIHandler Instance - { - get - { - if (instance == null) - { - lock (syncRoot) - { - if (instance == null) - { - if (!initHandler()) - { - return null; - } - } - } - } - - return instance; - } - } - - private static bool initHandler(bool resetToken = false) - { - String localZtDir = Environment.GetFolderPath(Environment.SpecialFolder.LocalApplicationData) + "\\ZeroTier\\One"; - String globalZtDir = Environment.GetFolderPath(Environment.SpecialFolder.CommonApplicationData) + "\\ZeroTier\\One"; - - String authToken = ""; - Int32 port = 9993; - - if (resetToken) - { - instance = null; - if (File.Exists(localZtDir + "\\authtoken.secret")) - { - File.Delete(localZtDir + "\\authtoken.secret"); - } - - if (File.Exists(localZtDir + "\\zerotier-one.port")) - { - File.Delete(localZtDir + "\\zerotier-one.port"); - } - } - - if (!File.Exists(localZtDir + "\\authtoken.secret") || !File.Exists(localZtDir + "\\zerotier-one.port")) - { - // launch external process to copy file into place - String curPath = System.Reflection.Assembly.GetEntryAssembly().Location; - int index = curPath.LastIndexOf("\\"); - curPath = curPath.Substring(0, index); - ProcessStartInfo startInfo = new ProcessStartInfo(curPath + "\\copyutil.exe", "\"" + globalZtDir + "\"" + " " + "\"" + localZtDir + "\""); - startInfo.Verb = "runas"; - - - var process = Process.Start(startInfo); - process.WaitForExit(); - } - - authToken = readAuthToken(localZtDir + "\\authtoken.secret"); - - if ((authToken == null) || (authToken.Length <= 0)) - { - MessageBox.Show("Unable to read ZeroTier One authtoken", "ZeroTier One"); - return false; - } - - port = readPort(localZtDir + "\\zerotier-one.port"); - instance = new APIHandler(port, authToken); - return true; - } - - private static String readAuthToken(String path) - { - String authToken = ""; - - if (File.Exists(path)) - { - try - { - byte[] tmp = File.ReadAllBytes(path); - authToken = System.Text.Encoding.UTF8.GetString(tmp).Trim(); - } - catch - { - MessageBox.Show("Unable to read ZeroTier One Auth Token from:\r\n" + path, "ZeroTier One"); - } - } - - return authToken; - } - - private static Int32 readPort(String path) - { - Int32 port = 9993; - - try - { - byte[] tmp = File.ReadAllBytes(path); - port = Int32.Parse(System.Text.Encoding.ASCII.GetString(tmp).Trim()); - if ((port <= 0) || (port > 65535)) - port = 9993; - } - catch - { - } - - return port; - } - - private APIHandler() - { - url = "http://127.0.0.1:9993"; - } - - public APIHandler(int port, string authtoken) - { - url = "http://127.0.0.1:" + port; - this.authtoken = authtoken; - } - - - - public void GetStatus(StatusCallback cb) - { - var request = WebRequest.Create(url + "/status" + "?auth=" + authtoken) as HttpWebRequest; - if (request != null) - { - request.Method = "GET"; - request.ContentType = "application/json"; - } - - try - { - var httpResponse = (HttpWebResponse)request.GetResponse(); - if (httpResponse.StatusCode == HttpStatusCode.OK) - { - using (var streamReader = new 
StreamReader(httpResponse.GetResponseStream())) - { - var responseText = streamReader.ReadToEnd(); - - ZeroTierStatus status = null; - try - { - status = JsonConvert.DeserializeObject(responseText); - - if (ZeroTierAddress != status.Address) - { - ZeroTierAddress = status.Address; - } - } - catch (JsonReaderException e) - { - Console.WriteLine(e.ToString()); - } - cb(status); - } - } - else if (httpResponse.StatusCode == HttpStatusCode.Unauthorized) - { - APIHandler.initHandler(true); - } - } - catch (System.Net.Sockets.SocketException) - { - cb(null); - } - catch (System.Net.WebException e) - { - HttpWebResponse res = (HttpWebResponse)e.Response; - if (res != null && res.StatusCode == HttpStatusCode.Unauthorized) - { - APIHandler.initHandler(true); - } - else - { - cb(null); - } - } - } - - - - public void GetNetworks(NetworkListCallback cb) - { - var request = WebRequest.Create(url + "/network" + "?auth=" + authtoken) as HttpWebRequest; - if (request == null) - { - cb(null); - } - - request.Method = "GET"; - request.ContentType = "application/json"; - request.Timeout = 10000; - - try - { - var httpResponse = (HttpWebResponse)request.GetResponse(); - - if (httpResponse.StatusCode == HttpStatusCode.OK) - { - using (var streamReader = new StreamReader(httpResponse.GetResponseStream())) - { - var responseText = streamReader.ReadToEnd(); - - List networkList = null; - try - { - networkList = JsonConvert.DeserializeObject>(responseText); - foreach (ZeroTierNetwork n in networkList) - { - // all networks received via JSON are connected by definition - n.IsConnected = true; - } - } - catch (JsonReaderException e) - { - Console.WriteLine(e.ToString()); - } - cb(networkList); - } - } - else if (httpResponse.StatusCode == HttpStatusCode.Unauthorized) - { - APIHandler.initHandler(true); - } - } - catch (System.Net.Sockets.SocketException) - { - cb(null); - } - catch (System.Net.WebException e) - { - HttpWebResponse res = (HttpWebResponse)e.Response; - if (res != null && res.StatusCode == HttpStatusCode.Unauthorized) - { - APIHandler.initHandler(true); - } - else - { - cb(null); - } - } - } - - public void JoinNetwork(Dispatcher d, string nwid, bool allowManaged = true, bool allowGlobal = false, bool allowDefault = false, bool allowDNS = false) - { - Task.Factory.StartNew(() => - { - var request = WebRequest.Create(url + "/network/" + nwid + "?auth=" + authtoken) as HttpWebRequest; - if (request == null) - { - return; - } - - request.Method = "POST"; - request.ContentType = "applicaiton/json"; - request.Timeout = 30000; - try - { - using (var streamWriter = new StreamWriter(((HttpWebRequest)request).GetRequestStream())) - { - string json = "{\"allowManaged\":" + (allowManaged ? "true" : "false") + "," + - "\"allowGlobal\":" + (allowGlobal ? "true" : "false") + "," + - "\"allowDefault\":" + (allowDefault ? "true" : "false") + "," + - "\"allowDNS\":" + (allowDNS ? 
"true" : "false") + "}"; - streamWriter.Write(json); - streamWriter.Flush(); - streamWriter.Close(); - } - } - catch (System.Net.WebException) - { - d.BeginInvoke(DispatcherPriority.Normal, new Action(() => - { - MessageBox.Show("Error Joining Network: Cannot connect to ZeroTier service."); - })); - return; - } - - try - { - var httpResponse = (HttpWebResponse)request.GetResponse(); - - if (httpResponse.StatusCode == HttpStatusCode.Unauthorized) - { - APIHandler.initHandler(true); - } - else if (httpResponse.StatusCode != HttpStatusCode.OK) - { - Console.WriteLine("Error sending join network message"); - } - } - catch (System.Net.Sockets.SocketException) - { - d.BeginInvoke(DispatcherPriority.Normal, new Action(() => - { - MessageBox.Show("Error Joining Network: Cannot connect to ZeroTier service."); - })); - } - catch (System.Net.WebException e) - { - HttpWebResponse res = (HttpWebResponse)e.Response; - if (res != null && res.StatusCode == HttpStatusCode.Unauthorized) - { - APIHandler.initHandler(true); - } - d.BeginInvoke(DispatcherPriority.Normal, new Action(() => - { - MessageBox.Show("Error Joining Network: Cannot connect to ZeroTier service."); - })); - } - }); - } - - public void LeaveNetwork(Dispatcher d, string nwid) - { - Task.Factory.StartNew(() => - { - var request = WebRequest.Create(url + "/network/" + nwid + "?auth=" + authtoken) as HttpWebRequest; - if (request == null) - { - return; - } - - request.Method = "DELETE"; - request.Timeout = 30000; - - try - { - var httpResponse = (HttpWebResponse)request.GetResponse(); - - if (httpResponse.StatusCode == HttpStatusCode.Unauthorized) - { - APIHandler.initHandler(true); - } - else if (httpResponse.StatusCode != HttpStatusCode.OK) - { - Console.WriteLine("Error sending leave network message"); - } - } - catch (System.Net.Sockets.SocketException) - { - d.BeginInvoke(DispatcherPriority.Normal, new Action(() => - { - MessageBox.Show("Error Leaving Network: Cannot connect to ZeroTier service."); - })); - } - catch (System.Net.WebException e) - { - HttpWebResponse res = (HttpWebResponse)e.Response; - if (res != null && res.StatusCode == HttpStatusCode.Unauthorized) - { - APIHandler.initHandler(true); - } - d.BeginInvoke(DispatcherPriority.Normal, new Action(() => - { - MessageBox.Show("Error Leaving Network: Cannot connect to ZeroTier service."); - })); - } - catch - { - Console.WriteLine("Error leaving network: Unknown error"); - } - }); - } - - public delegate void PeersCallback(List peers); - - public void GetPeers(PeersCallback cb) - { - var request = WebRequest.Create(url + "/peer" + "?auth=" + authtoken) as HttpWebRequest; - if (request == null) - { - cb(null); - } - - request.Method = "GET"; - request.ContentType = "application/json"; - - try - { - var httpResponse = (HttpWebResponse)request.GetResponse(); - if (httpResponse.StatusCode == HttpStatusCode.OK) - { - using (var streamReader = new StreamReader(httpResponse.GetResponseStream())) - { - var responseText = streamReader.ReadToEnd(); - //Console.WriteLine(responseText); - List peerList = null; - try - { - peerList = JsonConvert.DeserializeObject>(responseText); - } - catch (JsonReaderException e) - { - Console.WriteLine(e.ToString()); - } - cb(peerList); - } - } - else if (httpResponse.StatusCode == HttpStatusCode.Unauthorized) - { - APIHandler.initHandler(true); - } - } - catch (System.Net.Sockets.SocketException) - { - cb(null); - } - catch (System.Net.WebException e) - { - HttpWebResponse res = (HttpWebResponse)e.Response; - if (res != null && res.StatusCode == 
HttpStatusCode.Unauthorized) - { - APIHandler.initHandler(true); - } - else - { - cb(null); - } - } - } - - public string NodeAddress() - { - return ZeroTierAddress; - } - } -} diff --git a/attic/WinUI/AboutView.xaml b/attic/WinUI/AboutView.xaml deleted file mode 100644 index 295d27bd5..000000000 --- a/attic/WinUI/AboutView.xaml +++ /dev/null @@ -1,35 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/attic/WinUI/AboutView.xaml.cs b/attic/WinUI/AboutView.xaml.cs deleted file mode 100644 index 9c48493d0..000000000 --- a/attic/WinUI/AboutView.xaml.cs +++ /dev/null @@ -1,35 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Diagnostics; -using System.Linq; -using System.Text; -using System.Threading.Tasks; -using System.Windows; -using System.Windows.Controls; -using System.Windows.Data; -using System.Windows.Documents; -using System.Windows.Input; -using System.Windows.Media; -using System.Windows.Media.Imaging; -using System.Windows.Navigation; -using System.Windows.Shapes; - -namespace WinUI -{ - /// - /// Interaction logic for AboutView.xaml - /// - public partial class AboutView : Window - { - public AboutView() - { - InitializeComponent(); - } - - private void Hyperlink_MouseLeftButtonDown(object sender, RequestNavigateEventArgs e) - { - var hyperlink = (Hyperlink)sender; - Process.Start(hyperlink.NavigateUri.ToString()); - } - } -} diff --git a/attic/WinUI/App.config b/attic/WinUI/App.config deleted file mode 100644 index 8e1564635..000000000 --- a/attic/WinUI/App.config +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - \ No newline at end of file diff --git a/attic/WinUI/App.xaml b/attic/WinUI/App.xaml deleted file mode 100644 index 12ed85f9a..000000000 --- a/attic/WinUI/App.xaml +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - - - - - - diff --git a/attic/WinUI/App.xaml.cs b/attic/WinUI/App.xaml.cs deleted file mode 100644 index 53ef2f678..000000000 --- a/attic/WinUI/App.xaml.cs +++ /dev/null @@ -1,25 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Configuration; -using System.Data; -using System.Linq; -using System.Threading.Tasks; -using System.Windows; -using Hardcodet.Wpf.TaskbarNotification; - -namespace WinUI -{ - /// - /// Interaction logic for App.xaml - /// - public partial class App : Application - { - private TaskbarIcon tb; - - private void InitApplication() - { - tb = (TaskbarIcon)FindResource("NotifyIcon"); - tb.Visibility = Visibility.Visible; - } - } -} diff --git a/attic/WinUI/CentralAPI.cs b/attic/WinUI/CentralAPI.cs deleted file mode 100644 index 22bdc697c..000000000 --- a/attic/WinUI/CentralAPI.cs +++ /dev/null @@ -1,256 +0,0 @@ -using System; -using System.Collections.Generic; -using System.IO; -using System.Linq; -using System.Net; -using System.Net.Http; -using System.Text; -using System.Threading.Tasks; -using Newtonsoft.Json; - -namespace WinUI -{ - class CentralAPI - { - private static volatile CentralAPI instance; - private static object syncRoot = new Object(); - - private CookieContainer cookieContainer; - private HttpClientHandler clientHandler; - private HttpClient client; - - private CentralServer server; - public CentralServer Central - { - get - { - return this.server; - } - set - { - this.server = value; - WriteCentralConfig(); - UpdateRequestHeaders(); - } - } - - public static CentralAPI Instance - { - get - { - if (instance == null) - { - lock (syncRoot) - { - if (instance == null) - { - instance = new CentralAPI(); - } - } - } - - return instance; - } - } - - - - private CentralAPI() - { -#if 
DEBUG - ServicePointManager.ServerCertificateValidationCallback += (sender, cert, chain, sslPolicyErrors) => true; -#endif - cookieContainer = new CookieContainer(); - clientHandler = new HttpClientHandler - { - AllowAutoRedirect = true, - UseCookies = true, - CookieContainer = cookieContainer - }; - - client = new HttpClient(clientHandler); - - string centralConfigPath = CentralConfigFile(); - if (File.Exists(centralConfigPath)) - { - byte[] tmp = File.ReadAllBytes(centralConfigPath); - string json = Encoding.UTF8.GetString(tmp).Trim(); - CentralServer ctmp = JsonConvert.DeserializeObject(json); - if (ctmp != null) - { - Central = ctmp; - } - else - { - Central = new CentralServer(); - } - } - else - { - Central = new CentralServer(); - } - } - - public bool HasAccessToken() - { - if (Central == null) - return false; - - return !string.IsNullOrEmpty(Central.APIKey); - } - - private string ZeroTierDir() - { - return Environment.GetFolderPath(Environment.SpecialFolder.LocalApplicationData) + "\\ZeroTier\\One"; - } - - private string CentralConfigFile() - { - return ZeroTierDir() + "\\central.conf"; - } - - public void WriteCentralConfig() - { - string json = JsonConvert.SerializeObject(Central); - byte[] tmp = Encoding.UTF8.GetBytes(json); - if (tmp != null) - { - File.WriteAllBytes(CentralConfigFile(), tmp); - } - } - - private void UpdateRequestHeaders() - { - if (client.DefaultRequestHeaders.Contains("Authorization")) - { - client.DefaultRequestHeaders.Remove("Authorization"); - } - - if (!string.IsNullOrEmpty(Central.APIKey)) - { - client.DefaultRequestHeaders.Add("Authorization", "bearer " + Central.APIKey); - } - } - - public async Task Login(string email, string password, bool isNewUser) - { - string postURL = Central.ServerURL + "/api/_auth/local"; - CentralLogin login = new CentralLogin(email, password, isNewUser); - var content = new StringContent(JsonConvert.SerializeObject(login), Encoding.UTF8, "application/json"); - HttpResponseMessage response = await client.PostAsync(postURL, content); - - if (!response.IsSuccessStatusCode) - { - return false; - } - - string resContent = await response.Content.ReadAsStringAsync(); - - CentralUser user = JsonConvert.DeserializeObject(resContent); - - if (user.Tokens.Count == 0) - { - // create token - user = await CreateAuthToken(user); - } - - Central.APIKey = user.Tokens[0]; - - UpdateRequestHeaders(); - WriteCentralConfig(); - - return true; - } - - public async Task CreateAuthToken(CentralUser user) - { - string randomTokenURL = Central.ServerURL + "/api/randomToken"; - HttpResponseMessage response = await client.GetAsync(randomTokenURL); - - if (!response.IsSuccessStatusCode) - { - // TODO: throw an error - return null; - } - - string resContent = await response.Content.ReadAsStringAsync(); - - CentralToken t = JsonConvert.DeserializeObject(resContent); - - user.Tokens.Add(t.Token); - - string tokenObj = "{ \"tokens\": " + JsonConvert.SerializeObject(user.Tokens) + " } "; - - string postURL = Central.ServerURL + "/api/user/" + user.Id; - var postContent = new StringContent(tokenObj, Encoding.UTF8, "application/json"); - response = await client.PostAsync(postURL, postContent); - - if (!response.IsSuccessStatusCode) - { - // TODO: thrown an error - return null; - } - - resContent = await response.Content.ReadAsStringAsync(); - user = JsonConvert.DeserializeObject(resContent); - - return user; - } - - public async Task> GetNetworkList() - { - string networkURL = Central.ServerURL + "/api/network"; - - HttpResponseMessage response = await 
client.GetAsync(networkURL); - - if (!response.IsSuccessStatusCode) - { - // TODO: Throw Error - return new List(); - } - - string resContent = await response.Content.ReadAsStringAsync(); - - List networkList = JsonConvert.DeserializeObject>(resContent); - - return networkList; - } - - public async Task CreateNewNetwork() - { - string networkURL = Central.ServerURL + "/api/network?easy=1"; - CentralNetwork network = new CentralNetwork(); - network.Config = new CentralNetwork.CentralNetworkConfig(); - network.Config.Name = NetworkNameGenerator.GenerateName(); - string jsonNetwork = JsonConvert.SerializeObject(network); - var postContent = new StringContent(jsonNetwork, Encoding.UTF8, "application/json"); - HttpResponseMessage response = await client.PostAsync(networkURL, postContent); - - if (!response.IsSuccessStatusCode) - { - return null; - } - - string resContent = await response.Content.ReadAsStringAsync(); - - CentralNetwork newNetwork = JsonConvert.DeserializeObject(resContent); - - return newNetwork; - } - - public async Task AuthorizeNode(string nodeAddress, string networkId) - { - string json = "{ \"config\": { \"authorized\": true } }"; - string postURL = Central.ServerURL + "/api/network/" + networkId + "/member/" + nodeAddress; - var postContent = new StringContent(json, Encoding.UTF8, "application/json"); - HttpResponseMessage response = await client.PostAsync(postURL, postContent); - - if (response.IsSuccessStatusCode) - { - return true; - } - - return false; - } - } -} diff --git a/attic/WinUI/CentralLogin.cs b/attic/WinUI/CentralLogin.cs deleted file mode 100644 index 97265dcf8..000000000 --- a/attic/WinUI/CentralLogin.cs +++ /dev/null @@ -1,30 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using System.Threading.Tasks; -using Newtonsoft.Json; - -namespace WinUI -{ - class CentralLogin - { - - - public CentralLogin(string email, string password, bool isNew) - { - Login = email; - Password = password; - IsNew = isNew; - } - - [JsonProperty("login")] - public string Login { get; set; } - - [JsonProperty("password")] - public string Password { get; set; } - - [JsonProperty("register")] - public bool IsNew { get; set; } - } -} diff --git a/attic/WinUI/CentralNetwork.cs b/attic/WinUI/CentralNetwork.cs deleted file mode 100644 index 26ad5234a..000000000 --- a/attic/WinUI/CentralNetwork.cs +++ /dev/null @@ -1,48 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using System.Threading.Tasks; -using Newtonsoft.Json; - -namespace WinUI -{ - class CentralNetwork - { - [JsonProperty("id")] - public string Id { get; set; } - - [JsonProperty("type")] - public string Type { get; set; } - - [JsonProperty("clock")] - public UInt64 Clock { get; set; } - - [JsonProperty("rulesSource")] - public string RulesSource { get; set; } - - [JsonProperty("description")] - public string Description { get; set; } - - [JsonProperty("ownerId")] - public string OwnerID { get; set; } - - [JsonProperty("onlineMemberCount")] - public int OnlineMemberCount { get; set; } - - [JsonProperty("config")] - public CentralNetworkConfig Config { get; set; } - - public class CentralNetworkConfig - { - [JsonProperty("id")] - public string Id { get; set; } - - [JsonProperty("nwid")] - public string NetworkID { get; set; } - - [JsonProperty("name")] - public string Name { get; set; } - } - } -} diff --git a/attic/WinUI/CentralServer.cs b/attic/WinUI/CentralServer.cs deleted file mode 100644 index 8e2686534..000000000 --- 
a/attic/WinUI/CentralServer.cs +++ /dev/null @@ -1,23 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using System.Threading.Tasks; -using Newtonsoft.Json; - -namespace WinUI -{ - class CentralServer - { - public CentralServer() - { - ServerURL = "https://my.zerotier.com"; - } - - [JsonProperty("server_url")] - public string ServerURL { get; set; } - - [JsonProperty("api_key")] - public string APIKey { get; set; } - } -} diff --git a/attic/WinUI/CentralToken.cs b/attic/WinUI/CentralToken.cs deleted file mode 100644 index 1db548aae..000000000 --- a/attic/WinUI/CentralToken.cs +++ /dev/null @@ -1,21 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using System.Threading.Tasks; -using Newtonsoft.Json; - -namespace WinUI -{ - class CentralToken - { - [JsonProperty("token")] - public string Token { get; set; } - - [JsonProperty("clock")] - public UInt64 Clock { get; set; } - - [JsonProperty("raw")] - public string Raw { get; set; } - } -} diff --git a/attic/WinUI/CentralUser.cs b/attic/WinUI/CentralUser.cs deleted file mode 100644 index 8a8945a28..000000000 --- a/attic/WinUI/CentralUser.cs +++ /dev/null @@ -1,51 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using System.Threading.Tasks; -using Newtonsoft.Json; - -namespace WinUI -{ - class CentralUser - { - public class CentralGlobalPermissions - { - [JsonProperty("a")] - public bool Administrator { get; set; } - - [JsonProperty("d")] - public bool Delete { get; set; } - - [JsonProperty("m")] - public bool Modify { get; set; } - - [JsonProperty("r")] - public bool Read { get; set; } - } - - [JsonProperty("id")] - public string Id { get; set; } - - [JsonProperty("type")] - public string Type { get; set; } - - [JsonProperty("clock")] - public UInt64 Clock { get; set; } - - [JsonProperty("globalPermissions")] - public CentralGlobalPermissions GlobalPermissions { get; set; } - - [JsonProperty("displayName")] - public string DisplayName { get; set; } - - [JsonProperty("email")] - public string Email { get; set; } - - [JsonProperty("smsNumber")] - public string SmsNumber { get; set; } - - [JsonProperty("tokens")] - public List Tokens { get; set; } - } -} diff --git a/attic/WinUI/Fonts/segoeui.ttf b/attic/WinUI/Fonts/segoeui.ttf deleted file mode 100644 index fc18ebd0a..000000000 Binary files a/attic/WinUI/Fonts/segoeui.ttf and /dev/null differ diff --git a/attic/WinUI/Fonts/segoeuib.ttf b/attic/WinUI/Fonts/segoeuib.ttf deleted file mode 100644 index 5f31e0ca6..000000000 Binary files a/attic/WinUI/Fonts/segoeuib.ttf and /dev/null differ diff --git a/attic/WinUI/Fonts/segoeuii.ttf b/attic/WinUI/Fonts/segoeuii.ttf deleted file mode 100644 index 7efb70d60..000000000 Binary files a/attic/WinUI/Fonts/segoeuii.ttf and /dev/null differ diff --git a/attic/WinUI/Fonts/segoeuiz.ttf b/attic/WinUI/Fonts/segoeuiz.ttf deleted file mode 100644 index d7bb186b3..000000000 Binary files a/attic/WinUI/Fonts/segoeuiz.ttf and /dev/null differ diff --git a/attic/WinUI/ISwitchable.cs b/attic/WinUI/ISwitchable.cs deleted file mode 100644 index e485a14cb..000000000 --- a/attic/WinUI/ISwitchable.cs +++ /dev/null @@ -1,13 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Linq; -using System.Text; -using System.Threading.Tasks; - -namespace WinUI -{ - interface ISwitchable - { - void UtilizeState(object state); - } -} diff --git a/attic/WinUI/JoinNetworkView.xaml b/attic/WinUI/JoinNetworkView.xaml deleted 
file mode 100644 index 9898f020a..000000000 --- a/attic/WinUI/JoinNetworkView.xaml +++ /dev/null @@ -1,17 +0,0 @@ - - - - - - - - - - - - - - - - - diff --git a/attic/macui/ZeroTier One/Network.h b/attic/macui/ZeroTier One/Network.h deleted file mode 100644 index c1cfbaf28..000000000 --- a/attic/macui/ZeroTier One/Network.h +++ /dev/null @@ -1,69 +0,0 @@ -/* - * ZeroTier One - Network Virtualization Everywhere - * Copyright (C) 2011-2016 ZeroTier, Inc. https://www.zerotier.com/ - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#import - -enum NetworkStatus { - REQUESTING_CONFIGURATION, - OK, - ACCESS_DENIED, - NOT_FOUND, - PORT_ERROR, - CLIENT_TOO_OLD, -}; - -enum NetworkType { - PUBLIC, - PRIVATE, -}; - -@interface Network : NSObject - -@property (readonly) NSArray *assignedAddresses; -@property (readonly) BOOL bridge; -@property (readonly) BOOL broadcastEnabled; -@property (readonly) BOOL dhcp; -@property (readonly) NSString *mac; -@property (readonly) int mtu; -@property (readonly) int netconfRevision; -@property (readonly) NSString *name; -@property (readonly) UInt64 nwid; -@property (readonly) NSString *portDeviceName; -@property (readonly) int portError; -@property (readonly) enum NetworkStatus status; -@property (readonly) enum NetworkType type; -@property (readonly) BOOL allowManaged; -@property (readonly) BOOL allowGlobal; -@property (readonly) BOOL allowDefault; -@property (readonly) BOOL allowDNS; -@property (readonly) BOOL connected; // not persisted. set to YES if loaded via json - -- (id)initWithJsonData:(NSDictionary*)jsonData; -- (id)initWithCoder:(NSCoder *)aDecoder; -- (void)encodeWithCoder:(NSCoder *)aCoder; -+ (BOOL)defaultRouteExists:(NSArray*)netList; -- (NSString*)statusString; -- (NSString*)typeString; - -- (BOOL)hasSameNetworkId:(UInt64)networkId; - -- (BOOL)isEqualToNetwork:(Network*)network; -- (BOOL)isEqual:(id)object; -- (NSUInteger)hash; - -@end diff --git a/attic/macui/ZeroTier One/Network.m b/attic/macui/ZeroTier One/Network.m deleted file mode 100644 index 2379eb691..000000000 --- a/attic/macui/ZeroTier One/Network.m +++ /dev/null @@ -1,352 +0,0 @@ -/* - * ZeroTier One - Network Virtualization Everywhere - * Copyright (C) 2011-2016 ZeroTier, Inc. https://www.zerotier.com/ - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#import "Network.h" - -NSString *NetworkAddressesKey = @"addresses"; -NSString *NetworkBridgeKey = @"bridge"; -NSString *NetworkBroadcastKey = @"broadcast"; -NSString *NetworkDhcpKey = @"dhcp"; -NSString *NetworkMacKey = @"mac"; -NSString *NetworkMtuKey = @"mtu"; -NSString *NetworkMulticastKey = @"multicast"; -NSString *NetworkNameKey = @"name"; -NSString *NetworkNetconfKey = @"netconf"; -NSString *NetworkNwidKey = @"nwid"; -NSString *NetworkPortNameKey = @"port"; -NSString *NetworkPortErrorKey = @"portError"; -NSString *NetworkStatusKey = @"status"; -NSString *NetworkTypeKey = @"type"; -NSString *NetworkAllowManagedKey = @"allowManaged"; -NSString *NetworkAllowGlobalKey = @"allowGlobal"; -NSString *NetworkAllowDefaultKey = @"allowDefault"; -NSString *NetworkAllowDNSKey = @"allowDNS"; - -@implementation Network - -- (id)initWithJsonData:(NSDictionary*)jsonData -{ - self = [super init]; - - if(self) { - if([jsonData objectForKey:@"assignedAddresses"]) { - _assignedAddresses = (NSArray*)[jsonData objectForKey:@"assignedAddresses"]; - } - - if([jsonData objectForKey:@"bridge"]) { - _bridge = [(NSNumber*)[jsonData objectForKey:@"bridge"] boolValue]; - } - - if([jsonData objectForKey:@"broadcastEnabled"]) { - _broadcastEnabled = [(NSNumber*)[jsonData objectForKey:@"broadcastEnabled"] boolValue]; - } - - if([jsonData objectForKey:@"dhcp"]) { - _dhcp = [(NSNumber*)[jsonData objectForKey:@"dhcp"] boolValue]; - } - - if([jsonData objectForKey:@"mac"]) { - _mac = (NSString*)[jsonData objectForKey:@"mac"]; - } - - if([jsonData objectForKey:@"mtu"]) { - _mtu = [(NSNumber*)[jsonData objectForKey:@"mtu"] intValue]; - } - - if([jsonData objectForKey:@"name"]) { - _name = (NSString*)[jsonData objectForKey:@"name"]; - } - - if([jsonData objectForKey:@"netconfRevision"]) { - _netconfRevision = [(NSNumber*)[jsonData objectForKey:@"netconfRevision"] intValue]; - } - - if([jsonData objectForKey:@"nwid"]) { - NSString *networkid = (NSString*)[jsonData objectForKey:@"nwid"]; - - NSScanner *scanner = [NSScanner scannerWithString:networkid]; - [scanner scanHexLongLong:&_nwid]; - } - - if([jsonData objectForKey:@"portDeviceName"]) { - _portDeviceName = (NSString*)[jsonData objectForKey:@"portDeviceName"]; - } - - if([jsonData objectForKey:@"portError"]) { - _portError = [(NSNumber*)[jsonData objectForKey:@"portError"] intValue]; - } - - if([jsonData objectForKey:@"allowManaged"]) { - _allowManaged = [(NSNumber*)[jsonData objectForKey:@"allowManaged"] boolValue]; - } - - if([jsonData objectForKey:@"allowGlobal"]) { - _allowGlobal = [(NSNumber*)[jsonData objectForKey:@"allowGlobal"] boolValue]; - } - - if([jsonData objectForKey:@"allowDefault"]) { - _allowDefault = [(NSNumber*)[jsonData objectForKey:@"allowDefault"] boolValue]; - } - if([jsonData objectForKey:@"allowDNS"]) { - _allowDNS = [(NSNumber*)[jsonData objectForKey:@"allowDNS"] boolValue]; - } else { - _allowDNS = false; - } - - if([jsonData objectForKey:@"status"]) { - NSString *statusStr = (NSString*)[jsonData objectForKey:@"status"]; - if([statusStr isEqualToString:@"REQUESTING_CONFIGURATION"]) { - _status = REQUESTING_CONFIGURATION; - } - else if([statusStr isEqualToString:@"OK"]) { - _status = OK; - } - else if([statusStr isEqualToString:@"ACCESS_DENIED"]) { - _status = ACCESS_DENIED; - } - else if([statusStr isEqualToString:@"NOT_FOUND"]) { - _status = NOT_FOUND; - } - else if([statusStr isEqualToString:@"PORT_ERROR"]) { - _status = PORT_ERROR; - } - else if([statusStr isEqualToString:@"CLIENT_TOO_OLD"]) { - _status = CLIENT_TOO_OLD; - } - } - - 
if([jsonData objectForKey:@"type"]) { - NSString *typeStr = (NSString*)[jsonData objectForKey:@"type"]; - if([typeStr isEqualToString:@"PRIVATE"]) { - _type = PRIVATE; - } - else if([typeStr isEqualToString:@"PUBLIC"]) { - _type = PUBLIC; - } - } - - _connected = YES; - } - - return self; -} -- (id)initWithCoder:(NSCoder *)aDecoder -{ - self = [super init]; - - if(self) { - if([aDecoder containsValueForKey:NetworkAddressesKey]) { - _assignedAddresses = (NSArray*)[aDecoder decodeObjectForKey:NetworkAddressesKey]; - } - - if([aDecoder containsValueForKey:NetworkBridgeKey]) { - _bridge = [aDecoder decodeBoolForKey:NetworkBridgeKey]; - } - - if([aDecoder containsValueForKey:NetworkBroadcastKey]) { - _broadcastEnabled = [aDecoder decodeBoolForKey:NetworkBroadcastKey]; - } - - if([aDecoder containsValueForKey:NetworkDhcpKey]) { - _dhcp = [aDecoder decodeBoolForKey:NetworkDhcpKey]; - } - - if([aDecoder containsValueForKey:NetworkMacKey]) { - _mac = (NSString*)[aDecoder decodeObjectForKey:NetworkMacKey]; - } - - if([aDecoder containsValueForKey:NetworkMtuKey]) { - _mtu = (int)[aDecoder decodeIntegerForKey:NetworkMtuKey]; - } - - if([aDecoder containsValueForKey:NetworkNameKey]) { - _name = (NSString*)[aDecoder decodeObjectForKey:NetworkNameKey]; - } - - if([aDecoder containsValueForKey:NetworkNetconfKey]) { - _netconfRevision = (int)[aDecoder decodeIntegerForKey:NetworkNetconfKey]; - } - - if([aDecoder containsValueForKey:NetworkNwidKey]) { - _nwid = [(NSNumber*)[aDecoder decodeObjectForKey:NetworkNwidKey] unsignedLongLongValue]; - } - - if([aDecoder containsValueForKey:NetworkPortNameKey]) { - _portDeviceName = (NSString*)[aDecoder decodeObjectForKey:NetworkPortNameKey]; - } - - if([aDecoder containsValueForKey:NetworkPortErrorKey]) { - _portError = (int)[aDecoder decodeIntegerForKey:NetworkPortErrorKey]; - } - - if([aDecoder containsValueForKey:NetworkStatusKey]) { - _status = (enum NetworkStatus)[aDecoder decodeIntegerForKey:NetworkStatusKey]; - } - - if([aDecoder containsValueForKey:NetworkTypeKey]) { - _type = (enum NetworkType)[aDecoder decodeIntegerForKey:NetworkTypeKey]; - } - - if([aDecoder containsValueForKey:NetworkAllowManagedKey]) { - _allowManaged = [aDecoder decodeBoolForKey:NetworkAllowManagedKey]; - } - - if([aDecoder containsValueForKey:NetworkAllowGlobalKey]) { - _allowGlobal = [aDecoder decodeBoolForKey:NetworkAllowGlobalKey]; - } - - if([aDecoder containsValueForKey:NetworkAllowDefaultKey]) { - _allowDefault = [aDecoder decodeBoolForKey:NetworkAllowDefaultKey]; - } - - if([aDecoder containsValueForKey:NetworkAllowDNSKey]) { - _allowDNS = [aDecoder decodeBoolForKey:NetworkAllowDNSKey]; - } else { - _allowDNS = false; - } - - _connected = NO; - } - - return self; -} - -- (void)encodeWithCoder:(NSCoder *)aCoder -{ - [aCoder encodeObject:_assignedAddresses forKey:NetworkAddressesKey]; - [aCoder encodeBool:_bridge forKey:NetworkBridgeKey]; - [aCoder encodeBool:_broadcastEnabled forKey:NetworkBroadcastKey]; - [aCoder encodeBool:_dhcp forKey:NetworkDhcpKey]; - [aCoder encodeObject:_mac forKey:NetworkMacKey]; - [aCoder encodeInteger:_mtu forKey:NetworkMtuKey]; - [aCoder encodeObject:_name forKey:NetworkNameKey]; - [aCoder encodeInteger:_netconfRevision forKey:NetworkNetconfKey]; - [aCoder encodeObject:[NSNumber numberWithUnsignedLongLong:_nwid] - forKey:NetworkNwidKey]; - [aCoder encodeObject:_portDeviceName forKey:NetworkPortNameKey]; - [aCoder encodeInteger:_portError forKey:NetworkPortErrorKey]; - [aCoder encodeInteger:_status forKey:NetworkStatusKey]; - [aCoder encodeInteger:_type 
forKey:NetworkTypeKey]; - [aCoder encodeBool:_allowManaged forKey:NetworkAllowManagedKey]; - [aCoder encodeBool:_allowGlobal forKey:NetworkAllowGlobalKey]; - [aCoder encodeBool:_allowDefault forKey:NetworkAllowDefaultKey]; - [aCoder encodeBool:_allowDNS forKey:NetworkAllowDNSKey]; -} - -+ (BOOL)defaultRouteExists:(NSArray*)netList -{ - for(Network *net in netList) { - if (net.allowDefault && net.connected) { - return YES; - } - } - return NO; -} - -- (NSString*)statusString { - switch(_status) { - case REQUESTING_CONFIGURATION: - return @"REQUESTING_CONFIGURATION"; - case OK: - return @"OK"; - case ACCESS_DENIED: - return @"ACCESS_DENIED"; - case NOT_FOUND: - return @"NOT_FOUND"; - case PORT_ERROR: - return @"PORT_ERROR"; - case CLIENT_TOO_OLD: - return @"CLIENT_TOO_OLD"; - default: - return @""; - } -} - -- (NSString*)typeString { - switch(_type) { - case PUBLIC: - return @"PUBLIC"; - case PRIVATE: - return @"PRIVATE"; - default: - return @""; - } -} - -- (BOOL)hasSameNetworkId:(UInt64)networkId -{ - return self.nwid == networkId; -} - -- (BOOL)isEqualToNetwork:(Network*)network -{ - return [self.assignedAddresses isEqualToArray:network.assignedAddresses] && - self.bridge == network.bridge && - self.broadcastEnabled == network.broadcastEnabled && - self.dhcp == network.dhcp && - [self.mac isEqualToString:network.mac] && - self.mtu == network.mtu && - self.netconfRevision == network.netconfRevision && - [self.name isEqualToString:network.name] && - self.nwid == network.nwid && - [self.portDeviceName isEqualToString:network.portDeviceName] && - self.status == network.status && - self.type == network.type && - self.allowManaged == network.allowManaged && - self.allowGlobal == network.allowGlobal && - self.allowDefault == network.allowDefault && - self.allowDNS == network.allowDNS && - self.connected == network.connected; -} - -- (BOOL)isEqual:(id)object -{ - if (self == object) { - return YES; - } - - if (![object isKindOfClass:[Network class]]) { - return NO; - } - - return [self isEqualToNetwork:object]; -} - -- (NSUInteger)hash -{ - return [self.assignedAddresses hash] ^ - self.bridge ^ - self.broadcastEnabled ^ - self.dhcp ^ - [self.mac hash] ^ - self.mtu ^ - self.netconfRevision ^ - [self.name hash] ^ - self.nwid ^ - [self.portDeviceName hash] ^ - self.portError ^ - self.status ^ - self.type ^ - self.allowManaged ^ - self.allowGlobal ^ - self.allowDefault ^ - self.allowDNS ^ - self.connected; -} - -@end diff --git a/attic/macui/ZeroTier One/NetworkInfoCell.h b/attic/macui/ZeroTier One/NetworkInfoCell.h deleted file mode 100644 index f764034ee..000000000 --- a/attic/macui/ZeroTier One/NetworkInfoCell.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * ZeroTier One - Network Virtualization Everywhere - * Copyright (C) 2011-2016 ZeroTier, Inc. https://www.zerotier.com/ - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#import - -@class ShowNetworksViewController; - -@interface NetworkInfoCell : NSTableCellView - -@property (weak, nonatomic) ShowNetworksViewController *parent; - -@property (weak, nonatomic) IBOutlet NSTextField *networkIdField; -@property (weak, nonatomic) IBOutlet NSTextField *networkNameField; -@property (weak, nonatomic) IBOutlet NSTextField *statusField; -@property (weak, nonatomic) IBOutlet NSTextField *typeField; -@property (weak, nonatomic) IBOutlet NSTextField *macField; -@property (weak, nonatomic) IBOutlet NSTextField *mtuField; -@property (weak, nonatomic) IBOutlet NSTextField *broadcastField; -@property (weak, nonatomic) IBOutlet NSTextField *bridgingField; -@property (weak, nonatomic) IBOutlet NSTextField *deviceField; -@property (weak, nonatomic) IBOutlet NSTextField *addressesField; -@property (weak, nonatomic) IBOutlet NSButton *allowManaged; -@property (weak, nonatomic) IBOutlet NSButton *allowGlobal; -@property (weak, nonatomic) IBOutlet NSButton *allowDefault; -@property (weak, nonatomic) IBOutlet NSButton *allowDNS; -@property (weak, nonatomic) IBOutlet NSButton *connectedCheckbox; -@property (weak, nonatomic) IBOutlet NSButton *deleteButton; - -- (IBAction)onConnectCheckStateChanged:(NSButton*)sender; -- (IBAction)deleteNetwork:(NSButton*)sender; -- (IBAction)onAllowStatusChanged:(NSButton*)sender; - -- (void)joinNetwork:(NSString*)nwid; -- (void)leaveNetwork:(NSString*)nwid; - -@end diff --git a/attic/macui/ZeroTier One/NetworkInfoCell.m b/attic/macui/ZeroTier One/NetworkInfoCell.m deleted file mode 100644 index df1bbf67e..000000000 --- a/attic/macui/ZeroTier One/NetworkInfoCell.m +++ /dev/null @@ -1,86 +0,0 @@ -/* - * ZeroTier One - Network Virtualization Everywhere - * Copyright (C) 2011-2016 ZeroTier, Inc. https://www.zerotier.com/ - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#import "NetworkInfoCell.h" -#import "ServiceCom.h" -#import "ShowNetworksViewController.h" -#import "Network.h" - -@implementation NetworkInfoCell - -- (void)drawRect:(NSRect)dirtyRect { - [super drawRect:dirtyRect]; - - // Drawing code here. 
-} - -- (IBAction)onConnectCheckStateChanged:(NSButton*)sender -{ - if(sender.state == NSOnState) { - [self joinNetwork:self.networkIdField.stringValue]; - } - else { - [self leaveNetwork:self.networkIdField.stringValue]; - } -} - -- (IBAction)deleteNetwork:(NSButton*)sender; -{ - [self leaveNetwork:self.networkIdField.stringValue]; - [self.parent deleteNetworkFromList:self.networkIdField.stringValue]; -} - -- (IBAction)onAllowStatusChanged:(NSButton*)sender -{ - [self joinNetwork:self.networkIdField.stringValue]; -} - -- (void)joinNetwork:(NSString*)nwid -{ - NSError *error = nil; - [[ServiceCom sharedInstance] joinNetwork:nwid - allowManaged:(self.allowManaged.state == NSOnState) - allowGlobal:(self.allowGlobal.state == NSOnState) - allowDefault:![Network defaultRouteExists:_parent.networkList] && (self.allowDefault.state == NSOnState) - allowDNS:(self.allowDNS.state == NSOnState) - error:&error]; - - if (error) { - NSAlert *alert = [NSAlert alertWithError:error]; - alert.alertStyle = NSCriticalAlertStyle; - [alert addButtonWithTitle:@"Ok"]; - - [alert runModal]; - } -} - -- (void)leaveNetwork:(NSString*)nwid -{ - NSError *error = nil; - [[ServiceCom sharedInstance] leaveNetwork:nwid error:&error]; - - if (error) { - NSAlert *alert = [NSAlert alertWithError:error]; - alert.alertStyle = NSCriticalAlertStyle; - [alert addButtonWithTitle:@"Ok"]; - - [alert runModal]; - } -} - -@end diff --git a/attic/macui/ZeroTier One/NetworkMonitor.h b/attic/macui/ZeroTier One/NetworkMonitor.h deleted file mode 100644 index 8cdec4ed6..000000000 --- a/attic/macui/ZeroTier One/NetworkMonitor.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * ZeroTier One - Network Virtualization Everywhere - * Copyright (C) 2011-2016 ZeroTier, Inc. https://www.zerotier.com/ - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#import - -extern NSString * const NetworkUpdateKey; -extern NSString * const StatusUpdateKey; - -@class Network; - -@interface NetworkMonitor : NSObject -{ - NSMutableArray *_savedNetworks; - NSArray *_receivedNetworks; - NSMutableArray *_allNetworks; - - NSTimer *_timer; -} - -- (id)init; -- (void)dealloc; - -- (void)start; -- (void)stop; - -- (void)updateNetworkInfo; - -- (void)deleteSavedNetwork:(NSString*)networkId; - -@end diff --git a/attic/macui/ZeroTier One/NetworkMonitor.m b/attic/macui/ZeroTier One/NetworkMonitor.m deleted file mode 100644 index 7ed22c4a9..000000000 --- a/attic/macui/ZeroTier One/NetworkMonitor.m +++ /dev/null @@ -1,253 +0,0 @@ -/* - * ZeroTier One - Network Virtualization Everywhere - * Copyright (C) 2011-2016 ZeroTier, Inc. https://www.zerotier.com/ - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#import "NetworkMonitor.h" -#import "Network.h" -#import "ServiceCom.h" -#import "NodeStatus.h" - -@import AppKit; - - -NSString * const NetworkUpdateKey = @"com.zerotier.one.network-list"; -NSString * const StatusUpdateKey = @"com.zerotier.one.status"; - -@interface NetworkMonitor (private) - -- (NSString*)dataFile; -- (void)internal_updateNetworkInfo; -- (NSInteger)findNetworkWithID:(UInt64)networkId; -- (NSInteger)findSavedNetworkWithID:(UInt64)networkId; -- (void)saveNetworks; - -@end - -@implementation NetworkMonitor - -- (id)init -{ - self = [super init]; - if(self) - { - _savedNetworks = [NSMutableArray array]; - _receivedNetworks = [NSArray array]; - _allNetworks = [NSMutableArray array]; - _timer = nil; - } - - return self; -} - -- (void)dealloc -{ - [_timer invalidate]; -} - -- (void)start -{ - NSLog(@"ZeroTier monitor started"); - _timer = [NSTimer scheduledTimerWithTimeInterval:1.0f - target:self - selector:@selector(updateNetworkInfo) - userInfo:nil - repeats:YES]; -} - -- (void)stop -{ - NSLog(@"ZeroTier monitor stopped"); - [_timer invalidate]; - _timer = nil; -} - -- (void)updateNetworkInfo -{ - NSString *filePath = [self dataFile]; - - if([[NSFileManager defaultManager] fileExistsAtPath:filePath]) { - NSArray *networks = [NSKeyedUnarchiver unarchiveObjectWithFile:filePath]; - - if(networks != nil) { - _savedNetworks = [networks mutableCopy]; - } - } - - NSError *error = nil; - - [[ServiceCom sharedInstance] getNetworklist:^(NSArray *networkList) { - _receivedNetworks = networkList; - - [[NSOperationQueue mainQueue] addOperationWithBlock:^{ - [self internal_updateNetworkInfo]; - } ]; - } error:&error]; - - if(error) { - [self stop]; - - NSAlert *alert = [NSAlert alertWithError:error]; - alert.alertStyle = NSCriticalAlertStyle; - [alert addButtonWithTitle:@"Quit"]; - [alert addButtonWithTitle:@"Retry"]; - - NSModalResponse res = [alert runModal]; - - if(res == NSAlertFirstButtonReturn) { - [NSApp performSelector:@selector(terminate:) withObject:nil afterDelay:0.0]; - } - else if(res == NSAlertSecondButtonReturn) { - [self start]; - return; - } - } - - [[ServiceCom sharedInstance] getNodeStatus:^(NodeStatus *status) { - NSDictionary *userInfo = [NSDictionary dictionaryWithObject:status forKey:@"status"]; - - [[NSOperationQueue mainQueue] addOperationWithBlock:^{ - [[NSNotificationCenter defaultCenter] postNotificationName:StatusUpdateKey - object:nil - userInfo:userInfo]; - }]; - } error:&error]; - - if (error) { - [self stop]; - - NSAlert *alert = [NSAlert alertWithError:error]; - alert.alertStyle = NSCriticalAlertStyle; - [alert addButtonWithTitle:@"Quit"]; - [alert addButtonWithTitle:@"Retry"]; - - NSModalResponse res = [alert runModal]; - - if(res == NSAlertFirstButtonReturn) { - [NSApp performSelector:@selector(terminate:) withObject:nil afterDelay:0.0]; - } - else if(res == NSAlertSecondButtonReturn) { - [self start]; - return; - } - } -} - -- (void)deleteSavedNetwork:(NSString*)networkId -{ - UInt64 nwid = 0; - NSScanner *scanner = [NSScanner scannerWithString:networkId]; - [scanner scanHexLongLong:&nwid]; - - NSInteger index = [self findNetworkWithID:nwid]; - - if(index != NSNotFound) { - 
[_allNetworks removeObjectAtIndex:index]; - } - - index = [self findSavedNetworkWithID:nwid]; - - if(index != NSNotFound) { - [_savedNetworks removeObjectAtIndex:index]; - } - - [self saveNetworks]; -} - -@end - -@implementation NetworkMonitor (private) -- (NSString*)dataFile -{ - NSURL *appSupport = [[[NSFileManager defaultManager] URLsForDirectory:NSApplicationSupportDirectory - inDomains:NSUserDomainMask] objectAtIndex:0]; - - appSupport = [[[appSupport URLByAppendingPathComponent:@"ZeroTier"] URLByAppendingPathComponent:@"One"] URLByAppendingPathComponent:@"networkinfo.dat"]; - return appSupport.path; -} - -- (void)internal_updateNetworkInfo -{ - NSMutableArray *networks = [_savedNetworks mutableCopy]; - - for(Network *nw in _receivedNetworks) { - NSInteger index = [self findSavedNetworkWithID:nw.nwid]; - - if(index != NSNotFound) { - [networks setObject:nw atIndexedSubscript:index]; - } - else { - [networks addObject:nw]; - } - } - - [networks sortUsingComparator:^NSComparisonResult(Network *obj1, Network *obj2) { - if(obj1.nwid > obj2.nwid) { - return true; - } - return false; - }]; - - @synchronized(_allNetworks) { - _allNetworks = networks; - } - - [self saveNetworks]; - - NSDictionary *userInfo = [NSDictionary dictionaryWithObject:networks forKey:@"networks"]; - - [[NSNotificationCenter defaultCenter] postNotificationName:NetworkUpdateKey - object:nil - userInfo:userInfo]; -} - -- (NSInteger)findNetworkWithID:(UInt64)networkId -{ - for(int i = 0; i < [_allNetworks count]; ++i) { - Network *nw = [_allNetworks objectAtIndex:i]; - - if(nw.nwid == networkId) { - return i; - } - } - - return NSNotFound; -} - - -- (NSInteger)findSavedNetworkWithID:(UInt64)networkId -{ - for(int i = 0; i < [_savedNetworks count]; ++i) { - Network *nw = [_savedNetworks objectAtIndex:i]; - - if(nw.nwid == networkId) { - return i; - } - } - - return NSNotFound; -} - -- (void)saveNetworks -{ - NSString *filePath = [self dataFile]; - - @synchronized(_allNetworks) { - [NSKeyedArchiver archiveRootObject:_allNetworks toFile:filePath]; - } -} - -@end diff --git a/attic/macui/ZeroTier One/NodeStatus.h b/attic/macui/ZeroTier One/NodeStatus.h deleted file mode 100644 index eab5bfe44..000000000 --- a/attic/macui/ZeroTier One/NodeStatus.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * ZeroTier One - Network Virtualization Everywhere - * Copyright (C) 2011-2016 ZeroTier, Inc. https://www.zerotier.com/ - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#import - -@interface NodeStatus : NSObject - -@property (readonly) NSString *address; -@property (readonly) NSString *publicIdentity; -@property (readonly) BOOL online; -@property (readonly) BOOL tcpFallbackActive; -@property (readonly) int versionMajor; -@property (readonly) int versionMinor; -@property (readonly) int versionRev; -@property (readonly) NSString *version; -@property (readonly) UInt64 clock; - -- (id)initWithJsonData:(NSDictionary*)jsonData; - -@end diff --git a/attic/macui/ZeroTier One/NodeStatus.m b/attic/macui/ZeroTier One/NodeStatus.m deleted file mode 100644 index 3bae3c7da..000000000 --- a/attic/macui/ZeroTier One/NodeStatus.m +++ /dev/null @@ -1,40 +0,0 @@ -/* - * ZeroTier One - Network Virtualization Everywhere - * Copyright (C) 2011-2016 ZeroTier, Inc. https://www.zerotier.com/ - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -#import "NodeStatus.h" - -@implementation NodeStatus - -- (id)initWithJsonData:(NSDictionary*)jsonData -{ - self = [super init]; - - if(self) { - _address = (NSString*)[jsonData objectForKey:@"address"]; - _publicIdentity = (NSString*)[jsonData objectForKey:@"publicIdentity"]; - _online = [(NSNumber*)[jsonData objectForKey:@"online"] boolValue]; - _tcpFallbackActive = [(NSNumber*)[jsonData objectForKey:@"tcpFallbackActive"] boolValue]; - _versionMajor = [(NSNumber*)[jsonData objectForKey:@"versionMajor"] intValue]; - _versionMinor = [(NSNumber*)[jsonData objectForKey:@"versionMinor"] intValue]; - _versionRev = [(NSNumber*)[jsonData objectForKey:@"versionRev"] intValue]; - _version = (NSString*)[jsonData objectForKey:@"version"]; - _clock = [(NSNumber*)[jsonData objectForKey:@"clock"] unsignedLongLongValue]; - } - - return self; -} -@end diff --git a/attic/macui/ZeroTier One/PreferencesViewController.h b/attic/macui/ZeroTier One/PreferencesViewController.h deleted file mode 100644 index 56d0fdb82..000000000 --- a/attic/macui/ZeroTier One/PreferencesViewController.h +++ /dev/null @@ -1,31 +0,0 @@ -/* - * ZeroTier One - Network Virtualization Everywhere - * Copyright (C) 2011-2016 ZeroTier, Inc. https://www.zerotier.com/ - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#import - -@interface PreferencesViewController : NSViewController - -@property (nonatomic, weak) IBOutlet NSButton *startupCheckBox; - -- (IBAction)onStartupCheckBoxChanged:(NSButton*)sender; - -- (BOOL)isLaunchAtStartup; -- (LSSharedFileListItemRef)itemRefInLoginItems; -- (void)setLaunchAtLoginEnabled:(BOOL)enabled; - -@end diff --git a/attic/macui/ZeroTier One/PreferencesViewController.m b/attic/macui/ZeroTier One/PreferencesViewController.m deleted file mode 100644 index 13927fbaf..000000000 --- a/attic/macui/ZeroTier One/PreferencesViewController.m +++ /dev/null @@ -1,112 +0,0 @@ -/* - * ZeroTier One - Network Virtualization Everywhere - * Copyright (C) 2011-2016 ZeroTier, Inc. https://www.zerotier.com/ - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#import "PreferencesViewController.h" - -@interface PreferencesViewController () - -@end - -@implementation PreferencesViewController - -- (void)viewDidLoad { - [super viewDidLoad]; - - if([self isLaunchAtStartup]) { - self.startupCheckBox.state = NSOnState; - } - else { - self.startupCheckBox.state = NSOffState; - } -} - -- (IBAction)onStartupCheckBoxChanged:(NSButton *)sender -{ - if(sender.state == NSOnState) { - [self setLaunchAtLoginEnabled:YES]; - } - else { - [self setLaunchAtLoginEnabled:NO]; - } - -} - -- (void)setLaunchAtLoginEnabled:(BOOL)enabled -{ - LSSharedFileListRef loginItemsRef = LSSharedFileListCreate(NULL, kLSSharedFileListSessionLoginItems, NULL); - - if (enabled) { - // Add the app to the LoginItems list. - CFURLRef appUrl = (__bridge CFURLRef)[NSURL fileURLWithPath:[[NSBundle mainBundle] bundlePath]]; - LSSharedFileListItemRef itemRef = LSSharedFileListInsertItemURL(loginItemsRef, kLSSharedFileListItemLast, NULL, NULL, appUrl, NULL, NULL); - if (itemRef) CFRelease(itemRef); - } - else { - // Remove the app from the LoginItems list. - LSSharedFileListItemRef itemRef = [self itemRefInLoginItems]; - LSSharedFileListItemRemove(loginItemsRef,itemRef); - if (itemRef != nil) CFRelease(itemRef); - } -} - - -- (BOOL)isLaunchAtStartup { - // See if the app is currently in LoginItems. - LSSharedFileListItemRef itemRef = [self itemRefInLoginItems]; - // Store away that boolean. - BOOL isInList = itemRef != nil; - // Release the reference if it exists. - if (itemRef != nil) CFRelease(itemRef); - - return isInList; -} - -- (LSSharedFileListItemRef)itemRefInLoginItems { - LSSharedFileListItemRef itemRef = nil; - - NSString * appPath = [[NSBundle mainBundle] bundlePath]; - - // This will retrieve the path for the application - // For example, /Applications/test.app - CFURLRef url = (__bridge CFURLRef)[NSURL fileURLWithPath:appPath]; - - // Create a reference to the shared file list. - LSSharedFileListRef loginItems = LSSharedFileListCreate(NULL, kLSSharedFileListSessionLoginItems, NULL); - - if (loginItems) { - UInt32 seedValue; - //Retrieve the list of Login Items and cast them to - // a NSArray so that it will be easier to iterate. 
- NSArray *loginItemsArray = (__bridge NSArray *)LSSharedFileListCopySnapshot(loginItems, &seedValue); - for(int i = 0; i< [loginItemsArray count]; i++){ - LSSharedFileListItemRef currentItemRef = (__bridge LSSharedFileListItemRef)[loginItemsArray - objectAtIndex:i]; - //Resolve the item with URL - if (LSSharedFileListItemResolve(currentItemRef, 0, (CFURLRef*) &url, NULL) == noErr) { - NSString * urlPath = [(__bridge NSURL*)url path]; - if ([urlPath compare:appPath] == NSOrderedSame){ - itemRef = currentItemRef; - } - } - } - } - CFRelease(loginItems); - return itemRef; -} - -@end diff --git a/attic/macui/ZeroTier One/PreferencesViewController.xib b/attic/macui/ZeroTier One/PreferencesViewController.xib deleted file mode 100644 index 62aef4c03..000000000 --- a/attic/macui/ZeroTier One/PreferencesViewController.xib +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/attic/macui/ZeroTier One/ServiceCom.h b/attic/macui/ZeroTier One/ServiceCom.h deleted file mode 100644 index 17b738e4d..000000000 --- a/attic/macui/ZeroTier One/ServiceCom.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * ZeroTier One - Network Virtualization Everywhere - * Copyright (C) 2011-2016 ZeroTier, Inc. https://www.zerotier.com/ - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#import - -@class NodeStatus; -@class Network; - -@interface ServiceCom : NSObject -{ - NSString *baseURL; - NSURLSession *session; - BOOL _isQuitting; - BOOL _resetKey; -} -+ (ServiceCom*)sharedInstance; - -- (id)init; - -- (void)getNetworklist:(void (^)(NSArray*))completionHandler error:(NSError* __autoreleasing *)error; -- (void)getNodeStatus:(void (^)(NodeStatus*))completionHandler error:(NSError*__autoreleasing*)error; -- (void)joinNetwork:(NSString*)networkId allowManaged:(BOOL)allowManaged allowGlobal:(BOOL)allowGlobal allowDefault:(BOOL)allowDefault allowDNS:(BOOL)allowDNS error:(NSError*__autoreleasing*)error; -- (void)leaveNetwork:(NSString*)networkId error:(NSError*__autoreleasing*)error; - -@end diff --git a/attic/macui/ZeroTier One/ServiceCom.m b/attic/macui/ZeroTier One/ServiceCom.m deleted file mode 100644 index 55d67741a..000000000 --- a/attic/macui/ZeroTier One/ServiceCom.m +++ /dev/null @@ -1,522 +0,0 @@ -/* - * ZeroTier One - Network Virtualization Everywhere - * Copyright (C) 2011-2016 ZeroTier, Inc. https://www.zerotier.com/ - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#import "ServiceCom.h" -#import "AuthtokenCopy.h" -#import "Network.h" -#import "NodeStatus.h" -@import AppKit; - -@interface ServiceCom (Private) - -- (NSString*)key; - -@end - -@implementation ServiceCom - -+ (ServiceCom*)sharedInstance { - static ServiceCom *sc = nil; - static dispatch_once_t onceToken; - dispatch_once(&onceToken, ^{ - sc = [[ServiceCom alloc] init]; - }); - return sc; -} - -- (id)init -{ - self = [super init]; - if(self) { - baseURL = @"http://127.0.0.1:9993"; - session = [NSURLSession sessionWithConfiguration:[NSURLSessionConfiguration ephemeralSessionConfiguration]]; - _isQuitting = NO; - _resetKey = NO; - } - - return self; -} - -- (NSString*)key:(NSError* __autoreleasing *)err -{ - static NSString *k = nil; - static NSUInteger resetCount = 0; - - @synchronized (self) { - if (_isQuitting) { - return @""; - } - - if (_resetKey && k != nil) { - k = nil; - ++resetCount; - NSLog(@"ResetCount: %lu", (unsigned long)resetCount); - if (resetCount > 10) { - [[NSOperationQueue mainQueue] addOperationWithBlock:^{ - NSAlert *alert = [NSAlert alertWithMessageText:@"Error obtaining Auth Token" - defaultButton:@"Quit" - alternateButton:@"Retry" - otherButton:nil - informativeTextWithFormat:@"Please ensure ZeroTier is installed correctly"]; - alert.alertStyle = NSCriticalAlertStyle; - - NSModalResponse res; - if (!_isQuitting) { - res = [alert runModal]; - } - else { - return; - } - - if(res == 1) { - _isQuitting = YES; - [NSApp performSelector:@selector(terminate:) withObject:nil afterDelay:0.0]; - } - }]; - return @""; - } - } - - if (k == nil) { - NSError *error = nil; - NSURL *appSupportDir = [[NSFileManager defaultManager] URLForDirectory:NSApplicationSupportDirectory inDomain:NSUserDomainMask appropriateForURL:nil create:false error:&error]; - - if (error) { - NSLog(@"Error: %@", error); - return @""; - } - - appSupportDir = [[appSupportDir URLByAppendingPathComponent:@"ZeroTier"] URLByAppendingPathComponent:@"One"]; - NSURL *authtokenURL = [appSupportDir URLByAppendingPathComponent:@"authtoken.secret"]; - - if (!_resetKey && [[NSFileManager defaultManager] fileExistsAtPath:[authtokenURL path]]) { - k = [NSString stringWithContentsOfURL:authtokenURL - encoding:NSUTF8StringEncoding - error:&error]; - - k = [k stringByReplacingOccurrencesOfString:@"\n" withString:@""]; - - if (error) { - NSLog(@"Error: %@", error); - k = nil; - *err = error; - return @""; - } - } - else { - _resetKey = NO; - NSURL *sysAppSupportDir = [[NSFileManager defaultManager] URLForDirectory:NSApplicationSupportDirectory inDomain:NSSystemDomainMask appropriateForURL:nil create:false error:nil]; - - sysAppSupportDir = [[sysAppSupportDir URLByAppendingPathComponent:@"ZeroTier"] URLByAppendingPathComponent:@"One"]; - NSURL *sysAuthtokenURL = [sysAppSupportDir URLByAppendingPathComponent:@"authtoken.secret"]; - - if(![[NSFileManager defaultManager] fileExistsAtPath:[sysAuthtokenURL path]]) { - - } - - [[NSFileManager defaultManager] createDirectoryAtURL:appSupportDir - withIntermediateDirectories:YES - attributes:nil - error:&error]; - - if (error) { - NSLog(@"Error: %@", error); - *err = error; - k = nil; - return @""; - } - - AuthorizationRef authRef; - OSStatus status = AuthorizationCreate(nil, nil, kAuthorizationFlagDefaults, &authRef); - - if (status != errAuthorizationSuccess) { - NSLog(@"Authorization Failed! 
%d", status); - - NSDictionary *userInfo = @{ - NSLocalizedDescriptionKey: NSLocalizedString(@"Couldn't create AuthorizationRef", nil), - }; - *err = [NSError errorWithDomain:@"com.zerotier.one" code:-1 userInfo:userInfo]; - - return @""; - } - - AuthorizationItem authItem; - authItem.name = kAuthorizationRightExecute; - authItem.valueLength = 0; - authItem.flags = 0; - - AuthorizationRights authRights; - authRights.count = 1; - authRights.items = &authItem; - - AuthorizationFlags authFlags = kAuthorizationFlagDefaults | - kAuthorizationFlagInteractionAllowed | - kAuthorizationFlagPreAuthorize | - kAuthorizationFlagExtendRights; - - status = AuthorizationCopyRights(authRef, &authRights, nil, authFlags, nil); - - if (status != errAuthorizationSuccess) { - NSLog(@"Authorization Failed! %d", status); - NSDictionary *userInfo = @{ - NSLocalizedDescriptionKey: NSLocalizedString(@"Couldn't copy authorization rights", nil), - }; - *err = [NSError errorWithDomain:@"com.zerotier.one" code:-1 userInfo:userInfo]; - return @""; - } - - NSString *localKey = getAdminAuthToken(authRef); - AuthorizationFree(authRef, kAuthorizationFlagDestroyRights); - - if (localKey != nil && [localKey lengthOfBytesUsingEncoding:NSUTF8StringEncoding] > 0) { - k = localKey; - - [localKey writeToURL:authtokenURL - atomically:YES - encoding:NSUTF8StringEncoding - error:&error]; - - if (error) { - NSLog(@"Error writing token to disk: %@", error); - *err = error; - } - } - } - } - - if (k == nil) { - NSDictionary *userInfo = @{ - NSLocalizedDescriptionKey: NSLocalizedString(@"Unknown error finding authorization key", nil), - }; - *err = [NSError errorWithDomain:@"com.zerotier.one" code:-1 userInfo:userInfo]; - - return @""; - } - } - return k; -} - -- (void)getNetworklist:(void (^)(NSArray *))completionHandler error:(NSError *__autoreleasing*)error -{ - NSString* key = [self key:error]; - if(*error) { - return; - } - - NSString *urlString = [[baseURL stringByAppendingString:@"/network?auth="] stringByAppendingString:key]; - - NSURL *url = [NSURL URLWithString:urlString]; - NSURLSessionDataTask *task = - [session dataTaskWithURL:url - completionHandler:^(NSData * _Nullable data, NSURLResponse * _Nullable response, NSError * _Nullable err) { - - if (err) { - [[NSOperationQueue mainQueue] addOperationWithBlock:^{ - NSAlert *alert = [NSAlert alertWithError:err]; - alert.alertStyle = NSCriticalAlertStyle; - [alert addButtonWithTitle:@"Quit"]; - [alert addButtonWithTitle:@"Retry"]; - - NSModalResponse res; - if (!_isQuitting) { - res = [alert runModal]; - } - else { - return; - } - - if(res == NSAlertFirstButtonReturn) { - [NSApp performSelector:@selector(terminate:) withObject:nil afterDelay:0.0]; - _isQuitting = YES; - } - }]; - return; - } - - NSHTTPURLResponse *httpResponse = (NSHTTPURLResponse*)response; - NSInteger status = [httpResponse statusCode]; - - NSError *err2; - - if (status == 200) { - NSArray *json = [NSJSONSerialization JSONObjectWithData:data - options:0 - error:&err2]; - if (err) { - NSLog(@"Error fetching network list: %@", err2); - - [[NSOperationQueue mainQueue] addOperationWithBlock:^{ - NSAlert *alert = [NSAlert alertWithError:err2]; - alert.alertStyle = NSCriticalAlertStyle; - [alert addButtonWithTitle:@"Quit"]; - [alert addButtonWithTitle:@"Retry"]; - - NSModalResponse res; - if (!_isQuitting) { - res = [alert runModal]; - } - else { - return; - } - - if(res == NSAlertFirstButtonReturn) { - _isQuitting = YES; - [NSApp performSelector:@selector(terminate:) withObject:nil afterDelay:0.0]; - } - }]; - return; 
- } - - NSMutableArray *networks = [[NSMutableArray alloc] init]; - for(NSDictionary *dict in json) { - [networks addObject:[[Network alloc] initWithJsonData:dict]]; - } - - completionHandler(networks); - } - else if (status == 401) { - self->_resetKey = YES; - } - }]; - [task resume]; -} - -- (void)getNodeStatus:(void (^)(NodeStatus*))completionHandler error:(NSError*__autoreleasing*)error -{ - NSString *key = [self key:error]; - if(*error) { - return; - } - - NSString *urlString = [[baseURL stringByAppendingString:@"/status?auth="] stringByAppendingString:key]; - - NSURL *url = [NSURL URLWithString:urlString]; - NSURLSessionDataTask *task = - [session dataTaskWithURL:url - completionHandler:^(NSData * _Nullable data, NSURLResponse * _Nullable response, NSError * _Nullable err) { - - if(err) { - [[NSOperationQueue mainQueue] addOperationWithBlock:^{ - NSAlert *alert = [NSAlert alertWithError:err]; - alert.alertStyle = NSCriticalAlertStyle; - [alert addButtonWithTitle:@"Quit"]; - [alert addButtonWithTitle:@"Retry"]; - - NSModalResponse res; - if (!_isQuitting) { - res = [alert runModal]; - } - else { - return; - } - - if(res == NSAlertFirstButtonReturn) { - [NSApp performSelector:@selector(terminate:) withObject:nil afterDelay:0.0]; - _isQuitting = YES; - } - }]; - return; - } - - NSHTTPURLResponse *httpResponse = (NSHTTPURLResponse*)response; - NSInteger status = [httpResponse statusCode]; - - NSError *err2; - if(status == 200) { - NSDictionary *json = [NSJSONSerialization JSONObjectWithData:data - options:0 - error:&err2]; - - if(err2) { - NSLog(@"Error fetching node status: %@", err2); - [[NSOperationQueue mainQueue] addOperationWithBlock:^{ - NSAlert *alert = [NSAlert alertWithError:err2]; - alert.alertStyle = NSCriticalAlertStyle; - [alert addButtonWithTitle:@"Quit"]; - [alert addButtonWithTitle:@"Retry"]; - - NSModalResponse res; - if (!_isQuitting) { - res = [alert runModal]; - } - else { - return; - } - - if(res == NSAlertFirstButtonReturn) { - [NSApp performSelector:@selector(terminate:) withObject:nil afterDelay:0.0]; - _isQuitting = YES; - } - }]; - return; - } - - NodeStatus *status = [[NodeStatus alloc] initWithJsonData:json]; - - completionHandler(status); - } - else if (status == 401) { - self->_resetKey = YES; - } - }]; - [task resume]; -} - -- (void)joinNetwork:(NSString*)networkId - allowManaged:(BOOL)allowManaged - allowGlobal:(BOOL)allowGlobal - allowDefault:(BOOL)allowDefault - allowDNS:(BOOL)allowDNS - error:(NSError *__autoreleasing*)error -{ - NSString *key = [self key:error]; - if(*error) { - return; - } - - NSString *urlString = [[[[baseURL stringByAppendingString:@"/network/"] - stringByAppendingString:networkId] - stringByAppendingString:@"?auth="] - stringByAppendingString:key]; - - NSURL *url = [NSURL URLWithString:urlString]; - - NSMutableDictionary *jsonDict = [NSMutableDictionary dictionary]; - [jsonDict setObject:[NSNumber numberWithBool:allowManaged] forKey:@"allowManaged"]; - [jsonDict setObject:[NSNumber numberWithBool:allowGlobal] forKey:@"allowGlobal"]; - [jsonDict setObject:[NSNumber numberWithBool:allowDefault] forKey:@"allowDefault"]; - [jsonDict setObject:[NSNumber numberWithBool:allowDNS] forKey:@"allowDNS"]; - - NSError *err = nil; - - NSData *json = [NSJSONSerialization dataWithJSONObject:jsonDict - options:0 - error:&err]; - - if(err) { - NSLog(@"Error creating json data: %@", err); - *error = err; - return; - } - - NSMutableURLRequest *request = [NSMutableURLRequest requestWithURL:url]; - request.HTTPMethod = @"POST"; - request.HTTPBody = 
json; - [request setValue:@"application/json" forHTTPHeaderField:@"Content-Type"]; - - NSURLSessionDataTask *task = - [session dataTaskWithRequest:request completionHandler:^(NSData * _Nullable data, NSURLResponse * _Nullable response, NSError * _Nullable err) { - if(err) { - NSLog(@"Error posting join request: %@", err); - [[NSOperationQueue mainQueue] addOperationWithBlock:^{ - NSAlert *alert = [NSAlert alertWithError:err]; - alert.alertStyle = NSCriticalAlertStyle; - [alert addButtonWithTitle:@"Quit"]; - [alert addButtonWithTitle:@"Retry"]; - - NSModalResponse res; - if (!_isQuitting) { - res = [alert runModal]; - } - else { - return; - } - - if(res == NSAlertFirstButtonReturn) { - [NSApp performSelector:@selector(terminate:) withObject:nil afterDelay:0.0]; - _isQuitting = YES; - } - }]; - } - - NSHTTPURLResponse *httpResponse = (NSHTTPURLResponse*)response; - NSInteger status = [httpResponse statusCode]; - - if(status == 200) { - NSLog(@"join ok"); - } - else if (status == 401) { - self->_resetKey = YES; - } - else { - NSLog(@"join error: %ld", (long)status); - } - }]; - [task resume]; -} - -- (void)leaveNetwork:(NSString*)networkId error:(NSError*__autoreleasing*)error -{ - NSString *key = [self key:error]; - if(*error) { - return; - } - - NSString *urlString = [[[[baseURL stringByAppendingString:@"/network/"] - stringByAppendingString:networkId] - stringByAppendingString:@"?auth="] - stringByAppendingString:key]; - - NSURL *url = [NSURL URLWithString:urlString]; - - NSMutableURLRequest *request = [NSMutableURLRequest requestWithURL:url]; - request.HTTPMethod = @"DELETE"; - - NSURLSessionDataTask *task = - [session dataTaskWithRequest:request completionHandler:^(NSData * _Nullable data, NSURLResponse * _Nullable response, NSError * _Nullable err) { - if(err) { - NSLog(@"Error posting delete request: %@", err); - [[NSOperationQueue mainQueue] addOperationWithBlock:^{ - NSAlert *alert = [NSAlert alertWithError:err]; - alert.alertStyle = NSCriticalAlertStyle; - [alert addButtonWithTitle:@"Quit"]; - [alert addButtonWithTitle:@"Retry"]; - - NSModalResponse res; - if (!_isQuitting) { - res = [alert runModal]; - } - else { - return; - } - - if(res == NSAlertFirstButtonReturn) { - [NSApp performSelector:@selector(terminate:) withObject:nil afterDelay:0.0]; - _isQuitting = YES; - } - }]; - return; - } - - NSHTTPURLResponse *httpResponse = (NSHTTPURLResponse*)response; - NSInteger status = httpResponse.statusCode; - - if(status == 200) { - NSLog(@"leave ok"); - } - else if (status == 401) { - self->_resetKey = YES; - } - else { - NSLog(@"leave error: %ld", status); - } - }]; - [task resume]; -} - -@end diff --git a/attic/macui/ZeroTier One/ShowNetworksViewController.h b/attic/macui/ZeroTier One/ShowNetworksViewController.h deleted file mode 100644 index 6138958d3..000000000 --- a/attic/macui/ZeroTier One/ShowNetworksViewController.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * ZeroTier One - Network Virtualization Everywhere - * Copyright (C) 2011-2016 ZeroTier, Inc. https://www.zerotier.com/ - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#import - -@class NetworkMonitor; -@class Network; - -@interface ShowNetworksViewController : NSViewController - -@property (nonatomic) NSMutableArray *networkList; -@property (nonatomic) NetworkMonitor *netMonitor; -@property (nonatomic) BOOL visible; - -@property (weak, nonatomic) IBOutlet NSTableView *tableView; - -- (void)deleteNetworkFromList:(NSString*)nwid; -- (void)setNetworks:(NSArray*)list; - - -@end diff --git a/attic/macui/ZeroTier One/ShowNetworksViewController.m b/attic/macui/ZeroTier One/ShowNetworksViewController.m deleted file mode 100644 index acd29479a..000000000 --- a/attic/macui/ZeroTier One/ShowNetworksViewController.m +++ /dev/null @@ -1,184 +0,0 @@ -/* - * ZeroTier One - Network Virtualization Everywhere - * Copyright (C) 2011-2016 ZeroTier, Inc. https://www.zerotier.com/ - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#import "ShowNetworksViewController.h" -#import "NetworkMonitor.h" -#import "NetworkInfoCell.h" -#import "Network.h" - -BOOL hasNetworkWithID(NSArray *list, UInt64 nwid) -{ - for(Network *n in list) { - if(n.nwid == nwid) { - return YES; - } - } - - return NO; -} - -@interface ShowNetworksViewController () - -@end - -@implementation ShowNetworksViewController - -- (void)viewDidLoad { - [super viewDidLoad]; - - self.networkList = [NSMutableArray array]; - - [self.tableView setDelegate:self]; - [self.tableView setDataSource:self]; - [self.tableView setBackgroundColor:[NSColor clearColor]]; -} - -- (void)viewWillAppear { - [super viewWillAppear]; - self.visible = YES; -} - -- (void)viewWillDisappear { - [super viewWillDisappear]; - self.visible = NO; -} - -- (NSInteger)findNetworkWithID:(UInt64)networkId -{ - for(int i = 0; i < [_networkList count]; ++i) { - Network *nw = [_networkList objectAtIndex:i]; - - if(nw.nwid == networkId) { - return i; - } - } - - return NSNotFound; -} - - -- (void)deleteNetworkFromList:(NSString *)nwid { - [self.netMonitor deleteSavedNetwork:nwid]; - - UInt64 netid = 0; - NSScanner *scanner = [NSScanner scannerWithString:nwid]; - [scanner scanHexLongLong:&netid]; - for (Network *n in _networkList) { - if (n.nwid == netid) { - NSInteger index = [self findNetworkWithID:netid]; - - if (index != NSNotFound) { - [_networkList removeObjectAtIndex:index]; - [_tableView reloadData]; - } - } - } -} - -- (void)setNetworks:(NSArray *)list { - for (Network *n in list) { - if ([_networkList containsObject:n]) { - // don't need to do anything here. Already an identical object in the list - continue; - } - else { - // network not in the list based on equality. Did an object change? or is it a new item? 
- if (hasNetworkWithID(_networkList, n.nwid)) { - - for (int i = 0; i < [_networkList count]; ++i) { - Network *n2 = [_networkList objectAtIndex:i]; - if (n.nwid == n2.nwid) - { - [_networkList replaceObjectAtIndex:i withObject:n]; - [_tableView reloadDataForRowIndexes:[NSIndexSet indexSetWithIndex:i] - columnIndexes:[NSIndexSet indexSetWithIndex:0]]; - } - } - } - else { - [_networkList addObject:n]; - [_tableView reloadData]; - } - } - } -} - -- (NSInteger)numberOfRowsInTableView:(NSTableView *)tableView { - return [_networkList count]; -} - -- (NSView*)tableView:(NSTableView *)tableView viewForTableColumn:(NSTableColumn *)tableColumn row:(NSInteger)row -{ - NetworkInfoCell *cell = (NetworkInfoCell*)[tableView makeViewWithIdentifier:@"NetworkInfoCell" - owner:nil]; - Network *network = [_networkList objectAtIndex:row]; - cell.parent = self; - cell.networkIdField.stringValue = [NSString stringWithFormat:@"%10llx", network.nwid]; - cell.networkNameField.stringValue = network.name; - cell.statusField.stringValue = [network statusString]; - cell.typeField.stringValue = [network typeString]; - cell.mtuField.stringValue = [NSString stringWithFormat:@"%d", network.mtu]; - cell.macField.stringValue = network.mac; - cell.broadcastField.stringValue = network.broadcastEnabled ? @"ENABLED" : @"DISABLED"; - cell.bridgingField.stringValue = network.bridge ? @"ENABLED" : @"DISABLED"; - cell.deviceField.stringValue = network.portDeviceName; - - if(network.connected) { - cell.connectedCheckbox.state = NSOnState; - - if(network.allowDefault) { - cell.allowDefault.enabled = YES; - cell.allowDefault.state = NSOnState; - } - else { - cell.allowDefault.state = NSOffState; - - if([Network defaultRouteExists:_networkList]) { - cell.allowDefault.enabled = NO; - } - else { - cell.allowDefault.enabled = YES; - } - } - - cell.allowGlobal.enabled = YES; - cell.allowManaged.enabled = YES; - cell.allowDNS.enabled = YES; - } - else { - cell.connectedCheckbox.state = NSOffState; - cell.allowDefault.enabled = NO; - cell.allowGlobal.enabled = NO; - cell.allowManaged.enabled = NO; - cell.allowDNS.enabled = NO; - } - - cell.allowGlobal.state = network.allowGlobal ? NSOnState : NSOffState; - cell.allowManaged.state = network.allowManaged ? NSOnState : NSOffState; - cell.allowDNS.state = network.allowDNS ? 
NSOnState : NSOffState; - - cell.addressesField.stringValue = @""; - - for(NSString *addr in network.assignedAddresses) { - cell.addressesField.stringValue = [[cell.addressesField.stringValue stringByAppendingString:addr] stringByAppendingString:@"\n"]; - } - - return cell; -} - -@end diff --git a/attic/macui/ZeroTier One/ShowNetworksViewController.xib b/attic/macui/ZeroTier One/ShowNetworksViewController.xib deleted file mode 100644 index 485adb0c0..000000000 --- a/attic/macui/ZeroTier One/ShowNetworksViewController.xib +++ /dev/null @@ -1,394 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/attic/macui/ZeroTier One/ZeroTier One.entitlements b/attic/macui/ZeroTier One/ZeroTier One.entitlements deleted file mode 100644 index 0c67376eb..000000000 --- a/attic/macui/ZeroTier One/ZeroTier One.entitlements +++ /dev/null @@ -1,5 +0,0 @@ - - - - - diff --git a/attic/macui/ZeroTier One/ZeroTierIcon.icns b/attic/macui/ZeroTier One/ZeroTierIcon.icns deleted file mode 100644 index 17e60d587..000000000 Binary files a/attic/macui/ZeroTier One/ZeroTierIcon.icns and /dev/null differ diff --git a/attic/macui/ZeroTier One/about.html b/attic/macui/ZeroTier One/about.html deleted file mode 100644 index 09f6eb36a..000000000 --- a/attic/macui/ZeroTier One/about.html +++ /dev/null @@ -1,58 +0,0 @@ - - - - - -
-Welcome to ZeroTier
-
-Getting Started
-
-Networks are identified by 16-digit network IDs. If someone invited you to join theirs, you probably received one. If not, you can create your own at my.zerotier.com or by running your own network controller.
-
-Your computer is identified by a 10-digit ZeroTier address. You can find it at the top of the ZeroTier app's pull-down menu or by typing "sudo zerotier-cli info" in a terminal window. This number is unique to your system and is how network administrators can recognize you. If someone invited you to a network, give them this ID so they can authorize you to join.
-
-Starting, Stopping, and Uninstalling
-
-The ZeroTier service is separate from the UI app and starts on system boot. The app can be started on login or only when needed. To stop the ZeroTier service, use:
-
-      sudo launchctl unload /Library/LaunchDaemons/com.zerotier.one.plist
-
-Replace "unload" with "load" to start it again.
-
-ZeroTier can be uninstalled with:
-
-      sudo '/Library/Application Support/ZeroTier/One/uninstall.sh'
-
-For more information, visit zerotier.com.
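For context, the commands referenced in the deleted help text above form a simple check/stop/start sequence on macOS; this is only a sketch assuming the standard launchd plist and install paths named in that text:

      # show the node's status and its 10-digit ZeroTier address
      sudo zerotier-cli info
      # stop the background service
      sudo launchctl unload /Library/LaunchDaemons/com.zerotier.one.plist
      # start it again
      sudo launchctl load /Library/LaunchDaemons/com.zerotier.one.plist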
- - \ No newline at end of file diff --git a/attic/macui/ZeroTier One/main.m b/attic/macui/ZeroTier One/main.m deleted file mode 100644 index 108a6bd18..000000000 --- a/attic/macui/ZeroTier One/main.m +++ /dev/null @@ -1,23 +0,0 @@ -/* - * ZeroTier One - Network Virtualization Everywhere - * Copyright (C) 2011-2016 ZeroTier, Inc. https://www.zerotier.com/ - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#import - -int main(int argc, const char * argv[]) { - return NSApplicationMain(argc, argv); -} diff --git a/attic/world/build.sh b/attic/world/build.sh index d8800cd4c..f3bcfabc9 100755 --- a/attic/world/build.sh +++ b/attic/world/build.sh @@ -1,3 +1,3 @@ #!/bin/bash -c++ -std=c++11 -I../.. -I.. -g -o mkworld ../../node/C25519.cpp ../../node/Salsa20.cpp ../../node/SHA512.cpp ../../node/Identity.cpp ../../node/Utils.cpp ../../node/InetAddress.cpp ../../osdep/OSUtils.cpp mkworld.cpp -lm +c++ -std=c++11 -I../.. -I../../ext -I.. -g -o mkworld ../../node/C25519.cpp ../../node/Salsa20.cpp ../../node/SHA512.cpp ../../node/Identity.cpp ../../node/Utils.cpp ../../node/InetAddress.cpp ../../osdep/OSUtils.cpp mkworld.cpp -lm diff --git a/attic/world/mkworld.cpp b/attic/world/mkworld.cpp index 6b9bbe8d0..ed2c499a0 100644 --- a/attic/world/mkworld.cpp +++ b/attic/world/mkworld.cpp @@ -43,7 +43,7 @@ #include #include -#include +#include #include #include #include @@ -54,7 +54,7 @@ int main(int argc,char **argv) { std::string previous,current; if ((!OSUtils::readFile("previous.c25519",previous))||(!OSUtils::readFile("current.c25519",current))) { - C25519::Pair np(C25519::generate()); + ECC::Pair np(ECC::generate()); previous = std::string(); previous.append((const char *)np.pub.data,ZT_C25519_PUBLIC_KEY_LEN); previous.append((const char *)np.priv.data,ZT_C25519_PRIVATE_KEY_LEN); @@ -68,10 +68,10 @@ int main(int argc,char **argv) fprintf(stderr,"FATAL: previous.c25519 or current.c25519 empty or invalid" ZT_EOL_S); return 1; } - C25519::Pair previousKP; + ECC::Pair previousKP; memcpy(previousKP.pub.data,previous.data(),ZT_C25519_PUBLIC_KEY_LEN); memcpy(previousKP.priv.data,previous.data() + ZT_C25519_PUBLIC_KEY_LEN,ZT_C25519_PRIVATE_KEY_LEN); - C25519::Pair currentKP; + ECC::Pair currentKP; memcpy(currentKP.pub.data,current.data(),ZT_C25519_PUBLIC_KEY_LEN); memcpy(currentKP.priv.data,current.data() + ZT_C25519_PUBLIC_KEY_LEN,ZT_C25519_PRIVATE_KEY_LEN); diff --git a/ci/Dockerfile-build.deb b/ci/Dockerfile-build.deb new file mode 100644 index 000000000..09ed717f8 --- /dev/null +++ b/ci/Dockerfile-build.deb @@ -0,0 +1,13 @@ +ARG ZT_NAME +FROM 084037375216.dkr.ecr.us-east-2.amazonaws.com/${ZT_NAME}-builder as builder +WORKDIR /work/build +COPY . . +RUN pwd +RUN ls -la . 
+RUN make clean +RUN make debian +RUN ls -ls /work + +FROM scratch AS export +ARG ZT_NAME +COPY --from=builder /work/*.deb ./${ZT_NAME}/ diff --git a/ci/Dockerfile-build.el6 b/ci/Dockerfile-build.el6 new file mode 100644 index 000000000..c5848c984 --- /dev/null +++ b/ci/Dockerfile-build.el6 @@ -0,0 +1,36 @@ +ARG DOCKER_ARCH +FROM --platform=linux/${DOCKER_ARCH} alpine:edge AS builder + +RUN apk update +RUN apk add curl +RUN apk add bash +RUN apk add file +RUN apk add rust +RUN apk add cargo +RUN apk add make +RUN apk add cmake +RUN apk add clang +RUN apk add openssl-dev +RUN apk add linux-headers +RUN apk add build-base +RUN apk add openssl-libs-static + +COPY . . +RUN ZT_STATIC=1 make +RUN ls -la + +ARG DOCKER_ARCH +FROM --platform=linux/${DOCKER_ARCH} centos:6 AS stage +WORKDIR /root/rpmbuild/BUILD +COPY . . +COPY --from=builder zerotier-* ./ +RUN curl https://gist.githubusercontent.com/someara/b363002ba6e57b3c474dd027d4daef85/raw/4ac5534139752fc92fbe1a53599a390214f69615/el6%2520vault --output /etc/yum.repos.d/CentOS-Base.repo +RUN uname -a +RUN yum -y install make gcc rpm-build +RUN pwd +RUN ls -la +RUN make redhat + +FROM scratch AS export +ARG ZT_NAME +COPY --from=stage /root/rpmbuild/RPMS/*/*.rpm ./${ZT_NAME}/ diff --git a/ci/Dockerfile-build.rpm b/ci/Dockerfile-build.rpm new file mode 100644 index 000000000..cab00a06d --- /dev/null +++ b/ci/Dockerfile-build.rpm @@ -0,0 +1,9 @@ +ARG ZT_NAME +FROM 084037375216.dkr.ecr.us-east-2.amazonaws.com/${ZT_NAME}-builder as builder +WORKDIR /root/rpmbuild/BUILD +COPY . . +RUN make redhat + +FROM scratch AS export +ARG ZT_NAME +COPY --from=builder /root/rpmbuild/RPMS/*/*.rpm ./${ZT_NAME}/ diff --git a/ci/Dockerfile-test.deb b/ci/Dockerfile-test.deb new file mode 100644 index 000000000..7c709a863 --- /dev/null +++ b/ci/Dockerfile-test.deb @@ -0,0 +1,13 @@ +ARG ZT_NAME +FROM 084037375216.dkr.ecr.us-east-2.amazonaws.com/${ZT_NAME}-tester +ARG BASEURL +ARG VERSION +ARG DEB_ARCH +ARG ZT_NAME +ARG DISTRO +RUN curl -s http://${BASEURL}/key.gpg -o /etc/apt/trusted.gpg.d/zerotier.gpg +RUN echo "deb [arch=${DEB_ARCH} signed-by=/etc/apt/trusted.gpg.d/zerotier.gpg] http://${BASEURL}/${DISTRO} ${ZT_NAME} main" > /etc/apt/sources.list.d/zerotier.list +RUN apt-get -qq update +RUN apt-get -qq install zerotier-one=${VERSION} + +RUN ldd $(which zerotier-cli) diff --git a/ci/Dockerfile-test.el6 b/ci/Dockerfile-test.el6 new file mode 100644 index 000000000..499667313 --- /dev/null +++ b/ci/Dockerfile-test.el6 @@ -0,0 +1,4 @@ +ARG DOCKER_ARCH +FROM --platform=linux/${DOCKER_ARCH} centos:6 +RUN printf "[C6.10-base]\nname=CentOS-6.10 - Base\nbaseurl=http://vault.epel.cloud/6.10/os/\$basearch/\ngpgcheck=1\ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6\nenabled=1\nmetadata_expire=never\n" > /etc/yum.repos.d/CentOS-Base.repo +RUN yum -y install curl diff --git a/ci/Dockerfile-test.rpm b/ci/Dockerfile-test.rpm new file mode 100644 index 000000000..6a98607b9 --- /dev/null +++ b/ci/Dockerfile-test.rpm @@ -0,0 +1,17 @@ +ARG ZT_NAME +FROM 084037375216.dkr.ecr.us-east-2.amazonaws.com/${ZT_NAME}-tester +ARG BASEURL +ARG VERSION +ARG DEB_ARCH +ARG ZT_NAME +ARG DISTRO +ARG DNF_ARCH +RUN curl -s http://${BASEURL}/key.asc -o /etc/pki/rpm-gpg/RPM-GPG-KEY-zerotier +RUN rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-zerotier +RUN rpm -q gpg-pubkey --qf '%{NAME}-%{VERSION}-%{RELEASE}\t%{SUMMARY}\n' +RUN printf "[zerotier]\nname=zerotier\nbaseurl=http://${BASEURL}/${DISTRO}/${ZT_NAME}/$basearch/\nenabled=1\ngpgcheck=1\ngpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-zerotier\n" > 
/etc/yum.repos.d/zerotier.repo + +# RUN yum -v repolist +RUN setarch ${DNF_ARCH} yum -y install zerotier-one-${VERSION} +RUN file $(which zerotier-cli) +RUN ldd $(which zerotier-cli) diff --git a/ci/Dockerfile.sid b/ci/Dockerfile.sid deleted file mode 100644 index 4af604aac..000000000 --- a/ci/Dockerfile.sid +++ /dev/null @@ -1,7 +0,0 @@ -FROM registry.sean.farm/sid-builder as stage -COPY . . -RUN /usr/bin/make -j 8 - -FROM scratch AS export -COPY --from=stage /zerotier-one . -COPY --from=stage /zerotier-cli . diff --git a/ci/scripts/build.sh b/ci/scripts/build.sh index e0b6ce6b7..a4b8ca94b 100755 --- a/ci/scripts/build.sh +++ b/ci/scripts/build.sh @@ -2,27 +2,48 @@ set -euo pipefail IFS=$'\n\t' -export GOOS=$1 -export GOARCH=$2 -export VERSION=$3 -export DOCKER_BUILDKIT=1 +ZT_NAME="$1" ; shift +DISTRO="$1" ; shift +ZT_ISA="$1" ; shift +VERSION="$1" ; shift +BUILD_EVENT="$1" ; shift +source "$(dirname $0)/lib.sh" + +if [ -f "ci/Dockerfile-build.${ZT_NAME}" ]; then + DOCKERFILE="ci/Dockerfile-build.${ZT_NAME}" +else + DOCKERFILE="ci/Dockerfile-build.${PKGFMT}" +fi + +echo "#~~~~~~~~~~~~~~~~~~~~" +echo "$0 variables:" echo "nproc: $(nproc)" +echo "ZT_NAME: ${ZT_NAME}" +echo "DISTRO: ${DISTRO}" +echo "ZT_ISA: ${ZT_ISA}" +echo "VERSION: ${VERSION}" +echo "BUILD_EVENT: ${BUILD_EVENT}" +echo "DOCKER_ARCH: ${DOCKER_ARCH}" +echo "DNF_ARCH: ${DNF_ARCH}" +echo "RUST_TRIPLET: ${RUST_TRIPLET}" +echo "PKGFMT: ${PKGFMT}" +echo "PWD: ${PWD}" +echo "DOCKERFILE: ${DOCKERFILE}" +echo "#~~~~~~~~~~~~~~~~~~~~" -case $GOARCH in - armv5) - export ARCH=arm/v5 - ;; - armv7) - export ARCH=arm/v7 - ;; - arm64) - export ARCH=arm64/v8 - ;; - *) - export ARCH=$GOARCH - ;; -esac +make munge_rpm zerotier-one.spec VERSION=${VERSION} +make munge_deb debian/changelog VERSION=${VERSION} -docker run --privileged --rm tonistiigi/binfmt --install all -docker buildx build --platform ${GOOS}/${ARCH} -f ci/Dockerfile.sid --target export -t test . --output out/${GOOS}/${GOARCH} +docker buildx build \ + --no-cache=true \ + --build-arg ZT_NAME="${ZT_NAME}" \ + --build-arg RUST_TRIPLET="${RUST_TRIPLET}" \ + --build-arg DOCKER_ARCH="${DOCKER_ARCH}" \ + --build-arg DNF_ARCH="${DNF_ARCH}" \ + --platform linux/${DOCKER_ARCH} \ + -f ${DOCKERFILE} \ + -t build \ + . \ + --output type=local,dest=. 
\ + --target export diff --git a/ci/scripts/lib.sh b/ci/scripts/lib.sh new file mode 100755 index 000000000..43c35762b --- /dev/null +++ b/ci/scripts/lib.sh @@ -0,0 +1,63 @@ + +case $ZT_NAME in + el*|fc*|amzn*) + export PKGFMT=rpm + ;; + *) + export PKGFMT=deb +esac + +case $ZT_ISA in + 386) + export DOCKER_ARCH=386 + export DEB_ARCH=i386 + export DNF_ARCH=i686 + export RUST_TRIPLET=i686-unknown-linux-gnu + ;; + amd64) + export DOCKER_ARCH=amd64 + export DEB_ARCH=amd64 + export DNF_ARCH=x86_64 + export RUST_TRIPLET=x86_64-unknown-linux-gnu + ;; + armv7) + export DOCKER_ARCH=arm/v7 + export DNF_ARCH=armv7 + export DEB_ARCH=armhf + export RUST_TRIPLET=armv7-unknown-linux-gnueabihf + ;; + arm64) + export DOCKER_ARCH=arm64/v8 + export DEB_ARCH=arm64 + export DNF_ARCH=linux64 + export RUST_TRIPLET=aarch64-unknown-linux-gnu + ;; + riscv64) + export DOCKER_ARCH=riscv64 + export DEB_ARCH=riscv64 + export DNF_ARCH=riscv64 + export RUST_TRIPLET=riscv64gc-unknown-linux-gnu + ;; + ppc64le) + export DOCKER_ARCH=ppc64le + export DEB_ARCH=ppc64el + export DNF_ARCH=ppc64le + export RUST_TRIPLET=powerpc64le-unknown-linux-gnu + ;; + mips64le) + export DOCKER_ARCH=mips64le + export DEB_ARCH=mips64le + export DNF_ARCH=mips64le + export RUST_TRIPLET=mips64el-unknown-linux-gnuabi64 + ;; + s390x) + export DOCKER_ARCH=s390x + export DEB_ARCH=s390x + export DNF_ARCH=s390x + export RUST_TRIPLET=s390x-unknown-linux-gnu + ;; + *) + echo "ERROR: could not determine architecture settings. PLEASE FIX ME" + exit 1 + ;; +esac diff --git a/ci/scripts/munge_debian_changelog.sh b/ci/scripts/munge_debian_changelog.sh new file mode 100755 index 000000000..d37cb0638 --- /dev/null +++ b/ci/scripts/munge_debian_changelog.sh @@ -0,0 +1,37 @@ +#!/bin/bash +set -euo pipefail +IFS=$'\n\t' + +export FILE=$1 +export VERSION=$2 +export NAME=$3 +export MESSAGE=$4 +export DATE=$(date "+%a, %d %b %Y %T %z") +# export DATE=$(date "+%a %b %d %Y") + +set +e +grep --version | grep BSD &> /dev/null +if [ $? == 0 ]; then BSDGREP=true ; else BSDGREP=false ; fi +set -e + +# echo "#~~~~~~~~~~~~~~~~~~~~" +# echo "$0 variables:" +# echo "VERSION: ${VERSION}" +# echo "NAME: ${NAME}" +# echo "MESSAGE: ${MESSAGE}" +# echo "DATE: ${DATE}" +# echo "BSDGREP: ${BSDGREP}" +# echo "#~~~~~~~~~~~~~~~~~~~~" +# echo + +if $BSDGREP ; then + sed -i '' s/^Version:.*/"Version: ${VERSION}"/ ${FILE} +else + sed -i s/^Version:.*/"Version: ${VERSION}"/ ${FILE} +fi + +awk -v version=${VERSION} -v date=${DATE} -v name=${NAME} -v message=${MESSAGE} \ + 'BEGIN{print "zerotier-one (" version ") stable; urgency=medium\n\n * " message "\n\n -- " name " " date "\n" }{ print }' \ + ${FILE} > ${FILE}.new + +mv ${FILE}.new ${FILE} diff --git a/ci/scripts/munge_rpm_spec.sh b/ci/scripts/munge_rpm_spec.sh new file mode 100755 index 000000000..289df1ed4 --- /dev/null +++ b/ci/scripts/munge_rpm_spec.sh @@ -0,0 +1,36 @@ +#!/bin/bash +set -euo pipefail +IFS=$'\n\t' + +export FILE=$1 +export VERSION=$2 +export NAME=$3 +export MESSAGE=$4 +export DATE=$(date "+%a %b %d %Y") + +set +e +grep --version | grep BSD &> /dev/null +if [ $? 
== 0 ]; then BSDGREP=true ; else BSDGREP=false ; fi +set -e + +# echo "#~~~~~~~~~~~~~~~~~~~~" +# echo "$0 variables:" +# echo "VERSION: ${VERSION}" +# echo "NAME: ${NAME}" +# echo "MESSAGE: ${MESSAGE}" +# echo "DATE: ${DATE}" +# echo "BSDGREP: ${BSDGREP}" +# echo "#~~~~~~~~~~~~~~~~~~~~" +# echo + +if $BSDGREP ; then + sed -i '' s/^Version:.*/"Version: ${VERSION}"/ ${FILE} +else + sed -i s/^Version:.*/"Version: ${VERSION}"/ ${FILE} +fi + +awk -v version=${VERSION} -v date=${DATE} -v name=${NAME} -v message=${MESSAGE} \ + 'FNR==NR{ if (/%changelog/) p=NR; next} 1; FNR==p{ print "* " date " " name " - " version "\n- " message "\n" }' \ + ${FILE} ${FILE} > ${FILE}.new + +mv ${FILE}.new ${FILE} diff --git a/ci/scripts/publish.sh b/ci/scripts/publish.sh new file mode 100755 index 000000000..fa1e2468a --- /dev/null +++ b/ci/scripts/publish.sh @@ -0,0 +1,38 @@ +#!/bin/bash +set -euo pipefail +IFS=$'\n\t' + +ZT_NAME="$1" ; shift +DISTRO="$1" ; shift +ZT_ISA="$1" ; shift +VERSION="$1" ; shift +BUILD_EVENT="$1" ; shift + +source "$(dirname $0)/lib.sh" + +if [ ${BUILD_EVENT} == "tag" ]; then + CHANNEL="zerotier-releases" +else + CHANNEL="zerotier-builds" +fi + +function publish_rpm { + mkdir -p /${CHANNEL}/${DISTRO} + ls -la /${CHANNEL} + ls -la . + cp -a ${ZT_NAME} /${CHANNEL}/${DISTRO} +} + +function publish_deb { + mkdir -p /${CHANNEL}/${DISTRO}/pool/dists/${ZT_NAME}/main + cp -a ${ZT_NAME}/* /${CHANNEL}/${DISTRO}/pool/dists/${ZT_NAME}/main +} + +case ${PKGFMT} in + "rpm") + publish_rpm + ;; + "deb") + publish_deb +esac + diff --git a/ci/scripts/test.sh b/ci/scripts/test.sh new file mode 100755 index 000000000..931e9bb91 --- /dev/null +++ b/ci/scripts/test.sh @@ -0,0 +1,55 @@ +#!/bin/bash +set -euo pipefail +IFS=$'\n\t' + +ZT_NAME="$1" ; shift +DISTRO="$1" ; shift +ZT_ISA="$1" ; shift +VERSION="$1" ; shift +BUILD_EVENT="$1" ; shift + +source "$(dirname $0)/lib.sh" + +if [ -f "ci/Dockerfile-test.${ZT_NAME}" ]; then + DOCKERFILE="ci/Dockerfile-test.${ZT_NAME}" +else + DOCKERFILE="ci/Dockerfile-test.${PKGFMT}" +fi + +if [ ${BUILD_EVENT} == "tag" ]; then + BASEURL="zerotier-releases.home.arpa" +else + BASEURL="zerotier-builds.home.arpa" +fi + +echo "#~~~~~~~~~~~~~~~~~~~~" +echo "$0 variables:" +echo "nproc: $(nproc)" +echo "ZT_NAME: ${ZT_NAME}" +echo "DISTRO: ${DISTRO}" +echo "ZT_ISA: ${ZT_ISA}" +echo "VERSION: ${VERSION}" +echo "BUILD_EVENT: ${BUILD_EVENT}" +echo "DOCKER_ARCH: ${DOCKER_ARCH}" +echo "DNF_ARCH: ${DNF_ARCH}" +echo "RUST_TRIPLET: ${RUST_TRIPLET}" +echo "PKGFMT: ${PKGFMT}" +echo "PWD: ${PWD}" +echo "DOCKERFILE: ${DOCKERFILE}" +echo "#~~~~~~~~~~~~~~~~~~~~" + +# docker pull -q --platform="linux/${DOCKER_ARCH}" 084037375216.dkr.ecr.us-east-2.amazonaws.com/${ZT_NAME}-tester + +docker buildx build \ + --build-arg BASEURL="${BASEURL}" \ + --build-arg ZT_NAME="${ZT_NAME}" \ + --build-arg DISTRO="${DISTRO}" \ + --build-arg DEB_ARCH="${DEB_ARCH}" \ + --build-arg DNF_ARCH="${DNF_ARCH}" \ + --build-arg VERSION="${VERSION}" \ + --build-arg DOCKER_ARCH="${DOCKER_ARCH}" \ + --platform "linux/${DOCKER_ARCH}" \ + --no-cache \ + -f ${DOCKERFILE} \ + -t test \ + . diff --git a/controller/CV1.cpp b/controller/CV1.cpp new file mode 100644 index 000000000..95d85a280 --- /dev/null +++ b/controller/CV1.cpp @@ -0,0 +1,2092 @@ +/* + * Copyright (c)2019 ZeroTier, Inc. + * + * Use of this software is governed by the Business Source License included + * in the LICENSE.TXT file in the project's root directory. 
+ * + * Change Date: 2026-01-01 + * + * On the date above, in accordance with the Business Source License, use + * of this software will be governed by version 2.0 of the Apache License. + */ +/****/ + +#include "CV1.hpp" + +#ifdef ZT_CONTROLLER_USE_LIBPQ + +#include "../node/Constants.hpp" +#include "../node/SHA512.hpp" +#include "../version.h" +#include "CtlUtil.hpp" +#include "EmbeddedNetworkController.hpp" +#include "Redis.hpp" +#include "opentelemetry/trace/provider.h" + +#include +#include +#include +#include +#include +#include + +// #define REDIS_TRACE 1 + +using json = nlohmann::json; + +namespace { + +static const int DB_MINIMUM_VERSION = 38; + +} // anonymous namespace + +using namespace ZeroTier; + +using Attrs = std::vector >; +using Item = std::pair; +using ItemStream = std::vector; + +CV1::CV1(const Identity& myId, const char* path, int listenPort, RedisConfig* rc) + : DB() + , _pool() + , _myId(myId) + , _myAddress(myId.address()) + , _ready(0) + , _connected(1) + , _run(1) + , _waitNoticePrinted(false) + , _listenPort(listenPort) + , _rc(rc) + , _redis(NULL) + , _cluster(NULL) + , _redisMemberStatus(false) + , _smee(NULL) +{ + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv1"); + auto span = tracer->StartSpan("cv1::CV1"); + auto scope = tracer->WithActiveSpan(span); + + char myAddress[64]; + _myAddressStr = myId.address().toString(myAddress); + _connString = std::string(path); + auto f = std::make_shared(_connString); + _pool = std::make_shared >(15, 5, std::static_pointer_cast(f)); + + memset(_ssoPsk, 0, sizeof(_ssoPsk)); + char* const ssoPskHex = getenv("ZT_SSO_PSK"); +#ifdef ZT_TRACE + fprintf(stderr, "ZT_SSO_PSK: %s\n", ssoPskHex); +#endif + if (ssoPskHex) { + // SECURITY: note that ssoPskHex will always be null-terminated if libc actually + // returns something non-NULL. If the hex encodes something shorter than 48 bytes, + // it will be padded at the end with zeroes. If longer, it'll be truncated. + Utils::unhex(ssoPskHex, _ssoPsk, sizeof(_ssoPsk)); + } + const char* redisMemberStatus = getenv("ZT_REDIS_MEMBER_STATUS"); + if (redisMemberStatus && (strcmp(redisMemberStatus, "true") == 0)) { + _redisMemberStatus = true; + fprintf(stderr, "Using redis for member status\n"); + } + + auto c = _pool->borrow(); + pqxx::work txn { *c->c }; + + pqxx::row r { txn.exec1("SELECT version FROM ztc_database") }; + int dbVersion = r[0].as(); + txn.commit(); + + if (dbVersion < DB_MINIMUM_VERSION) { + fprintf(stderr, "Central database schema version too low. This controller version requires a minimum schema version of %d. 
Please upgrade your Central instance", DB_MINIMUM_VERSION); + exit(1); + } + _pool->unborrow(c); + + if (_rc != NULL) { + auto innerspan = tracer->StartSpan("cv1::CV1::configureRedis"); + auto innerscope = tracer->WithActiveSpan(innerspan); + + sw::redis::ConnectionOptions opts; + sw::redis::ConnectionPoolOptions poolOpts; + opts.host = _rc->hostname; + opts.port = _rc->port; + opts.password = _rc->password; + opts.db = 0; + opts.keep_alive = true; + opts.connect_timeout = std::chrono::seconds(3); + poolOpts.size = 25; + poolOpts.wait_timeout = std::chrono::seconds(5); + poolOpts.connection_lifetime = std::chrono::minutes(3); + poolOpts.connection_idle_time = std::chrono::minutes(1); + if (_rc->clusterMode) { + innerspan->SetAttribute("cluster_mode", "true"); + fprintf(stderr, "Using Redis in Cluster Mode\n"); + _cluster = std::make_shared(opts, poolOpts); + } + else { + innerspan->SetAttribute("cluster_mode", "false"); + fprintf(stderr, "Using Redis in Standalone Mode\n"); + _redis = std::make_shared(opts, poolOpts); + } + } + + _readyLock.lock(); + + fprintf(stderr, "[%s] NOTICE: %.10llx controller PostgreSQL waiting for initial data download..." ZT_EOL_S, ::_timestr(), (unsigned long long)_myAddress.toInt()); + _waitNoticePrinted = true; + + initializeNetworks(); + initializeMembers(); + + _heartbeatThread = std::thread(&CV1::heartbeat, this); + _membersDbWatcher = std::thread(&CV1::membersDbWatcher, this); + _networksDbWatcher = std::thread(&CV1::networksDbWatcher, this); + for (int i = 0; i < ZT_CENTRAL_CONTROLLER_COMMIT_THREADS; ++i) { + _commitThread[i] = std::thread(&CV1::commitThread, this); + } + _onlineNotificationThread = std::thread(&CV1::onlineNotificationThread, this); + + configureSmee(); +} + +CV1::~CV1() +{ + if (_smee != NULL) { + smeeclient::smee_client_delete(_smee); + _smee = NULL; + } + + _run = 0; + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + + _heartbeatThread.join(); + _membersDbWatcher.join(); + _networksDbWatcher.join(); + _commitQueue.stop(); + for (int i = 0; i < ZT_CENTRAL_CONTROLLER_COMMIT_THREADS; ++i) { + _commitThread[i].join(); + } + _onlineNotificationThread.join(); +} + +void CV1::configureSmee() +{ + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv1"); + auto span = tracer->StartSpan("cv1::configureSmee"); + auto scope = tracer->WithActiveSpan(span); + + const char* TEMPORAL_SCHEME = "ZT_TEMPORAL_SCHEME"; + const char* TEMPORAL_HOST = "ZT_TEMPORAL_HOST"; + const char* TEMPORAL_PORT = "ZT_TEMPORAL_PORT"; + const char* TEMPORAL_NAMESPACE = "ZT_TEMPORAL_NAMESPACE"; + const char* SMEE_TASK_QUEUE = "ZT_SMEE_TASK_QUEUE"; + + const char* scheme = getenv(TEMPORAL_SCHEME); + if (scheme == NULL) { + scheme = "http"; + } + const char* host = getenv(TEMPORAL_HOST); + const char* port = getenv(TEMPORAL_PORT); + const char* ns = getenv(TEMPORAL_NAMESPACE); + const char* task_queue = getenv(SMEE_TASK_QUEUE); + + if (scheme != NULL && host != NULL && port != NULL && ns != NULL && task_queue != NULL) { + fprintf(stderr, "creating smee client\n"); + std::string hostPort = std::string(scheme) + std::string("://") + std::string(host) + std::string(":") + std::string(port); + this->_smee = smeeclient::smee_client_new(hostPort.c_str(), ns, task_queue); + } + else { + fprintf(stderr, "Smee client not configured\n"); + } +} + +bool CV1::waitForReady() +{ + while (_ready < 2) { + _readyLock.lock(); + _readyLock.unlock(); + } + return true; +} + +bool CV1::isReady() +{ + return ((_ready == 2) && 
(_connected)); +} + +bool CV1::save(nlohmann::json& record, bool notifyListeners) +{ + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv1"); + auto span = tracer->StartSpan("cv1::save"); + auto scope = tracer->WithActiveSpan(span); + + bool modified = false; + try { + if (! record.is_object()) { + fprintf(stderr, "record is not an object?!?\n"); + return false; + } + const std::string objtype = record["objtype"]; + if (objtype == "network") { + // fprintf(stderr, "network save\n"); + const uint64_t nwid = OSUtils::jsonIntHex(record["id"], 0ULL); + if (nwid) { + nlohmann::json old; + get(nwid, old); + if ((! old.is_object()) || (! _compareRecords(old, record))) { + record["revision"] = OSUtils::jsonInt(record["revision"], 0ULL) + 1ULL; + _commitQueue.post(std::pair(record, notifyListeners)); + modified = true; + } + } + } + else if (objtype == "member") { + std::string networkId = record["nwid"]; + std::string memberId = record["id"]; + const uint64_t nwid = OSUtils::jsonIntHex(record["nwid"], 0ULL); + const uint64_t id = OSUtils::jsonIntHex(record["id"], 0ULL); + // fprintf(stderr, "member save %s-%s\n", networkId.c_str(), memberId.c_str()); + if ((id) && (nwid)) { + nlohmann::json network, old; + get(nwid, network, id, old); + if ((! old.is_object()) || (! _compareRecords(old, record))) { + // fprintf(stderr, "commit queue post\n"); + record["revision"] = OSUtils::jsonInt(record["revision"], 0ULL) + 1ULL; + _commitQueue.post(std::pair(record, notifyListeners)); + modified = true; + } + else { + // fprintf(stderr, "no change\n"); + } + } + } + else { + fprintf(stderr, "uhh waaat\n"); + } + } + catch (std::exception& e) { + fprintf(stderr, "Error on PostgreSQL::save: %s\n", e.what()); + } + catch (...) 
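// Catch-all for non-std exceptions: the failure is only logged below and save()
// falls through to return `modified`, which stays false unless the record was
// already posted to the commit queue.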
{ + fprintf(stderr, "Unknown error on PostgreSQL::save\n"); + } + return modified; +} + +void CV1::eraseNetwork(const uint64_t networkId) +{ + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv1"); + auto span = tracer->StartSpan("cv1::eraseNetwork"); + auto scope = tracer->WithActiveSpan(span); + char networkIdStr[17]; + span->SetAttribute("network_id", Utils::hex(networkId, networkIdStr)); + + fprintf(stderr, "PostgreSQL::eraseNetwork\n"); + char tmp2[24]; + waitForReady(); + Utils::hex(networkId, tmp2); + std::pair tmp; + tmp.first["id"] = tmp2; + tmp.first["objtype"] = "_delete_network"; + tmp.second = true; + _commitQueue.post(tmp); + nlohmann::json nullJson; + _networkChanged(tmp.first, nullJson, true); +} + +void CV1::eraseMember(const uint64_t networkId, const uint64_t memberId) +{ + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv1"); + auto span = tracer->StartSpan("cv1::eraseMember"); + auto scope = tracer->WithActiveSpan(span); + char networkIdStr[17]; + char memberIdStr[11]; + span->SetAttribute("network_id", Utils::hex(networkId, networkIdStr)); + span->SetAttribute("member_id", Utils::hex10(memberId, memberIdStr)); + + fprintf(stderr, "PostgreSQL::eraseMember\n"); + char tmp2[24]; + waitForReady(); + std::pair tmp, nw; + Utils::hex(networkId, tmp2); + tmp.first["nwid"] = tmp2; + Utils::hex(memberId, tmp2); + tmp.first["id"] = tmp2; + tmp.first["objtype"] = "_delete_member"; + tmp.second = true; + _commitQueue.post(tmp); + nlohmann::json nullJson; + _memberChanged(tmp.first, nullJson, true); +} + +void CV1::nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress, const char* osArch) +{ + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv1"); + auto span = tracer->StartSpan("cv1::nodeIsOnline"); + auto scope = tracer->WithActiveSpan(span); + char networkIdStr[17]; + char memberIdStr[11]; + char ipStr[INET6_ADDRSTRLEN]; + span->SetAttribute("network_id", Utils::hex(networkId, networkIdStr)); + span->SetAttribute("member_id", Utils::hex10(memberId, memberIdStr)); + span->SetAttribute("physical_address", physicalAddress.toString(ipStr)); + span->SetAttribute("os_arch", osArch); + + std::lock_guard l(_lastOnline_l); + NodeOnlineRecord& i = _lastOnline[std::pair(networkId, memberId)]; + i.lastSeen = OSUtils::now(); + if (physicalAddress) { + i.physicalAddress = physicalAddress; + } + i.osArch = std::string(osArch); +} + +void CV1::nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress) +{ + this->nodeIsOnline(networkId, memberId, physicalAddress, "unknown/unknown"); +} + +AuthInfo CV1::getSSOAuthInfo(const nlohmann::json& member, const std::string& redirectURL) +{ + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv1"); + auto span = tracer->StartSpan("cv1::getSSOAuthInfo"); + auto scope = tracer->WithActiveSpan(span); + + Metrics::db_get_sso_info++; + // NONCE is just a random character string. no semantic meaning + // state = HMAC SHA384 of Nonce based on shared sso key + // + // need nonce timeout in database? make sure it's used within X time + // X is 5 minutes for now. Make configurable later? + // + // how do we tell when a nonce is used? 
if auth_expiration_time is set + std::string networkId = member["nwid"]; + std::string memberId = member["id"]; + + char authenticationURL[4096] = { 0 }; + AuthInfo info; + info.enabled = true; + + // if (memberId == "a10dccea52" && networkId == "8056c2e21c24673d") { + // fprintf(stderr, "invalid authinfo for grant's machine\n"); + // info.version=1; + // return info; + // } + // fprintf(stderr, "PostgreSQL::updateMemberOnLoad: %s-%s\n", networkId.c_str(), memberId.c_str()); + std::shared_ptr c; + try { + c = _pool->borrow(); + pqxx::work w(*c->c); + + char nonceBytes[16] = { 0 }; + std::string nonce = ""; + + // check if the member exists first. + pqxx::row count = w.exec_params1("SELECT count(id) FROM ztc_member WHERE id = $1 AND network_id = $2 AND deleted = false", memberId, networkId); + if (count[0].as() == 1) { + // get active nonce, if exists. + pqxx::result r = w.exec_params( + "SELECT nonce FROM ztc_sso_expiry " + "WHERE network_id = $1 AND member_id = $2 " + "AND ((NOW() AT TIME ZONE 'UTC') <= authentication_expiry_time) AND ((NOW() AT TIME ZONE 'UTC') <= nonce_expiration)", + networkId, + memberId); + + if (r.size() == 0) { + // no active nonce. + // find an unused nonce, if one exists. + pqxx::result r = w.exec_params( + "SELECT nonce FROM ztc_sso_expiry " + "WHERE network_id = $1 AND member_id = $2 " + "AND authentication_expiry_time IS NULL AND ((NOW() AT TIME ZONE 'UTC') <= nonce_expiration)", + networkId, + memberId); + + if (r.size() == 1) { + // we have an existing nonce. Use it + nonce = r.at(0)[0].as(); + Utils::unhex(nonce.c_str(), nonceBytes, sizeof(nonceBytes)); + } + else if (r.empty()) { + // create a nonce + Utils::getSecureRandom(nonceBytes, 16); + char nonceBuf[64] = { 0 }; + Utils::hex(nonceBytes, sizeof(nonceBytes), nonceBuf); + nonce = std::string(nonceBuf); + + pqxx::result ir = w.exec_params0( + "INSERT INTO ztc_sso_expiry " + "(nonce, nonce_expiration, network_id, member_id) VALUES " + "($1, TO_TIMESTAMP($2::double precision/1000), $3, $4)", + nonce, + OSUtils::now() + 300000, + networkId, + memberId); + + w.commit(); + } + else { + // > 1 ?!? Thats an error! + fprintf(stderr, "> 1 unused nonce!\n"); + exit(6); + } + } + else if (r.size() == 1) { + nonce = r.at(0)[0].as(); + Utils::unhex(nonce.c_str(), nonceBytes, sizeof(nonceBytes)); + } + else { + // more than 1 nonce in use? Uhhh... + fprintf(stderr, "> 1 nonce in use for network member?!?\n"); + exit(7); + } + + r = w.exec_params( + "SELECT oc.client_id, oc.authorization_endpoint, oc.issuer, oc.provider, oc.sso_impl_version " + "FROM ztc_network AS n " + "INNER JOIN ztc_org o " + " ON o.owner_id = n.owner_id " + "LEFT OUTER JOIN ztc_network_oidc_config noc " + " ON noc.network_id = n.id " + "LEFT OUTER JOIN ztc_oidc_config oc " + " ON noc.client_id = oc.client_id AND oc.org_id = o.org_id " + "WHERE n.id = $1 AND n.sso_enabled = true", + networkId); + + std::string client_id = ""; + std::string authorization_endpoint = ""; + std::string issuer = ""; + std::string provider = ""; + uint64_t sso_version = 0; + + if (r.size() == 1) { + client_id = r.at(0)[0].as >().value_or(""); + authorization_endpoint = r.at(0)[1].as >().value_or(""); + issuer = r.at(0)[2].as >().value_or(""); + provider = r.at(0)[3].as >().value_or(""); + sso_version = r.at(0)[4].as >().value_or(1); + } + else if (r.size() > 1) { + fprintf(stderr, "ERROR: More than one auth endpoint for an organization?!?!? 
NetworkID: %s\n", networkId.c_str()); + } + else { + fprintf(stderr, "No client or auth endpoint?!?\n"); + } + + info.version = sso_version; + + // no catch all else because we don't actually care if no records exist here. just continue as normal. + if ((! client_id.empty()) && (! authorization_endpoint.empty())) { + uint8_t state[48]; + HMACSHA384(_ssoPsk, nonceBytes, sizeof(nonceBytes), state); + char state_hex[256]; + Utils::hex(state, 48, state_hex); + + if (info.version == 0) { + char url[2048] = { 0 }; + OSUtils::ztsnprintf( + url, + sizeof(authenticationURL), + "%s?response_type=id_token&response_mode=form_post&scope=openid+email+profile&redirect_uri=%s&nonce=%s&state=%s&client_id=%s", + authorization_endpoint.c_str(), + url_encode(redirectURL).c_str(), + nonce.c_str(), + state_hex, + client_id.c_str()); + info.authenticationURL = std::string(url); + } + else if (info.version == 1) { + info.ssoClientID = client_id; + info.issuerURL = issuer; + info.ssoProvider = provider; + info.ssoNonce = nonce; + info.ssoState = std::string(state_hex) + "_" + networkId; + info.centralAuthURL = redirectURL; +#ifdef ZT_DEBUG + fprintf( + stderr, + "ssoClientID: %s\nissuerURL: %s\nssoNonce: %s\nssoState: %s\ncentralAuthURL: %s\nprovider: %s\n", + info.ssoClientID.c_str(), + info.issuerURL.c_str(), + info.ssoNonce.c_str(), + info.ssoState.c_str(), + info.centralAuthURL.c_str(), + provider.c_str()); +#endif + } + } + else { + fprintf(stderr, "client_id: %s\nauthorization_endpoint: %s\n", client_id.c_str(), authorization_endpoint.c_str()); + } + } + + _pool->unborrow(c); + } + catch (std::exception& e) { + span->SetStatus(opentelemetry::trace::StatusCode::kError, e.what()); + fprintf(stderr, "ERROR: Error updating member on load for network %s: %s\n", networkId.c_str(), e.what()); + } + + return info; // std::string(authenticationURL); +} + +void CV1::initializeNetworks() +{ + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv1"); + auto span = tracer->StartSpan("cv1::initializeNetworks"); + auto scope = tracer->WithActiveSpan(span); + + try { + std::string setKey = "networks:{" + _myAddressStr + "}"; + + fprintf(stderr, "Initializing Networks...\n"); + + if (_redisMemberStatus) { + fprintf(stderr, "Init Redis for networks...\n"); + try { + if (_rc->clusterMode) { + _cluster->del(setKey); + } + else { + _redis->del(setKey); + } + } + catch (sw::redis::Error& e) { + // ignore. 
if this key doesn't exist, there's no reason to delete it + } + } + + std::unordered_set networkSet; + + char qbuf[2048] = { 0 }; + sprintf( + qbuf, + "SELECT n.id, (EXTRACT(EPOCH FROM n.creation_time AT TIME ZONE 'UTC')*1000)::bigint as creation_time, n.capabilities, " + "n.enable_broadcast, (EXTRACT(EPOCH FROM n.last_modified AT TIME ZONE 'UTC')*1000)::bigint AS last_modified, n.mtu, n.multicast_limit, n.name, n.private, n.remote_trace_level, " + "n.remote_trace_target, n.revision, n.rules, n.tags, n.v4_assign_mode, n.v6_assign_mode, n.sso_enabled, (CASE WHEN n.sso_enabled THEN noc.client_id ELSE NULL END) as client_id, " + "(CASE WHEN n.sso_enabled THEN oc.authorization_endpoint ELSE NULL END) as authorization_endpoint, " + "(CASE WHEN n.sso_enabled THEN oc.provider ELSE NULL END) as provider, d.domain, d.servers, " + "ARRAY(SELECT CONCAT(host(ip_range_start),'|', host(ip_range_end)) FROM ztc_network_assignment_pool WHERE network_id = n.id) AS assignment_pool, " + "ARRAY(SELECT CONCAT(host(address),'/',bits::text,'|',COALESCE(host(via), 'NULL'))FROM ztc_network_route WHERE network_id = n.id) AS routes " + "FROM ztc_network n " + "LEFT OUTER JOIN ztc_org o " + " ON o.owner_id = n.owner_id " + "LEFT OUTER JOIN ztc_network_oidc_config noc " + " ON noc.network_id = n.id " + "LEFT OUTER JOIN ztc_oidc_config oc " + " ON noc.client_id = oc.client_id AND oc.org_id = o.org_id " + "LEFT OUTER JOIN ztc_network_dns d " + " ON d.network_id = n.id " + "WHERE deleted = false AND controller_id = '%s'", + _myAddressStr.c_str()); + auto c = _pool->borrow(); + auto c2 = _pool->borrow(); + pqxx::work w { *c->c }; + + fprintf(stderr, "Load networks from psql...\n"); + auto stream = pqxx::stream_from::query(w, qbuf); + + std::tuple< + std::string // network ID + , + std::optional // creationTime + , + std::optional // capabilities + , + std::optional // enableBroadcast + , + std::optional // lastModified + , + std::optional // mtu + , + std::optional // multicastLimit + , + std::optional // name + , + bool // private + , + std::optional // remoteTraceLevel + , + std::optional // remoteTraceTarget + , + std::optional // revision + , + std::optional // rules + , + std::optional // tags + , + std::optional // v4AssignMode + , + std::optional // v6AssignMode + , + std::optional // ssoEnabled + , + std::optional // clientId + , + std::optional // authorizationEndpoint + , + std::optional // ssoProvider + , + std::optional // domain + , + std::optional // servers + , + std::string // assignmentPoolString + , + std::string // routeString + > + row; + + uint64_t count = 0; + auto tmp = std::chrono::high_resolution_clock::now(); + uint64_t total = 0; + while (stream >> row) { + auto start = std::chrono::high_resolution_clock::now(); + + json empty; + json config; + + initNetwork(config); + + std::string nwid = std::get<0>(row); + std::optional creationTime = std::get<1>(row); + std::optional capabilities = std::get<2>(row); + std::optional enableBroadcast = std::get<3>(row); + std::optional lastModified = std::get<4>(row); + std::optional mtu = std::get<5>(row); + std::optional multicastLimit = std::get<6>(row); + std::optional name = std::get<7>(row); + bool isPrivate = std::get<8>(row); + std::optional remoteTraceLevel = std::get<9>(row); + std::optional remoteTraceTarget = std::get<10>(row); + std::optional revision = std::get<11>(row); + std::optional rules = std::get<12>(row); + std::optional tags = std::get<13>(row); + std::optional v4AssignMode = std::get<14>(row); + std::optional v6AssignMode = 
std::get<15>(row); + std::optional ssoEnabled = std::get<16>(row); + std::optional clientId = std::get<17>(row); + std::optional authorizationEndpoint = std::get<18>(row); + std::optional ssoProvider = std::get<19>(row); + std::optional dnsDomain = std::get<20>(row); + std::optional dnsServers = std::get<21>(row); + std::string assignmentPoolString = std::get<22>(row); + std::string routesString = std::get<23>(row); + + config["id"] = nwid; + config["nwid"] = nwid; + config["creationTime"] = creationTime.value_or(0); + config["capabilities"] = json::parse(capabilities.value_or("[]")); + config["enableBroadcast"] = enableBroadcast.value_or(false); + config["lastModified"] = lastModified.value_or(0); + config["mtu"] = mtu.value_or(2800); + config["multicastLimit"] = multicastLimit.value_or(64); + config["name"] = name.value_or(""); + config["private"] = isPrivate; + config["remoteTraceLevel"] = remoteTraceLevel.value_or(0); + config["remoteTraceTarget"] = remoteTraceTarget.value_or(""); + config["revision"] = revision.value_or(0); + config["rules"] = json::parse(rules.value_or("[]")); + config["tags"] = json::parse(tags.value_or("[]")); + config["v4AssignMode"] = json::parse(v4AssignMode.value_or("{}")); + config["v6AssignMode"] = json::parse(v6AssignMode.value_or("{}")); + config["ssoEnabled"] = ssoEnabled.value_or(false); + config["objtype"] = "network"; + config["ipAssignmentPools"] = json::array(); + config["routes"] = json::array(); + config["clientId"] = clientId.value_or(""); + config["authorizationEndpoint"] = authorizationEndpoint.value_or(""); + config["provider"] = ssoProvider.value_or(""); + + networkSet.insert(nwid); + + if (dnsDomain.has_value()) { + std::string serverList = dnsServers.value(); + json obj; + auto servers = json::array(); + if (serverList.rfind("{", 0) != std::string::npos) { + serverList = serverList.substr(1, serverList.size() - 2); + std::stringstream ss(serverList); + while (ss.good()) { + std::string server; + std::getline(ss, server, ','); + servers.push_back(server); + } + } + obj["domain"] = dnsDomain.value(); + obj["servers"] = servers; + config["dns"] = obj; + } + + config["ipAssignmentPools"] = json::array(); + if (assignmentPoolString != "{}") { + std::string tmp = assignmentPoolString.substr(1, assignmentPoolString.size() - 2); + std::vector assignmentPools = split(tmp, ','); + for (auto it = assignmentPools.begin(); it != assignmentPools.end(); ++it) { + std::vector r = split(*it, '|'); + json ip; + ip["ipRangeStart"] = r[0]; + ip["ipRangeEnd"] = r[1]; + config["ipAssignmentPools"].push_back(ip); + } + } + + config["routes"] = json::array(); + if (routesString != "{}") { + std::string tmp = routesString.substr(1, routesString.size() - 2); + std::vector routes = split(tmp, ','); + for (auto it = routes.begin(); it != routes.end(); ++it) { + std::vector r = split(*it, '|'); + json route; + route["target"] = r[0]; + route["via"] = ((route["via"] == "NULL") ? 
nullptr : r[1]); + config["routes"].push_back(route); + } + } + + Metrics::network_count++; + + _networkChanged(empty, config, false); + + auto end = std::chrono::high_resolution_clock::now(); + auto dur = std::chrono::duration_cast(end - start); + ; + total += dur.count(); + ++count; + if (count > 0 && count % 10000 == 0) { + fprintf(stderr, "Averaging %llu us per network\n", (total / count)); + } + } + + if (count > 0) { + fprintf(stderr, "Took %llu us per network to load\n", (total / count)); + } + stream.complete(); + + w.commit(); + _pool->unborrow(c2); + _pool->unborrow(c); + fprintf(stderr, "done.\n"); + + if (! networkSet.empty()) { + if (_redisMemberStatus) { + fprintf(stderr, "adding networks to redis...\n"); + if (_rc->clusterMode) { + auto tx = _cluster->transaction(_myAddressStr, true, false); + uint64_t count = 0; + for (std::string nwid : networkSet) { + tx.sadd(setKey, nwid); + if (++count % 30000 == 0) { + tx.exec(); + tx = _cluster->transaction(_myAddressStr, true, false); + } + } + tx.exec(); + } + else { + auto tx = _redis->transaction(true, false); + uint64_t count = 0; + for (std::string nwid : networkSet) { + tx.sadd(setKey, nwid); + if (++count % 30000 == 0) { + tx.exec(); + tx = _redis->transaction(true, false); + } + } + tx.exec(); + } + fprintf(stderr, "done.\n"); + } + } + + if (++this->_ready == 2) { + if (_waitNoticePrinted) { + fprintf(stderr, "[%s] NOTICE: %.10llx controller PostgreSQL data download complete." ZT_EOL_S, _timestr(), (unsigned long long)_myAddress.toInt()); + } + _readyLock.unlock(); + } + fprintf(stderr, "network init done.\n"); + } + catch (sw::redis::Error& e) { + span->SetStatus(opentelemetry::trace::StatusCode::kError, e.what()); + fprintf(stderr, "ERROR: Error initializing networks in Redis: %s\n", e.what()); + std::this_thread::sleep_for(std::chrono::milliseconds(5000)); + exit(-1); + } + catch (std::exception& e) { + span->SetStatus(opentelemetry::trace::StatusCode::kError, e.what()); + fprintf(stderr, "ERROR: Error initializing networks: %s\n", e.what()); + std::this_thread::sleep_for(std::chrono::milliseconds(5000)); + exit(-1); + } +} + +void CV1::initializeMembers() +{ + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv1"); + auto span = tracer->StartSpan("cv1::initializeMembers"); + auto scope = tracer->WithActiveSpan(span); + + std::string memberId; + std::string networkId; + try { + std::unordered_map networkMembers; + fprintf(stderr, "Initializing Members...\n"); + + std::string setKeyBase = "network-nodes-all:{" + _myAddressStr + "}:"; + + if (_redisMemberStatus) { + fprintf(stderr, "Initialize Redis for members...\n"); + std::unique_lock l(_networks_l); + std::unordered_set deletes; + for (auto it : _networks) { + uint64_t nwid_i = it.first; + char nwidTmp[64] = { 0 }; + OSUtils::ztsnprintf(nwidTmp, sizeof(nwidTmp), "%.16llx", nwid_i); + std::string nwid(nwidTmp); + std::string key = setKeyBase + nwid; + deletes.insert(key); + } + + if (! 
deletes.empty()) { + try { + if (_rc->clusterMode) { + auto tx = _cluster->transaction(_myAddressStr, true, false); + for (std::string k : deletes) { + tx.del(k); + } + tx.exec(); + } + else { + auto tx = _redis->transaction(true, false); + for (std::string k : deletes) { + tx.del(k); + } + tx.exec(); + } + } + catch (sw::redis::Error& e) { + // ignore + } + } + } + + char qbuf[2048]; + sprintf( + qbuf, + "SELECT m.id, m.network_id, m.active_bridge, m.authorized, m.capabilities, " + "(EXTRACT(EPOCH FROM m.creation_time AT TIME ZONE 'UTC')*1000)::bigint, m.identity, " + "(EXTRACT(EPOCH FROM m.last_authorized_time AT TIME ZONE 'UTC')*1000)::bigint, " + "(EXTRACT(EPOCH FROM m.last_deauthorized_time AT TIME ZONE 'UTC')*1000)::bigint, " + "m.remote_trace_level, m.remote_trace_target, m.tags, m.v_major, m.v_minor, m.v_rev, m.v_proto, " + "m.no_auto_assign_ips, m.revision, m.sso_exempt, " + "(CASE WHEN n.sso_enabled = TRUE AND m.sso_exempt = FALSE THEN " + " ( " + " SELECT (EXTRACT(EPOCH FROM e.authentication_expiry_time)*1000)::bigint " + " FROM ztc_sso_expiry e " + " INNER JOIN ztc_network n1 " + " ON n1.id = e.network_id AND n1.deleted = TRUE " + " WHERE e.network_id = m.network_id AND e.member_id = m.id AND n.sso_enabled = TRUE AND e.authentication_expiry_time IS NOT NULL " + " ORDER BY e.authentication_expiry_time DESC LIMIT 1 " + " ) " + " ELSE NULL " + " END) AS authentication_expiry_time, " + "ARRAY(SELECT DISTINCT address FROM ztc_member_ip_assignment WHERE member_id = m.id AND network_id = m.network_id) AS assigned_addresses " + "FROM ztc_member m " + "INNER JOIN ztc_network n " + " ON n.id = m.network_id " + "WHERE n.controller_id = '%s' AND n.deleted = FALSE AND m.deleted = FALSE", + _myAddressStr.c_str()); + auto c = _pool->borrow(); + auto c2 = _pool->borrow(); + pqxx::work w { *c->c }; + + fprintf(stderr, "Load members from psql...\n"); + auto stream = pqxx::stream_from::query(w, qbuf); + + std::tuple< + std::string // memberId + , + std::string // memberId + , + std::optional // activeBridge + , + std::optional // authorized + , + std::optional // capabilities + , + std::optional // creationTime + , + std::optional // identity + , + std::optional // lastAuthorizedTime + , + std::optional // lastDeauthorizedTime + , + std::optional // remoteTraceLevel + , + std::optional // remoteTraceTarget + , + std::optional // tags + , + std::optional // vMajor + , + std::optional // vMinor + , + std::optional // vRev + , + std::optional // vProto + , + std::optional // noAutoAssignIps + , + std::optional // revision + , + std::optional // ssoExempt + , + std::optional // authenticationExpiryTime + , + std::string // assignedAddresses + > + row; + + uint64_t count = 0; + auto tmp = std::chrono::high_resolution_clock::now(); + uint64_t total = 0; + while (stream >> row) { + auto start = std::chrono::high_resolution_clock::now(); + json empty; + json config; + + initMember(config); + + memberId = std::get<0>(row); + networkId = std::get<1>(row); + std::optional activeBridge = std::get<2>(row); + std::optional authorized = std::get<3>(row); + std::optional capabilities = std::get<4>(row); + std::optional creationTime = std::get<5>(row); + std::optional identity = std::get<6>(row); + std::optional lastAuthorizedTime = std::get<7>(row); + std::optional lastDeauthorizedTime = std::get<8>(row); + std::optional remoteTraceLevel = std::get<9>(row); + std::optional remoteTraceTarget = std::get<10>(row); + std::optional tags = std::get<11>(row); + std::optional vMajor = std::get<12>(row); + std::optional 
vMinor = std::get<13>(row); + std::optional vRev = std::get<14>(row); + std::optional vProto = std::get<15>(row); + std::optional noAutoAssignIps = std::get<16>(row); + std::optional revision = std::get<17>(row); + std::optional ssoExempt = std::get<18>(row); + std::optional authenticationExpiryTime = std::get<19>(row); + std::string assignedAddresses = std::get<20>(row); + + networkMembers.insert(std::pair(setKeyBase + networkId, memberId)); + + config["id"] = memberId; + config["address"] = memberId; + config["nwid"] = networkId; + config["activeBridge"] = activeBridge.value_or(false); + config["authorized"] = authorized.value_or(false); + config["capabilities"] = json::parse(capabilities.value_or("[]")); + config["creationTime"] = creationTime.value_or(0); + config["identity"] = identity.value_or(""); + config["lastAuthorizedTime"] = lastAuthorizedTime.value_or(0); + config["lastDeauthorizedTime"] = lastDeauthorizedTime.value_or(0); + config["remoteTraceLevel"] = remoteTraceLevel.value_or(0); + config["remoteTraceTarget"] = remoteTraceTarget.value_or(""); + config["tags"] = json::parse(tags.value_or("[]")); + config["vMajor"] = vMajor.value_or(-1); + config["vMinor"] = vMinor.value_or(-1); + config["vRev"] = vRev.value_or(-1); + config["vProto"] = vProto.value_or(-1); + config["noAutoAssignIps"] = noAutoAssignIps.value_or(false); + config["revision"] = revision.value_or(0); + config["ssoExempt"] = ssoExempt.value_or(false); + config["authenticationExpiryTime"] = authenticationExpiryTime.value_or(0); + config["objtype"] = "member"; + config["ipAssignments"] = json::array(); + + if (assignedAddresses != "{}") { + std::string tmp = assignedAddresses.substr(1, assignedAddresses.size() - 2); + std::vector addrs = split(tmp, ','); + for (auto it = addrs.begin(); it != addrs.end(); ++it) { + config["ipAssignments"].push_back(*it); + } + } + + Metrics::member_count++; + + _memberChanged(empty, config, false); + + memberId = ""; + networkId = ""; + + auto end = std::chrono::high_resolution_clock::now(); + auto dur = std::chrono::duration_cast(end - start); + total += dur.count(); + ++count; + if (count > 0 && count % 10000 == 0) { + fprintf(stderr, "Averaging %llu us per member\n", (total / count)); + } + } + if (count > 0) { + fprintf(stderr, "Took %llu us per member to load\n", (total / count)); + } + + stream.complete(); + + w.commit(); + _pool->unborrow(c2); + _pool->unborrow(c); + fprintf(stderr, "done.\n"); + + if (! networkMembers.empty()) { + if (_redisMemberStatus) { + fprintf(stderr, "Load member data into redis...\n"); + if (_rc->clusterMode) { + auto tx = _cluster->transaction(_myAddressStr, true, false); + uint64_t count = 0; + for (auto it : networkMembers) { + tx.sadd(it.first, it.second); + if (++count % 30000 == 0) { + tx.exec(); + tx = _cluster->transaction(_myAddressStr, true, false); + } + } + tx.exec(); + } + else { + auto tx = _redis->transaction(true, false); + uint64_t count = 0; + for (auto it : networkMembers) { + tx.sadd(it.first, it.second); + if (++count % 30000 == 0) { + tx.exec(); + tx = _redis->transaction(true, false); + } + } + tx.exec(); + } + fprintf(stderr, "done.\n"); + } + } + + fprintf(stderr, "Done loading members...\n"); + + if (++this->_ready == 2) { + if (_waitNoticePrinted) { + fprintf(stderr, "[%s] NOTICE: %.10llx controller PostgreSQL data download complete." 
ZT_EOL_S, _timestr(), (unsigned long long)_myAddress.toInt()); + } + _readyLock.unlock(); + } + } + catch (sw::redis::Error& e) { + span->SetStatus(opentelemetry::trace::StatusCode::kError, e.what()); + fprintf(stderr, "ERROR: Error initializing members (redis): %s\n", e.what()); + exit(-1); + } + catch (std::exception& e) { + span->SetStatus(opentelemetry::trace::StatusCode::kError, e.what()); + fprintf(stderr, "ERROR: Error initializing member: %s-%s %s\n", networkId.c_str(), memberId.c_str(), e.what()); + exit(-1); + } +} + +void CV1::heartbeat() +{ + char publicId[1024]; + char hostnameTmp[1024]; + _myId.toString(false, publicId); + if (gethostname(hostnameTmp, sizeof(hostnameTmp)) != 0) { + hostnameTmp[0] = (char)0; + } + else { + for (int i = 0; i < (int)sizeof(hostnameTmp); ++i) { + if ((hostnameTmp[i] == '.') || (hostnameTmp[i] == 0)) { + hostnameTmp[i] = (char)0; + break; + } + } + } + const char* controllerId = _myAddressStr.c_str(); + const char* publicIdentity = publicId; + const char* hostname = hostnameTmp; + + while (_run == 1) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv1"); + auto span = tracer->StartSpan("cv1::heartbeat"); + auto scope = tracer->WithActiveSpan(span); + + // fprintf(stderr, "%s: heartbeat\n", controllerId); + auto c = _pool->borrow(); + int64_t ts = OSUtils::now(); + + if (c->c) { + std::string major = std::to_string(ZEROTIER_ONE_VERSION_MAJOR); + std::string minor = std::to_string(ZEROTIER_ONE_VERSION_MINOR); + std::string rev = std::to_string(ZEROTIER_ONE_VERSION_REVISION); + std::string build = std::to_string(ZEROTIER_ONE_VERSION_BUILD); + std::string now = std::to_string(ts); + std::string host_port = std::to_string(_listenPort); + std::string use_redis = (_rc != NULL) ? "true" : "false"; + std::string redis_mem_status = (_redisMemberStatus) ? 
"true" : "false"; + + try { + pqxx::work w { *c->c }; + + pqxx::result res = w.exec0( + "INSERT INTO ztc_controller (id, cluster_host, last_alive, public_identity, v_major, v_minor, v_rev, v_build, host_port, use_redis, redis_member_status) " + "VALUES (" + + w.quote(controllerId) + ", " + w.quote(hostname) + ", TO_TIMESTAMP(" + now + "::double precision/1000), " + w.quote(publicIdentity) + ", " + major + ", " + minor + ", " + rev + ", " + build + ", " + host_port + ", " + + use_redis + ", " + redis_mem_status + + ") " + "ON CONFLICT (id) DO UPDATE SET cluster_host = EXCLUDED.cluster_host, last_alive = EXCLUDED.last_alive, " + "public_identity = EXCLUDED.public_identity, v_major = EXCLUDED.v_major, v_minor = EXCLUDED.v_minor, " + "v_rev = EXCLUDED.v_rev, v_build = EXCLUDED.v_rev, host_port = EXCLUDED.host_port, " + "use_redis = EXCLUDED.use_redis, redis_member_status = EXCLUDED.redis_member_status"); + w.commit(); + } + catch (std::exception& e) { + fprintf(stderr, "%s: Heartbeat update failed: %s\n", controllerId, e.what()); + span->End(); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + continue; + } + } + _pool->unborrow(c); + + try { + if (_redisMemberStatus) { + if (_rc->clusterMode) { + _cluster->zadd("controllers", "controllerId", ts); + } + else { + _redis->zadd("controllers", "controllerId", ts); + } + } + } + catch (sw::redis::Error& e) { + fprintf(stderr, "ERROR: Redis error in heartbeat thread: %s\n", e.what()); + } + + span->End(); + std::this_thread::sleep_for(std::chrono::milliseconds(1000)); + } + fprintf(stderr, "Exited heartbeat thread\n"); +} + +void CV1::membersDbWatcher() +{ + if (_rc) { + _membersWatcher_Redis(); + } + else { + _membersWatcher_Postgres(); + } + + if (_run == 1) { + fprintf(stderr, "ERROR: %s membersDbWatcher should still be running! Exiting Controller.\n", _myAddressStr.c_str()); + exit(9); + } + fprintf(stderr, "Exited membersDbWatcher\n"); +} + +void CV1::_membersWatcher_Postgres() +{ + auto c = _pool->borrow(); + + std::string stream = "member_" + _myAddressStr; + + fprintf(stderr, "Listening to member stream: %s\n", stream.c_str()); + MemberNotificationReceiver m(this, *c->c, stream); + + while (_run == 1) { + c->c->await_notification(5, 0); + } + + _pool->unborrow(c); +} + +void CV1::_membersWatcher_Redis() +{ + char buf[11] = { 0 }; + std::string key = "member-stream:{" + std::string(_myAddress.toString(buf)) + "}"; + std::string lastID = "0"; + fprintf(stderr, "Listening to member stream: %s\n", key.c_str()); + while (_run == 1) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv1"); + auto span = tracer->StartSpan("cv1::_membersWatcher_Redis"); + auto scope = tracer->WithActiveSpan(span); + + try { + json tmp; + std::unordered_map result; + if (_rc->clusterMode) { + _cluster->xread(key, lastID, std::chrono::seconds(1), 0, std::inserter(result, result.end())); + } + else { + _redis->xread(key, lastID, std::chrono::seconds(1), 0, std::inserter(result, result.end())); + } + if (! 
result.empty()) { + for (auto element : result) { +#ifdef REDIS_TRACE + fprintf(stdout, "Received notification from: %s\n", element.first.c_str()); +#endif + for (auto rec : element.second) { + std::string id = rec.first; + auto attrs = rec.second; +#ifdef REDIS_TRACE + fprintf(stdout, "Record ID: %s\n", id.c_str()); + fprintf(stdout, "attrs len: %lu\n", attrs.size()); +#endif + for (auto a : attrs) { +#ifdef REDIS_TRACE + fprintf(stdout, "key: %s\nvalue: %s\n", a.first.c_str(), a.second.c_str()); +#endif + try { + tmp = json::parse(a.second); + json& ov = tmp["old_val"]; + json& nv = tmp["new_val"]; + json oldConfig, newConfig; + if (ov.is_object()) + oldConfig = ov; + if (nv.is_object()) + newConfig = nv; + if (oldConfig.is_object() || newConfig.is_object()) { + _memberChanged(oldConfig, newConfig, (this->_ready >= 2)); + } + } + catch (...) { + fprintf(stderr, "json parse error in _membersWatcher_Redis: %s\n", a.second.c_str()); + } + } + if (_rc->clusterMode) { + _cluster->xdel(key, id); + } + else { + _redis->xdel(key, id); + } + lastID = id; + Metrics::redis_mem_notification++; + } + } + } + } + catch (sw::redis::Error& e) { + span->SetStatus(opentelemetry::trace::StatusCode::kError, e.what()); + fprintf(stderr, "Error in Redis members watcher: %s\n", e.what()); + } + } + fprintf(stderr, "membersWatcher ended\n"); +} + +void CV1::networksDbWatcher() +{ + if (_rc) { + _networksWatcher_Redis(); + } + else { + _networksWatcher_Postgres(); + } + + if (_run == 1) { + fprintf(stderr, "ERROR: %s networksDbWatcher should still be running! Exiting Controller.\n", _myAddressStr.c_str()); + exit(8); + } + fprintf(stderr, "Exited networksDbWatcher\n"); +} + +void CV1::_networksWatcher_Postgres() +{ + std::string stream = "network_" + _myAddressStr; + + fprintf(stderr, "Listening to member stream: %s\n", stream.c_str()); + + auto c = _pool->borrow(); + + NetworkNotificationReceiver n(this, *c->c, stream); + + while (_run == 1) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv1"); + auto span = tracer->StartSpan("cv1::_networksWatcher_Postgres"); + auto scope = tracer->WithActiveSpan(span); + + c->c->await_notification(5, 0); + } +} + +void CV1::_networksWatcher_Redis() +{ + char buf[11] = { 0 }; + std::string key = "network-stream:{" + std::string(_myAddress.toString(buf)) + "}"; + std::string lastID = "0"; + while (_run == 1) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv1"); + auto span = tracer->StartSpan("cv1::_networksWatcher_Redis"); + auto scope = tracer->WithActiveSpan(span); + + try { + json tmp; + std::unordered_map result; + if (_rc->clusterMode) { + _cluster->xread(key, lastID, std::chrono::seconds(1), 0, std::inserter(result, result.end())); + } + else { + _redis->xread(key, lastID, std::chrono::seconds(1), 0, std::inserter(result, result.end())); + } + + if (! 
result.empty()) { + for (auto element : result) { +#ifdef REDIS_TRACE + fprintf(stdout, "Received notification from: %s\n", element.first.c_str()); +#endif + for (auto rec : element.second) { + std::string id = rec.first; + auto attrs = rec.second; +#ifdef REDIS_TRACE + fprintf(stdout, "Record ID: %s\n", id.c_str()); + fprintf(stdout, "attrs len: %lu\n", attrs.size()); +#endif + for (auto a : attrs) { +#ifdef REDIS_TRACE + fprintf(stdout, "key: %s\nvalue: %s\n", a.first.c_str(), a.second.c_str()); +#endif + try { + tmp = json::parse(a.second); + json& ov = tmp["old_val"]; + json& nv = tmp["new_val"]; + json oldConfig, newConfig; + if (ov.is_object()) + oldConfig = ov; + if (nv.is_object()) + newConfig = nv; + if (oldConfig.is_object() || newConfig.is_object()) { + _networkChanged(oldConfig, newConfig, (this->_ready >= 2)); + } + } + catch (std::exception& e) { + fprintf(stderr, "json parse error in networkWatcher_Redis: what: %s json: %s\n", e.what(), a.second.c_str()); + } + } + if (_rc->clusterMode) { + _cluster->xdel(key, id); + } + else { + _redis->xdel(key, id); + } + lastID = id; + } + Metrics::redis_net_notification++; + } + } + } + catch (sw::redis::Error& e) { + span->SetStatus(opentelemetry::trace::StatusCode::kError, e.what()); + fprintf(stderr, "Error in Redis networks watcher: %s\n", e.what()); + } + } + fprintf(stderr, "networksWatcher ended\n"); +} + +void CV1::commitThread() +{ + fprintf(stderr, "%s: commitThread start\n", _myAddressStr.c_str()); + std::pair qitem; + while (_commitQueue.get(qitem) & (_run == 1)) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv1"); + auto span = tracer->StartSpan("cv1::commitThread"); + auto scope = tracer->WithActiveSpan(span); + + // fprintf(stderr, "commitThread tick\n"); + if (! qitem.first.is_object()) { + fprintf(stderr, "not an object\n"); + continue; + } + + std::shared_ptr c; + try { + c = _pool->borrow(); + } + catch (std::exception& e) { + fprintf(stderr, "ERROR: %s\n", e.what()); + continue; + } + + if (! c) { + fprintf(stderr, "Error getting database connection\n"); + continue; + } + + Metrics::pgsql_commit_ticks++; + try { + nlohmann::json& config = (qitem.first); + const std::string objtype = config["objtype"]; + if (objtype == "member") { + auto mspan = tracer->StartSpan("cv1::commitThread::member"); + auto mscope = tracer->WithActiveSpan(mspan); + + // fprintf(stderr, "%s: commitThread: member\n", _myAddressStr.c_str()); + std::string memberId; + std::string networkId; + try { + pqxx::work w(*c->c); + + memberId = config["id"]; + networkId = config["nwid"]; + + std::string target = "NULL"; + if (! config["remoteTraceTarget"].is_null()) { + target = config["remoteTraceTarget"]; + } + + pqxx::row nwrow = w.exec_params1("SELECT COUNT(id) FROM ztc_network WHERE id = $1", networkId); + int nwcount = nwrow[0].as(); + + if (nwcount != 1) { + fprintf(stderr, "network %s does not exist. 
skipping member upsert\n", networkId.c_str()); + w.abort(); + _pool->unborrow(c); + continue; + } + + pqxx::row mrow = w.exec_params1("SELECT COUNT(id) FROM ztc_member WHERE id = $1 AND network_id = $2", memberId, networkId); + int membercount = mrow[0].as(); + + bool isNewMember = false; + if (membercount == 0) { + // new member + isNewMember = true; + pqxx::result res = w.exec_params0( + "INSERT INTO ztc_member (id, network_id, active_bridge, authorized, capabilities, " + "identity, last_authorized_time, last_deauthorized_time, no_auto_assign_ips, " + "remote_trace_level, remote_trace_target, revision, tags, v_major, v_minor, v_rev, v_proto) " + "VALUES ($1, $2, $3, $4, $5, $6, " + "TO_TIMESTAMP($7::double precision/1000), TO_TIMESTAMP($8::double precision/1000), " + "$9, $10, $11, $12, $13, $14, $15, $16, $17)", + memberId, + networkId, + (bool)config["activeBridge"], + (bool)config["authorized"], + OSUtils::jsonDump(config["capabilities"], -1), + OSUtils::jsonString(config["identity"], ""), + (uint64_t)config["lastAuthorizedTime"], + (uint64_t)config["lastDeauthorizedTime"], + (bool)config["noAutoAssignIps"], + (int)config["remoteTraceLevel"], + target, + (uint64_t)config["revision"], + OSUtils::jsonDump(config["tags"], -1), + (int)config["vMajor"], + (int)config["vMinor"], + (int)config["vRev"], + (int)config["vProto"]); + } + else { + // existing member + pqxx::result res = w.exec_params0( + "UPDATE ztc_member " + "SET active_bridge = $3, authorized = $4, capabilities = $5, identity = $6, " + "last_authorized_time = TO_TIMESTAMP($7::double precision/1000), " + "last_deauthorized_time = TO_TIMESTAMP($8::double precision/1000), " + "no_auto_assign_ips = $9, remote_trace_level = $10, remote_trace_target= $11, " + "revision = $12, tags = $13, v_major = $14, v_minor = $15, v_rev = $16, v_proto = $17 " + "WHERE id = $1 AND network_id = $2", + memberId, + networkId, + (bool)config["activeBridge"], + (bool)config["authorized"], + OSUtils::jsonDump(config["capabilities"], -1), + OSUtils::jsonString(config["identity"], ""), + (uint64_t)config["lastAuthorizedTime"], + (uint64_t)config["lastDeauthorizedTime"], + (bool)config["noAutoAssignIps"], + (int)config["remoteTraceLevel"], + target, + (uint64_t)config["revision"], + OSUtils::jsonDump(config["tags"], -1), + (int)config["vMajor"], + (int)config["vMinor"], + (int)config["vRev"], + (int)config["vProto"]); + } + + if (! 
isNewMember) { + pqxx::result res = w.exec_params0("DELETE FROM ztc_member_ip_assignment WHERE member_id = $1 AND network_id = $2", memberId, networkId); + } + + std::vector assignments; + bool ipAssignError = false; + for (auto i = config["ipAssignments"].begin(); i != config["ipAssignments"].end(); ++i) { + std::string addr = *i; + + if (std::find(assignments.begin(), assignments.end(), addr) != assignments.end()) { + continue; + } + + pqxx::result res = w.exec_params0("INSERT INTO ztc_member_ip_assignment (member_id, network_id, address) VALUES ($1, $2, $3) ON CONFLICT (network_id, member_id, address) DO NOTHING", memberId, networkId, addr); + + assignments.push_back(addr); + } + if (ipAssignError) { + fprintf(stderr, "%s: ipAssignError\n", _myAddressStr.c_str()); + w.abort(); + _pool->unborrow(c); + c.reset(); + continue; + } + + w.commit(); + + if (_smee != NULL && isNewMember) { + pqxx::row row = w.exec_params1( + "SELECT " + " count(h.hook_id) " + "FROM " + " ztc_hook h " + " INNER JOIN ztc_org o ON o.org_id = h.org_id " + " INNER JOIN ztc_network n ON n.owner_id = o.owner_id " + " WHERE " + "n.id = $1 ", + networkId); + int64_t hookCount = row[0].as(); + if (hookCount > 0) { + notifyNewMember(networkId, memberId); + } + } + + const uint64_t nwidInt = OSUtils::jsonIntHex(config["nwid"], 0ULL); + const uint64_t memberidInt = OSUtils::jsonIntHex(config["id"], 0ULL); + if (nwidInt && memberidInt) { + nlohmann::json nwOrig; + nlohmann::json memOrig; + + nlohmann::json memNew(config); + + get(nwidInt, nwOrig, memberidInt, memOrig); + + _memberChanged(memOrig, memNew, qitem.second); + } + else { + fprintf(stderr, "%s: Can't notify of change. Error parsing nwid or memberid: %llu-%llu\n", _myAddressStr.c_str(), (unsigned long long)nwidInt, (unsigned long long)memberidInt); + } + } + catch (std::exception& e) { + fprintf(stderr, "%s ERROR: Error updating member %s-%s: %s\n", _myAddressStr.c_str(), networkId.c_str(), memberId.c_str(), e.what()); + mspan->SetStatus(opentelemetry::trace::StatusCode::kError, e.what()); + } + } + else if (objtype == "network") { + auto nspan = tracer->StartSpan("cv1::commitThread::network"); + auto nscope = tracer->WithActiveSpan(nspan); + + try { + // fprintf(stderr, "%s: commitThread: network\n", _myAddressStr.c_str()); + pqxx::work w(*c->c); + + std::string id = config["id"]; + std::string remoteTraceTarget = ""; + if (! config["remoteTraceTarget"].is_null()) { + remoteTraceTarget = config["remoteTraceTarget"]; + } + std::string rulesSource = ""; + if (config["rulesSource"].is_string()) { + rulesSource = config["rulesSource"]; + } + + // This ugly query exists because when we want to mirror networks to/from + // another data store (e.g. FileDB or LFDB) it is possible to get a network + // that doesn't exist in Central's database. This does an upsert and sets + // the owner_id to the "first" global admin in the user DB if the record + // did not previously exist. If the record already exists owner_id is left + // unchanged, so owner_id should be left out of the update clause. 
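// [Editor's illustrative note, not part of this patch] The statement below is a
// standard PostgreSQL "INSERT ... ON CONFLICT ... DO UPDATE" upsert. A minimal,
// hypothetical example of the same pattern (table and columns taken from the
// query below; the network id and name are made up):
//
//   INSERT INTO ztc_network (id, owner_id, name)
//   VALUES ('fedcba9876543210',
//           (SELECT user_id FROM ztc_global_permissions
//             WHERE authorize = true AND del = true AND modify = true AND read = true
//             LIMIT 1),
//           'example-net')
//   ON CONFLICT (id) DO UPDATE SET name = EXCLUDED.name;
//
// Because owner_id does not appear in the DO UPDATE SET list, an existing row keeps
// its current owner; the "first global admin" fallback is only applied on insert.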
+ pqxx::result res = w.exec_params0( + "INSERT INTO ztc_network (id, creation_time, owner_id, controller_id, capabilities, enable_broadcast, " + "last_modified, mtu, multicast_limit, name, private, " + "remote_trace_level, remote_trace_target, rules, rules_source, " + "tags, v4_assign_mode, v6_assign_mode, sso_enabled) VALUES (" + "$1, TO_TIMESTAMP($5::double precision/1000), " + "(SELECT user_id AS owner_id FROM ztc_global_permissions WHERE authorize = true AND del = true AND modify = true AND read = true LIMIT 1)," + "$2, $3, $4, TO_TIMESTAMP($5::double precision/1000), " + "$6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17) " + "ON CONFLICT (id) DO UPDATE set controller_id = EXCLUDED.controller_id, " + "capabilities = EXCLUDED.capabilities, enable_broadcast = EXCLUDED.enable_broadcast, " + "last_modified = EXCLUDED.last_modified, mtu = EXCLUDED.mtu, " + "multicast_limit = EXCLUDED.multicast_limit, name = EXCLUDED.name, " + "private = EXCLUDED.private, remote_trace_level = EXCLUDED.remote_trace_level, " + "remote_trace_target = EXCLUDED.remote_trace_target, rules = EXCLUDED.rules, " + "rules_source = EXCLUDED.rules_source, tags = EXCLUDED.tags, " + "v4_assign_mode = EXCLUDED.v4_assign_mode, v6_assign_mode = EXCLUDED.v6_assign_mode, " + "sso_enabled = EXCLUDED.sso_enabled", + id, + _myAddressStr, + OSUtils::jsonDump(config["capabilities"], -1), + (bool)config["enableBroadcast"], + OSUtils::now(), + (int)config["mtu"], + (int)config["multicastLimit"], + OSUtils::jsonString(config["name"], ""), + (bool)config["private"], + (int)config["remoteTraceLevel"], + remoteTraceTarget, + OSUtils::jsonDump(config["rules"], -1), + rulesSource, + OSUtils::jsonDump(config["tags"], -1), + OSUtils::jsonDump(config["v4AssignMode"], -1), + OSUtils::jsonDump(config["v6AssignMode"], -1), + OSUtils::jsonBool(config["ssoEnabled"], false)); + + res = w.exec_params0("DELETE FROM ztc_network_assignment_pool WHERE network_id = $1", 0); + + auto pool = config["ipAssignmentPools"]; + bool err = false; + for (auto i = pool.begin(); i != pool.end(); ++i) { + std::string start = (*i)["ipRangeStart"]; + std::string end = (*i)["ipRangeEnd"]; + + res = w.exec_params0( + "INSERT INTO ztc_network_assignment_pool (network_id, ip_range_start, ip_range_end) " + "VALUES ($1, $2, $3)", + id, + start, + end); + } + + res = w.exec_params0("DELETE FROM ztc_network_route WHERE network_id = $1", id); + + auto routes = config["routes"]; + err = false; + for (auto i = routes.begin(); i != routes.end(); ++i) { + std::string t = (*i)["target"]; + std::vector target; + std::istringstream f(t); + std::string s; + while (std::getline(f, s, '/')) { + target.push_back(s); + } + if (target.empty() || target.size() != 2) { + continue; + } + std::string targetAddr = target[0]; + std::string targetBits = target[1]; + std::string via = "NULL"; + if (! (*i)["via"].is_null()) { + via = (*i)["via"]; + } + + res = w.exec_params0("INSERT INTO ztc_network_route (network_id, address, bits, via) VALUES ($1, $2, $3, $4)", id, targetAddr, targetBits, (via == "NULL" ? 
NULL : via.c_str())); + } + if (err) { + fprintf(stderr, "%s: route add error\n", _myAddressStr.c_str()); + w.abort(); + _pool->unborrow(c); + continue; + } + + auto dns = config["dns"]; + std::string domain = dns["domain"]; + std::stringstream servers; + servers << "{"; + for (auto j = dns["servers"].begin(); j < dns["servers"].end(); ++j) { + servers << *j; + if ((j + 1) != dns["servers"].end()) { + servers << ","; + } + } + servers << "}"; + + std::string s = servers.str(); + + res = w.exec_params0("INSERT INTO ztc_network_dns (network_id, domain, servers) VALUES ($1, $2, $3) ON CONFLICT (network_id) DO UPDATE SET domain = EXCLUDED.domain, servers = EXCLUDED.servers", id, domain, s); + + w.commit(); + + const uint64_t nwidInt = OSUtils::jsonIntHex(config["nwid"], 0ULL); + if (nwidInt) { + nlohmann::json nwOrig; + nlohmann::json nwNew(config); + + get(nwidInt, nwOrig); + + _networkChanged(nwOrig, nwNew, qitem.second); + } + else { + fprintf(stderr, "%s: Can't notify network changed: %llu\n", _myAddressStr.c_str(), (unsigned long long)nwidInt); + } + } + catch (std::exception& e) { + nspan->SetStatus(opentelemetry::trace::StatusCode::kError, e.what()); + fprintf(stderr, "%s ERROR: Error updating network: %s\n", _myAddressStr.c_str(), e.what()); + } + if (_redisMemberStatus) { + try { + std::string id = config["id"]; + std::string controllerId = _myAddressStr.c_str(); + std::string key = "networks:{" + controllerId + "}"; + if (_rc->clusterMode) { + _cluster->sadd(key, id); + } + else { + _redis->sadd(key, id); + } + } + catch (sw::redis::Error& e) { + nspan->SetStatus(opentelemetry::trace::StatusCode::kError, e.what()); + fprintf(stderr, "ERROR: Error adding network to Redis: %s\n", e.what()); + } + } + } + else if (objtype == "_delete_network") { + auto dspan = tracer->StartSpan("cv1::commitThread::_delete_network"); + auto dscope = tracer->WithActiveSpan(dspan); + + // fprintf(stderr, "%s: commitThread: delete network\n", _myAddressStr.c_str()); + try { + pqxx::work w(*c->c); + + std::string networkId = config["nwid"]; + + pqxx::result res = w.exec_params0("UPDATE ztc_network SET deleted = true WHERE id = $1", networkId); + + w.commit(); + } + catch (std::exception& e) { + dspan->SetStatus(opentelemetry::trace::StatusCode::kError, e.what()); + fprintf(stderr, "%s ERROR: Error deleting network: %s\n", _myAddressStr.c_str(), e.what()); + } + if (_redisMemberStatus) { + try { + std::string id = config["id"]; + std::string controllerId = _myAddressStr.c_str(); + std::string key = "networks:{" + controllerId + "}"; + if (_rc->clusterMode) { + _cluster->srem(key, id); + _cluster->del("network-nodes-online:{" + controllerId + "}:" + id); + } + else { + _redis->srem(key, id); + _redis->del("network-nodes-online:{" + controllerId + "}:" + id); + } + } + catch (sw::redis::Error& e) { + dspan->SetStatus(opentelemetry::trace::StatusCode::kError, e.what()); + fprintf(stderr, "ERROR: Error adding network to Redis: %s\n", e.what()); + } + } + } + else if (objtype == "_delete_member") { + auto mspan = tracer->StartSpan("cv1::commitThread::_delete_member"); + auto mscope = tracer->WithActiveSpan(mspan); + + // fprintf(stderr, "%s commitThread: delete member\n", _myAddressStr.c_str()); + try { + pqxx::work w(*c->c); + + std::string memberId = config["id"]; + std::string networkId = config["nwid"]; + + pqxx::result res = w.exec_params0("UPDATE ztc_member SET hidden = true, deleted = true WHERE id = $1 AND network_id = $2", memberId, networkId); + + w.commit(); + } + catch (std::exception& e) { + 
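// If the soft-delete transaction above throws before commit(), pqxx rolls it back
// automatically when the work object goes out of scope; the error is logged below
// and execution continues with the Redis cleanup for this member before the loop
// takes the next queued item.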
mspan->SetStatus(opentelemetry::trace::StatusCode::kError, e.what()); + fprintf(stderr, "%s ERROR: Error deleting member: %s\n", _myAddressStr.c_str(), e.what()); + } + if (_redisMemberStatus) { + try { + std::string memberId = config["id"]; + std::string networkId = config["nwid"]; + std::string controllerId = _myAddressStr.c_str(); + std::string key = "network-nodes-all:{" + controllerId + "}:" + networkId; + if (_rc->clusterMode) { + _cluster->srem(key, memberId); + _cluster->del("member:{" + controllerId + "}:" + networkId + ":" + memberId); + } + else { + _redis->srem(key, memberId); + _redis->del("member:{" + controllerId + "}:" + networkId + ":" + memberId); + } + } + catch (sw::redis::Error& e) { + mspan->SetStatus(opentelemetry::trace::StatusCode::kError, e.what()); + fprintf(stderr, "ERROR: Error deleting member from Redis: %s\n", e.what()); + } + } + } + else { + fprintf(stderr, "%s ERROR: unknown objtype\n", _myAddressStr.c_str()); + } + } + catch (std::exception& e) { + span->SetStatus(opentelemetry::trace::StatusCode::kError, e.what()); + fprintf(stderr, "%s ERROR: Error getting objtype: %s\n", _myAddressStr.c_str(), e.what()); + } + _pool->unborrow(c); + c.reset(); + } + + fprintf(stderr, "%s commitThread finished\n", _myAddressStr.c_str()); +} + +void CV1::notifyNewMember(const std::string& networkID, const std::string& memberID) +{ + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv1"); + auto span = tracer->StartSpan("cv1::notifyNewMember"); + auto scope = tracer->WithActiveSpan(span); + + smeeclient::smee_client_notify_network_joined(_smee, networkID.c_str(), memberID.c_str()); +} + +void CV1::onlineNotificationThread() +{ + waitForReady(); + if (_redisMemberStatus) { + onlineNotification_Redis(); + } + else { + onlineNotification_Postgres(); + } +} + +/** + * ONLY UNCOMMENT FOR TEMPORARY DB MAINTENANCE + * + * This define temporarily turns off writing to the member status table + * so it can be reindexed when the indexes get too large. + */ + +// #define DISABLE_MEMBER_STATUS 1 + +void CV1::onlineNotification_Postgres() +{ + _connected = 1; + + nlohmann::json jtmp1, jtmp2; + while (_run == 1) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv1"); + auto span = tracer->StartSpan("cv1::onlineNotification_Postgres"); + auto scope = tracer->WithActiveSpan(span); + + auto c = _pool->borrow(); + auto c2 = _pool->borrow(); + try { + fprintf(stderr, "%s onlineNotification_Postgres\n", _myAddressStr.c_str()); + std::unordered_map, NodeOnlineRecord, _PairHasher> lastOnline; + { + std::lock_guard l(_lastOnline_l); + lastOnline.swap(_lastOnline); + } + +#ifndef DISABLE_MEMBER_STATUS + pqxx::work w(*c->c); + pqxx::work w2(*c2->c); + + fprintf(stderr, "online notification tick\n"); + + bool firstRun = true; + bool memberAdded = false; + int updateCount = 0; + + pqxx::pipeline pipe(w); + + for (auto i = lastOnline.begin(); i != lastOnline.end(); ++i) { + updateCount += 1; + uint64_t nwid_i = i->first.first; + char nwidTmp[64]; + char memTmp[64]; + char ipTmp[64]; + OSUtils::ztsnprintf(nwidTmp, sizeof(nwidTmp), "%.16llx", nwid_i); + OSUtils::ztsnprintf(memTmp, sizeof(memTmp), "%.10llx", i->first.second); + + if (! 
get(nwid_i, jtmp1, i->first.second, jtmp2)) { + continue; // skip non existent networks/members + } + + std::string networkId(nwidTmp); + std::string memberId(memTmp); + + try { + pqxx::row r = w2.exec_params1("SELECT id, network_id FROM ztc_member WHERE network_id = $1 AND id = $2", networkId, memberId); + } + catch (pqxx::unexpected_rows& e) { + continue; + } + + int64_t ts = i->second.lastSeen; + std::string ipAddr = i->second.physicalAddress.toIpString(ipTmp); + std::string timestamp = std::to_string(ts); + std::string osArch = i->second.osArch; + + std::stringstream memberUpdate; + memberUpdate << "INSERT INTO ztc_member_status (network_id, member_id, address, last_updated) VALUES " + << "('" << networkId << "', '" << memberId << "', "; + if (ipAddr.empty()) { + memberUpdate << "NULL, "; + } + else { + memberUpdate << "'" << ipAddr << "', "; + } + memberUpdate << "TO_TIMESTAMP(" << timestamp << "::double precision/1000)) " + << " ON CONFLICT (network_id, member_id) DO UPDATE SET address = EXCLUDED.address, last_updated = EXCLUDED.last_updated"; + + pipe.insert(memberUpdate.str()); + Metrics::pgsql_node_checkin++; + } + while (! pipe.empty()) { + pipe.retrieve(); + } + + pipe.complete(); + w.commit(); + fprintf(stderr, "%s: Updated online status of %d members\n", _myAddressStr.c_str(), updateCount); +#endif + } + catch (std::exception& e) { + fprintf(stderr, "%s: error in onlinenotification thread: %s\n", _myAddressStr.c_str(), e.what()); + } + _pool->unborrow(c2); + _pool->unborrow(c); + + ConnectionPoolStats stats = _pool->get_stats(); + fprintf(stderr, "%s pool stats: in use size: %llu, available size: %llu, total: %llu\n", _myAddressStr.c_str(), stats.borrowed_size, stats.pool_size, (stats.borrowed_size + stats.pool_size)); + + span->End(); + + std::this_thread::sleep_for(std::chrono::seconds(10)); + } + fprintf(stderr, "%s: Fell out of run loop in onlineNotificationThread\n", _myAddressStr.c_str()); + if (_run == 1) { + fprintf(stderr, "ERROR: %s onlineNotificationThread should still be running! Exiting Controller.\n", _myAddressStr.c_str()); + exit(6); + } +} + +void CV1::onlineNotification_Redis() +{ + _connected = 1; + + char buf[11] = { 0 }; + std::string controllerId = std::string(_myAddress.toString(buf)); + + while (_run == 1) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv1"); + auto span = tracer->StartSpan("cv1::onlineNotification_Redis"); + auto scope = tracer->WithActiveSpan(span); + + fprintf(stderr, "onlineNotification tick\n"); + auto start = std::chrono::high_resolution_clock::now(); + uint64_t count = 0; + + std::unordered_map, NodeOnlineRecord, _PairHasher> lastOnline; + { + std::lock_guard l(_lastOnline_l); + lastOnline.swap(_lastOnline); + } + try { + if (! 
lastOnline.empty()) { + if (_rc->clusterMode) { + auto tx = _cluster->transaction(controllerId, true, false); + count = _doRedisUpdate(tx, controllerId, lastOnline); + } + else { + auto tx = _redis->transaction(true, false); + count = _doRedisUpdate(tx, controllerId, lastOnline); + } + } + } + catch (sw::redis::Error& e) { + fprintf(stderr, "Error in online notification thread (redis): %s\n", e.what()); + } + + auto end = std::chrono::high_resolution_clock::now(); + auto dur = std::chrono::duration_cast(end - start); + auto total = dur.count(); + + fprintf(stderr, "onlineNotification ran in %llu ms\n", total); + span->End(); + + std::this_thread::sleep_for(std::chrono::seconds(5)); + } +} + +uint64_t CV1::_doRedisUpdate(sw::redis::Transaction& tx, std::string& controllerId, std::unordered_map, NodeOnlineRecord, _PairHasher>& lastOnline) +{ + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv1"); + auto span = tracer->StartSpan("cv1::_doRedisUpdate"); + auto scope = tracer->WithActiveSpan(span); + + nlohmann::json jtmp1, jtmp2; + uint64_t count = 0; + for (auto i = lastOnline.begin(); i != lastOnline.end(); ++i) { + uint64_t nwid_i = i->first.first; + uint64_t memberid_i = i->first.second; + char nwidTmp[64]; + char memTmp[64]; + char ipTmp[64]; + OSUtils::ztsnprintf(nwidTmp, sizeof(nwidTmp), "%.16llx", nwid_i); + OSUtils::ztsnprintf(memTmp, sizeof(memTmp), "%.10llx", memberid_i); + + if (! get(nwid_i, jtmp1, memberid_i, jtmp2)) { + continue; // skip non existent members/networks + } + + std::string networkId(nwidTmp); + std::string memberId(memTmp); + + int64_t ts = i->second.lastSeen; + std::string ipAddr = i->second.physicalAddress.toIpString(ipTmp); + std::string timestamp = std::to_string(ts); + std::string osArch = i->second.osArch; + + std::unordered_map record = { { "id", memberId }, { "address", ipAddr }, { "last_updated", std::to_string(ts) } }; + tx.zadd("nodes-online:{" + controllerId + "}", memberId, ts) + .zadd("nodes-online2:{" + controllerId + "}", networkId + "-" + memberId, ts) + .zadd("network-nodes-online:{" + controllerId + "}:" + networkId, memberId, ts) + .zadd("active-networks:{" + controllerId + "}", networkId, ts) + .sadd("network-nodes-all:{" + controllerId + "}:" + networkId, memberId) + .hmset("member:{" + controllerId + "}:" + networkId + ":" + memberId, record.begin(), record.end()); + ++count; + Metrics::redis_node_checkin++; + } + + // expire records from all-nodes and network-nodes member list + uint64_t expireOld = OSUtils::now() - 300000; + + tx.zremrangebyscore("nodes-online:{" + controllerId + "}", sw::redis::RightBoundedInterval(expireOld, sw::redis::BoundType::LEFT_OPEN)); + tx.zremrangebyscore("nodes-online2:{" + controllerId + "}", sw::redis::RightBoundedInterval(expireOld, sw::redis::BoundType::LEFT_OPEN)); + tx.zremrangebyscore("active-networks:{" + controllerId + "}", sw::redis::RightBoundedInterval(expireOld, sw::redis::BoundType::LEFT_OPEN)); + { + std::shared_lock l(_networks_l); + for (const auto& it : _networks) { + uint64_t nwid_i = it.first; + char nwidTmp[64]; + OSUtils::ztsnprintf(nwidTmp, sizeof(nwidTmp), "%.16llx", nwid_i); + tx.zremrangebyscore("network-nodes-online:{" + controllerId + "}:" + nwidTmp, sw::redis::RightBoundedInterval(expireOld, sw::redis::BoundType::LEFT_OPEN)); + } + } + tx.exec(); + fprintf(stderr, "%s: Updated online status of %d members\n", _myAddressStr.c_str(), count); + + return count; +} + +#endif // ZT_CONTROLLER_USE_LIBPQ diff --git 
a/controller/CV1.hpp b/controller/CV1.hpp new file mode 100644 index 000000000..b1a362798 --- /dev/null +++ b/controller/CV1.hpp @@ -0,0 +1,144 @@ +/* + * Copyright (c)2019 ZeroTier, Inc. + * + * Use of this software is governed by the Business Source License included + * in the LICENSE.TXT file in the project's root directory. + * + * Change Date: 2026-01-01 + * + * On the date above, in accordance with the Business Source License, use + * of this software will be governed by version 2.0 of the Apache License. + */ +/****/ + +#include "DB.hpp" + +#ifdef ZT_CONTROLLER_USE_LIBPQ + +#ifndef ZT_CONTROLLER_CV1_HPP +#define ZT_CONTROLLER_CV1_HPP + +#define ZT_CENTRAL_CONTROLLER_COMMIT_THREADS 4 + +#include "../node/Metrics.hpp" +#include "ConnectionPool.hpp" +#include "PostgreSQL.hpp" + +#include +#include +#include + +namespace smeeclient { +struct SmeeClient; +} + +namespace ZeroTier { + +struct RedisConfig; + +/** + * A controller database driver that talks to PostgreSQL + * + * This is for use with ZeroTier Central. Others are free to build and use it + * but be aware that we might change it at any time. + */ +class CV1 : public DB { + friend class MemberNotificationReceiver; + friend class NetworkNotificationReceiver; + + public: + CV1(const Identity& myId, const char* path, int listenPort, RedisConfig* rc); + virtual ~CV1(); + + virtual bool waitForReady(); + virtual bool isReady(); + virtual bool save(nlohmann::json& record, bool notifyListeners); + virtual void eraseNetwork(const uint64_t networkId); + virtual void eraseMember(const uint64_t networkId, const uint64_t memberId); + virtual void nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress); + virtual void nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress, const char* osArch); + virtual AuthInfo getSSOAuthInfo(const nlohmann::json& member, const std::string& redirectURL); + + virtual bool ready() + { + return _ready == 2; + } + + protected: + struct _PairHasher { + inline std::size_t operator()(const std::pair& p) const + { + return (std::size_t)(p.first ^ p.second); + } + }; + virtual void _memberChanged(nlohmann::json& old, nlohmann::json& memberConfig, bool notifyListeners) + { + DB::_memberChanged(old, memberConfig, notifyListeners); + } + + virtual void _networkChanged(nlohmann::json& old, nlohmann::json& networkConfig, bool notifyListeners) + { + DB::_networkChanged(old, networkConfig, notifyListeners); + } + + private: + void initializeNetworks(); + void initializeMembers(); + void heartbeat(); + void membersDbWatcher(); + void _membersWatcher_Postgres(); + void networksDbWatcher(); + void _networksWatcher_Postgres(); + + void _membersWatcher_Redis(); + void _networksWatcher_Redis(); + + void commitThread(); + void onlineNotificationThread(); + void onlineNotification_Postgres(); + void onlineNotification_Redis(); + uint64_t _doRedisUpdate(sw::redis::Transaction& tx, std::string& controllerId, std::unordered_map, NodeOnlineRecord, _PairHasher>& lastOnline); + + void configureSmee(); + void notifyNewMember(const std::string& networkID, const std::string& memberID); + + enum OverrideMode { ALLOW_PGBOUNCER_OVERRIDE = 0, NO_OVERRIDE = 1 }; + + std::shared_ptr > _pool; + + const Identity _myId; + const Address _myAddress; + std::string _myAddressStr; + std::string _connString; + + BlockingQueue > _commitQueue; + + std::thread _heartbeatThread; + std::thread _membersDbWatcher; + std::thread _networksDbWatcher; + std::thread 
_commitThread[ZT_CENTRAL_CONTROLLER_COMMIT_THREADS]; + std::thread _onlineNotificationThread; + + std::unordered_map, NodeOnlineRecord, _PairHasher> _lastOnline; + + mutable std::mutex _lastOnline_l; + mutable std::mutex _readyLock; + std::atomic _ready, _connected, _run; + mutable volatile bool _waitNoticePrinted; + + int _listenPort; + uint8_t _ssoPsk[48]; + + RedisConfig* _rc; + std::shared_ptr _redis; + std::shared_ptr _cluster; + bool _redisMemberStatus; + + smeeclient::SmeeClient* _smee; +}; + +} // namespace ZeroTier + +#endif // ZT_CONTROLLER_CV1_HPP + +#endif // ZT_CONTROLLER_USE_LIBPQ diff --git a/controller/CV2.cpp b/controller/CV2.cpp new file mode 100644 index 000000000..db9effb3c --- /dev/null +++ b/controller/CV2.cpp @@ -0,0 +1,1244 @@ +/* + * Copyright (c)2025 ZeroTier, Inc. + * + * Use of this software is governed by the Business Source License included + * in the LICENSE.TXT file in the project's root directory. + * + * Change Date: 2026-01-01 + * + * On the date above, in accordance with the Business Source License, use + * of this software will be governed by version 2.0 of the Apache License. + */ +/****/ + +#include "CV2.hpp" + +#ifdef ZT_CONTROLLER_USE_LIBPQ + +#include "../node/Constants.hpp" +#include "../node/SHA512.hpp" +#include "../version.h" +#include "CtlUtil.hpp" +#include "EmbeddedNetworkController.hpp" +#include "opentelemetry/trace/provider.h" + +#include +#include +#include +#include +#include + +using json = nlohmann::json; + +namespace { + +} + +using namespace ZeroTier; + +CV2::CV2(const Identity& myId, const char* path, int listenPort) : DB(), _pool(), _myId(myId), _myAddress(myId.address()), _ready(0), _connected(1), _run(1), _waitNoticePrinted(false), _listenPort(listenPort) +{ + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv2"); + auto span = tracer->StartSpan("cv2::CV2"); + auto scope = tracer->WithActiveSpan(span); + + fprintf(stderr, "CV2::CV2\n"); + char myAddress[64]; + _myAddressStr = myId.address().toString(myAddress); + + _connString = std::string(path); + + auto f = std::make_shared(_connString); + _pool = std::make_shared >(15, 5, std::static_pointer_cast(f)); + + memset(_ssoPsk, 0, sizeof(_ssoPsk)); + char* const ssoPskHex = getenv("ZT_SSO_PSK"); +#ifdef ZT_TRACE + fprintf(stderr, "ZT_SSO_PSK: %s\n", ssoPskHex); +#endif + if (ssoPskHex) { + // SECURITY: note that ssoPskHex will always be null-terminated if libc actually + // returns something non-NULL. If the hex encodes something shorter than 48 bytes, + // it will be padded at the end with zeroes. If longer, it'll be truncated. + Utils::unhex(ssoPskHex, _ssoPsk, sizeof(_ssoPsk)); + } + + _readyLock.lock(); + + fprintf(stderr, "[%s] NOTICE: %.10llx controller PostgreSQL waiting for initial data download..." 
ZT_EOL_S, ::_timestr(), (unsigned long long)_myAddress.toInt()); + _waitNoticePrinted = true; + + initializeNetworks(); + initializeMembers(); + + _heartbeatThread = std::thread(&CV2::heartbeat, this); + _membersDbWatcher = std::thread(&CV2::membersDbWatcher, this); + _networksDbWatcher = std::thread(&CV2::networksDbWatcher, this); + for (int i = 0; i < ZT_CENTRAL_CONTROLLER_COMMIT_THREADS; ++i) { + _commitThread[i] = std::thread(&CV2::commitThread, this); + } + _onlineNotificationThread = std::thread(&CV2::onlineNotificationThread, this); +} + +CV2::~CV2() +{ + _run = 0; + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + + _heartbeatThread.join(); + _membersDbWatcher.join(); + _networksDbWatcher.join(); + _commitQueue.stop(); + for (int i = 0; i < ZT_CENTRAL_CONTROLLER_COMMIT_THREADS; ++i) { + _commitThread[i].join(); + } + _onlineNotificationThread.join(); +} + +bool CV2::waitForReady() +{ + while (_ready < 2) { + _readyLock.lock(); + _readyLock.unlock(); + } + return true; +} + +bool CV2::isReady() +{ + return (_ready == 2) && _connected; +} + +void CV2::_memberChanged(nlohmann::json& old, nlohmann::json& memberConfig, bool notifyListeners) +{ + DB::_memberChanged(old, memberConfig, notifyListeners); +} + +void CV2::_networkChanged(nlohmann::json& old, nlohmann::json& networkConfig, bool notifyListeners) +{ + DB::_networkChanged(old, networkConfig, notifyListeners); +} + +bool CV2::save(nlohmann::json& record, bool notifyListeners) +{ + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv2"); + auto span = tracer->StartSpan("cv2::save"); + auto scope = tracer->WithActiveSpan(span); + + bool modified = false; + try { + if (! record.is_object()) { + fprintf(stderr, "record is not an object?!?\n"); + return false; + } + const std::string objtype = record["objtype"]; + if (objtype == "network") { + auto nspan = tracer->StartSpan("cv2::save::network"); + auto nscope = tracer->WithActiveSpan(nspan); + + // fprintf(stderr, "network save\n"); + const uint64_t nwid = OSUtils::jsonIntHex(record["id"], 0ULL); + if (nwid) { + nlohmann::json old; + get(nwid, old); + if ((! old.is_object()) || (! _compareRecords(old, record))) { + record["revision"] = OSUtils::jsonInt(record["revision"], 0ULL) + 1ULL; + _commitQueue.post(std::pair(record, notifyListeners)); + modified = true; + } + } + } + else if (objtype == "member") { + auto mspan = tracer->StartSpan("cv2::save::member"); + auto mscope = tracer->WithActiveSpan(mspan); + + std::string networkId = record["nwid"]; + std::string memberId = record["id"]; + const uint64_t nwid = OSUtils::jsonIntHex(record["nwid"], 0ULL); + const uint64_t id = OSUtils::jsonIntHex(record["id"], 0ULL); + // fprintf(stderr, "member save %s-%s\n", networkId.c_str(), memberId.c_str()); + if ((id) && (nwid)) { + nlohmann::json network, old; + get(nwid, network, id, old); + if ((! old.is_object()) || (! _compareRecords(old, record))) { + // fprintf(stderr, "commit queue post\n"); + record["revision"] = OSUtils::jsonInt(record["revision"], 0ULL) + 1ULL; + _commitQueue.post(std::pair(record, notifyListeners)); + modified = true; + } + else { + // fprintf(stderr, "no change\n"); + } + } + } + else { + fprintf(stderr, "uhh waaat\n"); + } + } + catch (std::exception& e) { + fprintf(stderr, "Error on PostgreSQL::save: %s\n", e.what()); + } + catch (...) 
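// Editor's note: illustrative-only sketch (not part of this change) of the
// compare-then-queue idea CV2::save() above implements: a record is posted to the
// commit queue only when its content actually differs, after bumping "revision".
// The deep equality below stands in for DB::_compareRecords(), and "post" is a
// placeholder for _commitQueue.post().
#include <nlohmann/json.hpp>
#include <functional>
#include <utility>

bool queueIfChanged(const nlohmann::json& oldRec, nlohmann::json rec, const std::function<void(nlohmann::json)>& post)
{
    if (oldRec.is_object() && oldRec == rec) {
        return false;   // unchanged: nothing to commit
    }
    rec["revision"] = rec.value("revision", (uint64_t)0) + 1ULL;
    post(std::move(rec));
    return true;
}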
{ + fprintf(stderr, "Unknown error on PostgreSQL::save\n"); + } + return modified; +} + +void CV2::eraseNetwork(const uint64_t networkId) +{ + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv2"); + auto span = tracer->StartSpan("cv2::eraseNetwork"); + auto scope = tracer->WithActiveSpan(span); + char networkIdStr[17]; + std::string nwid = Utils::hex(networkId, networkIdStr); + span->SetAttribute("network_id", nwid); + + fprintf(stderr, "CV2::eraseNetwork\n"); + waitForReady(); + std::pair tmp; + tmp.first["id"] = nwid; + tmp.first["objtype"] = "_delete_network"; + tmp.second = true; + _commitQueue.post(tmp); + // nlohmann::json nullJson; + //_networkChanged(tmp.first, nullJson, isReady()); +} + +void CV2::eraseMember(const uint64_t networkId, const uint64_t memberId) +{ + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv2"); + auto span = tracer->StartSpan("cv2::eraseMember"); + auto scope = tracer->WithActiveSpan(span); + char networkIdStr[17]; + char memberIdStr[11]; + span->SetAttribute("network_id", Utils::hex(networkId, networkIdStr)); + span->SetAttribute("member_id", Utils::hex10(memberId, memberIdStr)); + + fprintf(stderr, "PostgreSQL::eraseMember\n"); + char tmp2[24]; + waitForReady(); + std::pair tmp, nw; + Utils::hex(networkId, tmp2); + tmp.first["nwid"] = tmp2; + Utils::hex(memberId, tmp2); + tmp.first["id"] = tmp2; + tmp.first["objtype"] = "_delete_member"; + tmp.second = true; + _commitQueue.post(tmp); + // nlohmann::json nullJson; + //_memberChanged(tmp.first, nullJson, isReady()); +} + +void CV2::nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress, const char* osArch) +{ + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv2"); + auto span = tracer->StartSpan("cv2::nodeIsOnline"); + auto scope = tracer->WithActiveSpan(span); + char networkIdStr[17]; + char memberIdStr[11]; + char ipAddressStr[INET6_ADDRSTRLEN]; + span->SetAttribute("network_id", Utils::hex(networkId, networkIdStr)); + span->SetAttribute("member_id", Utils::hex10(memberId, memberIdStr)); + span->SetAttribute("physical_address", ipAddressStr); + span->SetAttribute("os_arch", osArch); + + std::lock_guard l(_lastOnline_l); + NodeOnlineRecord& i = _lastOnline[std::pair(networkId, memberId)]; + i.lastSeen = OSUtils::now(); + if (physicalAddress) { + i.physicalAddress = physicalAddress; + } + i.osArch = std::string(osArch); +} + +void CV2::nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress) +{ + this->nodeIsOnline(networkId, memberId, physicalAddress, "unknown/unknown"); +} + +AuthInfo CV2::getSSOAuthInfo(const nlohmann::json& member, const std::string& redirectURL) +{ + // TODO: Redo this for CV2 + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv2"); + auto span = tracer->StartSpan("cv2::getSSOAuthInfo"); + auto scope = tracer->WithActiveSpan(span); + + Metrics::db_get_sso_info++; + // NONCE is just a random character string. no semantic meaning + // state = HMAC SHA384 of Nonce based on shared sso key + // + // need nonce timeout in database? make sure it's used within X time + // X is 5 minutes for now. Make configurable later? + // + // how do we tell when a nonce is used? 
if auth_expiration_time is set + std::string networkId = member["nwid"]; + std::string memberId = member["id"]; + + char authenticationURL[4096] = { 0 }; + AuthInfo info; + info.enabled = true; + + // if (memberId == "a10dccea52" && networkId == "8056c2e21c24673d") { + // fprintf(stderr, "invalid authinfo for grant's machine\n"); + // info.version=1; + // return info; + // } + // fprintf(stderr, "PostgreSQL::updateMemberOnLoad: %s-%s\n", networkId.c_str(), memberId.c_str()); + std::shared_ptr c; + try { + // c = _pool->borrow(); + // pqxx::work w(*c->c); + + // char nonceBytes[16] = {0}; + // std::string nonce = ""; + + // // check if the member exists first. + // pqxx::row count = w.exec_params1("SELECT count(id) FROM ztc_member WHERE id = $1 AND network_id = $2 AND deleted = false", memberId, networkId); + // if (count[0].as() == 1) { + // // get active nonce, if exists. + // pqxx::result r = w.exec_params("SELECT nonce FROM ztc_sso_expiry " + // "WHERE network_id = $1 AND member_id = $2 " + // "AND ((NOW() AT TIME ZONE 'UTC') <= authentication_expiry_time) AND ((NOW() AT TIME ZONE 'UTC') <= nonce_expiration)", + // networkId, memberId); + + // if (r.size() == 0) { + // // no active nonce. + // // find an unused nonce, if one exists. + // pqxx::result r = w.exec_params("SELECT nonce FROM ztc_sso_expiry " + // "WHERE network_id = $1 AND member_id = $2 " + // "AND authentication_expiry_time IS NULL AND ((NOW() AT TIME ZONE 'UTC') <= nonce_expiration)", + // networkId, memberId); + + // if (r.size() == 1) { + // // we have an existing nonce. Use it + // nonce = r.at(0)[0].as(); + // Utils::unhex(nonce.c_str(), nonceBytes, sizeof(nonceBytes)); + // } else if (r.empty()) { + // // create a nonce + // Utils::getSecureRandom(nonceBytes, 16); + // char nonceBuf[64] = {0}; + // Utils::hex(nonceBytes, sizeof(nonceBytes), nonceBuf); + // nonce = std::string(nonceBuf); + + // pqxx::result ir = w.exec_params0("INSERT INTO ztc_sso_expiry " + // "(nonce, nonce_expiration, network_id, member_id) VALUES " + // "($1, TO_TIMESTAMP($2::double precision/1000), $3, $4)", + // nonce, OSUtils::now() + 300000, networkId, memberId); + + // w.commit(); + // } else { + // // > 1 ?!? Thats an error! + // fprintf(stderr, "> 1 unused nonce!\n"); + // exit(6); + // } + // } else if (r.size() == 1) { + // nonce = r.at(0)[0].as(); + // Utils::unhex(nonce.c_str(), nonceBytes, sizeof(nonceBytes)); + // } else { + // // more than 1 nonce in use? Uhhh... 
+ // fprintf(stderr, "> 1 nonce in use for network member?!?\n"); + // exit(7); + // } + + // r = w.exec_params( + // "SELECT oc.client_id, oc.authorization_endpoint, oc.issuer, oc.provider, oc.sso_impl_version " + // "FROM ztc_network AS n " + // "INNER JOIN ztc_org o " + // " ON o.owner_id = n.owner_id " + // "LEFT OUTER JOIN ztc_network_oidc_config noc " + // " ON noc.network_id = n.id " + // "LEFT OUTER JOIN ztc_oidc_config oc " + // " ON noc.client_id = oc.client_id AND oc.org_id = o.org_id " + // "WHERE n.id = $1 AND n.sso_enabled = true", networkId); + + // std::string client_id = ""; + // std::string authorization_endpoint = ""; + // std::string issuer = ""; + // std::string provider = ""; + // uint64_t sso_version = 0; + + // if (r.size() == 1) { + // client_id = r.at(0)[0].as>().value_or(""); + // authorization_endpoint = r.at(0)[1].as>().value_or(""); + // issuer = r.at(0)[2].as>().value_or(""); + // provider = r.at(0)[3].as>().value_or(""); + // sso_version = r.at(0)[4].as>().value_or(1); + // } else if (r.size() > 1) { + // fprintf(stderr, "ERROR: More than one auth endpoint for an organization?!?!? NetworkID: %s\n", networkId.c_str()); + // } else { + // fprintf(stderr, "No client or auth endpoint?!?\n"); + // } + + // info.version = sso_version; + + // // no catch all else because we don't actually care if no records exist here. just continue as normal. + // if ((!client_id.empty())&&(!authorization_endpoint.empty())) { + + // uint8_t state[48]; + // HMACSHA384(_ssoPsk, nonceBytes, sizeof(nonceBytes), state); + // char state_hex[256]; + // Utils::hex(state, 48, state_hex); + + // if (info.version == 0) { + // char url[2048] = {0}; + // OSUtils::ztsnprintf(url, sizeof(authenticationURL), + // "%s?response_type=id_token&response_mode=form_post&scope=openid+email+profile&redirect_uri=%s&nonce=%s&state=%s&client_id=%s", + // authorization_endpoint.c_str(), + // url_encode(redirectURL).c_str(), + // nonce.c_str(), + // state_hex, + // client_id.c_str()); + // info.authenticationURL = std::string(url); + // } else if (info.version == 1) { + // info.ssoClientID = client_id; + // info.issuerURL = issuer; + // info.ssoProvider = provider; + // info.ssoNonce = nonce; + // info.ssoState = std::string(state_hex) + "_" +networkId; + // info.centralAuthURL = redirectURL; + // #ifdef ZT_DEBUG + // fprintf( + // stderr, + // "ssoClientID: %s\nissuerURL: %s\nssoNonce: %s\nssoState: %s\ncentralAuthURL: %s\nprovider: %s\n", + // info.ssoClientID.c_str(), + // info.issuerURL.c_str(), + // info.ssoNonce.c_str(), + // info.ssoState.c_str(), + // info.centralAuthURL.c_str(), + // provider.c_str()); + // #endif + // } + // } else { + // fprintf(stderr, "client_id: %s\nauthorization_endpoint: %s\n", client_id.c_str(), authorization_endpoint.c_str()); + // } + // } + + // _pool->unborrow(c); + } + catch (std::exception& e) { + fprintf(stderr, "ERROR: Error updating member on load for network %s: %s\n", networkId.c_str(), e.what()); + span->SetStatus(opentelemetry::trace::StatusCode::kError, e.what()); + } + + return info; // std::string(authenticationURL); +} + +void CV2::initializeNetworks() +{ + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv2"); + auto span = tracer->StartSpan("cv2::initializeNetworks"); + auto scope = tracer->WithActiveSpan(span); + + fprintf(stderr, "Initializing networks...\n"); + + try { + char qbuf[2048]; + sprintf( + qbuf, + "SELECT id, name, configuration , (EXTRACT(EPOCH FROM creation_time AT TIME ZONE 
'UTC')*1000)::bigint, " + "(EXTRACT(EPOCH FROM last_modified AT TIME ZONE 'UTC')*1000)::bigint, revision " + "FROM networks_ctl WHERE controller_id = '%s'", + _myAddressStr.c_str()); + + auto c = _pool->borrow(); + pqxx::work w(*c->c); + + fprintf(stderr, "Load networks from psql...\n"); + auto stream = pqxx::stream_from::query(w, qbuf); + std::tuple< + std::string // network ID + , + std::optional // name + , + std::string // configuration + , + std::optional // creation_time + , + std::optional // last_modified + , + std::optional // revision + > + row; + uint64_t count = 0; + uint64_t total = 0; + while (stream >> row) { + auto start = std::chrono::high_resolution_clock::now(); + + json empty; + json config; + + initNetwork(config); + + std::string nwid = std::get<0>(row); + std::string name = std::get<1>(row).value_or(""); + json cfgtmp = json::parse(std::get<2>(row)); + std::optional created_at = std::get<3>(row); + std::optional last_modified = std::get<4>(row); + std::optional revision = std::get<5>(row); + + config["id"] = nwid; + config["name"] = name; + config["creationTime"] = created_at.value_or(0); + config["lastModified"] = last_modified.value_or(0); + config["revision"] = revision.value_or(0); + config["capabilities"] = cfgtmp["capabilities"].is_array() ? cfgtmp["capabilities"] : json::array(); + config["enableBroadcast"] = cfgtmp["enableBroadcast"].is_boolean() ? cfgtmp["enableBroadcast"].get() : false; + config["mtu"] = cfgtmp["mtu"].is_number() ? cfgtmp["mtu"].get() : 2800; + config["multicastLimit"] = cfgtmp["multicastLimit"].is_number() ? cfgtmp["multicastLimit"].get() : 64; + config["private"] = cfgtmp["private"].is_boolean() ? cfgtmp["private"].get() : true; + config["remoteTraceLevel"] = cfgtmp["remoteTraceLevel"].is_number() ? cfgtmp["remoteTraceLevel"].get() : 0; + config["remoteTraceTarget"] = cfgtmp["remoteTraceTarget"].is_string() ? cfgtmp["remoteTraceTarget"].get() : ""; + config["revision"] = revision.value_or(0); + config["rules"] = cfgtmp["rules"].is_array() ? cfgtmp["rules"] : json::array(); + config["tags"] = cfgtmp["tags"].is_array() ? cfgtmp["tags"] : json::array(); + if (cfgtmp["v4AssignMode"].is_object()) { + config["v4AssignMode"] = cfgtmp["v4AssignMode"]; + } + else { + config["v4AssignMode"] = json::object(); + config["v4AssignMode"]["zt"] = true; + } + if (cfgtmp["v6AssignMode"].is_object()) { + config["v6AssignMode"] = cfgtmp["v6AssignMode"]; + } + else { + config["v6AssignMode"] = json::object(); + config["v6AssignMode"]["zt"] = true; + config["v6AssignMode"]["6plane"] = true; + config["v6AssignMode"]["rfc4193"] = false; + } + config["ssoEnabled"] = cfgtmp["ssoEnabled"].is_boolean() ? cfgtmp["ssoEnabled"].get() : false; + config["objtype"] = "network"; + config["routes"] = cfgtmp["routes"].is_array() ? cfgtmp["routes"] : json::array(); + config["clientId"] = cfgtmp["clientId"].is_string() ? cfgtmp["clientId"].get() : ""; + config["authorizationEndpoint"] = cfgtmp["authorizationEndpoint"].is_string() ? cfgtmp["authorizationEndpoint"].get() : nullptr; + config["provider"] = cfgtmp["ssoProvider"].is_string() ? cfgtmp["ssoProvider"].get() : ""; + if (! cfgtmp["dns"].is_object()) { + cfgtmp["dns"] = json::object(); + cfgtmp["dns"]["domain"] = ""; + cfgtmp["dns"]["servers"] = json::array(); + } + else { + config["dns"] = cfgtmp["dns"]; + } + config["ipAssignmentPools"] = cfgtmp["ipAssignmentPools"].is_array() ? 
cfgtmp["ipAssignmentPools"] : json::array(); + + Metrics::network_count++; + + _networkChanged(empty, config, false); + + auto end = std::chrono::high_resolution_clock::now(); + auto dur = std::chrono::duration_cast(end - start); + ; + total += dur.count(); + ++count; + if (count > 0 && count % 10000 == 0) { + fprintf(stderr, "Averaging %lu us per network\n", (total / count)); + } + } + + w.commit(); + _pool->unborrow(c); + fprintf(stderr, "done.\n"); + + if (++this->_ready == 2) { + if (_waitNoticePrinted) { + fprintf(stderr, "[%s] NOTICE: %.10llx controller PostgreSQL data download complete." ZT_EOL_S, _timestr(), (unsigned long long)_myAddress.toInt()); + } + _readyLock.unlock(); + } + fprintf(stderr, "network init done\n"); + } + catch (std::exception& e) { + fprintf(stderr, "ERROR: Error initializing networks: %s\n", e.what()); + span->SetStatus(opentelemetry::trace::StatusCode::kError, e.what()); + std::this_thread::sleep_for(std::chrono::milliseconds(5000)); + exit(-1); + } +} + +void CV2::initializeMembers() +{ + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv2"); + auto span = tracer->StartSpan("cv2::initializeMembers"); + auto scope = tracer->WithActiveSpan(span); + + std::string memberId; + std::string networkId; + try { + char qbuf[2048]; + sprintf( + qbuf, + "SELECT nm.device_id, nm.network_id, nm.authorized, nm.active_bridge, nm.ip_assignments, nm.no_auto_assign_ips, " + "nm.sso_exempt, (EXTRACT(EPOCH FROM nm.authentication_expiry_time AT TIME ZONE 'UTC')*1000)::bigint, " + "(EXTRACT(EPOCH FROM nm.creation_time AT TIME ZONE 'UTC')*1000)::bigint, nm.identity, " + "(EXTRACT(EPOCH FROM nm.last_authorized_time AT TIME ZONE 'UTC')*1000)::bigint, " + "(EXTRACT(EPOCH FROM nm.last_deauthorized_time AT TIME ZONE 'UTC')*1000)::bigint, " + "nm.remote_trace_level, nm.remote_trace_target, nm.revision, nm.capabilities, nm.tags " + "FROM network_memberships_ctl nm " + "INNER JOIN networks_ctl n " + " ON nm.network_id = n.id " + "WHERE n.controller_id = '%s'", + _myAddressStr.c_str()); + + auto c = _pool->borrow(); + pqxx::work w(*c->c); + fprintf(stderr, "Load members from psql...\n"); + auto stream = pqxx::stream_from::query(w, qbuf); + std::tuple< + std::string // device ID + , + std::string // network ID + , + bool // authorized + , + std::optional // active_bridge + , + std::optional // ip_assignments + , + std::optional // no_auto_assign_ips + , + std::optional // sso_exempt + , + std::optional // authentication_expiry_time + , + std::optional // creation_time + , + std::optional // identity + , + std::optional // last_authorized_time + , + std::optional // last_deauthorized_time + , + std::optional // remote_trace_level + , + std::optional // remote_trace_target + , + std::optional // revision + , + std::optional // capabilities + , + std::optional // tags + > + row; + + uint64_t count = 0; + uint64_t total = 0; + while (stream >> row) { + auto start = std::chrono::high_resolution_clock::now(); + json empty; + json config; + + initMember(config); + + memberId = std::get<0>(row); + networkId = std::get<1>(row); + bool authorized = std::get<2>(row); + std::optional active_bridge = std::get<3>(row); + std::string ip_assignments = std::get<4>(row).value_or(""); + std::optional no_auto_assign_ips = std::get<5>(row); + std::optional sso_exempt = std::get<6>(row); + std::optional authentication_expiry_time = std::get<7>(row); + std::optional creation_time = std::get<8>(row); + std::optional identity = std::get<9>(row); + 
std::optional last_authorized_time = std::get<10>(row); + std::optional last_deauthorized_time = std::get<11>(row); + std::optional remote_trace_level = std::get<12>(row); + std::optional remote_trace_target = std::get<13>(row); + std::optional revision = std::get<14>(row); + std::optional capabilities = std::get<15>(row); + std::optional tags = std::get<16>(row); + + config["objtype"] = "member"; + config["id"] = memberId; + config["address"] = identity.value_or(""); + config["nwid"] = networkId; + config["authorized"] = authorized; + config["activeBridge"] = active_bridge.value_or(false); + config["ipAssignments"] = json::array(); + if (ip_assignments != "{}") { + std::string tmp = ip_assignments.substr(1, ip_assignments.length() - 2); + std::vector addrs = split(tmp, ','); + for (auto it = addrs.begin(); it != addrs.end(); ++it) { + config["ipAssignments"].push_back(*it); + } + } + config["capabilities"] = json::parse(capabilities.value_or("[]")); + config["creationTime"] = creation_time.value_or(0); + config["lastAuthorizedTime"] = last_authorized_time.value_or(0); + config["lastDeauthorizedTime"] = last_deauthorized_time.value_or(0); + config["noAutoAssignIPs"] = no_auto_assign_ips.value_or(false); + config["remoteTraceLevel"] = remote_trace_level.value_or(0); + config["remoteTraceTarget"] = remote_trace_target.value_or(nullptr); + config["revision"] = revision.value_or(0); + config["ssoExempt"] = sso_exempt.value_or(false); + config["authenticationExpiryTime"] = authentication_expiry_time.value_or(0); + config["tags"] = json::parse(tags.value_or("[]")); + + Metrics::member_count++; + + _memberChanged(empty, config, false); + + memberId = ""; + networkId = ""; + + auto end = std::chrono::high_resolution_clock::now(); + auto dur = std::chrono::duration_cast(end - start); + total += dur.count(); + ++count; + if (count > 0 && count % 10000 == 0) { + fprintf(stderr, "Averaging %lu us per member\n", (total / count)); + } + } + if (count > 0) { + fprintf(stderr, "Took %lu us per member to load\n", (total / count)); + } + + stream.complete(); + w.commit(); + _pool->unborrow(c); + fprintf(stderr, "done.\n"); + + if (++this->_ready == 2) { + if (_waitNoticePrinted) { + fprintf(stderr, "[%s] NOTICE: %.10llx controller PostgreSQL data download complete." 
ZT_EOL_S, _timestr(), (unsigned long long)_myAddress.toInt()); + } + _readyLock.unlock(); + } + fprintf(stderr, "member init done\n"); + } + catch (std::exception& e) { + fprintf(stderr, "ERROR: Error initializing member: %s-%s %s\n", networkId.c_str(), memberId.c_str(), e.what()); + span->SetStatus(opentelemetry::trace::StatusCode::kError, e.what()); + exit(-1); + } +} + +void CV2::heartbeat() +{ + char publicId[1024]; + char hostnameTmp[1024]; + _myId.toString(false, publicId); + if (gethostname(hostnameTmp, sizeof(hostnameTmp)) != 0) { + hostnameTmp[0] = (char)0; + } + else { + for (int i = 0; i < (int)sizeof(hostnameTmp); ++i) { + if ((hostnameTmp[i] == '.') || (hostnameTmp[i] == 0)) { + hostnameTmp[i] = (char)0; + break; + } + } + } + const char* controllerId = _myAddressStr.c_str(); + const char* publicIdentity = publicId; + const char* hostname = hostnameTmp; + + while (_run == 1) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv2"); + auto span = tracer->StartSpan("cv2::heartbeat"); + auto scope = tracer->WithActiveSpan(span); + + auto c = _pool->borrow(); + int64_t ts = OSUtils::now(); + + if (c->c) { + std::string major = std::to_string(ZEROTIER_ONE_VERSION_MAJOR); + std::string minor = std::to_string(ZEROTIER_ONE_VERSION_MINOR); + std::string rev = std::to_string(ZEROTIER_ONE_VERSION_REVISION); + std::string version = major + "." + minor + "." + rev; + std::string versionStr = "v" + version; + + try { + pqxx::work w { *c->c }; + w.exec_params0( + "INSERT INTO controllers_ctl (id, hostname, last_heartbeat, public_identity, version) VALUES " + "($1, $2, TO_TIMESTAMP($3::double precision/1000), $4, $5) " + "ON CONFLICT (id) DO UPDATE SET hostname = EXCLUDED.hostname, last_heartbeat = EXCLUDED.last_heartbeat, " + "public_identity = EXCLUDED.public_identity, version = EXCLUDED.version", + controllerId, + hostname, + ts, + publicIdentity, + versionStr); + w.commit(); + } + catch (std::exception& e) { + fprintf(stderr, "ERROR: Error in heartbeat: %s\n", e.what()); + span->SetStatus(opentelemetry::trace::StatusCode::kError, e.what()); + continue; + } + catch (...) 
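// Editor's note: illustrative-only sketch (not part of this change) of the
// single-row upsert the heartbeat loop above performs with libpqxx: one row per
// controller in controllers_ctl, refreshed in place via ON CONFLICT. Only a
// subset of the real columns is shown.
#include <pqxx/pqxx>
#include <string>

void heartbeatOnce(pqxx::connection& conn, const std::string& controllerId, int64_t nowMs)
{
    pqxx::work w(conn);
    w.exec_params0(
        "INSERT INTO controllers_ctl (id, last_heartbeat) "
        "VALUES ($1, TO_TIMESTAMP($2::double precision/1000)) "
        "ON CONFLICT (id) DO UPDATE SET last_heartbeat = EXCLUDED.last_heartbeat",
        controllerId,
        nowMs);
    w.commit();
}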
{ + fprintf(stderr, "ERROR: Unknown error in heartbeat\n"); + span->SetStatus(opentelemetry::trace::StatusCode::kError, "Unknown error in heartbeat"); + continue; + } + } + + _pool->unborrow(c); + span->End(); + + std::this_thread::sleep_for(std::chrono::seconds(1)); + } + fprintf(stderr, "Exited heartbeat thread\n"); +} + +void CV2::membersDbWatcher() +{ + auto c = _pool->borrow(); + + std::string stream = "member_" + _myAddressStr; + + fprintf(stderr, "Listening to member stream: %s\n", stream.c_str()); + MemberNotificationReceiver m(this, *c->c, stream); + + while (_run == 1) { + c->c->await_notification(5, 0); + } + + _pool->unborrow(c); + + fprintf(stderr, "Exited membersDbWatcher\n"); +} + +void CV2::networksDbWatcher() +{ + std::string stream = "network_" + _myAddressStr; + + fprintf(stderr, "Listening to member stream: %s\n", stream.c_str()); + + auto c = _pool->borrow(); + + NetworkNotificationReceiver n(this, *c->c, stream); + + while (_run == 1) { + c->c->await_notification(5, 0); + } + + _pool->unborrow(c); + fprintf(stderr, "Exited networksDbWatcher\n"); +} + +void CV2::commitThread() +{ + fprintf(stderr, "%s: commitThread start\n", _myAddressStr.c_str()); + std::pair qitem; + while (_commitQueue.get(qitem) && (_run == 1)) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv2"); + auto span = tracer->StartSpan("cv2::commitThread"); + auto scope = tracer->WithActiveSpan(span); + + // fprintf(stderr, "commitThread tick\n"); + if (! qitem.first.is_object()) { + fprintf(stderr, "not an object\n"); + continue; + } + + std::shared_ptr c; + try { + c = _pool->borrow(); + } + catch (std::exception& e) { + fprintf(stderr, "ERROR: %s\n", e.what()); + continue; + } + + if (! c) { + fprintf(stderr, "Error getting database connection\n"); + continue; + } + + Metrics::pgsql_commit_ticks++; + try { + nlohmann::json& config = (qitem.first); + const std::string objtype = config["objtype"]; + if (objtype == "member") { + auto mspan = tracer->StartSpan("cv2::commitThread::member"); + auto mscope = tracer->WithActiveSpan(span); + + // fprintf(stderr, "%s: commitThread: member\n", _myAddressStr.c_str()); + std::string memberId; + std::string networkId; + try { + pqxx::work w(*c->c); + + memberId = config["id"]; + networkId = config["nwid"]; + + std::string target = "NULL"; + if (! config["remoteTraceTarget"].is_null()) { + target = config["remoteTraceTarget"]; + } + + pqxx::row nwrow = w.exec_params1("SELECT COUNT(id) FROM networks WHERE id = $1", networkId); + int nwcount = nwrow[0].as(); + + if (nwcount != 1) { + fprintf(stderr, "network %s does not exist. 
skipping member upsert\n", networkId.c_str()); + w.abort(); + _pool->unborrow(c); + continue; + } + + // only needed for hooks, and no hooks for now + // pqxx::row mrow = w.exec_params1("SELECT COUNT(id) FROM device_networks WHERE device_id = $1 AND network_id = $2", memberId, networkId); + // int membercount = mrow[0].as(); + // bool isNewMember = (membercount == 0); + + pqxx::result res = w.exec_params0( + "INSERT INTO network_memberships_ctl (device_id, network_id, authorized, active_bridge, ip_assignments, " + "no_auto_assign_ips, sso_exempt, authentication_expiry_time, capabilities, creation_time, " + "identity, last_authorized_time, last_deauthorized_time, " + "remote_trace_level, remote_trace_target, revision, tags, version_major, version_minor, " + "version_revision, version_protocol) " + "VALUES ($1, $2, $3, $4, $5, $6, $7, TO_TIMESTAMP($8::double precision/1000), $9, " + "TO_TIMESTAMP($10::double precision/1000), $11, TO_TIMESTAMP($12::double precision/1000), " + "TO_TIMESTAMP($13::double precision/1000), $14, $15, $16, $17, $18, $19, $20, $21) " + "ON CONFLICT (device_id, network_id) DO UPDATE SET " + "authorized = EXCLUDED.authorized, active_bridge = EXCLUDED.active_bridge, " + "ip_assignments = EXCLUDED.ip_assignments, no_auto_assign_ips = EXCLUDED.no_auto_assign_ips, " + "sso_exempt = EXCLUDED.sso_exempt, authentication_expiry_time = EXCLUDED.authentication_expiry_time, " + "capabilities = EXCLUDED.capabilities, creation_time = EXCLUDED.creation_time, " + "identity = EXCLUDED.identity, last_authorized_time = EXCLUDED.last_authorized_time, " + "last_deauthorized_time = EXCLUDED.last_deauthorized_time, " + "remote_trace_level = EXCLUDED.remote_trace_level, remote_trace_target = EXCLUDED.remote_trace_target, " + "revision = EXCLUDED.revision, tags = EXCLUDED.tags, version_major = EXCLUDED.version_major, " + "version_minor = EXCLUDED.version_minor, version_revision = EXCLUDED.version_revision, " + "version_protocol = EXCLUDED.version_protocol", + memberId, + networkId, + (bool)config["authorized"], + (bool)config["activeBridge"], + config["ipAssignments"].get >(), + (bool)config["noAutoAssignIps"], + (bool)config["ssoExempt"], + (uint64_t)config["authenticationExpiryTime"], + OSUtils::jsonDump(config["capabilities"], -1), + (uint64_t)config["creationTime"], + OSUtils::jsonString(config["identity"], ""), + (uint64_t)config["lastAuthorizedTime"], + (uint64_t)config["lastDeauthorizedTime"], + (int)config["remoteTraceLevel"], + target, + (uint64_t)config["revision"], + OSUtils::jsonDump(config["tags"], -1), + (int)config["vMajor"], + (int)config["vMinor"], + (int)config["vRev"], + (int)config["vProto"]); + + w.commit(); + + // No hooks for now + // if (_smee != NULL && isNewMember) { + // pqxx::row row = w.exec_params1( + // "SELECT " + // " count(h.hook_id) " + // "FROM " + // " ztc_hook h " + // " INNER JOIN ztc_org o ON o.org_id = h.org_id " + // " INNER JOIN ztc_network n ON n.owner_id = o.owner_id " + // " WHERE " + // "n.id = $1 ", + // networkId + // ); + // int64_t hookCount = row[0].as(); + // if (hookCount > 0) { + // notifyNewMember(networkId, memberId); + // } + // } + + const uint64_t nwidInt = OSUtils::jsonIntHex(config["nwid"], 0ULL); + const uint64_t memberidInt = OSUtils::jsonIntHex(config["id"], 0ULL); + if (nwidInt && memberidInt) { + nlohmann::json nwOrig; + nlohmann::json memOrig; + + nlohmann::json memNew(config); + + get(nwidInt, nwOrig, memberidInt, memOrig); + + _memberChanged(memOrig, memNew, qitem.second); + } + else { + fprintf(stderr, "%s: Can't notify of 
change. Error parsing nwid or memberid: %llu-%llu\n", _myAddressStr.c_str(), (unsigned long long)nwidInt, (unsigned long long)memberidInt); + } + } + catch (pqxx::data_exception& e) { + std::string cfgDump = OSUtils::jsonDump(config, 2); + fprintf(stderr, "Member save %s-%s: %s\n", networkId.c_str(), memberId.c_str(), cfgDump.c_str()); + + const pqxx::sql_error* s = dynamic_cast(&e); + fprintf(stderr, "%s ERROR: Error updating member: %s\n", _myAddressStr.c_str(), e.what()); + if (s) { + fprintf(stderr, "%s ERROR: SQL error: %s\n", _myAddressStr.c_str(), s->query().c_str()); + } + mspan->SetStatus(opentelemetry::trace::StatusCode::kError, "pqxx::data_exception"); + mspan->SetAttribute("error", e.what()); + mspan->SetAttribute("config", cfgDump); + } + catch (std::exception& e) { + std::string cfgDump = OSUtils::jsonDump(config, 2); + fprintf(stderr, "%s ERROR: Error updating member %s-%s: %s\njsonDump: %s\n", _myAddressStr.c_str(), networkId.c_str(), memberId.c_str(), e.what(), cfgDump.c_str()); + mspan->SetStatus(opentelemetry::trace::StatusCode::kError, "std::exception"); + mspan->SetAttribute("error", e.what()); + mspan->SetAttribute("config", cfgDump); + } + } + else if (objtype == "network") { + auto nspan = tracer->StartSpan("cv2::commitThread::network"); + auto nscope = tracer->WithActiveSpan(span); + + try { + // fprintf(stderr, "%s: commitThread: network\n", _myAddressStr.c_str()); + pqxx::work w(*c->c); + + std::string id = config["id"]; + + // network must already exist + pqxx::result res = w.exec_params0( + "INSERT INTO networks_ctl (id, name, configuration, controller_id, revision) " + "VALUES ($1, $2, $3, $4, $5) " + "ON CONFLICT (id) DO UPDATE SET " + "name = EXCLUDED.name, configuration = EXCLUDED.configuration, revision = EXCLUDED.revision+1", + id, + OSUtils::jsonString(config["name"], ""), + OSUtils::jsonDump(config, -1), + _myAddressStr, + ((uint64_t)config["revision"])); + + w.commit(); + + const uint64_t nwidInt = OSUtils::jsonIntHex(config["nwid"], 0ULL); + if (nwidInt) { + nlohmann::json nwOrig; + nlohmann::json nwNew(config); + + get(nwidInt, nwOrig); + + _networkChanged(nwOrig, nwNew, qitem.second); + } + else { + fprintf(stderr, "%s: Can't notify network changed: %llu\n", _myAddressStr.c_str(), (unsigned long long)nwidInt); + } + } + catch (pqxx::data_exception& e) { + const pqxx::sql_error* s = dynamic_cast(&e); + fprintf(stderr, "%s ERROR: Error updating network: %s\n", _myAddressStr.c_str(), e.what()); + if (s) { + fprintf(stderr, "%s ERROR: SQL error: %s\n", _myAddressStr.c_str(), s->query().c_str()); + } + nspan->SetStatus(opentelemetry::trace::StatusCode::kError, "pqxx::data_exception"); + nspan->SetAttribute("error", e.what()); + nspan->SetAttribute("config", OSUtils::jsonDump(config, 2)); + } + catch (std::exception& e) { + fprintf(stderr, "%s ERROR: Error updating network: %s\n", _myAddressStr.c_str(), e.what()); + nspan->SetStatus(opentelemetry::trace::StatusCode::kError, "std::exception"); + nspan->SetAttribute("error", e.what()); + nspan->SetAttribute("config", OSUtils::jsonDump(config, 2)); + } + } + else if (objtype == "_delete_network") { + auto dspan = tracer->StartSpan("cv2::commitThread::delete_network"); + auto dscope = tracer->WithActiveSpan(span); + + // fprintf(stderr, "%s: commitThread: delete network\n", _myAddressStr.c_str()); + try { + pqxx::work w(*c->c); + std::string networkId = config["id"]; + fprintf(stderr, "Deleting network %s\n", networkId.c_str()); + w.exec_params0("DELETE FROM network_memberships_ctl WHERE network_id = $1", 
networkId); + w.exec_params0("DELETE FROM networks_ctl WHERE id = $1", networkId); + + w.commit(); + + uint64_t nwidInt = OSUtils::jsonIntHex(config["nwid"], 0ULL); + json oldConfig; + get(nwidInt, oldConfig); + json empty; + _networkChanged(oldConfig, empty, qitem.second); + } + catch (std::exception& e) { + fprintf(stderr, "%s ERROR: Error deleting network: %s\n", _myAddressStr.c_str(), e.what()); + dspan->SetStatus(opentelemetry::trace::StatusCode::kError, "std::exception"); + dspan->SetAttribute("error", e.what()); + dspan->SetAttribute("config", OSUtils::jsonDump(config, 2)); + } + } + else if (objtype == "_delete_member") { + auto dspan = tracer->StartSpan("cv2::commitThread::delete_member"); + auto dscope = tracer->WithActiveSpan(span); + + // fprintf(stderr, "%s commitThread: delete member\n", _myAddressStr.c_str()); + try { + pqxx::work w(*c->c); + + std::string memberId = config["id"]; + std::string networkId = config["nwid"]; + + pqxx::result res = w.exec_params0("DELETE FROM network_memberships_ctl WHERE device_id = $1 AND network_id = $2", memberId, networkId); + + w.commit(); + + uint64_t nwidInt = OSUtils::jsonIntHex(config["nwid"], 0ULL); + uint64_t memberidInt = OSUtils::jsonIntHex(config["id"], 0ULL); + + nlohmann::json networkConfig; + nlohmann::json oldConfig; + + get(nwidInt, networkConfig, memberidInt, oldConfig); + json empty; + _memberChanged(oldConfig, empty, qitem.second); + } + catch (std::exception& e) { + fprintf(stderr, "%s ERROR: Error deleting member: %s\n", _myAddressStr.c_str(), e.what()); + dspan->SetStatus(opentelemetry::trace::StatusCode::kError, "std::exception"); + dspan->SetAttribute("error", e.what()); + dspan->SetAttribute("config", OSUtils::jsonDump(config, 2)); + } + } + else { + fprintf(stderr, "%s ERROR: unknown objtype\n", _myAddressStr.c_str()); + } + } + catch (std::exception& e) { + fprintf(stderr, "%s ERROR: Error getting objtype: %s\n", _myAddressStr.c_str(), e.what()); + span->SetStatus(opentelemetry::trace::StatusCode::kError, "std::exception"); + span->SetAttribute("error", e.what()); + } + _pool->unborrow(c); + c.reset(); + } + + fprintf(stderr, "%s commitThread finished\n", _myAddressStr.c_str()); +} + +void CV2::onlineNotificationThread() +{ + waitForReady(); + + _connected = 1; + + nlohmann::json jtmp1, jtmp2; + while (_run == 1) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("cv2"); + auto span = tracer->StartSpan("cv2::onlineNotificationThread"); + auto scope = tracer->WithActiveSpan(span); + + auto c = _pool->borrow(); + auto c2 = _pool->borrow(); + + try { + fprintf(stderr, "%s onlineNotificationThread\n", _myAddressStr.c_str()); + + std::unordered_map, NodeOnlineRecord, _PairHasher> lastOnline; + { + std::lock_guard l(_lastOnline_l); + lastOnline.swap(_lastOnline); + } + + pqxx::work w(*c->c); + pqxx::work w2(*c2->c); + + bool firstRun = true; + bool memberAdded = false; + uint64_t updateCount = 0; + + pqxx::pipeline pipe(w); + + for (auto i = lastOnline.begin(); i != lastOnline.end(); ++i) { + updateCount++; + + uint64_t nwid_i = i->first.first; + char nwidTmp[64]; + char memTmp[64]; + char ipTmp[64]; + + OSUtils::ztsnprintf(nwidTmp, sizeof(nwidTmp), "%.16llx", nwid_i); + OSUtils::ztsnprintf(memTmp, sizeof(memTmp), "%.10llx", i->first.second); + + if (! 
get(nwid_i, jtmp1, i->first.second, jtmp2)) { + continue; // skip non existent networks/members + } + + std::string networkId(nwidTmp); + std::string memberId(memTmp); + + try { + pqxx::row r = w2.exec_params1("SELECT device_id, network_id FROM network_memberships_ctl WHERE network_id = $1 AND device_id = $2", networkId, memberId); + } + catch (pqxx::unexpected_rows& e) { + continue; + } + + int64_t ts = i->second.lastSeen; + std::string ipAddr = i->second.physicalAddress.toIpString(ipTmp); + std::string timestamp = std::to_string(ts); + std::string osArch = i->second.osArch; + std::vector osArchSplit = split(osArch, '/'); + std::string os = osArchSplit[0]; + std::string arch = osArchSplit[1]; + + if (ipAddr.empty()) { + ipAddr = "relayed"; + } + + json record = { + { ipAddr, ts }, + }; + + std::string device_network_insert = "INSERT INTO network_memberships_ctl (device_id, network_id, last_seen, os, arch) " + "VALUES ('" + + w2.esc(memberId) + "', '" + w2.esc(networkId) + "', '" + w2.esc(record.dump()) + + "'::JSONB, " + "'" + + w2.esc(os) + "', '" + w2.esc(arch) + + "') " + "ON CONFLICT (device_id, network_id) DO UPDATE SET os = EXCLUDED.os, arch = EXCLUDED.arch, " + "last_seen = network_memberships_ctl.last_seen || EXCLUDED.last_seen"; + pipe.insert(device_network_insert); + + Metrics::pgsql_node_checkin++; + } + + pipe.complete(); + ; + w2.commit(); + w.commit(); + fprintf(stderr, "%s: Updated online status of %lu members\n", _myAddressStr.c_str(), updateCount); + } + catch (std::exception& e) { + fprintf(stderr, "%s ERROR: Error in onlineNotificationThread: %s\n", _myAddressStr.c_str(), e.what()); + span->SetStatus(opentelemetry::trace::StatusCode::kError, "std::exception"); + span->SetAttribute("error", e.what()); + } + catch (...) { + fprintf(stderr, "%s ERROR: Unknown error in onlineNotificationThread\n", _myAddressStr.c_str()); + span->SetStatus(opentelemetry::trace::StatusCode::kError, "unknown"); + } + _pool->unborrow(c2); + _pool->unborrow(c); + span->End(); + + std::this_thread::sleep_for(std::chrono::seconds(10)); + } + + fprintf(stderr, "%s: Fell out of run loop in onlineNotificationThread\n", _myAddressStr.c_str()); + if (_run == 1) { + fprintf(stderr, "ERROR: %s onlineNotificationThread should still be running! Exiting Controller.\n", _myAddressStr.c_str()); + exit(6); + } +} +#endif // ZT_CONTROLLER_USE_LIBPQ \ No newline at end of file diff --git a/controller/CV2.hpp b/controller/CV2.hpp new file mode 100644 index 000000000..09501e021 --- /dev/null +++ b/controller/CV2.hpp @@ -0,0 +1,109 @@ +/* + * Copyright (c)2025 ZeroTier, Inc. + * + * Use of this software is governed by the Business Source License included + * in the LICENSE.TXT file in the project's root directory. + * + * Change Date: 2026-01-01 + * + * On the date above, in accordance with the Business Source License, use + * of this software will be governed by version 2.0 of the Apache License. 
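// Editor's note: illustrative-only sketch (not part of this change) of the
// pqxx::pipeline batching used by onlineNotificationThread above: many upsert
// statements are queued on one transaction and flushed together. The statement
// text is assumed to be pre-escaped, as in the code above.
#include <pqxx/pqxx>
#include <string>
#include <vector>

void batchStatements(pqxx::connection& conn, const std::vector<std::string>& statements)
{
    pqxx::work w(conn);
    pqxx::pipeline pipe(w);
    for (const auto& sql : statements) {
        pipe.insert(sql);   // queues the statement; results can be retrieved later
    }
    pipe.complete();        // flush the batch and wait for all results
    w.commit();
}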
+ */ +/****/ + +#include "DB.hpp" + +#ifdef ZT_CONTROLLER_USE_LIBPQ + +#ifndef ZT_CONTROLLER_CV2_HPP +#define ZT_CONTROLLER_CV2_HPP + +#define ZT_CENTRAL_CONTROLLER_COMMIT_THREADS 4 + +#include "../node/Metrics.hpp" +#include "ConnectionPool.hpp" +#include "PostgreSQL.hpp" + +#include +#include +#include + +namespace ZeroTier { + +class CV2 : public DB { + friend class MemberNotificationReceiver; + friend class NetworkNotificationReceiver; + + public: + CV2(const Identity& myId, const char* path, int listenPort); + virtual ~CV2(); + + virtual bool waitForReady(); + virtual bool isReady(); + virtual bool save(nlohmann::json& record, bool notifyListeners); + virtual void eraseNetwork(const uint64_t networkId); + virtual void eraseMember(const uint64_t networkId, const uint64_t memberId); + virtual void nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress); + virtual void nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress, const char* osArch); + virtual AuthInfo getSSOAuthInfo(const nlohmann::json& member, const std::string& redirectURL); + + virtual bool ready() + { + return _ready == 2; + } + + protected: + struct _PairHasher { + inline std::size_t operator()(const std::pair& p) const + { + return (std::size_t)(p.first ^ p.second); + } + }; + virtual void _memberChanged(nlohmann::json& old, nlohmann::json& memberConfig, bool notifyListeners); + + virtual void _networkChanged(nlohmann::json& old, nlohmann::json& networkConfig, bool notifyListeners); + + private: + void initializeNetworks(); + void initializeMembers(); + void heartbeat(); + void membersDbWatcher(); + void networksDbWatcher(); + + void commitThread(); + void onlineNotificationThread(); + + // void notifyNewMember(const std::string &networkID, const std::string &memberID); + + enum OverrideMode { ALLOW_PGBOUNCER_OVERRIDE = 0, NO_OVERRIDE = 1 }; + + std::shared_ptr > _pool; + + const Identity _myId; + const Address _myAddress; + std::string _myAddressStr; + std::string _connString; + + BlockingQueue > _commitQueue; + + std::thread _heartbeatThread; + std::thread _membersDbWatcher; + std::thread _networksDbWatcher; + std::thread _commitThread[ZT_CENTRAL_CONTROLLER_COMMIT_THREADS]; + std::thread _onlineNotificationThread; + + std::unordered_map, NodeOnlineRecord, _PairHasher> _lastOnline; + + mutable std::mutex _lastOnline_l; + mutable std::mutex _readyLock; + std::atomic _ready, _connected, _run; + mutable volatile bool _waitNoticePrinted; + + int _listenPort; + uint8_t _ssoPsk[48]; +}; + +} // namespace ZeroTier + +#endif // ZT_CONTROLLER_CV2_HPP +#endif // ZT_CONTROLLER_USE_LIBPQ \ No newline at end of file diff --git a/controller/ConnectionPool.hpp b/controller/ConnectionPool.hpp index 74672eb43..75690fab4 100644 --- a/controller/ConnectionPool.hpp +++ b/controller/ConnectionPool.hpp @@ -4,7 +4,7 @@ * Use of this software is governed by the Business Source License included * in the LICENSE.TXT file in the project's root directory. * - * Change Date: 2025-01-01 + * Change Date: 2026-01-01 * * On the date above, in accordance with the Business Source License, use * of this software will be governed by version 2.0 of the Apache License. 
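// Editor's note: illustrative-only sketch (not part of this change) showing how a
// _PairHasher-style functor lets (networkId, memberId) pairs key an unordered_map,
// which is how the _lastOnline map declared above is organized. Names and values
// here are placeholders.
#include <cstdint>
#include <unordered_map>
#include <utility>

struct PairHasher {
    std::size_t operator()(const std::pair<uint64_t, uint64_t>& p) const
    {
        return (std::size_t)(p.first ^ p.second);   // same XOR mix as the controller code
    }
};

int main()
{
    std::unordered_map<std::pair<uint64_t, uint64_t>, int64_t, PairHasher> lastSeen;
    lastSeen[{ 0x1122334455667788ULL, 0x0000000001ULL }] = 1700000000000LL;   // (nwid, member) -> ms timestamp
    return 0;
}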
@@ -14,148 +14,177 @@ #ifndef ZT_CONNECTION_POOL_H_ #define ZT_CONNECTION_POOL_H_ - #ifndef _DEBUG - #define _DEBUG(x) +#define _DEBUG(x) #endif +#include "../node/Metrics.hpp" +#include "opentelemetry/trace/provider.h" + #include -#include +#include #include #include -#include +#include #include namespace ZeroTier { -struct ConnectionUnavailable : std::exception { - char const* what() const throw() { - return "Unable to allocate connection"; - }; +struct ConnectionUnavailable : std::exception { + char const* what() const throw() + { + return "Unable to allocate connection"; + }; }; - class Connection { -public: - virtual ~Connection() {}; + public: + virtual ~Connection() {}; }; class ConnectionFactory { -public: - virtual ~ConnectionFactory() {}; - virtual std::shared_ptr create()=0; + public: + virtual ~ConnectionFactory() {}; + virtual std::shared_ptr create() = 0; }; struct ConnectionPoolStats { - size_t pool_size; - size_t borrowed_size; + size_t pool_size; + size_t borrowed_size; }; -template -class ConnectionPool { -public: - ConnectionPool(size_t max_pool_size, size_t min_pool_size, std::shared_ptr factory) - : m_maxPoolSize(max_pool_size) - , m_minPoolSize(min_pool_size) - , m_factory(factory) - { - while(m_pool.size() < m_minPoolSize){ - m_pool.push_back(m_factory->create()); - } - }; +template class ConnectionPool { + public: + ConnectionPool(size_t max_pool_size, size_t min_pool_size, std::shared_ptr factory) : m_maxPoolSize(max_pool_size), m_minPoolSize(min_pool_size), m_factory(factory) + { + Metrics::max_pool_size += max_pool_size; + Metrics::min_pool_size += min_pool_size; + while (m_pool.size() < m_minPoolSize) { + m_pool.push_back(m_factory->create()); + Metrics::pool_avail++; + } + }; - ConnectionPoolStats get_stats() { - std::unique_lock lock(m_poolMutex); + ConnectionPoolStats get_stats() + { + std::unique_lock lock(m_poolMutex); - ConnectionPoolStats stats; - stats.pool_size = m_pool.size(); - stats.borrowed_size = m_borrowed.size(); + ConnectionPoolStats stats; + stats.pool_size = m_pool.size(); + stats.borrowed_size = m_borrowed.size(); - return stats; - }; + return stats; + }; - ~ConnectionPool() { - }; + ~ConnectionPool() {}; - /** - * Borrow - * - * Borrow a connection for temporary use - * - * When done, either (a) call unborrow() to return it, or (b) (if it's bad) just let it go out of scope. This will cause it to automatically be replaced. - * @retval a shared_ptr to the connection object - */ - std::shared_ptr borrow() { - std::unique_lock l(m_poolMutex); - - while((m_pool.size() + m_borrowed.size()) < m_minPoolSize) { - std::shared_ptr conn = m_factory->create(); - m_pool.push_back(conn); - } + /** + * Borrow + * + * Borrow a connection for temporary use + * + * When done, either (a) call unborrow() to return it, or (b) (if it's bad) just let it go out of scope. This will cause it to automatically be replaced. 
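 *
 * Illustrative usage sketch (editor's addition, not part of this change; "MyConn"
 * and "myFactory" are hypothetical stand-ins for a Connection subclass and its
 * ConnectionFactory):
 *
 *   auto pool = std::make_shared<ConnectionPool<MyConn> >(15, 5, myFactory);
 *   std::shared_ptr<MyConn> c = pool->borrow();   // may throw ConnectionUnavailable
 *   // ... use the connection ...
 *   pool->unborrow(c);                            // return it only if it is still healthy;
 *                                                 // a bad one can simply be dropped instead
 *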
+ * @retval a shared_ptr to the connection object + */ + std::shared_ptr borrow() + { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("connection_pool"); + auto span = tracer->StartSpan("connection_pool::borrow"); + auto scope = tracer->WithActiveSpan(span); - if(m_pool.size()==0){ - - if ((m_pool.size() + m_borrowed.size()) < m_maxPoolSize) { - try { - std::shared_ptr conn = m_factory->create(); - m_borrowed.insert(conn); - return std::static_pointer_cast(conn); - } catch (std::exception &e) { - throw ConnectionUnavailable(); - } - } else { - for(auto it = m_borrowed.begin(); it != m_borrowed.end(); ++it){ - if((*it).unique()) { - // This connection has been abandoned! Destroy it and create a new connection - try { - // If we are able to create a new connection, return it - _DEBUG("Creating new connection to replace discarded connection"); - std::shared_ptr conn = m_factory->create(); - m_borrowed.erase(it); - m_borrowed.insert(conn); - return std::static_pointer_cast(conn); - } catch(std::exception& e) { - // Error creating a replacement connection - throw ConnectionUnavailable(); - } - } - } - // Nothing available - throw ConnectionUnavailable(); - } - } + std::unique_lock l(m_poolMutex); - // Take one off the front - std::shared_ptr conn = m_pool.front(); - m_pool.pop_front(); - // Add it to the borrowed list - m_borrowed.insert(conn); - return std::static_pointer_cast(conn); - }; + while ((m_pool.size() + m_borrowed.size()) < m_minPoolSize) { + std::shared_ptr conn = m_factory->create(); + m_pool.push_back(conn); + Metrics::pool_avail++; + } - /** - * Unborrow a connection - * - * Only call this if you are returning a working connection. If the connection was bad, just let it go out of scope (so the connection manager can replace it). - * @param the connection - */ - void unborrow(std::shared_ptr conn) { - // Lock - std::unique_lock lock(m_poolMutex); - m_borrowed.erase(conn); - if ((m_pool.size() + m_borrowed.size()) < m_maxPoolSize) { - m_pool.push_back(conn); - } - }; -protected: - size_t m_maxPoolSize; - size_t m_minPoolSize; - std::shared_ptr m_factory; - std::deque > m_pool; - std::set > m_borrowed; - std::mutex m_poolMutex; + if (m_pool.size() == 0) { + if ((m_pool.size() + m_borrowed.size()) < m_maxPoolSize) { + try { + std::shared_ptr conn = m_factory->create(); + m_borrowed.insert(conn); + Metrics::pool_in_use++; + return std::static_pointer_cast(conn); + } + catch (std::exception& e) { + span->SetStatus(opentelemetry::trace::StatusCode::kError, e.what()); + Metrics::pool_errors++; + throw ConnectionUnavailable(); + } + } + else { + for (auto it = m_borrowed.begin(); it != m_borrowed.end(); ++it) { + if ((*it).unique()) { + // This connection has been abandoned! 
Destroy it and create a new connection + try { + // If we are able to create a new connection, return it + _DEBUG("Creating new connection to replace discarded connection"); + std::shared_ptr conn = m_factory->create(); + m_borrowed.erase(it); + m_borrowed.insert(conn); + return std::static_pointer_cast(conn); + } + catch (std::exception& e) { + span->SetStatus(opentelemetry::trace::StatusCode::kError, e.what()); + // Error creating a replacement connection + Metrics::pool_errors++; + throw ConnectionUnavailable(); + } + } + } + + span->SetStatus(opentelemetry::trace::StatusCode::kError, "No available connections in pool"); + // Nothing available + Metrics::pool_errors++; + throw ConnectionUnavailable(); + } + } + + // Take one off the front + std::shared_ptr conn = m_pool.front(); + m_pool.pop_front(); + Metrics::pool_avail--; + // Add it to the borrowed list + m_borrowed.insert(conn); + Metrics::pool_in_use++; + return std::static_pointer_cast(conn); + }; + + /** + * Unborrow a connection + * + * Only call this if you are returning a working connection. If the connection was bad, just let it go out of scope (so the connection manager can replace it). + * @param the connection + */ + void unborrow(std::shared_ptr conn) + { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("connection_pool"); + auto span = tracer->StartSpan("connection_pool::unborrow"); + auto scope = tracer->WithActiveSpan(span); + + // Lock + std::unique_lock lock(m_poolMutex); + m_borrowed.erase(conn); + Metrics::pool_in_use--; + if ((m_pool.size() + m_borrowed.size()) < m_maxPoolSize) { + Metrics::pool_avail++; + m_pool.push_back(conn); + } + }; + + protected: + size_t m_maxPoolSize; + size_t m_minPoolSize; + std::shared_ptr m_factory; + std::deque > m_pool; + std::set > m_borrowed; + std::mutex m_poolMutex; }; -} +} // namespace ZeroTier -#endif \ No newline at end of file +#endif diff --git a/controller/CtlUtil.cpp b/controller/CtlUtil.cpp new file mode 100644 index 000000000..e1e82f7a3 --- /dev/null +++ b/controller/CtlUtil.cpp @@ -0,0 +1,64 @@ +#include "CtlUtil.hpp" + +#ifdef ZT_CONTROLLER_USE_LIBPQ + +#include +#include + +namespace ZeroTier { + +const char* _timestr() +{ + time_t t = time(0); + char* ts = ctime(&t); + char* p = ts; + if (! p) + return ""; + while (*p) { + if (*p == '\n') { + *p = (char)0; + break; + } + ++p; + } + return ts; +} + +std::vector split(std::string str, char delim) +{ + std::istringstream iss(str); + std::vector tokens; + std::string item; + while (std::getline(iss, item, delim)) { + tokens.push_back(item); + } + return tokens; +} + +std::string url_encode(const std::string& value) +{ + std::ostringstream escaped; + escaped.fill('0'); + escaped << std::hex; + + for (std::string::const_iterator i = value.begin(), n = value.end(); i != n; ++i) { + std::string::value_type c = (*i); + + // Keep alphanumeric and other accepted characters intact + if (isalnum(c) || c == '-' || c == '_' || c == '.' 
|| c == '~') { + escaped << c; + continue; + } + + // Any other characters are percent-encoded + escaped << std::uppercase; + escaped << '%' << std::setw(2) << int((unsigned char)c); + escaped << std::nouppercase; + } + + return escaped.str(); +} + +} // namespace ZeroTier + +#endif \ No newline at end of file diff --git a/controller/CtlUtil.hpp b/controller/CtlUtil.hpp new file mode 100644 index 000000000..0ba665af3 --- /dev/null +++ b/controller/CtlUtil.hpp @@ -0,0 +1,16 @@ +#ifndef ZT_CTLUTIL_HPP +#define ZT_CTLUTIL_HPP + +#include +#include + +namespace ZeroTier { + +const char* _timestr(); + +std::vector split(std::string str, char delim); + +std::string url_encode(const std::string& value); +} // namespace ZeroTier + +#endif // namespace ZeroTier \ No newline at end of file diff --git a/controller/DB.cpp b/controller/DB.cpp index 2edcadbbe..4cce312ed 100644 --- a/controller/DB.cpp +++ b/controller/DB.cpp @@ -4,7 +4,7 @@ * Use of this software is governed by the Business Source License included * in the LICENSE.TXT file in the project's root directory. * - * Change Date: 2025-01-01 + * Change Date: 2026-01-01 * * On the date above, in accordance with the Business Source License, use * of this software will be governed by version 2.0 of the Apache License. @@ -12,77 +12,131 @@ /****/ #include "DB.hpp" -#include "EmbeddedNetworkController.hpp" -#include +#include "../node/Metrics.hpp" +#include "EmbeddedNetworkController.hpp" +#include "opentelemetry/trace/provider.h" + #include +#include #include using json = nlohmann::json; namespace ZeroTier { -void DB::initNetwork(nlohmann::json &network) +void DB::initNetwork(nlohmann::json& network) { - if (!network.count("private")) network["private"] = true; - if (!network.count("creationTime")) network["creationTime"] = OSUtils::now(); - if (!network.count("name")) network["name"] = ""; - if (!network.count("multicastLimit")) network["multicastLimit"] = (uint64_t)32; - if (!network.count("enableBroadcast")) network["enableBroadcast"] = true; - if (!network.count("v4AssignMode")) network["v4AssignMode"] = {{"zt",false}}; - if (!network.count("v6AssignMode")) network["v6AssignMode"] = {{"rfc4193",false},{"zt",false},{"6plane",false}}; - if (!network.count("authTokens")) network["authTokens"] = {{}}; - if (!network.count("capabilities")) network["capabilities"] = nlohmann::json::array(); - if (!network.count("tags")) network["tags"] = nlohmann::json::array(); - if (!network.count("routes")) network["routes"] = nlohmann::json::array(); - if (!network.count("ipAssignmentPools")) network["ipAssignmentPools"] = nlohmann::json::array(); - if (!network.count("mtu")) network["mtu"] = ZT_DEFAULT_MTU; - if (!network.count("remoteTraceTarget")) network["remoteTraceTarget"] = nlohmann::json(); - if (!network.count("removeTraceLevel")) network["remoteTraceLevel"] = 0; - if (!network.count("rulesSource")) network["rulesSource"] = ""; - if (!network.count("rules")) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db"); + auto span = tracer->StartSpan("db::initNetwork"); + auto scope = tracer->WithActiveSpan(span); + + if (! network.count("private")) + network["private"] = true; + if (! network.count("creationTime")) + network["creationTime"] = OSUtils::now(); + if (! network.count("name")) + network["name"] = ""; + if (! network.count("multicastLimit")) + network["multicastLimit"] = (uint64_t)32; + if (! network.count("enableBroadcast")) + network["enableBroadcast"] = true; + if (! 
network.count("v4AssignMode")) + network["v4AssignMode"] = { { "zt", false } }; + if (! network.count("v6AssignMode")) + network["v6AssignMode"] = { { "rfc4193", false }, { "zt", false }, { "6plane", false } }; + if (! network.count("authTokens")) + network["authTokens"] = { {} }; + if (! network.count("capabilities")) + network["capabilities"] = nlohmann::json::array(); + if (! network.count("tags")) + network["tags"] = nlohmann::json::array(); + if (! network.count("routes")) + network["routes"] = nlohmann::json::array(); + if (! network.count("ipAssignmentPools")) + network["ipAssignmentPools"] = nlohmann::json::array(); + if (! network.count("mtu")) + network["mtu"] = ZT_DEFAULT_MTU; + if (! network.count("remoteTraceTarget")) + network["remoteTraceTarget"] = nlohmann::json(); + if (! network.count("removeTraceLevel")) + network["remoteTraceLevel"] = 0; + if (! network.count("rulesSource")) + network["rulesSource"] = ""; + if (! network.count("rules")) { // If unspecified, rules are set to allow anything and behave like a flat L2 segment - network["rules"] = {{ - { "not",false }, - { "or", false }, - { "type","ACTION_ACCEPT" } - }}; + network["rules"] = { { { "not", false }, { "or", false }, { "type", "ACTION_ACCEPT" } } }; } - if (!network.count("dns")) network["dns"] = nlohmann::json::array(); - if (!network.count("ssoEnabled")) network["ssoEnabled"] = false; - if (!network.count("clientId")) network["clientId"] = ""; - if (!network.count("authorizationEndpoint")) network["authorizationEndpoint"] = ""; + if (! network.count("dns")) + network["dns"] = nlohmann::json::array(); + if (! network.count("ssoEnabled")) + network["ssoEnabled"] = false; + if (! network.count("clientId")) + network["clientId"] = ""; + if (! network.count("authorizationEndpoint")) + network["authorizationEndpoint"] = ""; network["objtype"] = "network"; } -void DB::initMember(nlohmann::json &member) +void DB::initMember(nlohmann::json& member) { - if (!member.count("authorized")) member["authorized"] = false; - if (!member.count("ssoExempt")) member["ssoExempt"] = false; - if (!member.count("ipAssignments")) member["ipAssignments"] = nlohmann::json::array(); - if (!member.count("activeBridge")) member["activeBridge"] = false; - if (!member.count("tags")) member["tags"] = nlohmann::json::array(); - if (!member.count("capabilities")) member["capabilities"] = nlohmann::json::array(); - if (!member.count("creationTime")) member["creationTime"] = OSUtils::now(); - if (!member.count("noAutoAssignIps")) member["noAutoAssignIps"] = false; - if (!member.count("revision")) member["revision"] = 0ULL; - if (!member.count("lastDeauthorizedTime")) member["lastDeauthorizedTime"] = 0ULL; - if (!member.count("lastAuthorizedTime")) member["lastAuthorizedTime"] = 0ULL; - if (!member.count("lastAuthorizedCredentialType")) member["lastAuthorizedCredentialType"] = nlohmann::json(); - if (!member.count("lastAuthorizedCredential")) member["lastAuthorizedCredential"] = nlohmann::json(); - if (!member.count("authenticationExpiryTime")) member["authenticationExpiryTime"] = 0LL; - if (!member.count("vMajor")) member["vMajor"] = -1; - if (!member.count("vMinor")) member["vMinor"] = -1; - if (!member.count("vRev")) member["vRev"] = -1; - if (!member.count("vProto")) member["vProto"] = -1; - if (!member.count("remoteTraceTarget")) member["remoteTraceTarget"] = nlohmann::json(); - if (!member.count("removeTraceLevel")) member["remoteTraceLevel"] = 0; + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = 
provider->GetTracer("db"); + auto span = tracer->StartSpan("db::initMember"); + auto scope = tracer->WithActiveSpan(span); + + if (! member.count("authorized")) + member["authorized"] = false; + if (! member.count("ssoExempt")) + member["ssoExempt"] = false; + if (! member.count("ipAssignments")) + member["ipAssignments"] = nlohmann::json::array(); + if (! member.count("activeBridge")) + member["activeBridge"] = false; + if (! member.count("tags")) + member["tags"] = nlohmann::json::array(); + if (! member.count("capabilities")) + member["capabilities"] = nlohmann::json::array(); + if (! member.count("creationTime")) + member["creationTime"] = OSUtils::now(); + if (! member.count("noAutoAssignIps")) + member["noAutoAssignIps"] = false; + if (! member.count("revision")) + member["revision"] = 0ULL; + if (! member.count("lastDeauthorizedTime")) + member["lastDeauthorizedTime"] = 0ULL; + if (! member.count("lastAuthorizedTime")) + member["lastAuthorizedTime"] = 0ULL; + if (! member.count("lastAuthorizedCredentialType")) + member["lastAuthorizedCredentialType"] = nlohmann::json(); + if (! member.count("lastAuthorizedCredential")) + member["lastAuthorizedCredential"] = nlohmann::json(); + if (! member.count("authenticationExpiryTime")) + member["authenticationExpiryTime"] = 0LL; + if (! member.count("vMajor")) + member["vMajor"] = -1; + if (! member.count("vMinor")) + member["vMinor"] = -1; + if (! member.count("vRev")) + member["vRev"] = -1; + if (! member.count("vProto")) + member["vProto"] = -1; + if (! member.count("remoteTraceTarget")) + member["remoteTraceTarget"] = nlohmann::json(); + if (! member.count("removeTraceLevel")) + member["remoteTraceLevel"] = 0; member["objtype"] = "member"; } -void DB::cleanNetwork(nlohmann::json &network) +void DB::cleanNetwork(nlohmann::json& network) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db"); + auto span = tracer->StartSpan("db::cleanNetwork"); + auto scope = tracer->WithActiveSpan(span); + network.erase("clock"); network.erase("authorizedMemberCount"); network.erase("activeMemberCount"); @@ -90,51 +144,78 @@ void DB::cleanNetwork(nlohmann::json &network) network.erase("lastModified"); } -void DB::cleanMember(nlohmann::json &member) +void DB::cleanMember(nlohmann::json& member) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db"); + auto span = tracer->StartSpan("db::cleanMember"); + auto scope = tracer->WithActiveSpan(span); + member.erase("clock"); member.erase("physicalAddr"); member.erase("recentLog"); member.erase("lastModified"); member.erase("lastRequestMetaData"); - member.erase("authenticationURL"); // computed - member.erase("authenticationClientID"); // computed + member.erase("authenticationURL"); // computed + member.erase("authenticationClientID"); // computed } -DB::DB() {} -DB::~DB() {} - -bool DB::get(const uint64_t networkId,nlohmann::json &network) +DB::DB() { +} +DB::~DB() +{ +} + +bool DB::get(const uint64_t networkId, nlohmann::json& network) +{ + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db"); + auto span = tracer->StartSpan("db::getNetwork"); + auto scope = tracer->WithActiveSpan(span); + char networkIdStr[17]; + span->SetAttribute("network_id", Utils::hex(networkId, networkIdStr)); + waitForReady(); + Metrics::db_get_network++; std::shared_ptr<_Network> nw; { - std::lock_guard l(_networks_l); + std::shared_lock l(_networks_l); auto nwi 
= _networks.find(networkId); if (nwi == _networks.end()) return false; nw = nwi->second; } { - std::lock_guard l2(nw->lock); + std::shared_lock l2(nw->lock); network = nw->config; } return true; } -bool DB::get(const uint64_t networkId,nlohmann::json &network,const uint64_t memberId,nlohmann::json &member) +bool DB::get(const uint64_t networkId, nlohmann::json& network, const uint64_t memberId, nlohmann::json& member) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db"); + auto span = tracer->StartSpan("db::getNetworkAndMember"); + auto scope = tracer->WithActiveSpan(span); + char networkIdStr[17]; + char memberIdStr[11]; + span->SetAttribute("network_id", Utils::hex(networkId, networkIdStr)); + span->SetAttribute("member_id", Utils::hex(networkId, memberIdStr)); + waitForReady(); + Metrics::db_get_network_and_member++; std::shared_ptr<_Network> nw; { - std::lock_guard l(_networks_l); + std::shared_lock l(_networks_l); auto nwi = _networks.find(networkId); if (nwi == _networks.end()) return false; nw = nwi->second; } { - std::lock_guard l2(nw->lock); + std::shared_lock l2(nw->lock); network = nw->config; auto m = nw->members.find(memberId); if (m == nw->members.end()) @@ -144,60 +225,91 @@ bool DB::get(const uint64_t networkId,nlohmann::json &network,const uint64_t mem return true; } -bool DB::get(const uint64_t networkId,nlohmann::json &network,const uint64_t memberId,nlohmann::json &member,NetworkSummaryInfo &info) +bool DB::get(const uint64_t networkId, nlohmann::json& network, const uint64_t memberId, nlohmann::json& member, NetworkSummaryInfo& info) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db"); + auto span = tracer->StartSpan("db::getNetworkAndMemberAndSummary"); + auto scope = tracer->WithActiveSpan(span); + char networkIdStr[17]; + char memberIdStr[11]; + span->SetAttribute("network_id", Utils::hex(networkId, networkIdStr)); + span->SetAttribute("member_id", Utils::hex(memberId, memberIdStr)); + waitForReady(); + Metrics::db_get_network_and_member_and_summary++; std::shared_ptr<_Network> nw; { - std::lock_guard l(_networks_l); + std::shared_lock l(_networks_l); auto nwi = _networks.find(networkId); if (nwi == _networks.end()) return false; nw = nwi->second; } { - std::lock_guard l2(nw->lock); + std::shared_lock l2(nw->lock); network = nw->config; - _fillSummaryInfo(nw,info); + _fillSummaryInfo(nw, info); auto m = nw->members.find(memberId); if (m == nw->members.end()) return false; member = m->second; } + return true; } -bool DB::get(const uint64_t networkId,nlohmann::json &network,std::vector &members) +bool DB::get(const uint64_t networkId, nlohmann::json& network, std::vector& members) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db"); + auto span = tracer->StartSpan("db::getNetworkAndMembers"); + auto scope = tracer->WithActiveSpan(span); + char networkIdStr[17]; + span->SetAttribute("network_id", Utils::hex(networkId, networkIdStr)); + waitForReady(); + Metrics::db_get_member_list++; std::shared_ptr<_Network> nw; { - std::lock_guard l(_networks_l); + std::shared_lock l(_networks_l); auto nwi = _networks.find(networkId); if (nwi == _networks.end()) return false; nw = nwi->second; } { - std::lock_guard l2(nw->lock); + std::shared_lock l2(nw->lock); network = nw->config; - for(auto m=nw->members.begin();m!=nw->members.end();++m) { + for (auto m = nw->members.begin(); m != 
nw->members.end(); ++m) { members.push_back(m->second); } } return true; } -void DB::networks(std::set &networks) +void DB::networks(std::set& networks) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db"); + auto span = tracer->StartSpan("db::networks"); + auto scope = tracer->WithActiveSpan(span); + waitForReady(); - std::lock_guard l(_networks_l); - for(auto n=_networks.begin();n!=_networks.end();++n) + Metrics::db_get_network_list++; + std::shared_lock l(_networks_l); + for (auto n = _networks.begin(); n != _networks.end(); ++n) networks.insert(n->first); } -void DB::_memberChanged(nlohmann::json &old,nlohmann::json &memberConfig,bool notifyListeners) +void DB::_memberChanged(nlohmann::json& old, nlohmann::json& memberConfig, bool notifyListeners) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db"); + auto span = tracer->StartSpan("db::_memberChanged"); + auto scope = tracer->WithActiveSpan(span); + + Metrics::db_member_change++; uint64_t memberId = 0; uint64_t networkId = 0; bool isAuth = false; @@ -205,26 +317,29 @@ void DB::_memberChanged(nlohmann::json &old,nlohmann::json &memberConfig,bool no std::shared_ptr<_Network> nw; if (old.is_object()) { - memberId = OSUtils::jsonIntHex(old["id"],0ULL); - networkId = OSUtils::jsonIntHex(old["nwid"],0ULL); - if ((memberId)&&(networkId)) { + memberId = OSUtils::jsonIntHex(old["id"], 0ULL); + networkId = OSUtils::jsonIntHex(old["nwid"], 0ULL); + if ((memberId) && (networkId)) { { - std::lock_guard l(_networks_l); + std::unique_lock l(_networks_l); auto nw2 = _networks.find(networkId); - if (nw2 != _networks.end()) + if (nw2 != _networks.end()) { nw = nw2->second; + } } if (nw) { - std::lock_guard l(nw->lock); - if (OSUtils::jsonBool(old["activeBridge"],false)) + std::unique_lock l(nw->lock); + if (OSUtils::jsonBool(old["activeBridge"], false)) { nw->activeBridgeMembers.erase(memberId); - wasAuth = OSUtils::jsonBool(old["authorized"],false); - if (wasAuth) + } + wasAuth = OSUtils::jsonBool(old["authorized"], false); + if (wasAuth) { nw->authorizedMembers.erase(memberId); - json &ips = old["ipAssignments"]; + } + json& ips = old["ipAssignments"]; if (ips.is_array()) { - for(unsigned long i=0;i l(_networks_l); - std::shared_ptr<_Network> &nw2 = _networks[networkId]; - if (!nw2) + std::unique_lock l(_networks_l); + std::shared_ptr<_Network>& nw2 = _networks[networkId]; + if (! 
nw2) nw2.reset(new _Network); nw = nw2; } { - std::lock_guard l(nw->lock); + std::unique_lock l(nw->lock); nw->members[memberId] = memberConfig; - if (OSUtils::jsonBool(memberConfig["activeBridge"],false)) + if (OSUtils::jsonBool(memberConfig["activeBridge"], false)) { nw->activeBridgeMembers.insert(memberId); - isAuth = OSUtils::jsonBool(memberConfig["authorized"],false); - if (isAuth) + } + isAuth = OSUtils::jsonBool(memberConfig["authorized"], false); + if (isAuth) { + Metrics::member_auths++; nw->authorizedMembers.insert(memberId); - json &ips = memberConfig["ipAssignments"]; + } + json& ips = memberConfig["ipAssignments"]; if (ips.is_array()) { - for(unsigned long i=0;i nw->mostRecentDeauthTime) nw->mostRecentDeauthTime = ldt; } } if (notifyListeners) { - std::lock_guard ll(_changeListeners_l); - for(auto i=_changeListeners.begin();i!=_changeListeners.end();++i) { - (*i)->onNetworkMemberUpdate(this,networkId,memberId,memberConfig); + std::unique_lock ll(_changeListeners_l); + for (auto i = _changeListeners.begin(); i != _changeListeners.end(); ++i) { + (*i)->onNetworkMemberUpdate(this, networkId, memberId, memberConfig); } } - } else if (memberId) { + } + else if (memberId) { if (nw) { - std::lock_guard l(nw->lock); + std::unique_lock l(nw->lock); nw->members.erase(memberId); } if (networkId) { - std::lock_guard l(_networks_l); + std::unique_lock l(_networks_l); auto er = _networkByMember.equal_range(memberId); - for(auto i=er.first;i!=er.second;++i) { + for (auto i = er.first; i != er.second; ++i) { if (i->second == networkId) { _networkByMember.erase(i); break; @@ -303,60 +422,127 @@ void DB::_memberChanged(nlohmann::json &old,nlohmann::json &memberConfig,bool no } } - if ((notifyListeners)&&((wasAuth)&&(!isAuth)&&(networkId)&&(memberId))) { - std::lock_guard ll(_changeListeners_l); - for(auto i=_changeListeners.begin();i!=_changeListeners.end();++i) { - (*i)->onNetworkMemberDeauthorize(this,networkId,memberId); + if (notifyListeners) { + if (networkId != 0 && memberId != 0 && old.is_object() && ! memberConfig.is_object()) { + // member delete + Metrics::member_count--; + } + else if (networkId != 0 && memberId != 0 && ! old.is_object() && memberConfig.is_object()) { + // new member + Metrics::member_count++; + } + + if (! wasAuth && isAuth) { + Metrics::member_auths++; + } + else if (wasAuth && ! isAuth) { + Metrics::member_deauths++; + } + else { + Metrics::member_changes++; + } + } + + if ((notifyListeners) && ((wasAuth) && (! isAuth) && (networkId) && (memberId))) { + std::unique_lock ll(_changeListeners_l); + for (auto i = _changeListeners.begin(); i != _changeListeners.end(); ++i) { + (*i)->onNetworkMemberDeauthorize(this, networkId, memberId); } } } -void DB::_networkChanged(nlohmann::json &old,nlohmann::json &networkConfig,bool notifyListeners) +void DB::_networkChanged(nlohmann::json& old, nlohmann::json& networkConfig, bool notifyListeners) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db"); + auto span = tracer->StartSpan("db::_networkChanged"); + span->SetAttribute("old_network_config", old.dump()); + span->SetAttribute("network_config", networkConfig.dump()); + span->SetAttribute("notify_listeners", notifyListeners); + auto scope = tracer->WithActiveSpan(span); + + Metrics::db_network_change++; + if (notifyListeners) { + if (old.is_object() && old.contains("id") && networkConfig.is_object() && networkConfig.contains("id")) { + Metrics::network_changes++; + } + else if (! 
old.is_object() && networkConfig.is_object() && networkConfig.contains("id")) { + Metrics::network_count++; + } + else if (old.is_object() && old.contains("id") && ! networkConfig.is_object()) { + Metrics::network_count--; + } + } + if (networkConfig.is_object()) { const std::string ids = networkConfig["id"]; const uint64_t networkId = Utils::hexStrToU64(ids.c_str()); if (networkId) { std::shared_ptr<_Network> nw; { - std::lock_guard l(_networks_l); - std::shared_ptr<_Network> &nw2 = _networks[networkId]; - if (!nw2) + std::unique_lock l(_networks_l); + std::shared_ptr<_Network>& nw2 = _networks[networkId]; + if (! nw2) nw2.reset(new _Network); nw = nw2; } { - std::lock_guard l2(nw->lock); + std::unique_lock l2(nw->lock); nw->config = networkConfig; } if (notifyListeners) { - std::lock_guard ll(_changeListeners_l); - for(auto i=_changeListeners.begin();i!=_changeListeners.end();++i) { - (*i)->onNetworkUpdate(this,networkId,networkConfig); + std::unique_lock ll(_changeListeners_l); + for (auto i = _changeListeners.begin(); i != _changeListeners.end(); ++i) { + (*i)->onNetworkUpdate(this, networkId, networkConfig); } } } - } else if (old.is_object()) { + } + else if (old.is_object()) { const std::string ids = old["id"]; const uint64_t networkId = Utils::hexStrToU64(ids.c_str()); if (networkId) { - std::lock_guard l(_networks_l); + try { + // deauth all members on the network + nlohmann::json network; + std::vector members; + this->get(networkId, network, members); + for (auto i = members.begin(); i != members.end(); ++i) { + const std::string nodeID = (*i)["id"]; + const uint64_t memberId = Utils::hexStrToU64(nodeID.c_str()); + std::unique_lock ll(_changeListeners_l); + for (auto j = _changeListeners.begin(); j != _changeListeners.end(); ++j) { + (*j)->onNetworkMemberDeauthorize(this, networkId, memberId); + } + } + } + catch (std::exception& e) { + std::cerr << "Error deauthorizing members on network delete: " << e.what() << std::endl; + } + + // delete the network + std::unique_lock l(_networks_l); _networks.erase(networkId); } } } -void DB::_fillSummaryInfo(const std::shared_ptr<_Network> &nw,NetworkSummaryInfo &info) +void DB::_fillSummaryInfo(const std::shared_ptr<_Network>& nw, NetworkSummaryInfo& info) { - for(auto ab=nw->activeBridgeMembers.begin();ab!=nw->activeBridgeMembers.end();++ab) + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db"); + auto span = tracer->StartSpan("db::_fillSummaryInfo"); + auto scope = tracer->WithActiveSpan(span); + + for (auto ab = nw->activeBridgeMembers.begin(); ab != nw->activeBridgeMembers.end(); ++ab) info.activeBridges.push_back(Address(*ab)); - std::sort(info.activeBridges.begin(),info.activeBridges.end()); - for(auto ip=nw->allocatedIps.begin();ip!=nw->allocatedIps.end();++ip) + std::sort(info.activeBridges.begin(), info.activeBridges.end()); + for (auto ip = nw->allocatedIps.begin(); ip != nw->allocatedIps.end(); ++ip) info.allocatedIps.push_back(*ip); - std::sort(info.allocatedIps.begin(),info.allocatedIps.end()); + std::sort(info.allocatedIps.begin(), info.allocatedIps.end()); info.authorizedMemberCount = (unsigned long)nw->authorizedMembers.size(); info.totalMemberCount = (unsigned long)nw->members.size(); info.mostRecentDeauthTime = nw->mostRecentDeauthTime; } -} // namespace ZeroTier +} // namespace ZeroTier diff --git a/controller/DB.hpp b/controller/DB.hpp index b792304e5..01b6f1266 100644 --- a/controller/DB.hpp +++ b/controller/DB.hpp @@ -4,7 +4,7 @@ * Use of this software is 
governed by the Business Source License included * in the LICENSE.TXT file in the project's root directory. * - * Change Date: 2025-01-01 + * Change Date: 2026-01-01 * * On the date above, in accordance with the Business Source License, use * of this software will be governed by version 2.0 of the Apache License. @@ -14,46 +14,36 @@ #ifndef ZT_CONTROLLER_DB_HPP #define ZT_CONTROLLER_DB_HPP -//#define ZT_CONTROLLER_USE_LIBPQ +// #define ZT_CONTROLLER_USE_LIBPQ #include "../node/Constants.hpp" #include "../node/Identity.hpp" #include "../node/InetAddress.hpp" -#include "../osdep/OSUtils.hpp" #include "../osdep/BlockingQueue.hpp" +#include "../osdep/OSUtils.hpp" +#include +#include #include +#include +#include +#include +#include #include #include #include #include #include -#include -#include -#include -#include - -#include "../ext/json/json.hpp" #define ZT_MEMBER_AUTH_TIMEOUT_NOTIFY_BEFORE 25000 -namespace ZeroTier -{ +namespace ZeroTier { -struct AuthInfo -{ -public: - AuthInfo() - : enabled(false) - , version(0) - , authenticationURL() - , authenticationExpiryTime(0) - , issuerURL() - , centralAuthURL() - , ssoNonce() - , ssoState() - , ssoClientID() - {} +struct AuthInfo { + public: + AuthInfo() : enabled(false), version(0), authenticationURL(), authenticationExpiryTime(0), issuerURL(), centralAuthURL(), ssoNonce(), ssoState(), ssoClientID(), ssoProvider("default") + { + } bool enabled; uint64_t version; @@ -64,27 +54,37 @@ public: std::string ssoNonce; std::string ssoState; std::string ssoClientID; + std::string ssoProvider; }; /** * Base class with common infrastructure for all controller DB implementations */ -class DB -{ -public: - class ChangeListener - { - public: - ChangeListener() {} - virtual ~ChangeListener() {} - virtual void onNetworkUpdate(const void *db,uint64_t networkId,const nlohmann::json &network) {} - virtual void onNetworkMemberUpdate(const void *db,uint64_t networkId,uint64_t memberId,const nlohmann::json &member) {} - virtual void onNetworkMemberDeauthorize(const void *db,uint64_t networkId,uint64_t memberId) {} +class DB { + public: + class ChangeListener { + public: + ChangeListener() + { + } + virtual ~ChangeListener() + { + } + virtual void onNetworkUpdate(const void* db, uint64_t networkId, const nlohmann::json& network) + { + } + virtual void onNetworkMemberUpdate(const void* db, uint64_t networkId, uint64_t memberId, const nlohmann::json& member) + { + } + virtual void onNetworkMemberDeauthorize(const void* db, uint64_t networkId, uint64_t memberId) + { + } }; - struct NetworkSummaryInfo - { - NetworkSummaryInfo() : authorizedMemberCount(0),totalMemberCount(0),mostRecentDeauthTime(0) {} + struct NetworkSummaryInfo { + NetworkSummaryInfo() : authorizedMemberCount(0), totalMemberCount(0), mostRecentDeauthTime(0) + { + } std::vector
activeBridges; std::vector allocatedIps; unsigned long authorizedMemberCount; @@ -92,10 +92,10 @@ public: int64_t mostRecentDeauthTime; }; - static void initNetwork(nlohmann::json &network); - static void initMember(nlohmann::json &member); - static void cleanNetwork(nlohmann::json &network); - static void cleanMember(nlohmann::json &member); + static void initNetwork(nlohmann::json& network); + static void initMember(nlohmann::json& member); + static void cleanNetwork(nlohmann::json& network); + static void cleanMember(nlohmann::json& member); DB(); virtual ~DB(); @@ -105,45 +105,48 @@ public: inline bool hasNetwork(const uint64_t networkId) const { - std::lock_guard l(_networks_l); + std::shared_lock l(_networks_l); return (_networks.find(networkId) != _networks.end()); } - bool get(const uint64_t networkId,nlohmann::json &network); - bool get(const uint64_t networkId,nlohmann::json &network,const uint64_t memberId,nlohmann::json &member); - bool get(const uint64_t networkId,nlohmann::json &network,const uint64_t memberId,nlohmann::json &member,NetworkSummaryInfo &info); - bool get(const uint64_t networkId,nlohmann::json &network,std::vector &members); + bool get(const uint64_t networkId, nlohmann::json& network); + bool get(const uint64_t networkId, nlohmann::json& network, const uint64_t memberId, nlohmann::json& member); + bool get(const uint64_t networkId, nlohmann::json& network, const uint64_t memberId, nlohmann::json& member, NetworkSummaryInfo& info); + bool get(const uint64_t networkId, nlohmann::json& network, std::vector& members); - void networks(std::set &networks); + void networks(std::set& networks); - template - inline void each(F f) + template inline void each(F f) { nlohmann::json nullJson; - std::lock_guard lck(_networks_l); - for(auto nw=_networks.begin();nw!=_networks.end();++nw) { - f(nw->first,nw->second->config,0,nullJson); // first provide network with 0 for member ID - for(auto m=nw->second->members.begin();m!=nw->second->members.end();++m) { - f(nw->first,nw->second->config,m->first,m->second); + std::unique_lock lck(_networks_l); + for (auto nw = _networks.begin(); nw != _networks.end(); ++nw) { + f(nw->first, nw->second->config, 0, nullJson); // first provide network with 0 for member ID + for (auto m = nw->second->members.begin(); m != nw->second->members.end(); ++m) { + f(nw->first, nw->second->config, m->first, m->second); } } } - virtual bool save(nlohmann::json &record,bool notifyListeners) = 0; + virtual bool save(nlohmann::json& record, bool notifyListeners) = 0; virtual void eraseNetwork(const uint64_t networkId) = 0; - virtual void eraseMember(const uint64_t networkId,const uint64_t memberId) = 0; - virtual void nodeIsOnline(const uint64_t networkId,const uint64_t memberId,const InetAddress &physicalAddress) = 0; + virtual void eraseMember(const uint64_t networkId, const uint64_t memberId) = 0; + virtual void nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress) = 0; + virtual void nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress, const char* osArch) = 0; - virtual AuthInfo getSSOAuthInfo(const nlohmann::json &member, const std::string &redirectURL) { return AuthInfo(); } - - inline void addListener(DB::ChangeListener *const listener) + virtual AuthInfo getSSOAuthInfo(const nlohmann::json& member, const std::string& redirectURL) { - std::lock_guard l(_changeListeners_l); + return AuthInfo(); + } + + inline void addListener(DB::ChangeListener* const 
listener) + { + std::unique_lock l(_changeListeners_l); _changeListeners.push_back(listener); } -protected: - static inline bool _compareRecords(const nlohmann::json &a,const nlohmann::json &b) + protected: + static inline bool _compareRecords(const nlohmann::json& a, const nlohmann::json& b) { if (a.is_object() == b.is_object()) { if (a.is_object()) { @@ -151,10 +154,10 @@ protected: return false; auto amap = a.get(); auto bmap = b.get(); - for(auto ai=amap.begin();ai!=amap.end();++ai) { - if (ai->first != "revision") { // ignore revision, compare only non-revision-counter fields + for (auto ai = amap.begin(); ai != amap.end(); ++ai) { + if (ai->first != "revision") { // ignore revision, compare only non-revision-counter fields auto bi = bmap.find(ai->first); - if ((bi == bmap.end())||(bi->second != ai->second)) + if ((bi == bmap.end()) || (bi->second != ai->second)) return false; } } @@ -165,29 +168,30 @@ protected: return false; } - struct _Network - { - _Network() : mostRecentDeauthTime(0) {} + struct _Network { + _Network() : mostRecentDeauthTime(0) + { + } nlohmann::json config; - std::unordered_map members; + std::unordered_map members; std::unordered_set activeBridgeMembers; std::unordered_set authorizedMembers; - std::unordered_set allocatedIps; + std::unordered_set allocatedIps; int64_t mostRecentDeauthTime; - std::mutex lock; + std::shared_mutex lock; }; - virtual void _memberChanged(nlohmann::json &old,nlohmann::json &memberConfig,bool notifyListeners); - virtual void _networkChanged(nlohmann::json &old,nlohmann::json &networkConfig,bool notifyListeners); - void _fillSummaryInfo(const std::shared_ptr<_Network> &nw,NetworkSummaryInfo &info); + virtual void _memberChanged(nlohmann::json& old, nlohmann::json& memberConfig, bool notifyListeners); + virtual void _networkChanged(nlohmann::json& old, nlohmann::json& networkConfig, bool notifyListeners); + void _fillSummaryInfo(const std::shared_ptr<_Network>& nw, NetworkSummaryInfo& info); - std::vector _changeListeners; - std::unordered_map< uint64_t,std::shared_ptr<_Network> > _networks; - std::unordered_multimap< uint64_t,uint64_t > _networkByMember; - mutable std::mutex _changeListeners_l; - mutable std::mutex _networks_l; + std::vector _changeListeners; + std::unordered_map > _networks; + std::unordered_multimap _networkByMember; + mutable std::shared_mutex _changeListeners_l; + mutable std::shared_mutex _networks_l; }; -} // namespace ZeroTier +} // namespace ZeroTier #endif diff --git a/controller/DBMirrorSet.cpp b/controller/DBMirrorSet.cpp index fd7f32a22..f8c6f0366 100644 --- a/controller/DBMirrorSet.cpp +++ b/controller/DBMirrorSet.cpp @@ -4,7 +4,7 @@ * Use of this software is governed by the Business Source License included * in the LICENSE.TXT file in the project's root directory. * - * Change Date: 2025-01-01 + * Change Date: 2026-01-01 * * On the date above, in accordance with the Business Source License, use * of this software will be governed by version 2.0 of the Apache License. 
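A note on the locking change in DB.hpp above: _networks_l and _changeListeners_l become std::shared_mutex, so read-only paths (hasNetwork, the get overloads, networks) now take std::shared_lock while anything that mutates the maps takes std::unique_lock. The snippet below is a minimal, self-contained sketch of that reader/writer pattern; the Registry class, its fields, and main() are illustrative stand-ins and do not appear in the ZeroTier sources.

#include <cstdint>
#include <iostream>
#include <shared_mutex>
#include <string>
#include <unordered_map>

// Illustrative registry guarded the same way DB guards _networks:
// shared (read) locks for lookups, an exclusive (write) lock for mutation.
class Registry {
  public:
    bool get(uint64_t id, std::string& out) const
    {
        std::shared_lock<std::shared_mutex> l(m_lock);   // many readers may hold this concurrently
        auto it = m_items.find(id);
        if (it == m_items.end())
            return false;
        out = it->second;
        return true;
    }

    void put(uint64_t id, std::string value)
    {
        std::unique_lock<std::shared_mutex> l(m_lock);   // writers wait for readers and get exclusive access
        m_items[id] = std::move(value);
    }

  private:
    std::unordered_map<uint64_t, std::string> m_items;
    mutable std::shared_mutex m_lock;
};

int main()
{
    Registry r;
    r.put(0x8056c2e21c000001ULL, "example network");
    std::string name;
    if (r.get(0x8056c2e21c000001ULL, name))
        std::cout << name << std::endl;
    return 0;
}

The benefit is the same one the controller is after: concurrent network and member lookups no longer serialize behind a single mutex, while writers still get exclusive access.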
@@ -13,55 +13,63 @@ #include "DBMirrorSet.hpp" +#include "opentelemetry/trace/provider.h" + namespace ZeroTier { -DBMirrorSet::DBMirrorSet(DB::ChangeListener *listener) : - _listener(listener), - _running(true) +DBMirrorSet::DBMirrorSet(DB::ChangeListener* listener) : _listener(listener), _running(true), _syncCheckerThread(), _dbs(), _dbs_l() { _syncCheckerThread = std::thread([this]() { - for(;;) { - for(int i=0;i<120;++i) { // 1 minute delay between checks - if (!_running) + for (;;) { + for (int i = 0; i < 120; ++i) { // 1 minute delay between checks + if (! _running) return; std::this_thread::sleep_for(std::chrono::milliseconds(500)); } - std::vector< std::shared_ptr > dbs; + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db_mirror_set"); + auto span = tracer->StartSpan("db::syncChecker"); + auto scope = tracer->WithActiveSpan(span); + + std::vector > dbs; { - std::lock_guard l(_dbs_l); + std::unique_lock l(_dbs_l); if (_dbs.size() <= 1) - continue; // no need to do this if there's only one DB, so skip the iteration + continue; // no need to do this if there's only one DB, so skip the iteration dbs = _dbs; } - for(auto db=dbs.begin();db!=dbs.end();++db) { - (*db)->each([&dbs,&db](uint64_t networkId,const nlohmann::json &network,uint64_t memberId,const nlohmann::json &member) { + for (auto db = dbs.begin(); db != dbs.end(); ++db) { + (*db)->each([&dbs, &db](uint64_t networkId, const nlohmann::json& network, uint64_t memberId, const nlohmann::json& member) { try { if (network.is_object()) { if (memberId == 0) { - for(auto db2=dbs.begin();db2!=dbs.end();++db2) { + for (auto db2 = dbs.begin(); db2 != dbs.end(); ++db2) { if (db->get() != db2->get()) { nlohmann::json nw2; - if ((!(*db2)->get(networkId,nw2))||((nw2.is_object())&&(OSUtils::jsonInt(nw2["revision"],0) < OSUtils::jsonInt(network["revision"],0)))) { + if ((! (*db2)->get(networkId, nw2)) || ((nw2.is_object()) && (OSUtils::jsonInt(nw2["revision"], 0) < OSUtils::jsonInt(network["revision"], 0)))) { nw2 = network; - (*db2)->save(nw2,false); + (*db2)->save(nw2, false); } } } - } else if (member.is_object()) { - for(auto db2=dbs.begin();db2!=dbs.end();++db2) { + } + else if (member.is_object()) { + for (auto db2 = dbs.begin(); db2 != dbs.end(); ++db2) { if (db->get() != db2->get()) { - nlohmann::json nw2,m2; - if ((!(*db2)->get(networkId,nw2,memberId,m2))||((m2.is_object())&&(OSUtils::jsonInt(m2["revision"],0) < OSUtils::jsonInt(member["revision"],0)))) { + nlohmann::json nw2, m2; + if ((! (*db2)->get(networkId, nw2, memberId, m2)) || ((m2.is_object()) && (OSUtils::jsonInt(m2["revision"], 0) < OSUtils::jsonInt(member["revision"], 0)))) { m2 = member; - (*db2)->save(m2,false); + (*db2)->save(m2, false); } } } } } - } catch ( ... ) {} // skip entries that generate JSON errors + } + catch (...) 
{ + } // skip entries that generate JSON errors }); } } @@ -76,59 +84,101 @@ DBMirrorSet::~DBMirrorSet() bool DBMirrorSet::hasNetwork(const uint64_t networkId) const { - std::lock_guard l(_dbs_l); - for(auto d=_dbs.begin();d!=_dbs.end();++d) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db_mirror_set"); + auto span = tracer->StartSpan("db_mirror_set::hasNetwork"); + auto scope = tracer->WithActiveSpan(span); + + std::shared_lock l(_dbs_l); + for (auto d = _dbs.begin(); d != _dbs.end(); ++d) { if ((*d)->hasNetwork(networkId)) return true; } return false; } -bool DBMirrorSet::get(const uint64_t networkId,nlohmann::json &network) +bool DBMirrorSet::get(const uint64_t networkId, nlohmann::json& network) { - std::lock_guard l(_dbs_l); - for(auto d=_dbs.begin();d!=_dbs.end();++d) { - if ((*d)->get(networkId,network)) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db_mirror_set"); + auto span = tracer->StartSpan("db_mirror_set::getNetwork"); + auto scope = tracer->WithActiveSpan(span); + char networkIdStr[17]; + span->SetAttribute("network_id", Utils::hex(networkId, networkIdStr)); + + std::shared_lock l(_dbs_l); + for (auto d = _dbs.begin(); d != _dbs.end(); ++d) { + if ((*d)->get(networkId, network)) { return true; } } return false; } -bool DBMirrorSet::get(const uint64_t networkId,nlohmann::json &network,const uint64_t memberId,nlohmann::json &member) +bool DBMirrorSet::get(const uint64_t networkId, nlohmann::json& network, const uint64_t memberId, nlohmann::json& member) { - std::lock_guard l(_dbs_l); - for(auto d=_dbs.begin();d!=_dbs.end();++d) { - if ((*d)->get(networkId,network,memberId,member)) + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db_mirror_set"); + auto span = tracer->StartSpan("db_mirror_set::getNetworkAndMember"); + auto scope = tracer->WithActiveSpan(span); + char networkIdStr[17]; + char memberIdStr[11]; + span->SetAttribute("network_id", Utils::hex(networkId, networkIdStr)); + span->SetAttribute("member_id", Utils::hex10(memberId, memberIdStr)); + + std::shared_lock l(_dbs_l); + for (auto d = _dbs.begin(); d != _dbs.end(); ++d) { + if ((*d)->get(networkId, network, memberId, member)) return true; } return false; } -bool DBMirrorSet::get(const uint64_t networkId,nlohmann::json &network,const uint64_t memberId,nlohmann::json &member,DB::NetworkSummaryInfo &info) +bool DBMirrorSet::get(const uint64_t networkId, nlohmann::json& network, const uint64_t memberId, nlohmann::json& member, DB::NetworkSummaryInfo& info) { - std::lock_guard l(_dbs_l); - for(auto d=_dbs.begin();d!=_dbs.end();++d) { - if ((*d)->get(networkId,network,memberId,member,info)) + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db_mirror_set"); + auto span = tracer->StartSpan("db_mirror_set::getNetworkAndMemberWithSummary"); + auto scope = tracer->WithActiveSpan(span); + char networkIdStr[17]; + char memberIdStr[11]; + span->SetAttribute("network_id", Utils::hex(networkId, networkIdStr)); + span->SetAttribute("member_id", Utils::hex10(memberId, memberIdStr)); + + std::shared_lock l(_dbs_l); + for (auto d = _dbs.begin(); d != _dbs.end(); ++d) { + if ((*d)->get(networkId, network, memberId, member, info)) return true; } return false; } -bool DBMirrorSet::get(const uint64_t networkId,nlohmann::json &network,std::vector &members) +bool DBMirrorSet::get(const uint64_t 
networkId, nlohmann::json& network, std::vector& members) { - std::lock_guard l(_dbs_l); - for(auto d=_dbs.begin();d!=_dbs.end();++d) { - if ((*d)->get(networkId,network,members)) + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db_mirror_set"); + auto span = tracer->StartSpan("db_mirror_set::getNetworkAndMembers"); + auto scope = tracer->WithActiveSpan(span); + char networkIdStr[17]; + span->SetAttribute("network_id", Utils::hex(networkId, networkIdStr)); + + std::shared_lock l(_dbs_l); + for (auto d = _dbs.begin(); d != _dbs.end(); ++d) { + if ((*d)->get(networkId, network, members)) return true; } return false; } -AuthInfo DBMirrorSet::getSSOAuthInfo(const nlohmann::json &member, const std::string &redirectURL) +AuthInfo DBMirrorSet::getSSOAuthInfo(const nlohmann::json& member, const std::string& redirectURL) { - std::lock_guard l(_dbs_l); - for(auto d=_dbs.begin();d!=_dbs.end();++d) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db_mirror_set"); + auto span = tracer->StartSpan("db_mirror_set::getSSOAuthInfo"); + auto scope = tracer->WithActiveSpan(span); + + std::shared_lock l(_dbs_l); + for (auto d = _dbs.begin(); d != _dbs.end(); ++d) { AuthInfo info = (*d)->getSSOAuthInfo(member, redirectURL); if (info.enabled) { return info; @@ -137,19 +187,29 @@ AuthInfo DBMirrorSet::getSSOAuthInfo(const nlohmann::json &member, const std::st return AuthInfo(); } -void DBMirrorSet::networks(std::set &networks) +void DBMirrorSet::networks(std::set& networks) { - std::lock_guard l(_dbs_l); - for(auto d=_dbs.begin();d!=_dbs.end();++d) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db_mirror_set"); + auto span = tracer->StartSpan("db_mirror_set::networks"); + auto scope = tracer->WithActiveSpan(span); + + std::shared_lock l(_dbs_l); + for (auto d = _dbs.begin(); d != _dbs.end(); ++d) { (*d)->networks(networks); } } bool DBMirrorSet::waitForReady() { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db_mirror_set"); + auto span = tracer->StartSpan("db_mirror_set::waitForReady"); + auto scope = tracer->WithActiveSpan(span); + bool r = false; - std::lock_guard l(_dbs_l); - for(auto d=_dbs.begin();d!=_dbs.end();++d) { + std::shared_lock l(_dbs_l); + for (auto d = _dbs.begin(); d != _dbs.end(); ++d) { r |= (*d)->waitForReady(); } return r; @@ -157,31 +217,42 @@ bool DBMirrorSet::waitForReady() bool DBMirrorSet::isReady() { - std::lock_guard l(_dbs_l); - for(auto d=_dbs.begin();d!=_dbs.end();++d) { - if (!(*d)->isReady()) + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db_mirror_set"); + auto span = tracer->StartSpan("db_mirror_set::isReady"); + auto scope = tracer->WithActiveSpan(span); + + std::shared_lock l(_dbs_l); + for (auto d = _dbs.begin(); d != _dbs.end(); ++d) { + if (! 
(*d)->isReady()) return false; } return true; } -bool DBMirrorSet::save(nlohmann::json &record,bool notifyListeners) +bool DBMirrorSet::save(nlohmann::json& record, bool notifyListeners) { - std::vector< std::shared_ptr > dbs; + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db_mirror_set"); + auto span = tracer->StartSpan("db_mirror_set::save"); + auto scope = tracer->WithActiveSpan(span); + + std::vector > dbs; { - std::lock_guard l(_dbs_l); + std::unique_lock l(_dbs_l); dbs = _dbs; } if (notifyListeners) { - for(auto d=dbs.begin();d!=dbs.end();++d) { - if ((*d)->save(record,true)) + for (auto d = dbs.begin(); d != dbs.end(); ++d) { + if ((*d)->save(record, true)) return true; } return false; - } else { + } + else { bool modified = false; - for(auto d=dbs.begin();d!=dbs.end();++d) { - modified |= (*d)->save(record,false); + for (auto d = dbs.begin(); d != dbs.end(); ++d) { + modified |= (*d)->save(record, false); } return modified; } @@ -189,55 +260,113 @@ bool DBMirrorSet::save(nlohmann::json &record,bool notifyListeners) void DBMirrorSet::eraseNetwork(const uint64_t networkId) { - std::lock_guard l(_dbs_l); - for(auto d=_dbs.begin();d!=_dbs.end();++d) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db_mirror_set"); + auto span = tracer->StartSpan("db_mirror_set::eraseNetwork"); + auto scope = tracer->WithActiveSpan(span); + char networkIdStr[17]; + span->SetAttribute("network_id", Utils::hex(networkId, networkIdStr)); + + std::unique_lock l(_dbs_l); + for (auto d = _dbs.begin(); d != _dbs.end(); ++d) { (*d)->eraseNetwork(networkId); } } -void DBMirrorSet::eraseMember(const uint64_t networkId,const uint64_t memberId) +void DBMirrorSet::eraseMember(const uint64_t networkId, const uint64_t memberId) { - std::lock_guard l(_dbs_l); - for(auto d=_dbs.begin();d!=_dbs.end();++d) { - (*d)->eraseMember(networkId,memberId); + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db_mirror_set"); + auto span = tracer->StartSpan("db_mirror_set::eraseMember"); + auto scope = tracer->WithActiveSpan(span); + char networkIdStr[17]; + char memberIdStr[11]; + span->SetAttribute("network_id", Utils::hex(networkId, networkIdStr)); + span->SetAttribute("member_id", Utils::hex10(memberId, memberIdStr)); + + std::unique_lock l(_dbs_l); + for (auto d = _dbs.begin(); d != _dbs.end(); ++d) { + (*d)->eraseMember(networkId, memberId); } } -void DBMirrorSet::nodeIsOnline(const uint64_t networkId,const uint64_t memberId,const InetAddress &physicalAddress) +void DBMirrorSet::nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress, const char* osArch) { - std::lock_guard l(_dbs_l); - for(auto d=_dbs.begin();d!=_dbs.end();++d) { - (*d)->nodeIsOnline(networkId,memberId,physicalAddress); + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db_mirror_set"); + auto span = tracer->StartSpan("db_mirror_set::nodeIsOnline"); + auto scope = tracer->WithActiveSpan(span); + char networkIdStr[17]; + char memberIdStr[11]; + char phyAddressStr[INET6_ADDRSTRLEN]; + span->SetAttribute("network_id", Utils::hex(networkId, networkIdStr)); + span->SetAttribute("member_id", Utils::hex10(memberId, memberIdStr)); + span->SetAttribute("physical_address", physicalAddress.toString(phyAddressStr)); + span->SetAttribute("os_arch", osArch); + + std::shared_lock l(_dbs_l); + for 
(auto d = _dbs.begin(); d != _dbs.end(); ++d) { + (*d)->nodeIsOnline(networkId, memberId, physicalAddress, osArch); } } -void DBMirrorSet::onNetworkUpdate(const void *db,uint64_t networkId,const nlohmann::json &network) +void DBMirrorSet::nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress) { + this->nodeIsOnline(networkId, memberId, physicalAddress, "unknown/unknown"); +} + +void DBMirrorSet::onNetworkUpdate(const void* db, uint64_t networkId, const nlohmann::json& network) +{ + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db_mirror_set"); + auto span = tracer->StartSpan("db_mirror_set::onNetworkUpdate"); + auto scope = tracer->WithActiveSpan(span); + char networkIdStr[17]; + span->SetAttribute("network_id", Utils::hex(networkId, networkIdStr)); + nlohmann::json record(network); - std::lock_guard l(_dbs_l); - for(auto d=_dbs.begin();d!=_dbs.end();++d) { + std::unique_lock l(_dbs_l); + for (auto d = _dbs.begin(); d != _dbs.end(); ++d) { if (d->get() != db) { - (*d)->save(record,false); + (*d)->save(record, false); } } - _listener->onNetworkUpdate(this,networkId,network); + _listener->onNetworkUpdate(this, networkId, network); } -void DBMirrorSet::onNetworkMemberUpdate(const void *db,uint64_t networkId,uint64_t memberId,const nlohmann::json &member) +void DBMirrorSet::onNetworkMemberUpdate(const void* db, uint64_t networkId, uint64_t memberId, const nlohmann::json& member) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db_mirror_set"); + auto span = tracer->StartSpan("db_mirror_set::onNetworkMemberUpdate"); + auto scope = tracer->WithActiveSpan(span); + char networkIdStr[17]; + char memberIdStr[11]; + span->SetAttribute("network_id", Utils::hex(networkId, networkIdStr)); + span->SetAttribute("member_id", Utils::hex10(memberId, memberIdStr)); + nlohmann::json record(member); - std::lock_guard l(_dbs_l); - for(auto d=_dbs.begin();d!=_dbs.end();++d) { + std::unique_lock l(_dbs_l); + for (auto d = _dbs.begin(); d != _dbs.end(); ++d) { if (d->get() != db) { - (*d)->save(record,false); + (*d)->save(record, false); } } - _listener->onNetworkMemberUpdate(this,networkId,memberId,member); + _listener->onNetworkMemberUpdate(this, networkId, memberId, member); } -void DBMirrorSet::onNetworkMemberDeauthorize(const void *db,uint64_t networkId,uint64_t memberId) +void DBMirrorSet::onNetworkMemberDeauthorize(const void* db, uint64_t networkId, uint64_t memberId) { - _listener->onNetworkMemberDeauthorize(this,networkId,memberId); + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db_mirror_set"); + auto span = tracer->StartSpan("db_mirror_set::onNetworkMemberDeauthorize"); + auto scope = tracer->WithActiveSpan(span); + char networkIdStr[17]; + char memberIdStr[11]; + span->SetAttribute("network_id", Utils::hex(networkId, networkIdStr)); + span->SetAttribute("member_id", Utils::hex10(memberId, memberIdStr)); + + _listener->onNetworkMemberDeauthorize(this, networkId, memberId); } -} // namespace ZeroTier +} // namespace ZeroTier diff --git a/controller/DBMirrorSet.hpp b/controller/DBMirrorSet.hpp index 883c98fd7..50230c545 100644 --- a/controller/DBMirrorSet.hpp +++ b/controller/DBMirrorSet.hpp @@ -4,7 +4,7 @@ * Use of this software is governed by the Business Source License included * in the LICENSE.TXT file in the project's root directory. 
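Almost every DB and DBMirrorSet method above now opens with the same tracing preamble: fetch the global tracer provider, start a span named after the method, make it the active span for the call, attach identifiers such as network_id and member_id as attributes, and mark the span as errored on failure. The function below is a simplified, hypothetical example of that pattern built from the same opentelemetry-cpp calls the diff uses (GetTracerProvider, GetTracer, StartSpan, WithActiveSpan, SetAttribute, SetStatus); the function name and its body are illustrative only.

#include <stdexcept>
#include <string>

#include "opentelemetry/trace/provider.h"

// Hypothetical helper showing the span-per-method pattern adopted by the controller.
bool lookup_network(const std::string& networkIdHex)
{
    auto provider = opentelemetry::trace::Provider::GetTracerProvider();
    auto tracer = provider->GetTracer("example_component");
    auto span = tracer->StartSpan("example_component::lookup_network");
    auto scope = tracer->WithActiveSpan(span);   // span stays active for the duration of this call

    span->SetAttribute("network_id", networkIdHex);

    bool ok = true;
    try {
        if (networkIdHex.empty()) {
            throw std::runtime_error("empty network id");
        }
        // ... the real lookup would happen here ...
    }
    catch (std::exception& e) {
        // Record the failure on the span, the same way borrow() and the DB getters do above.
        span->SetStatus(opentelemetry::trace::StatusCode::kError, e.what());
        ok = false;
    }
    // Like the controller code, this relies on the span being ended when its
    // handle goes out of scope rather than calling End() explicitly.
    return ok;
}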
* - * Change Date: 2025-01-01 + * Change Date: 2026-01-01 * * On the date above, in accordance with the Business Source License, use * of this software will be governed by version 2.0 of the Apache License. @@ -16,58 +16,58 @@ #include "DB.hpp" -#include #include -#include #include +#include #include +#include namespace ZeroTier { -class DBMirrorSet : public DB::ChangeListener -{ -public: - DBMirrorSet(DB::ChangeListener *listener); +class DBMirrorSet : public DB::ChangeListener { + public: + DBMirrorSet(DB::ChangeListener* listener); virtual ~DBMirrorSet(); bool hasNetwork(const uint64_t networkId) const; - bool get(const uint64_t networkId,nlohmann::json &network); - bool get(const uint64_t networkId,nlohmann::json &network,const uint64_t memberId,nlohmann::json &member); - bool get(const uint64_t networkId,nlohmann::json &network,const uint64_t memberId,nlohmann::json &member,DB::NetworkSummaryInfo &info); - bool get(const uint64_t networkId,nlohmann::json &network,std::vector &members); + bool get(const uint64_t networkId, nlohmann::json& network); + bool get(const uint64_t networkId, nlohmann::json& network, const uint64_t memberId, nlohmann::json& member); + bool get(const uint64_t networkId, nlohmann::json& network, const uint64_t memberId, nlohmann::json& member, DB::NetworkSummaryInfo& info); + bool get(const uint64_t networkId, nlohmann::json& network, std::vector& members); - void networks(std::set &networks); + void networks(std::set& networks); bool waitForReady(); bool isReady(); - bool save(nlohmann::json &record,bool notifyListeners); + bool save(nlohmann::json& record, bool notifyListeners); void eraseNetwork(const uint64_t networkId); - void eraseMember(const uint64_t networkId,const uint64_t memberId); - void nodeIsOnline(const uint64_t networkId,const uint64_t memberId,const InetAddress &physicalAddress); + void eraseMember(const uint64_t networkId, const uint64_t memberId); + virtual void nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress); + virtual void nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress, const char* osArch); // These are called by various DB instances when changes occur. 
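The two nodeIsOnline declarations above are the visible half of a small compatibility pattern: the new overload carries the reporting node's OS/architecture string, and the legacy three-argument form (implemented in DBMirrorSet.cpp above) simply forwards to it with the placeholder "unknown/unknown" so existing call sites keep working. A compact sketch of that forwarding, using a hypothetical StatusSink class in place of the real listener types:

#include <cstdint>
#include <iostream>
#include <string>

// Hypothetical listener showing the overload forwarding used by DBMirrorSet::nodeIsOnline.
class StatusSink {
  public:
    virtual ~StatusSink() = default;

    // New, richer overload: also records the node's OS/architecture.
    virtual void nodeIsOnline(uint64_t networkId, uint64_t memberId, const std::string& physicalAddress, const char* osArch)
    {
        std::cout << std::hex << networkId << "/" << memberId << " online from " << physicalAddress << " (" << osArch << ")" << std::endl;
    }

    // Legacy overload kept for old callers; forwards with the same placeholder the diff uses.
    virtual void nodeIsOnline(uint64_t networkId, uint64_t memberId, const std::string& physicalAddress)
    {
        nodeIsOnline(networkId, memberId, physicalAddress, "unknown/unknown");
    }
};

int main()
{
    StatusSink sink;
    sink.nodeIsOnline(0x8056c2e21c000001ULL, 0xdeadbeef00ULL, "203.0.113.10/9993");                  // old-style caller
    sink.nodeIsOnline(0x8056c2e21c000001ULL, 0xdeadbeef00ULL, "203.0.113.10/9993", "linux/x86_64");  // new-style caller
    return 0;
}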
- virtual void onNetworkUpdate(const void *db,uint64_t networkId,const nlohmann::json &network); - virtual void onNetworkMemberUpdate(const void *db,uint64_t networkId,uint64_t memberId,const nlohmann::json &member); - virtual void onNetworkMemberDeauthorize(const void *db,uint64_t networkId,uint64_t memberId); + virtual void onNetworkUpdate(const void* db, uint64_t networkId, const nlohmann::json& network); + virtual void onNetworkMemberUpdate(const void* db, uint64_t networkId, uint64_t memberId, const nlohmann::json& member); + virtual void onNetworkMemberDeauthorize(const void* db, uint64_t networkId, uint64_t memberId); - AuthInfo getSSOAuthInfo(const nlohmann::json &member, const std::string &redirectURL); + AuthInfo getSSOAuthInfo(const nlohmann::json& member, const std::string& redirectURL); - inline void addDB(const std::shared_ptr &db) + inline void addDB(const std::shared_ptr& db) { db->addListener(this); - std::lock_guard l(_dbs_l); + std::unique_lock l(_dbs_l); _dbs.push_back(db); } -private: - DB::ChangeListener *const _listener; + private: + DB::ChangeListener* const _listener; std::atomic_bool _running; std::thread _syncCheckerThread; - std::vector< std::shared_ptr< DB > > _dbs; - mutable std::mutex _dbs_l; + std::vector > _dbs; + mutable std::shared_mutex _dbs_l; }; -} // namespace ZeroTier +} // namespace ZeroTier #endif diff --git a/controller/EmbeddedNetworkController.cpp b/controller/EmbeddedNetworkController.cpp index d625da829..b7ba04299 100644 --- a/controller/EmbeddedNetworkController.cpp +++ b/controller/EmbeddedNetworkController.cpp @@ -4,7 +4,7 @@ * Use of this software is governed by the Business Source License included * in the LICENSE.TXT file in the project's root directory. * - * Change Date: 2025-01-01 + * Change Date: 2026-01-01 * * On the date above, in accordance with the Business Source License, use * of this software will be governed by version 2.0 of the Apache License. 
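Before the EmbeddedNetworkController changes begin, it is worth noting how DBMirrorSet is composed: addDB registers the mirror set as each backend's change listener and appends the backend under the _dbs lock, reads return the first backend that answers, and saves fan out to every backend after copying the list under the lock. The sketch below reproduces that shape with a hypothetical Backend interface and MirrorSet class; none of these names come from the ZeroTier tree, they only illustrate the copy-under-lock, write-outside-lock structure of DBMirrorSet::save and the first-hit-wins reads of its get overloads.

#include <cstdint>
#include <memory>
#include <shared_mutex>
#include <string>
#include <vector>

// Hypothetical stand-in for the DB backends mirrored by DBMirrorSet.
struct Backend {
    virtual ~Backend() = default;
    virtual bool get(uint64_t id, std::string& out) = 0;
    virtual bool save(const std::string& record) = 0;
};

class MirrorSet {
  public:
    void addBackend(std::shared_ptr<Backend> b)
    {
        std::unique_lock<std::shared_mutex> l(m_lock);
        m_backends.push_back(std::move(b));
    }

    // Reads return the first backend that has the record.
    bool get(uint64_t id, std::string& out)
    {
        std::shared_lock<std::shared_mutex> l(m_lock);
        for (const auto& b : m_backends) {
            if (b->get(id, out))
                return true;
        }
        return false;
    }

    // Writes are broadcast to every backend; the backend list is copied under
    // the lock and the (potentially slow) saves happen outside it.
    bool save(const std::string& record)
    {
        std::vector<std::shared_ptr<Backend> > backends;
        {
            std::shared_lock<std::shared_mutex> l(m_lock);
            backends = m_backends;
        }
        bool modified = false;
        for (const auto& b : backends) {
            modified |= b->save(record);
        }
        return modified;
    }

  private:
    std::vector<std::shared_ptr<Backend> > m_backends;
    mutable std::shared_mutex m_lock;
};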
@@ -16,37 +16,38 @@ #include #include #include +#include #ifndef _WIN32 #include #endif -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - #include "../include/ZeroTierOne.h" #include "../version.h" - #include "EmbeddedNetworkController.hpp" -#include "LFDB.hpp" #include "FileDB.hpp" +#include "LFDB.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #ifdef ZT_CONTROLLER_USE_LIBPQ -#include "PostgreSQL.hpp" +#include "CV1.hpp" +#include "CV2.hpp" #endif -#include "../node/Node.hpp" #include "../node/CertificateOfMembership.hpp" -#include "../node/NetworkConfig.hpp" #include "../node/Dictionary.hpp" #include "../node/MAC.hpp" +#include "../node/NetworkConfig.hpp" +#include "../node/Node.hpp" +#include "opentelemetry/trace/provider.h" using json = nlohmann::json; @@ -63,13 +64,18 @@ namespace ZeroTier { namespace { -static json _renderRule(ZT_VirtualNetworkRule &rule) +static json _renderRule(ZT_VirtualNetworkRule& rule) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::renderRule"); + auto scope = tracer->WithActiveSpan(span); + char tmp[128]; json r = json::object(); const ZT_VirtualNetworkRuleType rt = (ZT_VirtualNetworkRuleType)(rule.t & 0x3f); - switch(rt) { + switch (rt) { case ZT_NETWORK_RULE_ACTION_DROP: r["type"] = "ACTION_DROP"; break; @@ -101,7 +107,7 @@ static json _renderRule(ZT_VirtualNetworkRule &rule) } if (r.empty()) { - switch(rt) { + switch (rt) { case ZT_NETWORK_RULE_MATCH_SOURCE_ZEROTIER_ADDRESS: r["type"] = "MATCH_SOURCE_ZEROTIER_ADDRESS"; r["zt"] = Address(rule.v.zt).toString(tmp); @@ -124,29 +130,47 @@ static json _renderRule(ZT_VirtualNetworkRule &rule) break; case ZT_NETWORK_RULE_MATCH_MAC_SOURCE: r["type"] = "MATCH_MAC_SOURCE"; - OSUtils::ztsnprintf(tmp,sizeof(tmp),"%.2x:%.2x:%.2x:%.2x:%.2x:%.2x",(unsigned int)rule.v.mac[0],(unsigned int)rule.v.mac[1],(unsigned int)rule.v.mac[2],(unsigned int)rule.v.mac[3],(unsigned int)rule.v.mac[4],(unsigned int)rule.v.mac[5]); + OSUtils::ztsnprintf( + tmp, + sizeof(tmp), + "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x", + (unsigned int)rule.v.mac[0], + (unsigned int)rule.v.mac[1], + (unsigned int)rule.v.mac[2], + (unsigned int)rule.v.mac[3], + (unsigned int)rule.v.mac[4], + (unsigned int)rule.v.mac[5]); r["mac"] = tmp; break; case ZT_NETWORK_RULE_MATCH_MAC_DEST: r["type"] = "MATCH_MAC_DEST"; - OSUtils::ztsnprintf(tmp,sizeof(tmp),"%.2x:%.2x:%.2x:%.2x:%.2x:%.2x",(unsigned int)rule.v.mac[0],(unsigned int)rule.v.mac[1],(unsigned int)rule.v.mac[2],(unsigned int)rule.v.mac[3],(unsigned int)rule.v.mac[4],(unsigned int)rule.v.mac[5]); + OSUtils::ztsnprintf( + tmp, + sizeof(tmp), + "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x", + (unsigned int)rule.v.mac[0], + (unsigned int)rule.v.mac[1], + (unsigned int)rule.v.mac[2], + (unsigned int)rule.v.mac[3], + (unsigned int)rule.v.mac[4], + (unsigned int)rule.v.mac[5]); r["mac"] = tmp; break; case ZT_NETWORK_RULE_MATCH_IPV4_SOURCE: r["type"] = "MATCH_IPV4_SOURCE"; - r["ip"] = InetAddress(&(rule.v.ipv4.ip),4,(unsigned int)rule.v.ipv4.mask).toString(tmp); + r["ip"] = InetAddress(&(rule.v.ipv4.ip), 4, (unsigned int)rule.v.ipv4.mask).toString(tmp); break; case ZT_NETWORK_RULE_MATCH_IPV4_DEST: r["type"] = "MATCH_IPV4_DEST"; - r["ip"] = InetAddress(&(rule.v.ipv4.ip),4,(unsigned int)rule.v.ipv4.mask).toString(tmp); + r["ip"] = InetAddress(&(rule.v.ipv4.ip), 4, (unsigned 
int)rule.v.ipv4.mask).toString(tmp); break; case ZT_NETWORK_RULE_MATCH_IPV6_SOURCE: r["type"] = "MATCH_IPV6_SOURCE"; - r["ip"] = InetAddress(rule.v.ipv6.ip,16,(unsigned int)rule.v.ipv6.mask).toString(tmp); + r["ip"] = InetAddress(rule.v.ipv6.ip, 16, (unsigned int)rule.v.ipv6.mask).toString(tmp); break; case ZT_NETWORK_RULE_MATCH_IPV6_DEST: r["type"] = "MATCH_IPV6_DEST"; - r["ip"] = InetAddress(rule.v.ipv6.ip,16,(unsigned int)rule.v.ipv6.mask).toString(tmp); + r["ip"] = InetAddress(rule.v.ipv6.ip, 16, (unsigned int)rule.v.ipv6.mask).toString(tmp); break; case ZT_NETWORK_RULE_MATCH_IP_TOS: r["type"] = "MATCH_IP_TOS"; @@ -167,7 +191,8 @@ static json _renderRule(ZT_VirtualNetworkRule &rule) r["icmpType"] = (unsigned int)rule.v.icmp.type; if ((rule.v.icmp.flags & 0x01) != 0) r["icmpCode"] = (unsigned int)rule.v.icmp.code; - else r["icmpCode"] = json(); + else + r["icmpCode"] = json(); break; case ZT_NETWORK_RULE_MATCH_IP_SOURCE_PORT_RANGE: r["type"] = "MATCH_IP_SOURCE_PORT_RANGE"; @@ -181,7 +206,7 @@ static json _renderRule(ZT_VirtualNetworkRule &rule) break; case ZT_NETWORK_RULE_MATCH_CHARACTERISTICS: r["type"] = "MATCH_CHARACTERISTICS"; - OSUtils::ztsnprintf(tmp,sizeof(tmp),"%.16llx",rule.v.characteristics); + OSUtils::ztsnprintf(tmp, sizeof(tmp), "%.16llx", rule.v.characteristics); r["mask"] = tmp; break; case ZT_NETWORK_RULE_MATCH_FRAME_SIZE_RANGE: @@ -230,9 +255,9 @@ static json _renderRule(ZT_VirtualNetworkRule &rule) break; case ZT_NETWORK_RULE_MATCH_INTEGER_RANGE: r["type"] = "INTEGER_RANGE"; - OSUtils::ztsnprintf(tmp,sizeof(tmp),"%.16llx",rule.v.intRange.start); + OSUtils::ztsnprintf(tmp, sizeof(tmp), "%.16llx", rule.v.intRange.start); r["start"] = tmp; - OSUtils::ztsnprintf(tmp,sizeof(tmp),"%.16llx",rule.v.intRange.start + (uint64_t)rule.v.intRange.end); + OSUtils::ztsnprintf(tmp, sizeof(tmp), "%.16llx", rule.v.intRange.start + (uint64_t)rule.v.intRange.end); r["end"] = tmp; r["idx"] = rule.v.intRange.idx; r["little"] = ((rule.v.intRange.format & 0x80) != 0); @@ -242,7 +267,7 @@ static json _renderRule(ZT_VirtualNetworkRule &rule) break; } - if (!r.empty()) { + if (! r.empty()) { r["not"] = ((rule.t & 0x80) != 0); r["or"] = ((rule.t & 0x40) != 0); } @@ -251,223 +276,308 @@ static json _renderRule(ZT_VirtualNetworkRule &rule) return r; } -static bool _parseRule(json &r,ZT_VirtualNetworkRule &rule) +static bool _parseRule(json& r, ZT_VirtualNetworkRule& rule) { - if (!r.is_object()) + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::parseRule"); + auto scope = tracer->WithActiveSpan(span); + + if (! 
r.is_object()) return false; - const std::string t(OSUtils::jsonString(r["type"],"")); - memset(&rule,0,sizeof(ZT_VirtualNetworkRule)); + const std::string t(OSUtils::jsonString(r["type"], "")); + memset(&rule, 0, sizeof(ZT_VirtualNetworkRule)); - if (OSUtils::jsonBool(r["not"],false)) + if (OSUtils::jsonBool(r["not"], false)) rule.t = 0x80; - else rule.t = 0x00; - if (OSUtils::jsonBool(r["or"],false)) + else + rule.t = 0x00; + if (OSUtils::jsonBool(r["or"], false)) rule.t |= 0x40; bool tag = false; if (t == "ACTION_DROP") { rule.t |= ZT_NETWORK_RULE_ACTION_DROP; return true; - } else if (t == "ACTION_ACCEPT") { + } + else if (t == "ACTION_ACCEPT") { rule.t |= ZT_NETWORK_RULE_ACTION_ACCEPT; return true; - } else if (t == "ACTION_TEE") { + } + else if (t == "ACTION_TEE") { rule.t |= ZT_NETWORK_RULE_ACTION_TEE; - rule.v.fwd.address = Utils::hexStrToU64(OSUtils::jsonString(r["address"],"0").c_str()) & 0xffffffffffULL; - rule.v.fwd.flags = (uint32_t)(OSUtils::jsonInt(r["flags"],0ULL) & 0xffffffffULL); - rule.v.fwd.length = (uint16_t)(OSUtils::jsonInt(r["length"],0ULL) & 0xffffULL); + rule.v.fwd.address = Utils::hexStrToU64(OSUtils::jsonString(r["address"], "0").c_str()) & 0xffffffffffULL; + rule.v.fwd.flags = (uint32_t)(OSUtils::jsonInt(r["flags"], 0ULL) & 0xffffffffULL); + rule.v.fwd.length = (uint16_t)(OSUtils::jsonInt(r["length"], 0ULL) & 0xffffULL); return true; - } else if (t == "ACTION_WATCH") { + } + else if (t == "ACTION_WATCH") { rule.t |= ZT_NETWORK_RULE_ACTION_WATCH; - rule.v.fwd.address = Utils::hexStrToU64(OSUtils::jsonString(r["address"],"0").c_str()) & 0xffffffffffULL; - rule.v.fwd.flags = (uint32_t)(OSUtils::jsonInt(r["flags"],0ULL) & 0xffffffffULL); - rule.v.fwd.length = (uint16_t)(OSUtils::jsonInt(r["length"],0ULL) & 0xffffULL); + rule.v.fwd.address = Utils::hexStrToU64(OSUtils::jsonString(r["address"], "0").c_str()) & 0xffffffffffULL; + rule.v.fwd.flags = (uint32_t)(OSUtils::jsonInt(r["flags"], 0ULL) & 0xffffffffULL); + rule.v.fwd.length = (uint16_t)(OSUtils::jsonInt(r["length"], 0ULL) & 0xffffULL); return true; - } else if (t == "ACTION_REDIRECT") { + } + else if (t == "ACTION_REDIRECT") { rule.t |= ZT_NETWORK_RULE_ACTION_REDIRECT; - rule.v.fwd.address = Utils::hexStrToU64(OSUtils::jsonString(r["address"],"0").c_str()) & 0xffffffffffULL; - rule.v.fwd.flags = (uint32_t)(OSUtils::jsonInt(r["flags"],0ULL) & 0xffffffffULL); + rule.v.fwd.address = Utils::hexStrToU64(OSUtils::jsonString(r["address"], "0").c_str()) & 0xffffffffffULL; + rule.v.fwd.flags = (uint32_t)(OSUtils::jsonInt(r["flags"], 0ULL) & 0xffffffffULL); return true; - } else if (t == "ACTION_BREAK") { + } + else if (t == "ACTION_BREAK") { rule.t |= ZT_NETWORK_RULE_ACTION_BREAK; return true; - } else if (t == "MATCH_SOURCE_ZEROTIER_ADDRESS") { + } + else if (t == "MATCH_SOURCE_ZEROTIER_ADDRESS") { rule.t |= ZT_NETWORK_RULE_MATCH_SOURCE_ZEROTIER_ADDRESS; - rule.v.zt = Utils::hexStrToU64(OSUtils::jsonString(r["zt"],"0").c_str()) & 0xffffffffffULL; + rule.v.zt = Utils::hexStrToU64(OSUtils::jsonString(r["zt"], "0").c_str()) & 0xffffffffffULL; return true; - } else if (t == "MATCH_DEST_ZEROTIER_ADDRESS") { + } + else if (t == "MATCH_DEST_ZEROTIER_ADDRESS") { rule.t |= ZT_NETWORK_RULE_MATCH_DEST_ZEROTIER_ADDRESS; - rule.v.zt = Utils::hexStrToU64(OSUtils::jsonString(r["zt"],"0").c_str()) & 0xffffffffffULL; + rule.v.zt = Utils::hexStrToU64(OSUtils::jsonString(r["zt"], "0").c_str()) & 0xffffffffffULL; return true; - } else if (t == "MATCH_VLAN_ID") { + } + else if (t == "MATCH_VLAN_ID") { rule.t |= 
ZT_NETWORK_RULE_MATCH_VLAN_ID; - rule.v.vlanId = (uint16_t)(OSUtils::jsonInt(r["vlanId"],0ULL) & 0xffffULL); + rule.v.vlanId = (uint16_t)(OSUtils::jsonInt(r["vlanId"], 0ULL) & 0xffffULL); return true; - } else if (t == "MATCH_VLAN_PCP") { + } + else if (t == "MATCH_VLAN_PCP") { rule.t |= ZT_NETWORK_RULE_MATCH_VLAN_PCP; - rule.v.vlanPcp = (uint8_t)(OSUtils::jsonInt(r["vlanPcp"],0ULL) & 0xffULL); + rule.v.vlanPcp = (uint8_t)(OSUtils::jsonInt(r["vlanPcp"], 0ULL) & 0xffULL); return true; - } else if (t == "MATCH_VLAN_DEI") { + } + else if (t == "MATCH_VLAN_DEI") { rule.t |= ZT_NETWORK_RULE_MATCH_VLAN_DEI; - rule.v.vlanDei = (uint8_t)(OSUtils::jsonInt(r["vlanDei"],0ULL) & 0xffULL); + rule.v.vlanDei = (uint8_t)(OSUtils::jsonInt(r["vlanDei"], 0ULL) & 0xffULL); return true; - } else if (t == "MATCH_MAC_SOURCE") { + } + else if (t == "MATCH_MAC_SOURCE") { rule.t |= ZT_NETWORK_RULE_MATCH_MAC_SOURCE; - const std::string mac(OSUtils::jsonString(r["mac"],"0")); - Utils::unhex(mac.c_str(),(unsigned int)mac.length(),rule.v.mac,6); + std::string mac(OSUtils::jsonString(r["mac"], "0")); + Utils::cleanMac(mac); + Utils::unhex(mac.c_str(), (unsigned int)mac.length(), rule.v.mac, 6); return true; - } else if (t == "MATCH_MAC_DEST") { + } + else if (t == "MATCH_MAC_DEST") { rule.t |= ZT_NETWORK_RULE_MATCH_MAC_DEST; - const std::string mac(OSUtils::jsonString(r["mac"],"0")); - Utils::unhex(mac.c_str(),(unsigned int)mac.length(),rule.v.mac,6); + std::string mac(OSUtils::jsonString(r["mac"], "0")); + Utils::cleanMac(mac); + Utils::unhex(mac.c_str(), (unsigned int)mac.length(), rule.v.mac, 6); return true; - } else if (t == "MATCH_IPV4_SOURCE") { + } + else if (t == "MATCH_IPV4_SOURCE") { rule.t |= ZT_NETWORK_RULE_MATCH_IPV4_SOURCE; - InetAddress ip(OSUtils::jsonString(r["ip"],"0.0.0.0").c_str()); - rule.v.ipv4.ip = reinterpret_cast<struct sockaddr_in *>(&ip)->sin_addr.s_addr; - rule.v.ipv4.mask = Utils::ntoh(reinterpret_cast<struct sockaddr_in *>(&ip)->sin_port) & 0xff; - if (rule.v.ipv4.mask > 32) rule.v.ipv4.mask = 32; + InetAddress ip(OSUtils::jsonString(r["ip"], "0.0.0.0").c_str()); + rule.v.ipv4.ip = reinterpret_cast<struct sockaddr_in*>(&ip)->sin_addr.s_addr; + rule.v.ipv4.mask = Utils::ntoh(reinterpret_cast<struct sockaddr_in*>(&ip)->sin_port) & 0xff; + if (rule.v.ipv4.mask > 32) + rule.v.ipv4.mask = 32; return true; - } else if (t == "MATCH_IPV4_DEST") { + } + else if (t == "MATCH_IPV4_DEST") { rule.t |= ZT_NETWORK_RULE_MATCH_IPV4_DEST; - InetAddress ip(OSUtils::jsonString(r["ip"],"0.0.0.0").c_str()); - rule.v.ipv4.ip = reinterpret_cast<struct sockaddr_in *>(&ip)->sin_addr.s_addr; - rule.v.ipv4.mask = Utils::ntoh(reinterpret_cast<struct sockaddr_in *>(&ip)->sin_port) & 0xff; - if (rule.v.ipv4.mask > 32) rule.v.ipv4.mask = 32; + InetAddress ip(OSUtils::jsonString(r["ip"], "0.0.0.0").c_str()); + rule.v.ipv4.ip = reinterpret_cast<struct sockaddr_in*>(&ip)->sin_addr.s_addr; + rule.v.ipv4.mask = Utils::ntoh(reinterpret_cast<struct sockaddr_in*>(&ip)->sin_port) & 0xff; + if (rule.v.ipv4.mask > 32) + rule.v.ipv4.mask = 32; return true; - } else if (t == "MATCH_IPV6_SOURCE") { + } + else if (t == "MATCH_IPV6_SOURCE") { rule.t |= ZT_NETWORK_RULE_MATCH_IPV6_SOURCE; - InetAddress ip(OSUtils::jsonString(r["ip"],"::0").c_str()); - memcpy(rule.v.ipv6.ip,reinterpret_cast<struct sockaddr_in6 *>(&ip)->sin6_addr.s6_addr,16); - rule.v.ipv6.mask = Utils::ntoh(reinterpret_cast<struct sockaddr_in6 *>(&ip)->sin6_port) & 0xff; - if (rule.v.ipv6.mask > 128) rule.v.ipv6.mask = 128; + InetAddress ip(OSUtils::jsonString(r["ip"], "::0").c_str()); + memcpy(rule.v.ipv6.ip, reinterpret_cast<struct sockaddr_in6*>(&ip)->sin6_addr.s6_addr, 16); + rule.v.ipv6.mask = Utils::ntoh(reinterpret_cast<struct sockaddr_in6*>(&ip)->sin6_port) & 0xff; + if (rule.v.ipv6.mask > 128) + rule.v.ipv6.mask = 128; 
return true; - } else if (t == "MATCH_IPV6_DEST") { + } + else if (t == "MATCH_IPV6_DEST") { rule.t |= ZT_NETWORK_RULE_MATCH_IPV6_DEST; - InetAddress ip(OSUtils::jsonString(r["ip"],"::0").c_str()); - memcpy(rule.v.ipv6.ip,reinterpret_cast(&ip)->sin6_addr.s6_addr,16); - rule.v.ipv6.mask = Utils::ntoh(reinterpret_cast(&ip)->sin6_port) & 0xff; - if (rule.v.ipv6.mask > 128) rule.v.ipv6.mask = 128; + InetAddress ip(OSUtils::jsonString(r["ip"], "::0").c_str()); + memcpy(rule.v.ipv6.ip, reinterpret_cast(&ip)->sin6_addr.s6_addr, 16); + rule.v.ipv6.mask = Utils::ntoh(reinterpret_cast(&ip)->sin6_port) & 0xff; + if (rule.v.ipv6.mask > 128) + rule.v.ipv6.mask = 128; return true; - } else if (t == "MATCH_IP_TOS") { + } + else if (t == "MATCH_IP_TOS") { rule.t |= ZT_NETWORK_RULE_MATCH_IP_TOS; - rule.v.ipTos.mask = (uint8_t)(OSUtils::jsonInt(r["mask"],0ULL) & 0xffULL); - rule.v.ipTos.value[0] = (uint8_t)(OSUtils::jsonInt(r["start"],0ULL) & 0xffULL); - rule.v.ipTos.value[1] = (uint8_t)(OSUtils::jsonInt(r["end"],0ULL) & 0xffULL); + rule.v.ipTos.mask = (uint8_t)(OSUtils::jsonInt(r["mask"], 0ULL) & 0xffULL); + rule.v.ipTos.value[0] = (uint8_t)(OSUtils::jsonInt(r["start"], 0ULL) & 0xffULL); + rule.v.ipTos.value[1] = (uint8_t)(OSUtils::jsonInt(r["end"], 0ULL) & 0xffULL); return true; - } else if (t == "MATCH_IP_PROTOCOL") { + } + else if (t == "MATCH_IP_PROTOCOL") { rule.t |= ZT_NETWORK_RULE_MATCH_IP_PROTOCOL; - rule.v.ipProtocol = (uint8_t)(OSUtils::jsonInt(r["ipProtocol"],0ULL) & 0xffULL); + rule.v.ipProtocol = (uint8_t)(OSUtils::jsonInt(r["ipProtocol"], 0ULL) & 0xffULL); return true; - } else if (t == "MATCH_ETHERTYPE") { + } + else if (t == "MATCH_ETHERTYPE") { rule.t |= ZT_NETWORK_RULE_MATCH_ETHERTYPE; - rule.v.etherType = (uint16_t)(OSUtils::jsonInt(r["etherType"],0ULL) & 0xffffULL); + rule.v.etherType = (uint16_t)(OSUtils::jsonInt(r["etherType"], 0ULL) & 0xffffULL); return true; - } else if (t == "MATCH_ICMP") { + } + else if (t == "MATCH_ICMP") { rule.t |= ZT_NETWORK_RULE_MATCH_ICMP; - rule.v.icmp.type = (uint8_t)(OSUtils::jsonInt(r["icmpType"],0ULL) & 0xffULL); - json &code = r["icmpCode"]; + rule.v.icmp.type = (uint8_t)(OSUtils::jsonInt(r["icmpType"], 0ULL) & 0xffULL); + json& code = r["icmpCode"]; if (code.is_null()) { rule.v.icmp.code = 0; rule.v.icmp.flags = 0x00; - } else { - rule.v.icmp.code = (uint8_t)(OSUtils::jsonInt(code,0ULL) & 0xffULL); + } + else { + rule.v.icmp.code = (uint8_t)(OSUtils::jsonInt(code, 0ULL) & 0xffULL); rule.v.icmp.flags = 0x01; } return true; - } else if (t == "MATCH_IP_SOURCE_PORT_RANGE") { + } + else if (t == "MATCH_IP_SOURCE_PORT_RANGE") { rule.t |= ZT_NETWORK_RULE_MATCH_IP_SOURCE_PORT_RANGE; - rule.v.port[0] = (uint16_t)(OSUtils::jsonInt(r["start"],0ULL) & 0xffffULL); - rule.v.port[1] = (uint16_t)(OSUtils::jsonInt(r["end"],(uint64_t)rule.v.port[0]) & 0xffffULL); + rule.v.port[0] = (uint16_t)(OSUtils::jsonInt(r["start"], 0ULL) & 0xffffULL); + rule.v.port[1] = (uint16_t)(OSUtils::jsonInt(r["end"], (uint64_t)rule.v.port[0]) & 0xffffULL); return true; - } else if (t == "MATCH_IP_DEST_PORT_RANGE") { + } + else if (t == "MATCH_IP_DEST_PORT_RANGE") { rule.t |= ZT_NETWORK_RULE_MATCH_IP_DEST_PORT_RANGE; - rule.v.port[0] = (uint16_t)(OSUtils::jsonInt(r["start"],0ULL) & 0xffffULL); - rule.v.port[1] = (uint16_t)(OSUtils::jsonInt(r["end"],(uint64_t)rule.v.port[0]) & 0xffffULL); + rule.v.port[0] = (uint16_t)(OSUtils::jsonInt(r["start"], 0ULL) & 0xffffULL); + rule.v.port[1] = (uint16_t)(OSUtils::jsonInt(r["end"], (uint64_t)rule.v.port[0]) & 0xffffULL); return true; - } else if (t == 
"MATCH_CHARACTERISTICS") { + } + else if (t == "MATCH_CHARACTERISTICS") { rule.t |= ZT_NETWORK_RULE_MATCH_CHARACTERISTICS; if (r.count("mask")) { - json &v = r["mask"]; + json& v = r["mask"]; if (v.is_number()) { rule.v.characteristics = v; - } else { + } + else { std::string tmp = v; rule.v.characteristics = Utils::hexStrToU64(tmp.c_str()); } } return true; - } else if (t == "MATCH_FRAME_SIZE_RANGE") { + } + else if (t == "MATCH_FRAME_SIZE_RANGE") { rule.t |= ZT_NETWORK_RULE_MATCH_FRAME_SIZE_RANGE; - rule.v.frameSize[0] = (uint16_t)(OSUtils::jsonInt(r["start"],0ULL) & 0xffffULL); - rule.v.frameSize[1] = (uint16_t)(OSUtils::jsonInt(r["end"],(uint64_t)rule.v.frameSize[0]) & 0xffffULL); + rule.v.frameSize[0] = (uint16_t)(OSUtils::jsonInt(r["start"], 0ULL) & 0xffffULL); + rule.v.frameSize[1] = (uint16_t)(OSUtils::jsonInt(r["end"], (uint64_t)rule.v.frameSize[0]) & 0xffffULL); return true; - } else if (t == "MATCH_RANDOM") { + } + else if (t == "MATCH_RANDOM") { rule.t |= ZT_NETWORK_RULE_MATCH_RANDOM; - rule.v.randomProbability = (uint32_t)(OSUtils::jsonInt(r["probability"],0ULL) & 0xffffffffULL); + rule.v.randomProbability = (uint32_t)(OSUtils::jsonInt(r["probability"], 0ULL) & 0xffffffffULL); return true; - } else if (t == "MATCH_TAGS_DIFFERENCE") { + } + else if (t == "MATCH_TAGS_DIFFERENCE") { rule.t |= ZT_NETWORK_RULE_MATCH_TAGS_DIFFERENCE; tag = true; - } else if (t == "MATCH_TAGS_BITWISE_AND") { + } + else if (t == "MATCH_TAGS_BITWISE_AND") { rule.t |= ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_AND; tag = true; - } else if (t == "MATCH_TAGS_BITWISE_OR") { + } + else if (t == "MATCH_TAGS_BITWISE_OR") { rule.t |= ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_OR; tag = true; - } else if (t == "MATCH_TAGS_BITWISE_XOR") { + } + else if (t == "MATCH_TAGS_BITWISE_XOR") { rule.t |= ZT_NETWORK_RULE_MATCH_TAGS_BITWISE_XOR; tag = true; - } else if (t == "MATCH_TAGS_EQUAL") { + } + else if (t == "MATCH_TAGS_EQUAL") { rule.t |= ZT_NETWORK_RULE_MATCH_TAGS_EQUAL; tag = true; - } else if (t == "MATCH_TAG_SENDER") { + } + else if (t == "MATCH_TAG_SENDER") { rule.t |= ZT_NETWORK_RULE_MATCH_TAG_SENDER; tag = true; - } else if (t == "MATCH_TAG_RECEIVER") { + } + else if (t == "MATCH_TAG_RECEIVER") { rule.t |= ZT_NETWORK_RULE_MATCH_TAG_RECEIVER; tag = true; - } else if (t == "INTEGER_RANGE") { - json &s = r["start"]; + } + else if (t == "INTEGER_RANGE") { + json& s = r["start"]; if (s.is_string()) { std::string tmp = s; rule.v.intRange.start = Utils::hexStrToU64(tmp.c_str()); - } else { - rule.v.intRange.start = OSUtils::jsonInt(s,0ULL); } - json &e = r["end"]; + else { + rule.v.intRange.start = OSUtils::jsonInt(s, 0ULL); + } + json& e = r["end"]; if (e.is_string()) { std::string tmp = e; rule.v.intRange.end = (uint32_t)(Utils::hexStrToU64(tmp.c_str()) - rule.v.intRange.start); - } else { - rule.v.intRange.end = (uint32_t)(OSUtils::jsonInt(e,0ULL) - rule.v.intRange.start); } - rule.v.intRange.idx = (uint16_t)OSUtils::jsonInt(r["idx"],0ULL); - rule.v.intRange.format = (OSUtils::jsonBool(r["little"],false)) ? 0x80 : 0x00; - rule.v.intRange.format |= (uint8_t)((OSUtils::jsonInt(r["bits"],1ULL) - 1) & 63); + else { + rule.v.intRange.end = (uint32_t)(OSUtils::jsonInt(e, 0ULL) - rule.v.intRange.start); + } + rule.v.intRange.idx = (uint16_t)OSUtils::jsonInt(r["idx"], 0ULL); + rule.v.intRange.format = (OSUtils::jsonBool(r["little"], false)) ? 
0x80 : 0x00; + rule.v.intRange.format |= (uint8_t)((OSUtils::jsonInt(r["bits"], 1ULL) - 1) & 63); } if (tag) { - rule.v.tag.id = (uint32_t)(OSUtils::jsonInt(r["id"],0ULL) & 0xffffffffULL); - rule.v.tag.value = (uint32_t)(OSUtils::jsonInt(r["value"],0ULL) & 0xffffffffULL); + rule.v.tag.id = (uint32_t)(OSUtils::jsonInt(r["id"], 0ULL) & 0xffffffffULL); + rule.v.tag.value = (uint32_t)(OSUtils::jsonInt(r["value"], 0ULL) & 0xffffffffULL); return true; } return false; } -} // anonymous namespace +} // anonymous namespace -EmbeddedNetworkController::EmbeddedNetworkController(Node *node,const char *ztPath,const char *dbPath, int listenPort, RedisConfig *rc) : - _startTime(OSUtils::now()), - _listenPort(listenPort), - _node(node), - _ztPath(ztPath), - _path(dbPath), - _sender((NetworkController::Sender *)0), - _db(this), - _rc(rc) +EmbeddedNetworkController::EmbeddedNetworkController(Node* node, const char* ztPath, const char* dbPath, int listenPort, RedisConfig* rc) + : _startTime(OSUtils::now()) + , _listenPort(listenPort) + , _node(node) + , _ztPath(ztPath) + , _path(dbPath) + , _signingId() + , _signingIdAddressString() + , _sender((NetworkController::Sender*)0) + , _db(this) + , _queue() + , _threads() + , _threads_l() + , _memberStatus() + , _memberStatus_l() + , _expiringSoon() + , _expiringSoon_l() + , _rc(rc) + , _ssoExpiryRunning(true) + , _ssoExpiry(std::thread(&EmbeddedNetworkController::_ssoExpiryThread, this)) + +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + , _member_status_lookup { "nc_member_status_lookup", "" } + , _member_status_lookup_count { "nc_member_status_lookup_count", "" } + , _node_is_online { "nc_node_is_online", "" } + , _node_is_online_count { "nc_node_is_online_count", "" } + , _get_and_init_member { "nc_get_and_init_member", "" } + , _get_and_init_member_count { "nc_get_and_init_member_count", "" } + , _have_identity { "nc_have_identity", "" } + , _have_identity_count { "nc_have_identity_count", "" } + , _determine_auth { "nc_determine_auth", "" } + , _determine_auth_count { "nc_determine_auth_count", "" } + , _sso_check { "nc_sso_check", "" } + , _sso_check_count { "nc_sso_check_count", "" } + , _auth_check { "nc_auth_check", "" } + , _auth_check_count { "nc_auth_check_count", "" } + , _json_schlep { "nc_json_schlep", "" } + , _json_schlep_count { "nc_json_schlep_count", "" } + , _issue_certificate { "nc_issue_certificate", "" } + , _issue_certificate_count { "nc_issue_certificate_count", "" } + , _save_member { "nc_save_member", "" } + , _save_member_count { "nc_save_member_count", "" } + , _send_netconf { "nc_send_netconf2", "" } + , _send_netconf_count { "nc_send_netconf2_count", "" } +#endif { } @@ -475,38 +585,54 @@ EmbeddedNetworkController::~EmbeddedNetworkController() { std::lock_guard l(_threads_l); _queue.stop(); - for(auto t=_threads.begin();t!=_threads.end();++t) + for (auto t = _threads.begin(); t != _threads.end(); ++t) { t->join(); + } + _ssoExpiryRunning = false; + _ssoExpiry.join(); } -void EmbeddedNetworkController::setSSORedirectURL(const std::string &url) { +void EmbeddedNetworkController::setSSORedirectURL(const std::string& url) +{ _ssoRedirectURL = url; } -void EmbeddedNetworkController::init(const Identity &signingId,Sender *sender) +void EmbeddedNetworkController::init(const Identity& signingId, Sender* sender) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::init"); + auto scope = 
tracer->WithActiveSpan(span); + char tmp[64]; _signingId = signingId; _sender = sender; _signingIdAddressString = signingId.address().toString(tmp); #ifdef ZT_CONTROLLER_USE_LIBPQ - if ((_path.length() > 9)&&(_path.substr(0,9) == "postgres:")) { - _db.addDB(std::shared_ptr<DB>(new PostgreSQL(_signingId,_path.substr(9).c_str(), _listenPort, _rc))); - } else { + if ((_path.length() > 9) && (_path.substr(0, 9) == "postgres:")) { + fprintf(stderr, "CV1\n"); + _db.addDB(std::shared_ptr<DB>(new CV1(_signingId, _path.substr(9).c_str(), _listenPort, _rc))); + } + else if ((_path.length() > 4) && (_path.substr(0, 4) == "cv2:")) { + fprintf(stderr, "CV2\n"); + _db.addDB(std::shared_ptr<DB>(new CV2(_signingId, _path.substr(4).c_str(), _listenPort))); + } + else { + fprintf(stderr, "FileDB\n"); #endif - _db.addDB(std::shared_ptr<DB>(new FileDB(_path.c_str()))); + _db.addDB(std::shared_ptr<DB>(new FileDB(_path.c_str()))); #ifdef ZT_CONTROLLER_USE_LIBPQ } #endif std::string lfJSON; - OSUtils::readFile((_ztPath + ZT_PATH_SEPARATOR_S "local.conf").c_str(),lfJSON); + OSUtils::readFile((_ztPath + ZT_PATH_SEPARATOR_S "local.conf").c_str(), lfJSON); if (lfJSON.length() > 0) { nlohmann::json lfConfig(OSUtils::jsonParse(lfJSON)); - nlohmann::json &settings = lfConfig["settings"]; + nlohmann::json& settings = lfConfig["settings"]; if (settings.is_object()) { - nlohmann::json &controllerDb = settings["controllerDb"]; + nlohmann::json& controllerDb = settings["controllerDb"]; if (controllerDb.is_object()) { std::string type = controllerDb["type"]; if (type == "lf") { @@ -514,14 +640,14 @@ void EmbeddedNetworkController::init(const Identity &signingId,Sender *sender) std::string lfHost = controllerDb["host"]; int lfPort = controllerDb["port"]; bool storeOnlineState = controllerDb["storeOnlineState"]; - if ((lfOwner.length())&&(lfHost.length())&&(lfPort > 0)&&(lfPort < 65536)) { + if ((lfOwner.length()) && (lfHost.length()) && (lfPort > 0) && (lfPort < 65536)) { std::size_t pubHdrLoc = lfOwner.find("Public: "); - if ((pubHdrLoc > 0)&&((pubHdrLoc + 8) < lfOwner.length())) { + if ((pubHdrLoc > 0) && ((pubHdrLoc + 8) < lfOwner.length())) { std::string lfOwnerPublic = lfOwner.substr(pubHdrLoc + 8); std::size_t pubHdrEnd = lfOwnerPublic.find_first_of("\n\r\t "); if (pubHdrEnd != std::string::npos) { - lfOwnerPublic = lfOwnerPublic.substr(0,pubHdrEnd); - _db.addDB(std::shared_ptr<DB>(new LFDB(_signingId,_path.c_str(),lfOwner.c_str(),lfOwnerPublic.c_str(),lfHost.c_str(),lfPort,storeOnlineState))); + lfOwnerPublic = lfOwnerPublic.substr(0, pubHdrEnd); + _db.addDB(std::shared_ptr<DB>(new LFDB(_signingId, _path.c_str(), lfOwner.c_str(), lfOwnerPublic.c_str(), lfHost.c_str(), lfPort, storeOnlineState))); } } } @@ -533,17 +659,29 @@ void EmbeddedNetworkController::init(const Identity &signingId,Sender *sender) _db.waitForReady(); } -void EmbeddedNetworkController::request( - uint64_t nwid, - const InetAddress &fromAddr, - uint64_t requestPacketId, - const Identity &identity, - const Dictionary<ZT_NETWORKCONFIG_METADATA_DICT_CAPACITY> &metaData) +void EmbeddedNetworkController::request(uint64_t nwid, const InetAddress& fromAddr, uint64_t requestPacketId, const Identity& identity, const Dictionary<ZT_NETWORKCONFIG_METADATA_DICT_CAPACITY>& metaData) { - if (((!_signingId)||(!_signingId.hasPrivate()))||(_signingId.address().toInt() != (nwid >> 24))||(!_sender)) + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::request"); + auto scope = tracer->WithActiveSpan(span); + + if (((! _signingId) || (! 
_signingId.hasPrivate())) || (_signingId.address().toInt() != (nwid >> 24)) || (! _sender)) return; _startThreads(); - _RQEntry *qe = new _RQEntry; + + const int64_t now = OSUtils::now(); + + if (requestPacketId) { + std::lock_guard l(_memberStatus_l); + _MemberStatus& ms = _memberStatus[_MemberStatusKey(nwid, identity.address().toInt())]; + if ((now - ms.lastRequestTime) <= ZT_NETCONF_MIN_REQUEST_PERIOD) { + return; + } + ms.lastRequestTime = now; + } + + _RQEntry* qe = new _RQEntry; qe->nwid = nwid; qe->requestPacketId = requestPacketId; qe->fromAddr = fromAddr; @@ -553,737 +691,1000 @@ void EmbeddedNetworkController::request( _queue.post(qe); } -unsigned int EmbeddedNetworkController::handleControlPlaneHttpGET( - const std::vector &path, - const std::map &urlArgs, - const std::map &headers, - const std::string &body, - std::string &responseBody, - std::string &responseContentType) +std::string EmbeddedNetworkController::networkUpdateFromPostData(uint64_t networkID, const std::string& body) { - if ((!path.empty())&&(path[0] == "network")) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::networkUpdateFromPostData"); + auto scope = tracer->WithActiveSpan(span); - if ((path.size() >= 2)&&(path[1].length() == 16)) { - const uint64_t nwid = Utils::hexStrToU64(path[1].c_str()); - json network; - if (!_db.get(nwid,network)) - return 404; + json b = OSUtils::jsonParse(body); - if (path.size() >= 3) { + char nwids[24]; + OSUtils::ztsnprintf(nwids, sizeof(nwids), "%.16llx", networkID); - if (path[2] == "member") { + json network; + _db.get(networkID, network); + DB::initNetwork(network); + if (b.count("name")) + network["name"] = OSUtils::jsonString(b["name"], ""); + if (b.count("private")) + network["private"] = OSUtils::jsonBool(b["private"], true); + if (b.count("enableBroadcast")) + network["enableBroadcast"] = OSUtils::jsonBool(b["enableBroadcast"], false); + if (b.count("multicastLimit")) + network["multicastLimit"] = OSUtils::jsonInt(b["multicastLimit"], 32ULL); + if (b.count("mtu")) + network["mtu"] = std::max(std::min((unsigned int)OSUtils::jsonInt(b["mtu"], ZT_DEFAULT_MTU), (unsigned int)ZT_MAX_MTU), (unsigned int)ZT_MIN_MTU); - if (path.size() >= 4) { - // Get member + if (b.count("remoteTraceTarget")) { + const std::string rtt(OSUtils::jsonString(b["remoteTraceTarget"], "")); + if (rtt.length() == 10) { + network["remoteTraceTarget"] = rtt; + } + else { + network["remoteTraceTarget"] = json(); + } + } + if (b.count("remoteTraceLevel")) + network["remoteTraceLevel"] = OSUtils::jsonInt(b["remoteTraceLevel"], 0ULL); - const uint64_t address = Utils::hexStrToU64(path[3].c_str()); - json member; - if (!_db.get(nwid,network,address,member)) - return 404; - responseBody = OSUtils::jsonDump(member); - responseContentType = "application/json"; + if (b.count("v4AssignMode")) { + json nv4m; + json& v4m = b["v4AssignMode"]; + if (v4m.is_string()) { // backward compatibility + nv4m["zt"] = (OSUtils::jsonString(v4m, "") == "zt"); + } + else if (v4m.is_object()) { + nv4m["zt"] = OSUtils::jsonBool(v4m["zt"], false); + } + else + nv4m["zt"] = false; + network["v4AssignMode"] = nv4m; + } - } else { - // List members and their revisions + if (b.count("v6AssignMode")) { + json nv6m; + json& v6m = b["v6AssignMode"]; + if (! 
nv6m.is_object()) + nv6m = json::object(); + if (v6m.is_string()) { // backward compatibility + std::vector v6ms(OSUtils::split(OSUtils::jsonString(v6m, "").c_str(), ",", "", "")); + std::sort(v6ms.begin(), v6ms.end()); + v6ms.erase(std::unique(v6ms.begin(), v6ms.end()), v6ms.end()); + nv6m["rfc4193"] = false; + nv6m["zt"] = false; + nv6m["6plane"] = false; + for (std::vector::iterator i(v6ms.begin()); i != v6ms.end(); ++i) { + if (*i == "rfc4193") + nv6m["rfc4193"] = true; + else if (*i == "zt") + nv6m["zt"] = true; + else if (*i == "6plane") + nv6m["6plane"] = true; + } + } + else if (v6m.is_object()) { + if (v6m.count("rfc4193")) + nv6m["rfc4193"] = OSUtils::jsonBool(v6m["rfc4193"], false); + if (v6m.count("zt")) + nv6m["zt"] = OSUtils::jsonBool(v6m["zt"], false); + if (v6m.count("6plane")) + nv6m["6plane"] = OSUtils::jsonBool(v6m["6plane"], false); + } + else { + nv6m["rfc4193"] = false; + nv6m["zt"] = false; + nv6m["6plane"] = false; + } + network["v6AssignMode"] = nv6m; + } - responseBody = "{"; - std::vector members; - if (_db.get(nwid,network,members)) { - responseBody.reserve((members.size() + 2) * 32); - std::string mid; - for(auto member=members.begin();member!=members.end();++member) { - mid = OSUtils::jsonString((*member)["id"], ""); - char tmp[128]; - OSUtils::ztsnprintf(tmp,sizeof(tmp),"%s\"%s\":%llu",(responseBody.length() > 1) ? "," : "",mid.c_str(),(unsigned long long)OSUtils::jsonInt((*member)["revision"],0)); - responseBody.append(tmp); + if (b.count("routes")) { + json& rts = b["routes"]; + if (rts.is_array()) { + json nrts = json::array(); + for (unsigned long i = 0; i < rts.size(); ++i) { + json& rt = rts[i]; + if (rt.is_object()) { + json& target = rt["target"]; + json& via = rt["via"]; + if (target.is_string()) { + InetAddress t(target.get().c_str()); + InetAddress v; + if (via.is_string()) + v.fromString(via.get().c_str()); + if (((t.ss_family == AF_INET) || (t.ss_family == AF_INET6)) && (t.netmaskBitsValid())) { + json tmp; + char tmp2[64]; + tmp["target"] = t.toString(tmp2); + if (v.ss_family == t.ss_family) + tmp["via"] = v.toIpString(tmp2); + else + tmp["via"] = json(); + nrts.push_back(tmp); + if (nrts.size() >= ZT_CONTROLLER_MAX_ARRAY_SIZE) + break; + } + } + } + } + network["routes"] = nrts; + } + } + + if (b.count("ipAssignmentPools")) { + json& ipp = b["ipAssignmentPools"]; + if (ipp.is_array()) { + json nipp = json::array(); + for (unsigned long i = 0; i < ipp.size(); ++i) { + json& ip = ipp[i]; + if ((ip.is_object()) && (ip.count("ipRangeStart")) && (ip.count("ipRangeEnd"))) { + InetAddress f(OSUtils::jsonString(ip["ipRangeStart"], "").c_str()); + InetAddress t(OSUtils::jsonString(ip["ipRangeEnd"], "").c_str()); + if (((f.ss_family == AF_INET) || (f.ss_family == AF_INET6)) && (f.ss_family == t.ss_family)) { + json tmp = json::object(); + char tmp2[64]; + tmp["ipRangeStart"] = f.toIpString(tmp2); + tmp["ipRangeEnd"] = t.toIpString(tmp2); + nipp.push_back(tmp); + if (nipp.size() >= ZT_CONTROLLER_MAX_ARRAY_SIZE) + break; + } + } + } + network["ipAssignmentPools"] = nipp; + } + } + + if (b.count("rules")) { + json& rules = b["rules"]; + if (rules.is_array()) { + json nrules = json::array(); + for (unsigned long i = 0; i < rules.size(); ++i) { + json& rule = rules[i]; + if (rule.is_object()) { + ZT_VirtualNetworkRule ztr; + if (_parseRule(rule, ztr)) { + nrules.push_back(_renderRule(ztr)); + if (nrules.size() >= ZT_CONTROLLER_MAX_ARRAY_SIZE) + break; + } + } + } + network["rules"] = nrules; + } + } + + if (b.count("authTokens")) { + json& authTokens = 
b["authTokens"]; + if (authTokens.is_object()) { + json nat; + for (json::iterator t(authTokens.begin()); t != authTokens.end(); ++t) { + if ((t.value().is_number()) && (t.value() >= 0)) + nat[t.key()] = t.value(); + } + network["authTokens"] = nat; + } + else { + network["authTokens"] = { {} }; + } + } + + if (b.count("capabilities")) { + json& capabilities = b["capabilities"]; + if (capabilities.is_array()) { + std::map ncaps; + for (unsigned long i = 0; i < capabilities.size(); ++i) { + json& cap = capabilities[i]; + if (cap.is_object()) { + json ncap = json::object(); + const uint64_t capId = OSUtils::jsonInt(cap["id"], 0ULL); + ncap["id"] = capId; + ncap["default"] = OSUtils::jsonBool(cap["default"], false); + + json& rules = cap["rules"]; + json nrules = json::array(); + if (rules.is_array()) { + for (unsigned long i = 0; i < rules.size(); ++i) { + json& rule = rules[i]; + if (rule.is_object()) { + ZT_VirtualNetworkRule ztr; + if (_parseRule(rule, ztr)) { + nrules.push_back(_renderRule(ztr)); + if (nrules.size() >= ZT_CONTROLLER_MAX_ARRAY_SIZE) + break; + } } } - responseBody.push_back('}'); - responseContentType = "application/json"; - } - return 200; - - } // else 404 - - } else { - // Get network - - responseBody = OSUtils::jsonDump(network); - responseContentType = "application/json"; - return 200; + ncap["rules"] = nrules; + ncaps[capId] = ncap; + } } - } else if (path.size() == 1) { - // List networks - std::set networkIds; - _db.networks(networkIds); - char tmp[64]; - responseBody = "["; - responseBody.reserve((networkIds.size() + 1) * 24); - for(std::set::const_iterator i(networkIds.begin());i!=networkIds.end();++i) { - if (responseBody.length() > 1) - responseBody.push_back(','); - OSUtils::ztsnprintf(tmp,sizeof(tmp),"\"%.16llx\"",(unsigned long long)*i); - responseBody.append(tmp); + json ncapsa = json::array(); + for (std::map::iterator c(ncaps.begin()); c != ncaps.end(); ++c) { + ncapsa.push_back(c->second); + if (ncapsa.size() >= ZT_CONTROLLER_MAX_ARRAY_SIZE) + break; } - responseBody.push_back(']'); - responseContentType = "application/json"; + network["capabilities"] = ncapsa; + } + } - return 200; + if (b.count("tags")) { + json& tags = b["tags"]; + if (tags.is_array()) { + std::map ntags; + for (unsigned long i = 0; i < tags.size(); ++i) { + json& tag = tags[i]; + if (tag.is_object()) { + json ntag = json::object(); + const uint64_t tagId = OSUtils::jsonInt(tag["id"], 0ULL); + ntag["id"] = tagId; + json& dfl = tag["default"]; + if (dfl.is_null()) + ntag["default"] = dfl; + else + ntag["default"] = OSUtils::jsonInt(dfl, 0ULL); + ntags[tagId] = ntag; + } + } - } // else 404 + json ntagsa = json::array(); + for (std::map::iterator t(ntags.begin()); t != ntags.end(); ++t) { + ntagsa.push_back(t->second); + if (ntagsa.size() >= ZT_CONTROLLER_MAX_ARRAY_SIZE) + break; + } + network["tags"] = ntagsa; + } + } - } else { - // Controller status + if (b.count("dns")) { + json& dns = b["dns"]; + if (dns.is_object()) { + json nd; + + nd["domain"] = dns["domain"]; + + json& srv = dns["servers"]; + if (srv.is_array()) { + json ns = json::array(); + for (unsigned int i = 0; i < srv.size(); ++i) { + ns.push_back(srv[i]); + } + nd["servers"] = ns; + } + + network["dns"] = nd; + } + } + + network["id"] = nwids; + network["nwid"] = nwids; + + DB::cleanNetwork(network); + _db.save(network, true); + + return network.dump(); +} + +void EmbeddedNetworkController::configureHTTPControlPlane(httplib::Server& s, httplib::Server& sv6, const std::function setContent) +{ + // Control plane 
Endpoints + std::string controllerPath = "/controller"; + std::string networkListPath = "/controller/network"; + std::string networkListPath2 = "/unstable/controller/network"; + std::string networkPath = "/controller/network/([0-9a-fA-F]{16})"; + std::string oldAndBustedNetworkCreatePath = "/controller/network/([0-9a-fA-F]{10})______"; + std::string memberListPath = "/controller/network/([0-9a-fA-F]{16})/member"; + std::string memberListPath2 = "/unstable/controller/network/([0-9a-fA-F]{16})/member"; + std::string memberPath = "/controller/network/([0-9a-fA-F]{16})/member/([0-9a-fA-F]{10})"; + + auto controllerGet = [&, setContent](const httplib::Request& req, httplib::Response& res) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::controllerGet"); + auto scope = tracer->WithActiveSpan(span); char tmp[4096]; const bool dbOk = _db.isReady(); - OSUtils::ztsnprintf(tmp,sizeof(tmp),"{\n\t\"controller\": true,\n\t\"apiVersion\": %d,\n\t\"clock\": %llu,\n\t\"databaseReady\": %s\n}\n",ZT_NETCONF_CONTROLLER_API_VERSION,(unsigned long long)OSUtils::now(),dbOk ? "true" : "false"); - responseBody = tmp; - responseContentType = "application/json"; - return dbOk ? 200 : 503; + OSUtils::ztsnprintf( + tmp, + sizeof(tmp), + "{\n\t\"controller\": true,\n\t\"apiVersion\": %d,\n\t\"clock\": %llu,\n\t\"databaseReady\": %s\n}\n", + ZT_NETCONF_CONTROLLER_API_VERSION, + (unsigned long long)OSUtils::now(), + dbOk ? "true" : "false"); - } - - return 404; -} - -unsigned int EmbeddedNetworkController::handleControlPlaneHttpPOST( - const std::vector &path, - const std::map &urlArgs, - const std::map &headers, - const std::string &body, - std::string &responseBody, - std::string &responseContentType) -{ - if (path.empty()) - return 404; - - json b; - try { - b = OSUtils::jsonParse(body); - if (!b.is_object()) { - responseBody = "{ \"message\": \"body is not a JSON object\" }"; - responseContentType = "application/json"; - return 400; + if (! dbOk) { + res.status = 503; } - } catch ( ... 
) { - responseBody = "{ \"message\": \"body JSON is invalid\" }"; - responseContentType = "application/json"; - return 400; - } - const int64_t now = OSUtils::now(); - if (path[0] == "network") { + setContent(req, res, tmp); + }; + s.Get(controllerPath, controllerGet); + sv6.Get(controllerPath, controllerGet); - if ((path.size() >= 2)&&(path[1].length() == 16)) { - uint64_t nwid = Utils::hexStrToU64(path[1].c_str()); - char nwids[24]; - OSUtils::ztsnprintf(nwids,sizeof(nwids),"%.16llx",(unsigned long long)nwid); + auto networkListGet = [&, setContent](const httplib::Request& req, httplib::Response& res) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::networkListGet"); + auto scope = tracer->WithActiveSpan(span); - if (path.size() >= 3) { + std::set networkIds; + _db.networks(networkIds); + char tmp[64]; - if ((path.size() == 4)&&(path[2] == "member")&&(path[3].length() == 10)) { - uint64_t address = Utils::hexStrToU64(path[3].c_str()); - char addrs[24]; - OSUtils::ztsnprintf(addrs,sizeof(addrs),"%.10llx",(unsigned long long)address); + auto out = json::array(); + for (std::set::const_iterator i(networkIds.begin()); i != networkIds.end(); ++i) { + OSUtils::ztsnprintf(tmp, sizeof(tmp), "%.16llx", *i); + out.push_back(tmp); + } - json member,network; - _db.get(nwid,network,address,member); - DB::initMember(member); + setContent(req, res, out.dump()); + }; + s.Get(networkListPath, networkListGet); + sv6.Get(networkListPath, networkListGet); - try { - if (b.count("activeBridge")) member["activeBridge"] = OSUtils::jsonBool(b["activeBridge"], false); - if (b.count("noAutoAssignIps")) member["noAutoAssignIps"] = OSUtils::jsonBool(b["noAutoAssignIps"], false); - if (b.count("authenticationExpiryTime")) member["authenticationExpiryTime"] = (uint64_t)OSUtils::jsonInt(b["authenticationExpiryTime"], 0ULL); - if (b.count("authenticationURL")) member["authenticationURL"] = OSUtils::jsonString(b["authenticationURL"], ""); + auto networkListGet2 = [&, setContent](const httplib::Request& req, httplib::Response& res) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::networkListGet2"); + auto scope = tracer->WithActiveSpan(span); - if (b.count("remoteTraceTarget")) { - const std::string rtt(OSUtils::jsonString(b["remoteTraceTarget"],"")); - if (rtt.length() == 10) { - member["remoteTraceTarget"] = rtt; - } else { - member["remoteTraceTarget"] = json(); - } - } - if (b.count("remoteTraceLevel")) member["remoteTraceLevel"] = OSUtils::jsonInt(b["remoteTraceLevel"],0ULL); + std::set networkIds; + _db.networks(networkIds); - if (b.count("authorized")) { - const bool newAuth = OSUtils::jsonBool(b["authorized"],false); - if (newAuth != OSUtils::jsonBool(member["authorized"],false)) { - member["authorized"] = newAuth; - member[((newAuth) ? 
"lastAuthorizedTime" : "lastDeauthorizedTime")] = now; - if (newAuth) { - member["lastAuthorizedCredentialType"] = "api"; - member["lastAuthorizedCredential"] = json(); - } - } - } + auto meta = json::object(); + auto data = json::array(); + uint64_t networkCount = 0; - if (b.count("ipAssignments")) { - json &ipa = b["ipAssignments"]; - if (ipa.is_array()) { - json mipa(json::array()); - for(unsigned long i=0;i= ZT_CONTROLLER_MAX_ARRAY_SIZE) - break; - } - } - member["ipAssignments"] = mipa; - } - } - - if (b.count("tags")) { - json &tags = b["tags"]; - if (tags.is_array()) { - std::map mtags; - for(unsigned long i=0;i::iterator t(mtags.begin());t!=mtags.end();++t) { - json ta = json::array(); - ta.push_back(t->first); - ta.push_back(t->second); - mtagsa.push_back(ta); - if (mtagsa.size() >= ZT_CONTROLLER_MAX_ARRAY_SIZE) - break; - } - member["tags"] = mtagsa; - } - } - - if (b.count("capabilities")) { - json &capabilities = b["capabilities"]; - if (capabilities.is_array()) { - json mcaps = json::array(); - for(unsigned long i=0;i= ZT_CONTROLLER_MAX_ARRAY_SIZE) - break; - } - std::sort(mcaps.begin(),mcaps.end()); - mcaps.erase(std::unique(mcaps.begin(),mcaps.end()),mcaps.end()); - member["capabilities"] = mcaps; - } - } - } catch ( ... ) { - responseBody = "{ \"message\": \"exception while processing parameters in JSON body\" }"; - responseContentType = "application/json"; - return 400; - } - - member["id"] = addrs; - member["address"] = addrs; // legacy - member["nwid"] = nwids; - - DB::cleanMember(member); - _db.save(member,true); - responseBody = OSUtils::jsonDump(member); - responseContentType = "application/json"; - - return 200; - } // else 404 - - } else { - // POST to network ID - - // Magic ID ending with ______ picks a random unused network ID - if (path[1].substr(10) == "______") { - nwid = 0; - uint64_t nwidPrefix = (Utils::hexStrToU64(path[1].substr(0,10).c_str()) << 24) & 0xffffffffff000000ULL; - uint64_t nwidPostfix = 0; - for(unsigned long k=0;k<100000;++k) { // sanity limit on trials - Utils::getSecureRandom(&nwidPostfix,sizeof(nwidPostfix)); - uint64_t tryNwid = nwidPrefix | (nwidPostfix & 0xffffffULL); - if ((tryNwid & 0xffffffULL) == 0ULL) tryNwid |= 1ULL; - if (!_db.hasNetwork(tryNwid)) { - nwid = tryNwid; - break; - } - } - if (!nwid) - return 503; - } - OSUtils::ztsnprintf(nwids,sizeof(nwids),"%.16llx",(unsigned long long)nwid); - - json network; - _db.get(nwid,network); - DB::initNetwork(network); - - try { - if (b.count("name")) network["name"] = OSUtils::jsonString(b["name"],""); - if (b.count("private")) network["private"] = OSUtils::jsonBool(b["private"],true); - if (b.count("enableBroadcast")) network["enableBroadcast"] = OSUtils::jsonBool(b["enableBroadcast"],false); - if (b.count("multicastLimit")) network["multicastLimit"] = OSUtils::jsonInt(b["multicastLimit"],32ULL); - if (b.count("mtu")) network["mtu"] = std::max(std::min((unsigned int)OSUtils::jsonInt(b["mtu"],ZT_DEFAULT_MTU),(unsigned int)ZT_MAX_MTU),(unsigned int)ZT_MIN_MTU); - - if (b.count("remoteTraceTarget")) { - const std::string rtt(OSUtils::jsonString(b["remoteTraceTarget"],"")); - if (rtt.length() == 10) { - network["remoteTraceTarget"] = rtt; - } else { - network["remoteTraceTarget"] = json(); - } - } - if (b.count("remoteTraceLevel")) network["remoteTraceLevel"] = OSUtils::jsonInt(b["remoteTraceLevel"],0ULL); - - if (b.count("v4AssignMode")) { - json nv4m; - json &v4m = b["v4AssignMode"]; - if (v4m.is_string()) { // backward compatibility - nv4m["zt"] = (OSUtils::jsonString(v4m,"") == "zt"); 
- } else if (v4m.is_object()) { - nv4m["zt"] = OSUtils::jsonBool(v4m["zt"],false); - } else nv4m["zt"] = false; - network["v4AssignMode"] = nv4m; - } - - if (b.count("v6AssignMode")) { - json nv6m; - json &v6m = b["v6AssignMode"]; - if (!nv6m.is_object()) nv6m = json::object(); - if (v6m.is_string()) { // backward compatibility - std::vector v6ms(OSUtils::split(OSUtils::jsonString(v6m,"").c_str(),",","","")); - std::sort(v6ms.begin(),v6ms.end()); - v6ms.erase(std::unique(v6ms.begin(),v6ms.end()),v6ms.end()); - nv6m["rfc4193"] = false; - nv6m["zt"] = false; - nv6m["6plane"] = false; - for(std::vector::iterator i(v6ms.begin());i!=v6ms.end();++i) { - if (*i == "rfc4193") - nv6m["rfc4193"] = true; - else if (*i == "zt") - nv6m["zt"] = true; - else if (*i == "6plane") - nv6m["6plane"] = true; - } - } else if (v6m.is_object()) { - if (v6m.count("rfc4193")) nv6m["rfc4193"] = OSUtils::jsonBool(v6m["rfc4193"],false); - if (v6m.count("zt")) nv6m["zt"] = OSUtils::jsonBool(v6m["zt"],false); - if (v6m.count("6plane")) nv6m["6plane"] = OSUtils::jsonBool(v6m["6plane"],false); - } else { - nv6m["rfc4193"] = false; - nv6m["zt"] = false; - nv6m["6plane"] = false; - } - network["v6AssignMode"] = nv6m; - } - - if (b.count("routes")) { - json &rts = b["routes"]; - if (rts.is_array()) { - json nrts = json::array(); - for(unsigned long i=0;i().c_str()); - InetAddress v; - if (via.is_string()) v.fromString(via.get().c_str()); - if ( ((t.ss_family == AF_INET)||(t.ss_family == AF_INET6)) && (t.netmaskBitsValid()) ) { - json tmp; - char tmp2[64]; - tmp["target"] = t.toString(tmp2); - if (v.ss_family == t.ss_family) - tmp["via"] = v.toIpString(tmp2); - else tmp["via"] = json(); - nrts.push_back(tmp); - if (nrts.size() >= ZT_CONTROLLER_MAX_ARRAY_SIZE) - break; - } - } - } - } - network["routes"] = nrts; - } - } - - if (b.count("ipAssignmentPools")) { - json &ipp = b["ipAssignmentPools"]; - if (ipp.is_array()) { - json nipp = json::array(); - for(unsigned long i=0;i= ZT_CONTROLLER_MAX_ARRAY_SIZE) - break; - } - } - } - network["ipAssignmentPools"] = nipp; - } - } - - if (b.count("rules")) { - json &rules = b["rules"]; - if (rules.is_array()) { - json nrules = json::array(); - for(unsigned long i=0;i= ZT_CONTROLLER_MAX_ARRAY_SIZE) - break; - } - } - } - network["rules"] = nrules; - } - } - - if (b.count("authTokens")) { - json &authTokens = b["authTokens"]; - if (authTokens.is_object()) { - json nat; - for(json::iterator t(authTokens.begin());t!=authTokens.end();++t) { - if ((t.value().is_number())&&(t.value() >= 0)) - nat[t.key()] = t.value(); - } - network["authTokens"] = nat; - } else { - network["authTokens"] = {{}}; - } - } - - if (b.count("capabilities")) { - json &capabilities = b["capabilities"]; - if (capabilities.is_array()) { - std::map< uint64_t,json > ncaps; - for(unsigned long i=0;i= ZT_CONTROLLER_MAX_ARRAY_SIZE) - break; - } - } - } - } - ncap["rules"] = nrules; - - ncaps[capId] = ncap; - } - } - - json ncapsa = json::array(); - for(std::map< uint64_t,json >::iterator c(ncaps.begin());c!=ncaps.end();++c) { - ncapsa.push_back(c->second); - if (ncapsa.size() >= ZT_CONTROLLER_MAX_ARRAY_SIZE) - break; - } - network["capabilities"] = ncapsa; - } - } - - if (b.count("tags")) { - json &tags = b["tags"]; - if (tags.is_array()) { - std::map< uint64_t,json > ntags; - for(unsigned long i=0;i::iterator t(ntags.begin());t!=ntags.end();++t) { - ntagsa.push_back(t->second); - if (ntagsa.size() >= ZT_CONTROLLER_MAX_ARRAY_SIZE) - break; - } - network["tags"] = ntagsa; - } - } - - if (b.count("dns")) { - json &dns = 
b["dns"]; - if (dns.is_object()) { - json nd; - - nd["domain"] = dns["domain"]; - - json &srv = dns["servers"]; - if (srv.is_array()) { - json ns = json::array(); - for(unsigned int i=0;i &path, - const std::map &urlArgs, - const std::map &headers, - const std::string &body, - std::string &responseBody, - std::string &responseContentType) -{ - if (path.empty()) - return 404; - - if (path[0] == "network") { - if ((path.size() >= 2)&&(path[1].length() == 16)) { - const uint64_t nwid = Utils::hexStrToU64(path[1].c_str()); - if (path.size() >= 3) { - if ((path.size() == 4)&&(path[2] == "member")&&(path[3].length() == 10)) { - const uint64_t address = Utils::hexStrToU64(path[3].c_str()); - - json network,member; - _db.get(nwid,network,address,member); - _db.eraseMember(nwid, address); - - { - std::lock_guard l(_memberStatus_l); - _memberStatus.erase(_MemberStatusKey(nwid,address)); - } - - if (!member.size()) - return 404; - responseBody = OSUtils::jsonDump(member); - responseContentType = "application/json"; - return 200; - } - } else { - json network; - _db.get(nwid,network); - _db.eraseNetwork(nwid); - - { - std::lock_guard l(_memberStatus_l); - for(auto i=_memberStatus.begin();i!=_memberStatus.end();) { - if (i->first.networkId == nwid) - _memberStatus.erase(i++); - else ++i; - } - } - - if (!network.size()) - return 404; - responseBody = OSUtils::jsonDump(network); - responseContentType = "application/json"; - return 200; + for (std::set::const_iterator nwid(networkIds.begin()); nwid != networkIds.end(); ++nwid) { + json network; + if (! _db.get(*nwid, network)) { + continue; } - } // else 404 - } // else 404 + std::vector memTmp; + if (_db.get(*nwid, network, memTmp)) { + if (! network.is_null()) { + uint64_t authorizedCount = 0; + uint64_t totalCount = memTmp.size(); + networkCount++; - return 404; + for (auto m = memTmp.begin(); m != memTmp.end(); ++m) { + bool a = OSUtils::jsonBool((*m)["authorized"], 0); + if (a) { + authorizedCount++; + } + } + + auto nwMeta = json::object(); + nwMeta["totalMemberCount"] = totalCount; + nwMeta["authorizedMemberCount"] = authorizedCount; + network["meta"] = nwMeta; + + data.push_back(network); + } + } + } + meta["networkCount"] = networkCount; + + auto out = json::object(); + out["data"] = data; + out["meta"] = meta; + + setContent(req, res, out.dump()); + }; + s.Get(networkListPath2, networkListGet2); + sv6.Get(networkListPath2, networkListGet2); + + auto networkGet = [&, setContent](const httplib::Request& req, httplib::Response& res) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::networkGet"); + auto scope = tracer->WithActiveSpan(span); + + auto networkID = req.matches[1]; + uint64_t nwid = Utils::hexStrToU64(networkID.str().c_str()); + json network; + if (! 
_db.get(nwid, network)) { + res.status = 404; + return; + } + + setContent(req, res, network.dump()); + }; + s.Get(networkPath, networkGet); + sv6.Get(networkPath, networkGet); + + auto createNewNetwork = [&, setContent](const httplib::Request& req, httplib::Response& res) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::createNewNetwork"); + auto scope = tracer->WithActiveSpan(span); + + // fprintf(stderr, "creating new network (new style)\n"); + uint64_t nwid = 0; + uint64_t nwidPrefix = (Utils::hexStrToU64(_signingIdAddressString.c_str()) << 24) & 0xffffffffff000000ULL; + uint64_t nwidPostfix = 0; + for (unsigned long k = 0; k < 100000; ++k) { // sanity limit on trials + Utils::getSecureRandom(&nwidPostfix, sizeof(nwidPostfix)); + uint64_t tryNwid = nwidPrefix | (nwidPostfix & 0xffffffULL); + if ((tryNwid & 0xffffffULL) == 0ULL) + tryNwid |= 1ULL; + if (! _db.hasNetwork(tryNwid)) { + nwid = tryNwid; + break; + } + } + if (! nwid) { + res.status = 503; + return; + } + + setContent(req, res, networkUpdateFromPostData(nwid, req.body)); + }; + s.Put(networkListPath, createNewNetwork); + s.Post(networkListPath, createNewNetwork); + sv6.Put(networkListPath, createNewNetwork); + sv6.Post(networkListPath, createNewNetwork); + + auto createNewNetworkOldAndBusted = [&, setContent](const httplib::Request& req, httplib::Response& res) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::createNewNetworkOldAndBusted"); + auto scope = tracer->WithActiveSpan(span); + + auto inID = req.matches[1].str(); + + if (inID != _signingIdAddressString) { + res.status = 400; + return; + } + + uint64_t nwid = 0; + uint64_t nwidPrefix = (Utils::hexStrToU64(inID.c_str()) << 24) & 0xffffffffff000000ULL; + uint64_t nwidPostfix = 0; + for (unsigned long k = 0; k < 100000; ++k) { // sanity limit on trials + Utils::getSecureRandom(&nwidPostfix, sizeof(nwidPostfix)); + uint64_t tryNwid = nwidPrefix | (nwidPostfix & 0xffffffULL); + if ((tryNwid & 0xffffffULL) == 0ULL) + tryNwid |= 1ULL; + if (! _db.hasNetwork(tryNwid)) { + nwid = tryNwid; + break; + } + } + if (! 
nwid) { + res.status = 503; + return; + } + setContent(req, res, networkUpdateFromPostData(nwid, req.body)); + }; + s.Put(oldAndBustedNetworkCreatePath, createNewNetworkOldAndBusted); + s.Post(oldAndBustedNetworkCreatePath, createNewNetworkOldAndBusted); + sv6.Put(oldAndBustedNetworkCreatePath, createNewNetworkOldAndBusted); + sv6.Post(oldAndBustedNetworkCreatePath, createNewNetworkOldAndBusted); + + auto networkPost = [&, setContent](const httplib::Request& req, httplib::Response& res) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::networkPost"); + auto scope = tracer->WithActiveSpan(span); + + auto networkID = req.matches[1].str(); + uint64_t nwid = Utils::hexStrToU64(networkID.c_str()); + + res.status = 200; + setContent(req, res, networkUpdateFromPostData(nwid, req.body)); + }; + s.Put(networkPath, networkPost); + s.Post(networkPath, networkPost); + sv6.Put(networkPath, networkPost); + sv6.Post(networkPath, networkPost); + + auto networkDelete = [&, setContent](const httplib::Request& req, httplib::Response& res) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::networkDelete"); + auto scope = tracer->WithActiveSpan(span); + + auto networkID = req.matches[1].str(); + uint64_t nwid = Utils::hexStrToU64(networkID.c_str()); + + json network; + if (! _db.get(nwid, network)) { + res.status = 404; + return; + } + + _db.eraseNetwork(nwid); + setContent(req, res, network.dump()); + }; + s.Delete(networkPath, networkDelete); + sv6.Delete(networkPath, networkDelete); + + auto memberListGet = [&, setContent](const httplib::Request& req, httplib::Response& res) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::memberListGet"); + auto scope = tracer->WithActiveSpan(span); + + auto networkID = req.matches[1]; + uint64_t nwid = Utils::hexStrToU64(networkID.str().c_str()); + json network; + if (! _db.get(nwid, network)) { + res.status = 404; + return; + } + + json out = json::object(); + std::vector memTmp; + if (_db.get(nwid, network, memTmp)) { + for (auto m = memTmp.begin(); m != memTmp.end(); ++m) { + int revision = OSUtils::jsonInt((*m)["revision"], 0); + std::string id = OSUtils::jsonString((*m)["id"], ""); + if (id.length() == 10) { + out[id] = revision; + } + } + } + + setContent(req, res, out.dump()); + }; + s.Get(memberListPath, memberListGet); + sv6.Get(memberListPath, memberListGet); + + auto memberListGet2 = [&, setContent](const httplib::Request& req, httplib::Response& res) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::memberListGet2"); + auto scope = tracer->WithActiveSpan(span); + + auto networkID = req.matches[1]; + uint64_t nwid = Utils::hexStrToU64(networkID.str().c_str()); + json network; + if (! 
_db.get(nwid, network)) { + res.status = 404; + return; + } + + auto out = nlohmann::json::object(); + auto meta = nlohmann::json::object(); + std::vector memTmp; + if (_db.get(nwid, network, memTmp)) { + uint64_t authorizedCount = 0; + uint64_t totalCount = memTmp.size(); + for (auto m = memTmp.begin(); m != memTmp.end(); ++m) { + bool a = OSUtils::jsonBool((*m)["authorized"], 0); + if (a) { + authorizedCount++; + } + } + + meta["totalCount"] = totalCount; + meta["authorizedCount"] = authorizedCount; + + out["data"] = memTmp; + out["meta"] = meta; + + setContent(req, res, out.dump()); + } + else { + res.status = 404; + return; + } + }; + s.Get(memberListPath2, memberListGet2); + sv6.Get(memberListPath2, memberListGet2); + + auto memberGet = [&, setContent](const httplib::Request& req, httplib::Response& res) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::memberGet"); + auto scope = tracer->WithActiveSpan(span); + + auto networkID = req.matches[1]; + auto memberID = req.matches[2]; + uint64_t nwid = Utils::hexStrToU64(networkID.str().c_str()); + uint64_t memid = Utils::hexStrToU64(memberID.str().c_str()); + json network; + json member; + if (! _db.get(nwid, network, memid, member)) { + res.status = 404; + return; + } + + setContent(req, res, member.dump()); + }; + s.Get(memberPath, memberGet); + sv6.Get(memberPath, memberGet); + + auto memberPost = [&, setContent](const httplib::Request& req, httplib::Response& res) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::memberPost"); + auto scope = tracer->WithActiveSpan(span); + + auto networkID = req.matches[1].str(); + auto memberID = req.matches[2].str(); + uint64_t nwid = Utils::hexStrToU64(networkID.c_str()); + uint64_t memid = Utils::hexStrToU64(memberID.c_str()); + + if (! _db.hasNetwork(nwid)) { + res.status = 404; + return; + } + + json network; + json member; + _db.get(nwid, network, memid, member); + DB::initMember(member); + + json b = OSUtils::jsonParse(req.body); + + if (b.count("activeBridge")) + member["activeBridge"] = OSUtils::jsonBool(b["activeBridge"], false); + if (b.count("noAutoAssignIps")) + member["noAutoAssignIps"] = OSUtils::jsonBool(b["noAutoAssignIps"], false); + if (b.count("authenticationExpiryTime")) + member["authenticationExpiryTime"] = (uint64_t)OSUtils::jsonInt(b["authenticationExpiryTime"], 0ULL); + if (b.count("authenticationURL")) + member["authenticationURL"] = OSUtils::jsonString(b["authenticationURL"], ""); + if (b.count("name")) + member["name"] = OSUtils::jsonString(b["name"], ""); + + if (b.count("remoteTraceTarget")) { + const std::string rtt(OSUtils::jsonString(b["remoteTraceTarget"], "")); + if (rtt.length() == 10) { + member["remoteTraceTarget"] = rtt; + } + else { + member["remoteTraceTarget"] = json(); + } + } + if (b.count("remoteTraceLevel")) + member["remoteTraceLevel"] = OSUtils::jsonInt(b["remoteTraceLevel"], 0ULL); + + if (b.count("authorized")) { + const bool newAuth = OSUtils::jsonBool(b["authorized"], false); + if (newAuth != OSUtils::jsonBool(member["authorized"], false)) { + member["authorized"] = newAuth; + member[((newAuth) ? 
"lastAuthorizedTime" : "lastDeauthorizedTime")] = OSUtils::now(); + if (newAuth) { + member["lastAuthorizedCredentialType"] = "api"; + member["lastAuthorizedCredential"] = json(); + } + } + } + + if (b.count("ipAssignments")) { + json& ipa = b["ipAssignments"]; + if (ipa.is_array()) { + json mipa(json::array()); + for (unsigned long i = 0; i < ipa.size(); ++i) { + std::string ips = ipa[i]; + InetAddress ip(ips.c_str()); + if ((ip.ss_family == AF_INET) || (ip.ss_family == AF_INET6)) { + char tmpip[64]; + mipa.push_back(ip.toIpString(tmpip)); + if (mipa.size() >= ZT_CONTROLLER_MAX_ARRAY_SIZE) + break; + } + } + member["ipAssignments"] = mipa; + } + } + + if (b.count("tags")) { + json& tags = b["tags"]; + if (tags.is_array()) { + std::map mtags; + for (unsigned long i = 0; i < tags.size(); ++i) { + json& tag = tags[i]; + if ((tag.is_array()) && (tag.size() == 2)) + mtags[OSUtils::jsonInt(tag[0], 0ULL) & 0xffffffffULL] = OSUtils::jsonInt(tag[1], 0ULL) & 0xffffffffULL; + } + json mtagsa = json::array(); + for (std::map::iterator t(mtags.begin()); t != mtags.end(); ++t) { + json ta = json::array(); + ta.push_back(t->first); + ta.push_back(t->second); + mtagsa.push_back(ta); + if (mtagsa.size() >= ZT_CONTROLLER_MAX_ARRAY_SIZE) + break; + } + member["tags"] = mtagsa; + } + } + + if (b.count("capabilities")) { + json& capabilities = b["capabilities"]; + if (capabilities.is_array()) { + json mcaps = json::array(); + for (unsigned long i = 0; i < capabilities.size(); ++i) { + mcaps.push_back(OSUtils::jsonInt(capabilities[i], 0ULL)); + if (mcaps.size() >= ZT_CONTROLLER_MAX_ARRAY_SIZE) + break; + } + std::sort(mcaps.begin(), mcaps.end()); + mcaps.erase(std::unique(mcaps.begin(), mcaps.end()), mcaps.end()); + member["capabilities"] = mcaps; + } + } + + member["id"] = memberID; + member["address"] = memberID; + member["nwid"] = networkID; + + DB::cleanMember(member); + _db.save(member, true); + + setContent(req, res, member.dump()); + }; + s.Put(memberPath, memberPost); + s.Post(memberPath, memberPost); + sv6.Put(memberPath, memberPost); + sv6.Post(memberPath, memberPost); + + auto memberDelete = [&, setContent](const httplib::Request& req, httplib::Response& res) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::memberDelete"); + auto scope = tracer->WithActiveSpan(span); + + auto networkID = req.matches[1].str(); + auto memberID = req.matches[2].str(); + + uint64_t nwid = Utils::hexStrToU64(networkID.c_str()); + uint64_t address = Utils::hexStrToU64(memberID.c_str()); + json network, member; + + if (! _db.get(nwid, network, address, member)) { + res.status = 404; + return; + } + + if (! 
member.size()) { + res.status = 404; + return; + } + + _db.eraseMember(nwid, address); + + setContent(req, res, member.dump()); + }; + s.Delete(memberPath, memberDelete); + sv6.Delete(memberPath, memberDelete); } -void EmbeddedNetworkController::handleRemoteTrace(const ZT_RemoteTrace &rt) +void EmbeddedNetworkController::handleRemoteTrace(const ZT_RemoteTrace& rt) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::handleRemoteTrace"); + auto scope = tracer->WithActiveSpan(span); + static volatile unsigned long idCounter = 0; - char id[128],tmp[128]; - std::string k,v; + char id[128], tmp[128]; + std::string k, v; try { // Convert Dictionary into JSON object json d; - char *saveptr = (char *)0; - for(char *l=Utils::stok(rt.data,"\n",&saveptr);(l);l=Utils::stok((char *)0,"\n",&saveptr)) { - char *eq = strchr(l,'='); + char* saveptr = (char*)0; + for (char* l = Utils::stok(rt.data, "\n", &saveptr); (l); l = Utils::stok((char*)0, "\n", &saveptr)) { + char* eq = strchr(l, '='); if (eq > l) { - k.assign(l,(unsigned long)(eq - l)); + k.assign(l, (unsigned long)(eq - l)); v.clear(); ++eq; while (*eq) { if (*eq == '\\') { ++eq; if (*eq) { - switch(*eq) { - case 'r': v.push_back('\r'); break; - case 'n': v.push_back('\n'); break; - case '0': v.push_back((char)0); break; - case 'e': v.push_back('='); break; - default: v.push_back(*eq); break; + switch (*eq) { + case 'r': + v.push_back('\r'); + break; + case 'n': + v.push_back('\n'); + break; + case '0': + v.push_back((char)0); + break; + case 'e': + v.push_back('='); + break; + default: + v.push_back(*eq); + break; } ++eq; } - } else { + } + else { v.push_back(*(eq++)); } } - if ((k.length() > 0)&&(v.length() > 0)) + if ((k.length() > 0) && (v.length() > 0)) d[k] = v; } } const int64_t now = OSUtils::now(); - OSUtils::ztsnprintf(id,sizeof(id),"%.10llx-%.16llx-%.10llx-%.4x",_signingId.address().toInt(),now,rt.origin,(unsigned int)(idCounter++ & 0xffff)); + OSUtils::ztsnprintf(id, sizeof(id), "%.10llx-%.16llx-%.10llx-%.4x", _signingId.address().toInt(), now, rt.origin, (unsigned int)(idCounter++ & 0xffff)); d["id"] = id; d["objtype"] = "trace"; d["ts"] = now; - d["nodeId"] = Utils::hex10(rt.origin,tmp); - _db.save(d,true); - } catch ( ... ) { + d["nodeId"] = Utils::hex10(rt.origin, tmp); + _db.save(d, true); + } + catch (...) 
{ // drop invalid trace messages if an error occurs } } -void EmbeddedNetworkController::onNetworkUpdate(const void *db,uint64_t networkId,const nlohmann::json &network) +void EmbeddedNetworkController::onNetworkUpdate(const void* db, uint64_t networkId, const nlohmann::json& network) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::onNetworkUpdate"); + auto scope = tracer->WithActiveSpan(span); + // Send an update to all members of the network that are online const int64_t now = OSUtils::now(); std::lock_guard l(_memberStatus_l); - for(auto i=_memberStatus.begin();i!=_memberStatus.end();++i) { - if ((i->first.networkId == networkId)&&(i->second.online(now))&&(i->second.lastRequestMetaData)) - request(networkId,InetAddress(),0,i->second.identity,i->second.lastRequestMetaData); + for (auto i = _memberStatus.begin(); i != _memberStatus.end(); ++i) { + if ((i->first.networkId == networkId) && (i->second.online(now)) && (i->second.lastRequestMetaData)) + request(networkId, InetAddress(), 0, i->second.identity, i->second.lastRequestMetaData); } } -void EmbeddedNetworkController::onNetworkMemberUpdate(const void *db,uint64_t networkId,uint64_t memberId,const nlohmann::json &member) +void EmbeddedNetworkController::onNetworkMemberUpdate(const void* db, uint64_t networkId, uint64_t memberId, const nlohmann::json& member) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::onNetworkMemberUpdate"); + auto scope = tracer->WithActiveSpan(span); + // Push update to member if online try { std::lock_guard l(_memberStatus_l); - _MemberStatus &ms = _memberStatus[_MemberStatusKey(networkId,memberId)]; - if ((ms.online(OSUtils::now()))&&(ms.lastRequestMetaData)) - request(networkId,InetAddress(),0,ms.identity,ms.lastRequestMetaData); - } catch ( ... ) {} + _MemberStatus& ms = _memberStatus[_MemberStatusKey(networkId, memberId)]; + if ((ms.online(OSUtils::now())) && (ms.lastRequestMetaData)) + request(networkId, InetAddress(), 0, ms.identity, ms.lastRequestMetaData); + } + catch (...) 
{ + } } -void EmbeddedNetworkController::onNetworkMemberDeauthorize(const void *db,uint64_t networkId,uint64_t memberId) +void EmbeddedNetworkController::onNetworkMemberDeauthorize(const void* db, uint64_t networkId, uint64_t memberId) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::onNetworkMemberDeauthorize"); + auto scope = tracer->WithActiveSpan(span); + const int64_t now = OSUtils::now(); - Revocation rev((uint32_t)_node->prng(),networkId,0,now,ZT_REVOCATION_FLAG_FAST_PROPAGATE,Address(memberId),Revocation::CREDENTIAL_TYPE_COM); + Revocation rev((uint32_t)_node->prng(), networkId, 0, now, ZT_REVOCATION_FLAG_FAST_PROPAGATE, Address(memberId), Revocation::CREDENTIAL_TYPE_COM); rev.sign(_signingId); { std::lock_guard l(_memberStatus_l); - for(auto i=_memberStatus.begin();i!=_memberStatus.end();++i) { - if ((i->first.networkId == networkId)&&(i->second.online(now))) - _node->ncSendRevocation(Address(i->first.nodeId),rev); + for (auto i = _memberStatus.begin(); i != _memberStatus.end(); ++i) { + if ((i->first.networkId == networkId) && (i->second.online(now))) + _node->ncSendRevocation(Address(i->first.nodeId), rev); } } } -void EmbeddedNetworkController::_request( - uint64_t nwid, - const InetAddress &fromAddr, - uint64_t requestPacketId, - const Identity &identity, - const Dictionary &metaData) +void EmbeddedNetworkController::_request(uint64_t nwid, const InetAddress& fromAddr, uint64_t requestPacketId, const Identity& identity, const Dictionary& metaData) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_controller"); + auto span = tracer->StartSpan("embedded_controller::_request"); + auto scope = tracer->WithActiveSpan(span); + + Metrics::network_config_request++; + auto tid = std::this_thread::get_id(); + std::stringstream ss; + ss << tid; + std::string threadID = ss.str(); +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + auto b1 = _member_status_lookup.Add({ { "thread", threadID } }); + auto c1 = _member_status_lookup_count.Add({ { "thread", threadID } }); + c1++; + b1.start(); +#endif + char nwids[24]; DB::NetworkSummaryInfo ns; - json network,member; + json network, member; - if (((!_signingId)||(!_signingId.hasPrivate()))||(_signingId.address().toInt() != (nwid >> 24))||(!_sender)) + if (((! _signingId) || (! _signingId.hasPrivate())) || (_signingId.address().toInt() != (nwid >> 24)) || (! 
_sender)) { return; + } const int64_t now = OSUtils::now(); - if (requestPacketId) { - std::lock_guard l(_memberStatus_l); - _MemberStatus &ms = _memberStatus[_MemberStatusKey(nwid,identity.address().toInt())]; - if ((now - ms.lastRequestTime) <= ZT_NETCONF_MIN_REQUEST_PERIOD) - return; - ms.lastRequestTime = now; - } +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + b1.stop(); + auto b2 = _node_is_online.Add({ { "thread", threadID } }); + auto c2 = _node_is_online_count.Add({ { "thread", threadID } }); + c2++; + b2.start(); +#endif + char osArch[256]; + metaData.get(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_OS_ARCH, osArch, sizeof(osArch)); + // fprintf(stderr, "Network Config Request: nwid=%.16llx, nodeid=%.10llx, osArch=%s\n", + // nwid, identity.address().toInt(), osArch); + _db.nodeIsOnline(nwid, identity.address().toInt(), fromAddr, osArch); +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + b2.stop(); - _db.nodeIsOnline(nwid,identity.address().toInt(),fromAddr); - - Utils::hex(nwid,nwids); - _db.get(nwid,network,identity.address().toInt(),member,ns); - if ((!network.is_object())||(network.empty())) { - _sender->ncSendError(nwid,requestPacketId,identity.address(),NetworkController::NC_ERROR_OBJECT_NOT_FOUND, nullptr, 0); + auto b3 = _get_and_init_member.Add({ { "thread", threadID } }); + auto c3 = _get_and_init_member_count.Add({ { "thread", threadID } }); + c3++; + b3.start(); +#endif + Utils::hex(nwid, nwids); + _db.get(nwid, network, identity.address().toInt(), member, ns); + if ((! network.is_object()) || (network.empty())) { + _sender->ncSendError(nwid, requestPacketId, identity.address(), NetworkController::NC_ERROR_OBJECT_NOT_FOUND, nullptr, 0); +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + b3.stop(); +#endif return; } - const bool newMember = ((!member.is_object())||(member.empty())); + const bool newMember = ((! member.is_object()) || (member.empty())); DB::initMember(member); - _MemberStatusKey msk(nwid,identity.address().toInt()); + _MemberStatusKey msk(nwid, identity.address().toInt()); +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + b3.stop(); +#endif { - const std::string haveIdStr(OSUtils::jsonString(member["identity"],"")); +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + auto b4 = _have_identity.Add({ { "thread", threadID } }); + auto c4 = _have_identity_count.Add({ { "thread", threadID } }); + c4++; + b4.start(); +#endif + const std::string haveIdStr(OSUtils::jsonString(member["identity"], "")); if (haveIdStr.length() > 0) { // If we already know this member's identity perform a full compare. This prevents // a "collision" from being able to auth onto our network in place of an already // known member. try { if (Identity(haveIdStr.c_str()) != identity) { - _sender->ncSendError(nwid,requestPacketId,identity.address(),NetworkController::NC_ERROR_ACCESS_DENIED, nullptr, 0); + _sender->ncSendError(nwid, requestPacketId, identity.address(), NetworkController::NC_ERROR_ACCESS_DENIED, nullptr, 0); +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + b4.stop(); +#endif return; } - } catch ( ... ) { - _sender->ncSendError(nwid,requestPacketId,identity.address(),NetworkController::NC_ERROR_ACCESS_DENIED, nullptr, 0); + } + catch (...) { + _sender->ncSendError(nwid, requestPacketId, identity.address(), NetworkController::NC_ERROR_ACCESS_DENIED, nullptr, 0); +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + b4.stop(); +#endif return; } - } else { + } + else { // If we do not yet know this member's identity, learn it. 
char idtmp[1024]; - member["identity"] = identity.toString(false,idtmp); + member["identity"] = identity.toString(false, idtmp); } +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + b4.stop(); +#endif } // These are always the same, but make sure they are set @@ -1296,25 +1697,33 @@ void EmbeddedNetworkController::_request( } // Determine whether and how member is authorized +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + auto b5 = _determine_auth.Add({ { "thread", threadID } }); + auto c5 = _determine_auth_count.Add({ { "thread", threadID } }); + c5++; + b5.start(); +#endif bool authorized = false; bool autoAuthorized = false; - json autoAuthCredentialType,autoAuthCredential; - if (OSUtils::jsonBool(member["authorized"],false)) { + json autoAuthCredentialType, autoAuthCredential; + if (OSUtils::jsonBool(member["authorized"], false)) { authorized = true; - } else if (!OSUtils::jsonBool(network["private"],true)) { + } + else if (! OSUtils::jsonBool(network["private"], true)) { authorized = true; autoAuthorized = true; autoAuthCredentialType = "public"; - } else { + } + else { char presentedAuth[512]; - if (metaData.get(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_AUTH,presentedAuth,sizeof(presentedAuth)) > 0) { - presentedAuth[511] = (char)0; // sanity check - if ((strlen(presentedAuth) > 6)&&(!strncmp(presentedAuth,"token:",6))) { - const char *const presentedToken = presentedAuth + 6; + if (metaData.get(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_AUTH, presentedAuth, sizeof(presentedAuth)) > 0) { + presentedAuth[511] = (char)0; // sanity check + if ((strlen(presentedAuth) > 6) && (! strncmp(presentedAuth, "token:", 6))) { + const char* const presentedToken = presentedAuth + 6; json authTokens(network["authTokens"]); - json &tokenExpires = authTokens[presentedToken]; + json& tokenExpires = authTokens[presentedToken]; if (tokenExpires.is_number()) { - if ((tokenExpires == 0)||(tokenExpires > now)) { + if ((tokenExpires == 0) || (tokenExpires > now)) { authorized = true; autoAuthorized = true; autoAuthCredentialType = "token"; @@ -1326,21 +1735,30 @@ void EmbeddedNetworkController::_request( } // If we auto-authorized, update member record - if ((autoAuthorized)&&(authorized)) { + if ((autoAuthorized) && (authorized)) { member["authorized"] = true; member["lastAuthorizedTime"] = now; member["lastAuthorizedCredentialType"] = autoAuthCredentialType; member["lastAuthorizedCredential"] = autoAuthCredential; } +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + b5.stop(); +#endif // Should we check SSO Stuff? // If network is configured with SSO, and the member is not marked exempt: yes // Otherwise no, we use standard auth logic. +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + auto b6 = _sso_check.Add({ { "thread", threadID } }); + auto c6 = _sso_check_count.Add({ { "thread", threadID } }); + c6++; + b6.start(); +#endif AuthInfo info; int64_t authenticationExpiryTime = -1; bool networkSSOEnabled = OSUtils::jsonBool(network["ssoEnabled"], false); bool memberSSOExempt = OSUtils::jsonBool(member["ssoExempt"], false); - if (networkSSOEnabled && !memberSSOExempt) { + if (networkSSOEnabled && ! 
memberSSOExempt) { authenticationExpiryTime = (int64_t)OSUtils::jsonInt(member["authenticationExpiryTime"], 0); info = _db.getSSOAuthInfo(member, _ssoRedirectURL); assert(info.enabled == networkSSOEnabled); @@ -1349,8 +1767,9 @@ void EmbeddedNetworkController::_request( Dictionary<4096> authInfo; authInfo.add(ZT_AUTHINFO_DICT_KEY_VERSION, (uint64_t)0ULL); authInfo.add(ZT_AUTHINFO_DICT_KEY_AUTHENTICATION_URL, info.authenticationURL.c_str()); - _sender->ncSendError(nwid,requestPacketId,identity.address(),NetworkController::NC_ERROR_AUTHENTICATION_REQUIRED, authInfo.data(), authInfo.sizeBytes()); - } else if (info.version == 1) { + _sender->ncSendError(nwid, requestPacketId, identity.address(), NetworkController::NC_ERROR_AUTHENTICATION_REQUIRED, authInfo.data(), authInfo.sizeBytes()); + } + else if (info.version == 1) { Dictionary<8192> authInfo; authInfo.add(ZT_AUTHINFO_DICT_KEY_VERSION, info.version); authInfo.add(ZT_AUTHINFO_DICT_KEY_ISSUER_URL, info.issuerURL.c_str()); @@ -1358,21 +1777,32 @@ void EmbeddedNetworkController::_request( authInfo.add(ZT_AUTHINFO_DICT_KEY_NONCE, info.ssoNonce.c_str()); authInfo.add(ZT_AUTHINFO_DICT_KEY_STATE, info.ssoState.c_str()); authInfo.add(ZT_AUTHINFO_DICT_KEY_CLIENT_ID, info.ssoClientID.c_str()); - _sender->ncSendError(nwid,requestPacketId,identity.address(),NetworkController::NC_ERROR_AUTHENTICATION_REQUIRED, authInfo.data(), authInfo.sizeBytes()); + authInfo.add(ZT_AUTHINFO_DICT_KEY_SSO_PROVIDER, info.ssoProvider.c_str()); + _sender->ncSendError(nwid, requestPacketId, identity.address(), NetworkController::NC_ERROR_AUTHENTICATION_REQUIRED, authInfo.data(), authInfo.sizeBytes()); } DB::cleanMember(member); - _db.save(member,true); + _db.save(member, true); +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + b6.stop(); +#endif return; } } +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + b6.stop(); + auto b7 = _auth_check.Add({ { "thread", threadID } }); + auto c7 = _auth_check_count.Add({ { "thread", threadID } }); + c7++; + b7.start(); +#endif if (authorized) { // Update version info and meta-data if authorized and if this is a genuine request if (requestPacketId) { - const uint64_t vMajor = metaData.getUI(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_NODE_MAJOR_VERSION,0); - const uint64_t vMinor = metaData.getUI(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_NODE_MINOR_VERSION,0); - const uint64_t vRev = metaData.getUI(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_NODE_REVISION,0); - const uint64_t vProto = metaData.getUI(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_PROTOCOL_VERSION,0); + const uint64_t vMajor = metaData.getUI(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_NODE_MAJOR_VERSION, 0); + const uint64_t vMinor = metaData.getUI(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_NODE_MINOR_VERSION, 0); + const uint64_t vRev = metaData.getUI(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_NODE_REVISION, 0); + const uint64_t vProto = metaData.getUI(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_PROTOCOL_VERSION, 0); member["vMajor"] = vMajor; member["vMinor"] = vMinor; @@ -1381,7 +1811,7 @@ void EmbeddedNetworkController::_request( { std::lock_guard l(_memberStatus_l); - _MemberStatus &ms = _memberStatus[msk]; + _MemberStatus& ms = _memberStatus[msk]; ms.authenticationExpiryTime = authenticationExpiryTime; ms.vMajor = (int)vMajor; ms.vMinor = (int)vMinor; @@ -1396,13 +1826,20 @@ void EmbeddedNetworkController::_request( _expiringSoon.insert(std::pair(authenticationExpiryTime, msk)); } } - } else { + } + else { // If they are not authorized, STOP! 
DB::cleanMember(member); - _db.save(member,true); - _sender->ncSendError(nwid,requestPacketId,identity.address(),NetworkController::NC_ERROR_ACCESS_DENIED, nullptr, 0); + _db.save(member, true); + _sender->ncSendError(nwid, requestPacketId, identity.address(), NetworkController::NC_ERROR_ACCESS_DENIED, nullptr, 0); +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + b7.stop(); +#endif return; } +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + b7.stop(); +#endif // ------------------------------------------------------------------------- // If we made it this far, they are authorized (and authenticated). @@ -1410,6 +1847,12 @@ void EmbeddedNetworkController::_request( // Default timeout: 15 minutes. Maximum: two hours. Can be specified by an optional field in the network config // if something longer than 15 minutes is desired. Minimum is 5 minutes since shorter than that would be flaky. +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + auto b8 = _json_schlep.Add({ { "thread", threadID } }); + auto c8 = _json_schlep_count.Add({ { "thread", threadID } }); + c8++; + b8.start(); +#endif int64_t credentialtmd = ZT_NETWORKCONFIG_DEFAULT_CREDENTIAL_TIME_DFL_MAX_DELTA; if (network.contains("certificateTimeoutWindowSize")) { credentialtmd = (int64_t)network["certificateTimeoutWindowSize"]; @@ -1419,136 +1862,143 @@ void EmbeddedNetworkController::_request( std::unique_ptr nc(new NetworkConfig()); nc->networkId = nwid; - nc->type = OSUtils::jsonBool(network["private"],true) ? ZT_NETWORK_TYPE_PRIVATE : ZT_NETWORK_TYPE_PUBLIC; + nc->type = OSUtils::jsonBool(network["private"], true) ? ZT_NETWORK_TYPE_PRIVATE : ZT_NETWORK_TYPE_PUBLIC; nc->timestamp = now; nc->credentialTimeMaxDelta = credentialtmd; - nc->revision = OSUtils::jsonInt(network["revision"],0ULL); + nc->revision = OSUtils::jsonInt(network["revision"], 0ULL); nc->issuedTo = identity.address(); - if (OSUtils::jsonBool(network["enableBroadcast"],true)) nc->flags |= ZT_NETWORKCONFIG_FLAG_ENABLE_BROADCAST; - Utils::scopy(nc->name,sizeof(nc->name),OSUtils::jsonString(network["name"],"").c_str()); - nc->mtu = std::max(std::min((unsigned int)OSUtils::jsonInt(network["mtu"],ZT_DEFAULT_MTU),(unsigned int)ZT_MAX_MTU),(unsigned int)ZT_MIN_MTU); - nc->multicastLimit = (unsigned int)OSUtils::jsonInt(network["multicastLimit"],32ULL); + if (OSUtils::jsonBool(network["enableBroadcast"], true)) + nc->flags |= ZT_NETWORKCONFIG_FLAG_ENABLE_BROADCAST; + Utils::scopy(nc->name, sizeof(nc->name), OSUtils::jsonString(network["name"], "").c_str()); + nc->mtu = std::max(std::min((unsigned int)OSUtils::jsonInt(network["mtu"], ZT_DEFAULT_MTU), (unsigned int)ZT_MAX_MTU), (unsigned int)ZT_MIN_MTU); + nc->multicastLimit = (unsigned int)OSUtils::jsonInt(network["multicastLimit"], 32ULL); - nc->ssoEnabled = networkSSOEnabled; //OSUtils::jsonBool(network["ssoEnabled"], false); + nc->ssoEnabled = networkSSOEnabled; // OSUtils::jsonBool(network["ssoEnabled"], false); nc->ssoVersion = info.version; if (info.version == 0) { nc->authenticationExpiryTime = OSUtils::jsonInt(member["authenticationExpiryTime"], 0LL); - if (!info.authenticationURL.empty()) { + if (! info.authenticationURL.empty()) { Utils::scopy(nc->authenticationURL, sizeof(nc->authenticationURL), info.authenticationURL.c_str()); } } else if (info.version == 1) { nc->authenticationExpiryTime = OSUtils::jsonInt(member["authenticationExpiryTime"], 0LL); - if (!info.authenticationURL.empty()) { + if (! 
info.authenticationURL.empty()) { Utils::scopy(nc->authenticationURL, sizeof(nc->authenticationURL), info.authenticationURL.c_str()); } - if (!info.centralAuthURL.empty()) { + if (! info.centralAuthURL.empty()) { Utils::scopy(nc->centralAuthURL, sizeof(nc->centralAuthURL), info.centralAuthURL.c_str()); } - if (!info.issuerURL.empty()) { + if (! info.issuerURL.empty()) { +#ifdef ZT_DEBUG fprintf(stderr, "copying issuerURL to nc: %s\n", info.issuerURL.c_str()); +#endif Utils::scopy(nc->issuerURL, sizeof(nc->issuerURL), info.issuerURL.c_str()); } - if (!info.ssoNonce.empty()) { + if (! info.ssoNonce.empty()) { Utils::scopy(nc->ssoNonce, sizeof(nc->ssoNonce), info.ssoNonce.c_str()); } - if (!info.ssoState.empty()) { + if (! info.ssoState.empty()) { Utils::scopy(nc->ssoState, sizeof(nc->ssoState), info.ssoState.c_str()); } - if (!info.ssoClientID.empty()) { + if (! info.ssoClientID.empty()) { Utils::scopy(nc->ssoClientID, sizeof(nc->ssoClientID), info.ssoClientID.c_str()); } } - std::string rtt(OSUtils::jsonString(member["remoteTraceTarget"],"")); + std::string rtt(OSUtils::jsonString(member["remoteTraceTarget"], "")); if (rtt.length() == 10) { nc->remoteTraceTarget = Address(Utils::hexStrToU64(rtt.c_str())); - nc->remoteTraceLevel = (Trace::Level)OSUtils::jsonInt(member["remoteTraceLevel"],0ULL); - } else { - rtt = OSUtils::jsonString(network["remoteTraceTarget"],""); + nc->remoteTraceLevel = (Trace::Level)OSUtils::jsonInt(member["remoteTraceLevel"], 0ULL); + } + else { + rtt = OSUtils::jsonString(network["remoteTraceTarget"], ""); if (rtt.length() == 10) { nc->remoteTraceTarget = Address(Utils::hexStrToU64(rtt.c_str())); - } else { + } + else { nc->remoteTraceTarget.zero(); } - nc->remoteTraceLevel = (Trace::Level)OSUtils::jsonInt(network["remoteTraceLevel"],0ULL); + nc->remoteTraceLevel = (Trace::Level)OSUtils::jsonInt(network["remoteTraceLevel"], 0ULL); } - for(std::vector
<Address>::const_iterator ab(ns.activeBridges.begin());ab!=ns.activeBridges.end();++ab) - nc->addSpecialist(*ab,ZT_NETWORKCONFIG_SPECIALIST_TYPE_ACTIVE_BRIDGE); + for (std::vector<Address>
::const_iterator ab(ns.activeBridges.begin()); ab != ns.activeBridges.end(); ++ab) { + nc->addSpecialist(*ab, ZT_NETWORKCONFIG_SPECIALIST_TYPE_ACTIVE_BRIDGE); + } - json &v4AssignMode = network["v4AssignMode"]; - json &v6AssignMode = network["v6AssignMode"]; - json &ipAssignmentPools = network["ipAssignmentPools"]; - json &routes = network["routes"]; - json &rules = network["rules"]; - json &capabilities = network["capabilities"]; - json &tags = network["tags"]; - json &memberCapabilities = member["capabilities"]; - json &memberTags = member["tags"]; - json &dns = network["dns"]; + json& v4AssignMode = network["v4AssignMode"]; + json& v6AssignMode = network["v6AssignMode"]; + json& ipAssignmentPools = network["ipAssignmentPools"]; + json& routes = network["routes"]; + json& rules = network["rules"]; + json& capabilities = network["capabilities"]; + json& tags = network["tags"]; + json& memberCapabilities = member["capabilities"]; + json& memberTags = member["tags"]; + json& dns = network["dns"]; - //fprintf(stderr, "IP Assignment Pools for Network %s: %s\n", nwids, OSUtils::jsonDump(ipAssignmentPools, 2).c_str()); + // fprintf(stderr, "IP Assignment Pools for Network %s: %s\n", nwids, OSUtils::jsonDump(ipAssignmentPools, 2).c_str()); - if (metaData.getUI(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_RULES_ENGINE_REV,0) <= 0) { + if (metaData.getUI(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_RULES_ENGINE_REV, 0) <= 0) { // Old versions with no rules engine support get an allow everything rule. // Since rules are enforced bidirectionally, newer versions *will* still // enforce rules on the inbound side. nc->ruleCount = 1; nc->rules[0].t = ZT_NETWORK_RULE_ACTION_ACCEPT; - } else { + } + else { if (rules.is_array()) { - for(unsigned long i=0;iruleCount >= ZT_MAX_NETWORK_RULES) break; - if (_parseRule(rules[i],nc->rules[nc->ruleCount])) + if (_parseRule(rules[i], nc->rules[nc->ruleCount])) ++nc->ruleCount; } } - std::map< uint64_t,json * > capsById; - if (!memberCapabilities.is_array()) + std::map capsById; + if (! memberCapabilities.is_array()) memberCapabilities = json::array(); if (capabilities.is_array()) { - for(unsigned long i=0;i::const_iterator ctmp = capsById.find(capId); + for (unsigned long i = 0; i < memberCapabilities.size(); ++i) { + const uint64_t capId = OSUtils::jsonInt(memberCapabilities[i], 0ULL) & 0xffffffffULL; + std::map::const_iterator ctmp = capsById.find(capId); if (ctmp != capsById.end()) { - json *cap = ctmp->second; - if ((cap)&&(cap->is_object())&&(!cap->empty())) { + json* cap = ctmp->second; + if ((cap) && (cap->is_object()) && (! 
cap->empty())) { ZT_VirtualNetworkRule capr[ZT_MAX_CAPABILITY_RULES]; unsigned int caprc = 0; - json &caprj = (*cap)["rules"]; - if ((caprj.is_array())&&(!caprj.empty())) { - for(unsigned long j=0;j= ZT_MAX_CAPABILITY_RULES) break; - if (_parseRule(caprj[j],capr[caprc])) + if (_parseRule(caprj[j], capr[caprc])) ++caprc; } } - nc->capabilities[nc->capabilityCount] = Capability((uint32_t)capId,nwid,now,1,capr,caprc); - if (nc->capabilities[nc->capabilityCount].sign(_signingId,identity.address())) + nc->capabilities[nc->capabilityCount] = Capability((uint32_t)capId, nwid, now, 1, capr, caprc); + if (nc->capabilities[nc->capabilityCount].sign(_signingId, identity.address())) ++nc->capabilityCount; if (nc->capabilityCount >= ZT_MAX_NETWORK_CAPABILITIES) break; @@ -1556,87 +2006,88 @@ void EmbeddedNetworkController::_request( } } - std::map< uint32_t,uint32_t > memberTagsById; + std::map memberTagsById; if (memberTags.is_array()) { - for(unsigned long i=0;i::const_iterator t(memberTagsById.begin());t!=memberTagsById.end();++t) { + for (std::map::const_iterator t(memberTagsById.begin()); t != memberTagsById.end(); ++t) { if (nc->tagCount >= ZT_MAX_NETWORK_TAGS) break; - nc->tags[nc->tagCount] = Tag(nwid,now,identity.address(),t->first,t->second); + nc->tags[nc->tagCount] = Tag(nwid, now, identity.address(), t->first, t->second); if (nc->tags[nc->tagCount].sign(_signingId)) ++nc->tagCount; } } if (routes.is_array()) { - for(unsigned long i=0;irouteCount >= ZT_MAX_NETWORK_ROUTES) break; - json &route = routes[i]; - json &target = route["target"]; - json &via = route["via"]; + json& route = routes[i]; + json& target = route["target"]; + json& via = route["via"]; if (target.is_string()) { const InetAddress t(target.get().c_str()); InetAddress v; - if (via.is_string()) v.fromString(via.get().c_str()); - if ((t.ss_family == AF_INET)||(t.ss_family == AF_INET6)) { - ZT_VirtualNetworkRoute *r = &(nc->routes[nc->routeCount]); - *(reinterpret_cast(&(r->target))) = t; + if (via.is_string()) + v.fromString(via.get().c_str()); + if ((t.ss_family == AF_INET) || (t.ss_family == AF_INET6)) { + ZT_VirtualNetworkRoute* r = &(nc->routes[nc->routeCount]); + *(reinterpret_cast(&(r->target))) = t; if (v.ss_family == t.ss_family) - *(reinterpret_cast(&(r->via))) = v; + *(reinterpret_cast(&(r->via))) = v; ++nc->routeCount; } } } } - const bool noAutoAssignIps = OSUtils::jsonBool(member["noAutoAssignIps"],false); + const bool noAutoAssignIps = OSUtils::jsonBool(member["noAutoAssignIps"], false); - if ((v6AssignMode.is_object())&&(!noAutoAssignIps)) { - if ((OSUtils::jsonBool(v6AssignMode["rfc4193"],false))&&(nc->staticIpCount < ZT_MAX_ZT_ASSIGNED_ADDRESSES)) { - nc->staticIps[nc->staticIpCount++] = InetAddress::makeIpv6rfc4193(nwid,identity.address().toInt()); + if ((v6AssignMode.is_object()) && (! 
noAutoAssignIps)) { + if ((OSUtils::jsonBool(v6AssignMode["rfc4193"], false)) && (nc->staticIpCount < ZT_MAX_ZT_ASSIGNED_ADDRESSES)) { + nc->staticIps[nc->staticIpCount++] = InetAddress::makeIpv6rfc4193(nwid, identity.address().toInt()); nc->flags |= ZT_NETWORKCONFIG_FLAG_ENABLE_IPV6_NDP_EMULATION; } - if ((OSUtils::jsonBool(v6AssignMode["6plane"],false))&&(nc->staticIpCount < ZT_MAX_ZT_ASSIGNED_ADDRESSES)) { - nc->staticIps[nc->staticIpCount++] = InetAddress::makeIpv66plane(nwid,identity.address().toInt()); + if ((OSUtils::jsonBool(v6AssignMode["6plane"], false)) && (nc->staticIpCount < ZT_MAX_ZT_ASSIGNED_ADDRESSES)) { + nc->staticIps[nc->staticIpCount++] = InetAddress::makeIpv66plane(nwid, identity.address().toInt()); nc->flags |= ZT_NETWORKCONFIG_FLAG_ENABLE_IPV6_NDP_EMULATION; } } bool haveManagedIpv4AutoAssignment = false; - bool haveManagedIpv6AutoAssignment = false; // "special" NDP-emulated address types do not count - json ipAssignments = member["ipAssignments"]; // we want to make a copy + bool haveManagedIpv6AutoAssignment = false; // "special" NDP-emulated address types do not count + json ipAssignments = member["ipAssignments"]; // we want to make a copy if (ipAssignments.is_array()) { - for(unsigned long i=0;irouteCount;++rk) { - if (reinterpret_cast(&(nc->routes[rk].target))->containsAddress(ip)) { - const int nb = (int)(reinterpret_cast(&(nc->routes[rk].target))->netmaskBits()); + for (unsigned int rk = 0; rk < nc->routeCount; ++rk) { + if (reinterpret_cast(&(nc->routes[rk].target))->containsAddress(ip)) { + const int nb = (int)(reinterpret_cast(&(nc->routes[rk].target))->netmaskBits()); if (nb > routedNetmaskBits) routedNetmaskBits = nb; } @@ -1654,20 +2105,21 @@ void EmbeddedNetworkController::_request( } } } - } else { + } + else { ipAssignments = json::array(); } - if ( (ipAssignmentPools.is_array()) && ((v6AssignMode.is_object())&&(OSUtils::jsonBool(v6AssignMode["zt"],false))) && (!haveManagedIpv6AutoAssignment) && (!noAutoAssignIps) ) { - for(unsigned long p=0;((p s[1])&&((e[1] - s[1]) >= 0xffffffffffULL)) { + for (unsigned int trialCount = 0; trialCount < 1000; ++trialCount) { + if ((trialCount == 0) && (e[1] > s[1]) && ((e[1] - s[1]) >= 0xffffffffffULL)) { // First see if we can just cram a ZeroTier ID into the higher 64 bits. If so do that. xx[0] = Utils::hton(x[0]); xx[1] = Utils::hton(x[1] + identity.address().toInt()); - } else { + } + else { // Otherwise pick random addresses -- this technically doesn't explore the whole range if the lower 64 bit range is >= 1 but that won't matter since that would be huge anyway - Utils::getSecureRandom((void *)xx,16); + Utils::getSecureRandom((void*)xx, 16); if ((e[0] > s[0])) xx[0] %= (e[0] - s[0]); - else xx[0] = 0; + else + xx[0] = 0; if ((e[1] > s[1])) xx[1] %= (e[1] - s[1]); - else xx[1] = 0; + else + xx[1] = 0; xx[0] = Utils::hton(x[0] + xx[0]); xx[1] = Utils::hton(x[1] + xx[1]); } - InetAddress ip6((const void *)xx,16,0); + InetAddress ip6((const void*)xx, 16, 0); // Check if this IP is within a local-to-Ethernet routed network int routedNetmaskBits = 0; - for(unsigned int rk=0;rkrouteCount;++rk) { - if ( (!nc->routes[rk].via.ss_family) && (nc->routes[rk].target.ss_family == AF_INET6) && (reinterpret_cast(&(nc->routes[rk].target))->containsAddress(ip6)) ) - routedNetmaskBits = reinterpret_cast(&(nc->routes[rk].target))->netmaskBits(); + for (unsigned int rk = 0; rk < nc->routeCount; ++rk) { + if ((! 
nc->routes[rk].via.ss_family) && (nc->routes[rk].target.ss_family == AF_INET6) && (reinterpret_cast(&(nc->routes[rk].target))->containsAddress(ip6))) + routedNetmaskBits = reinterpret_cast(&(nc->routes[rk].target))->netmaskBits(); } // If it's routed, then try to claim and assign it and if successful end loop - if ( (routedNetmaskBits > 0) && (!std::binary_search(ns.allocatedIps.begin(),ns.allocatedIps.end(),ip6)) ) { + if ((routedNetmaskBits > 0) && (! std::binary_search(ns.allocatedIps.begin(), ns.allocatedIps.end(), ip6))) { char tmpip[64]; const std::string ipStr(ip6.toIpString(tmpip)); - if (std::find(ipAssignments.begin(),ipAssignments.end(),ipStr) == ipAssignments.end()) { + if (std::find(ipAssignments.begin(), ipAssignments.end(), ipStr) == ipAssignments.end()) { ipAssignments.push_back(ipStr); member["ipAssignments"] = ipAssignments; ip6.setPort((unsigned int)routedNetmaskBits); @@ -1722,36 +2177,36 @@ void EmbeddedNetworkController::_request( } } - if ( (ipAssignmentPools.is_array()) && ((v4AssignMode.is_object())&&(OSUtils::jsonBool(v4AssignMode["zt"],false))) && (!haveManagedIpv4AutoAssignment) && (!noAutoAssignIps) ) { - for(unsigned long p=0;((p(&ipRangeStartIA)->sin_addr.s_addr)); - uint32_t ipRangeEnd = Utils::ntoh((uint32_t)(reinterpret_cast(&ipRangeEndIA)->sin_addr.s_addr)); + InetAddress ipRangeStartIA(OSUtils::jsonString(pool["ipRangeStart"], "").c_str()); + InetAddress ipRangeEndIA(OSUtils::jsonString(pool["ipRangeEnd"], "").c_str()); + if ((ipRangeStartIA.ss_family == AF_INET) && (ipRangeEndIA.ss_family == AF_INET)) { + uint32_t ipRangeStart = Utils::ntoh((uint32_t)(reinterpret_cast(&ipRangeStartIA)->sin_addr.s_addr)); + uint32_t ipRangeEnd = Utils::ntoh((uint32_t)(reinterpret_cast(&ipRangeEndIA)->sin_addr.s_addr)); - if ((ipRangeEnd < ipRangeStart)||(ipRangeStart == 0)) + if ((ipRangeEnd < ipRangeStart) || (ipRangeStart == 0)) continue; uint32_t ipRangeLen = ipRangeEnd - ipRangeStart; // Start with the LSB of the member's address uint32_t ipTrialCounter = (uint32_t)(identity.address().toInt() & 0xffffffff); - for(uint32_t k=ipRangeStart,trialCount=0;((k<=ipRangeEnd)&&(trialCount < 1000));++k,++trialCount) { + for (uint32_t k = ipRangeStart, trialCount = 0; ((k <= ipRangeEnd) && (trialCount < 1000)); ++k, ++trialCount) { uint32_t ip = (ipRangeLen > 0) ? 
(ipRangeStart + (ipTrialCounter % ipRangeLen)) : ipRangeStart; ++ipTrialCounter; if ((ip & 0x000000ff) == 0x000000ff) { - continue; // don't allow addresses that end in .255 + continue; // don't allow addresses that end in .255 } // Check if this IP is within a local-to-Ethernet routed network int routedNetmaskBits = -1; - for(unsigned int rk=0;rkrouteCount;++rk) { + for (unsigned int rk = 0; rk < nc->routeCount; ++rk) { if (nc->routes[rk].target.ss_family == AF_INET) { - uint32_t targetIp = Utils::ntoh((uint32_t)(reinterpret_cast(&(nc->routes[rk].target))->sin_addr.s_addr)); - int targetBits = Utils::ntoh((uint16_t)(reinterpret_cast(&(nc->routes[rk].target))->sin_port)); + uint32_t targetIp = Utils::ntoh((uint32_t)(reinterpret_cast(&(nc->routes[rk].target))->sin_addr.s_addr)); + int targetBits = Utils::ntoh((uint16_t)(reinterpret_cast(&(nc->routes[rk].target))->sin_port)); if ((ip & (0xffffffff << (32 - targetBits))) == targetIp) { routedNetmaskBits = targetBits; break; @@ -1760,15 +2215,15 @@ void EmbeddedNetworkController::_request( } // If it's routed, then try to claim and assign it and if successful end loop - const InetAddress ip4(Utils::hton(ip),0); - if ( (routedNetmaskBits > 0) && (!std::binary_search(ns.allocatedIps.begin(),ns.allocatedIps.end(),ip4)) ) { + const InetAddress ip4(Utils::hton(ip), 0); + if ((routedNetmaskBits > 0) && (! std::binary_search(ns.allocatedIps.begin(), ns.allocatedIps.end(), ip4))) { char tmpip[64]; const std::string ipStr(ip4.toIpString(tmpip)); - if (std::find(ipAssignments.begin(),ipAssignments.end(),ipStr) == ipAssignments.end()) { + if (std::find(ipAssignments.begin(), ipAssignments.end(), ipStr) == ipAssignments.end()) { ipAssignments.push_back(ipStr); member["ipAssignments"] = ipAssignments; if (nc->staticIpCount < ZT_MAX_ZT_ASSIGNED_ADDRESSES) { - struct sockaddr_in *const v4ip = reinterpret_cast(&(nc->staticIps[nc->staticIpCount++])); + struct sockaddr_in* const v4ip = reinterpret_cast(&(nc->staticIps[nc->staticIpCount++])); v4ip->sin_family = AF_INET; v4ip->sin_port = Utils::hton((uint16_t)routedNetmaskBits); v4ip->sin_addr.s_addr = Utils::hton(ip); @@ -1782,100 +2237,160 @@ void EmbeddedNetworkController::_request( } } } - - if(dns.is_object()) { - std::string domain = OSUtils::jsonString(dns["domain"],""); + + if (dns.is_object()) { + std::string domain = OSUtils::jsonString(dns["domain"], ""); memcpy(nc->dns.domain, domain.c_str(), domain.size()); - json &addrArray = dns["servers"]; + json& addrArray = dns["servers"]; if (addrArray.is_array()) { - for(unsigned int j = 0; j < addrArray.size() && j < ZT_MAX_DNS_SERVERS; ++j) { - json &addr = addrArray[j]; - nc->dns.server_addr[j] = InetAddress(OSUtils::jsonString(addr,"").c_str()); + for (unsigned int j = 0; j < addrArray.size() && j < ZT_MAX_DNS_SERVERS; ++j) { + json& addr = addrArray[j]; + nc->dns.server_addr[j] = InetAddress(OSUtils::jsonString(addr, "").c_str()); } } - } else { + } + else { dns = json::object(); } +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + b8.stop(); +#endif // Issue a certificate of ownership for all static IPs +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + auto b9 = _issue_certificate.Add({ { "thread", threadID } }); + auto c9 = _issue_certificate_count.Add({ { "thread", threadID } }); + c9++; + b9.start(); +#endif if (nc->staticIpCount) { - nc->certificatesOfOwnership[0] = CertificateOfOwnership(nwid,now,identity.address(),1); - for(unsigned int i=0;istaticIpCount;++i) + nc->certificatesOfOwnership[0] = CertificateOfOwnership(nwid, now, identity.address(), 1); + 
for (unsigned int i = 0; i < nc->staticIpCount; ++i) { nc->certificatesOfOwnership[0].addThing(nc->staticIps[i]); + } nc->certificatesOfOwnership[0].sign(_signingId); nc->certificateOfOwnershipCount = 1; } - CertificateOfMembership com(now,credentialtmd,nwid,identity); + CertificateOfMembership com(now, credentialtmd, nwid, identity); if (com.sign(_signingId)) { nc->com = com; - } else { - _sender->ncSendError(nwid,requestPacketId,identity.address(),NetworkController::NC_ERROR_INTERNAL_SERVER_ERROR, nullptr, 0); + } + else { + _sender->ncSendError(nwid, requestPacketId, identity.address(), NetworkController::NC_ERROR_INTERNAL_SERVER_ERROR, nullptr, 0); +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + b9.stop(); +#endif return; } +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + b9.stop(); + auto b10 = _save_member.Add({ { "thread", threadID } }); + auto c10 = _save_member_count.Add({ { "thread", threadID } }); + c10++; + b10.start(); +#endif DB::cleanMember(member); - _db.save(member,true); - _sender->ncSendConfig(nwid,requestPacketId,identity.address(),*(nc.get()),metaData.getUI(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_VERSION,0) < 6); + _db.save(member, true); +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + b10.stop(); + + auto b11 = _send_netconf.Add({ { "thread", threadID } }); + auto c11 = _send_netconf_count.Add({ { "thread", threadID } }); + c11++; + b11.start(); +#endif + _sender->ncSendConfig(nwid, requestPacketId, identity.address(), *(nc.get()), metaData.getUI(ZT_NETWORKCONFIG_REQUEST_METADATA_KEY_VERSION, 0) < 6); +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + b11.stop(); +#endif } void EmbeddedNetworkController::_startThreads() { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_network_controller"); + auto span = tracer->StartSpan("embedded_network_controller::_startThreads"); + auto scope = tracer->WithActiveSpan(span); + std::lock_guard l(_threads_l); - if (!_threads.empty()) + if (! _threads.empty()) { return; - const long hwc = std::max((long)std::thread::hardware_concurrency(),(long)1); - for(long t=0;t expired; - nlohmann::json network, member; - for(;;) { - _RQEntry *qe = (_RQEntry *)0; + Metrics::network_config_request_threads++; + for (;;) { + _RQEntry* qe = (_RQEntry*)0; + Metrics::network_config_request_queue_size = _queue.size(); auto timedWaitResult = _queue.get(qe, 1000); - if (timedWaitResult == BlockingQueue<_RQEntry *>::STOP) { + if (timedWaitResult == BlockingQueue<_RQEntry*>::STOP) { break; - } else if (timedWaitResult == BlockingQueue<_RQEntry *>::OK) { + } + else if (timedWaitResult == BlockingQueue<_RQEntry*>::OK) { if (qe) { try { - _request(qe->nwid,qe->fromAddr,qe->requestPacketId,qe->identity,qe->metaData); - } catch (std::exception &e) { - fprintf(stderr,"ERROR: exception in controller request handling thread: %s" ZT_EOL_S,e.what()); - } catch ( ... ) { - fprintf(stderr,"ERROR: exception in controller request handling thread: unknown exception" ZT_EOL_S); + _request(qe->nwid, qe->fromAddr, qe->requestPacketId, qe->identity, qe->metaData); + } + catch (std::exception& e) { + fprintf(stderr, "ERROR: exception in controller request handling thread: %s" ZT_EOL_S, e.what()); + } + catch (...) 
{ + fprintf(stderr, "ERROR: exception in controller request handling thread: unknown exception" ZT_EOL_S); } delete qe; + qe = nullptr; } } - - expired.clear(); - int64_t now = OSUtils::now(); - { - std::lock_guard l(_expiringSoon_l); - for(auto s=_expiringSoon.begin();s!=_expiringSoon.end();) { - const int64_t when = s->first; - if (when <= now) { - // The user may have re-authorized, so we must actually look it up and check. - network.clear(); - member.clear(); - if (_db.get(s->second.networkId, network, s->second.nodeId, member)) { - int64_t authenticationExpiryTime = (int64_t)OSUtils::jsonInt(member["authenticationExpiryTime"], 0); - if (authenticationExpiryTime <= now) { - expired.push_back(s->second); - } - } - _expiringSoon.erase(s++); - } else { - // Don't bother going further into the future than necessary. - break; - } - } - } - for(auto e=expired.begin();e!=expired.end();++e) { - onNetworkMemberDeauthorize(nullptr, e->networkId, e->nodeId); - } } + Metrics::network_config_request_threads--; }); } } -} // namespace ZeroTier +void EmbeddedNetworkController::_ssoExpiryThread() +{ + while (_ssoExpiryRunning) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("embedded_network_controller"); + auto span = tracer->StartSpan("embedded_network_controller::_ssoExpiryThread"); + auto scope = tracer->WithActiveSpan(span); + + std::vector<_MemberStatusKey> expired; + nlohmann::json network, member; + int64_t now = OSUtils::now(); + { + std::lock_guard l(_expiringSoon_l); + for (auto s = _expiringSoon.begin(); s != _expiringSoon.end();) { + Metrics::sso_expiration_checks++; + const int64_t when = s->first; + if (when <= now) { + // The user may have re-authorized, so we must actually look it up and check. + network.clear(); + member.clear(); + if (_db.get(s->second.networkId, network, s->second.nodeId, member)) { + int64_t authenticationExpiryTime = (int64_t)OSUtils::jsonInt(member["authenticationExpiryTime"], 0); + if (authenticationExpiryTime <= now) { + expired.push_back(s->second); + } + } + s = _expiringSoon.erase(s); + } + else { + // Don't bother going further into the future than necessary. + break; + } + } + } + for (auto e = expired.begin(); e != expired.end(); ++e) { + Metrics::sso_member_deauth++; + onNetworkMemberDeauthorize(nullptr, e->networkId, e->nodeId); + } + span->End(); + std::this_thread::sleep_for(std::chrono::milliseconds(500)); + } +} + +} // namespace ZeroTier diff --git a/controller/EmbeddedNetworkController.hpp b/controller/EmbeddedNetworkController.hpp index 49c1c36eb..111417bfb 100644 --- a/controller/EmbeddedNetworkController.hpp +++ b/controller/EmbeddedNetworkController.hpp @@ -4,7 +4,7 @@ * Use of this software is governed by the Business Source License included * in the LICENSE.TXT file in the project's root directory. * - * Change Date: 2025-01-01 + * Change Date: 2026-01-01 * * On the date above, in accordance with the Business Source License, use * of this software will be governed by version 2.0 of the Apache License. 
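Note on the tracing idiom: every handler and callback touched above opens with the same four lines (provider → tracer → span → scope). A minimal, self-contained C++ sketch of that idiom follows, using only the opentelemetry-cpp calls already present in this patch; the function and span names below are illustrative, not part of the change:

    #include "opentelemetry/trace/provider.h"

    void tracedOperation()
    {
        // Resolve the process-wide TracerProvider (a no-op provider if none has been installed).
        auto provider = opentelemetry::trace::Provider::GetTracerProvider();
        // Get a named tracer; the controller code above uses "embedded_controller" / "embedded_network_controller".
        auto tracer = provider->GetTracer("embedded_controller");
        // Start a span for this unit of work ...
        auto span = tracer->StartSpan("example::tracedOperation");
        // ... and make it the active span on this thread for as long as `scope` lives,
        // so any spans started further down become its children.
        auto scope = tracer->WithActiveSpan(span);

        // ... work being traced ...

        span->End();   // explicit End(), as _ssoExpiryThread() does on each loop iteration
    }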
@@ -14,124 +14,109 @@ #ifndef ZT_SQLITENETWORKCONTROLLER_HPP #define ZT_SQLITENETWORKCONTROLLER_HPP -#include - -#include -#include -#include -#include -#include -#include -#include -#include - +#include "../node/Address.hpp" #include "../node/Constants.hpp" +#include "../node/InetAddress.hpp" #include "../node/NetworkController.hpp" #include "../node/Utils.hpp" -#include "../node/Address.hpp" -#include "../node/InetAddress.hpp" - +#include "../osdep/BlockingQueue.hpp" #include "../osdep/OSUtils.hpp" #include "../osdep/Thread.hpp" -#include "../osdep/BlockingQueue.hpp" - -#include "../ext/json/json.hpp" - #include "DB.hpp" #include "DBMirrorSet.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + namespace ZeroTier { class Node; struct RedisConfig; -class EmbeddedNetworkController : public NetworkController,public DB::ChangeListener -{ -public: +class EmbeddedNetworkController + : public NetworkController + , public DB::ChangeListener { + public: /** * @param node Parent node * @param dbPath Database path (file path or database credentials) */ - EmbeddedNetworkController(Node *node,const char *ztPath,const char *dbPath, int listenPort, RedisConfig *rc); + EmbeddedNetworkController(Node* node, const char* ztPath, const char* dbPath, int listenPort, RedisConfig* rc); virtual ~EmbeddedNetworkController(); - virtual void init(const Identity &signingId,Sender *sender); + virtual void init(const Identity& signingId, Sender* sender); - void setSSORedirectURL(const std::string &url); + void setSSORedirectURL(const std::string& url); - virtual void request( - uint64_t nwid, - const InetAddress &fromAddr, - uint64_t requestPacketId, - const Identity &identity, - const Dictionary &metaData); + virtual void request(uint64_t nwid, const InetAddress& fromAddr, uint64_t requestPacketId, const Identity& identity, const Dictionary& metaData); - unsigned int handleControlPlaneHttpGET( - const std::vector &path, - const std::map &urlArgs, - const std::map &headers, - const std::string &body, - std::string &responseBody, - std::string &responseContentType); - unsigned int handleControlPlaneHttpPOST( - const std::vector &path, - const std::map &urlArgs, - const std::map &headers, - const std::string &body, - std::string &responseBody, - std::string &responseContentType); - unsigned int handleControlPlaneHttpDELETE( - const std::vector &path, - const std::map &urlArgs, - const std::map &headers, - const std::string &body, - std::string &responseBody, - std::string &responseContentType); + void configureHTTPControlPlane(httplib::Server& s, httplib::Server& sV6, const std::function); - void handleRemoteTrace(const ZT_RemoteTrace &rt); + void handleRemoteTrace(const ZT_RemoteTrace& rt); - virtual void onNetworkUpdate(const void *db,uint64_t networkId,const nlohmann::json &network); - virtual void onNetworkMemberUpdate(const void *db,uint64_t networkId,uint64_t memberId,const nlohmann::json &member); - virtual void onNetworkMemberDeauthorize(const void *db,uint64_t networkId,uint64_t memberId); + virtual void onNetworkUpdate(const void* db, uint64_t networkId, const nlohmann::json& network); + virtual void onNetworkMemberUpdate(const void* db, uint64_t networkId, uint64_t memberId, const nlohmann::json& member); + virtual void onNetworkMemberDeauthorize(const void* db, uint64_t networkId, uint64_t memberId); -private: - void _request(uint64_t nwid,const InetAddress &fromAddr,uint64_t requestPacketId,const Identity &identity,const Dictionary &metaData); 
+ private: + void _request(uint64_t nwid, const InetAddress& fromAddr, uint64_t requestPacketId, const Identity& identity, const Dictionary& metaData); void _startThreads(); + void _ssoExpiryThread(); - struct _RQEntry - { + std::string networkUpdateFromPostData(uint64_t networkID, const std::string& body); + + struct _RQEntry { uint64_t nwid; uint64_t requestPacketId; InetAddress fromAddr; Identity identity; Dictionary metaData; - enum { - RQENTRY_TYPE_REQUEST = 0 - } type; + enum { RQENTRY_TYPE_REQUEST = 0 } type; }; - struct _MemberStatusKey - { - _MemberStatusKey() : networkId(0),nodeId(0) {} - _MemberStatusKey(const uint64_t nwid,const uint64_t nid) : networkId(nwid),nodeId(nid) {} + struct _MemberStatusKey { + _MemberStatusKey() : networkId(0), nodeId(0) + { + } + _MemberStatusKey(const uint64_t nwid, const uint64_t nid) : networkId(nwid), nodeId(nid) + { + } uint64_t networkId; uint64_t nodeId; - inline bool operator==(const _MemberStatusKey &k) const { return ((k.networkId == networkId)&&(k.nodeId == nodeId)); } - inline bool operator<(const _MemberStatusKey &k) const { return (k.networkId < networkId) || ((k.networkId == networkId)&&(k.nodeId < nodeId)); } + inline bool operator==(const _MemberStatusKey& k) const + { + return ((k.networkId == networkId) && (k.nodeId == nodeId)); + } + inline bool operator<(const _MemberStatusKey& k) const + { + return (k.networkId < networkId) || ((k.networkId == networkId) && (k.nodeId < nodeId)); + } }; - struct _MemberStatus - { - _MemberStatus() : lastRequestTime(0),authenticationExpiryTime(-1),vMajor(-1),vMinor(-1),vRev(-1),vProto(-1) {} + struct _MemberStatus { + _MemberStatus() : lastRequestTime(0), authenticationExpiryTime(-1), vMajor(-1), vMinor(-1), vRev(-1), vProto(-1) + { + } int64_t lastRequestTime; int64_t authenticationExpiryTime; - int vMajor,vMinor,vRev,vProto; + int vMajor, vMinor, vRev, vProto; Dictionary lastRequestMetaData; Identity identity; - inline bool online(const int64_t now) const { return ((now - lastRequestTime) < (ZT_NETWORK_AUTOCONF_DELAY * 2)); } + inline bool online(const int64_t now) const + { + return ((now - lastRequestTime) < (ZT_NETWORK_AUTOCONF_DELAY * 2)); + } }; - struct _MemberStatusHash - { - inline std::size_t operator()(const _MemberStatusKey &networkIdNodeId) const + struct _MemberStatusHash { + inline std::size_t operator()(const _MemberStatusKey& networkIdNodeId) const { return (std::size_t)(networkIdNodeId.networkId + networkIdNodeId.nodeId); } @@ -139,29 +124,57 @@ private: const int64_t _startTime; int _listenPort; - Node *const _node; + Node* const _node; std::string _ztPath; std::string _path; Identity _signingId; std::string _signingIdAddressString; - NetworkController::Sender *_sender; + NetworkController::Sender* _sender; DBMirrorSet _db; - BlockingQueue< _RQEntry * > _queue; + BlockingQueue<_RQEntry*> _queue; std::vector _threads; std::mutex _threads_l; - std::unordered_map< _MemberStatusKey,_MemberStatus,_MemberStatusHash > _memberStatus; + std::unordered_map<_MemberStatusKey, _MemberStatus, _MemberStatusHash> _memberStatus; std::mutex _memberStatus_l; - std::set< std::pair > _expiringSoon; + std::set > _expiringSoon; std::mutex _expiringSoon_l; - RedisConfig *_rc; + RedisConfig* _rc; std::string _ssoRedirectURL; + + bool _ssoExpiryRunning; + std::thread _ssoExpiry; + +#ifdef CENTRAL_CONTROLLER_REQUEST_BENCHMARK + prometheus::simpleapi::benchmark_family_t _member_status_lookup; + prometheus::simpleapi::counter_family_t _member_status_lookup_count; + 
prometheus::simpleapi::benchmark_family_t _node_is_online; + prometheus::simpleapi::counter_family_t _node_is_online_count; + prometheus::simpleapi::benchmark_family_t _get_and_init_member; + prometheus::simpleapi::counter_family_t _get_and_init_member_count; + prometheus::simpleapi::benchmark_family_t _have_identity; + prometheus::simpleapi::counter_family_t _have_identity_count; + prometheus::simpleapi::benchmark_family_t _determine_auth; + prometheus::simpleapi::counter_family_t _determine_auth_count; + prometheus::simpleapi::benchmark_family_t _sso_check; + prometheus::simpleapi::counter_family_t _sso_check_count; + prometheus::simpleapi::benchmark_family_t _auth_check; + prometheus::simpleapi::counter_family_t _auth_check_count; + prometheus::simpleapi::benchmark_family_t _json_schlep; + prometheus::simpleapi::counter_family_t _json_schlep_count; + prometheus::simpleapi::benchmark_family_t _issue_certificate; + prometheus::simpleapi::counter_family_t _issue_certificate_count; + prometheus::simpleapi::benchmark_family_t _save_member; + prometheus::simpleapi::counter_family_t _save_member_count; + prometheus::simpleapi::benchmark_family_t _send_netconf; + prometheus::simpleapi::counter_family_t _send_netconf_count; +#endif }; -} // namespace ZeroTier +} // namespace ZeroTier #endif diff --git a/controller/FileDB.cpp b/controller/FileDB.cpp index d454e93e1..6472022e2 100644 --- a/controller/FileDB.cpp +++ b/controller/FileDB.cpp @@ -4,7 +4,7 @@ * Use of this software is governed by the Business Source License included * in the LICENSE.TXT file in the project's root directory. * - * Change Date: 2025-01-01 + * Change Date: 2026-01-01 * * On the date above, in accordance with the Business Source License, use * of this software will be governed by version 2.0 of the Apache License. 
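Note on the CENTRAL_CONTROLLER_REQUEST_BENCHMARK members declared above: _request() pairs each benchmark_family_t with a counter_family_t and labels both with the handling thread. Below is a usage-only C++ sketch of that pattern, assuming the prometheus-cpp-lite simpleapi header (include path assumed) and family objects constructed elsewhere; the function itself is illustrative, not part of the patch:

    #include <prometheus/simpleapi.h>   // prometheus-cpp-lite (assumed include path)
    #include <sstream>
    #include <string>
    #include <thread>

    void timedSection(prometheus::simpleapi::benchmark_family_t& bench, prometheus::simpleapi::counter_family_t& count)
    {
        // Label samples with the handling thread, exactly as _request() does.
        std::stringstream ss;
        ss << std::this_thread::get_id();
        const std::string threadID = ss.str();

        auto b = bench.Add({ { "thread", threadID } });   // per-thread timer for this code section
        auto c = count.Add({ { "thread", threadID } });   // per-thread invocation counter
        c++;
        b.start();
        // ... the section being measured ...
        b.stop();
    }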
@@ -13,49 +13,57 @@ #include "FileDB.hpp" -namespace ZeroTier -{ +#include "../node/Metrics.hpp" +#include "opentelemetry/trace/provider.h" -FileDB::FileDB(const char *path) : - DB(), - _path(path), - _networksPath(_path + ZT_PATH_SEPARATOR_S + "network"), - _tracePath(_path + ZT_PATH_SEPARATOR_S + "trace"), - _running(true) +namespace ZeroTier { + +FileDB::FileDB(const char* path) : DB(), _path(path), _networksPath(_path + ZT_PATH_SEPARATOR_S + "network"), _tracePath(_path + ZT_PATH_SEPARATOR_S + "trace"), _running(true) { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("filedb"); + auto span = tracer->StartSpan("filedb::FileDB"); + auto scope = tracer->WithActiveSpan(span); + OSUtils::mkdir(_path.c_str()); - OSUtils::lockDownFile(_path.c_str(),true); + OSUtils::lockDownFile(_path.c_str(), true); OSUtils::mkdir(_networksPath.c_str()); OSUtils::mkdir(_tracePath.c_str()); - std::vector networks(OSUtils::listDirectory(_networksPath.c_str(),false)); + std::vector networks(OSUtils::listDirectory(_networksPath.c_str(), false)); std::string buf; - for(auto n=networks.begin();n!=networks.end();++n) { + for (auto n = networks.begin(); n != networks.end(); ++n) { buf.clear(); - if ((n->length() == 21)&&(OSUtils::readFile((_networksPath + ZT_PATH_SEPARATOR_S + *n).c_str(),buf))) { + if ((n->length() == 21) && (OSUtils::readFile((_networksPath + ZT_PATH_SEPARATOR_S + *n).c_str(), buf))) { try { nlohmann::json network(OSUtils::jsonParse(buf)); const std::string nwids = network["id"]; if (nwids.length() == 16) { nlohmann::json nullJson; - _networkChanged(nullJson,network,false); + _networkChanged(nullJson, network, false); + Metrics::network_count++; std::string membersPath(_networksPath + ZT_PATH_SEPARATOR_S + nwids + ZT_PATH_SEPARATOR_S "member"); - std::vector members(OSUtils::listDirectory(membersPath.c_str(),false)); - for(auto m=members.begin();m!=members.end();++m) { + std::vector members(OSUtils::listDirectory(membersPath.c_str(), false)); + for (auto m = members.begin(); m != members.end(); ++m) { buf.clear(); - if ((m->length() == 15)&&(OSUtils::readFile((membersPath + ZT_PATH_SEPARATOR_S + *m).c_str(),buf))) { + if ((m->length() == 15) && (OSUtils::readFile((membersPath + ZT_PATH_SEPARATOR_S + *m).c_str(), buf))) { try { nlohmann::json member(OSUtils::jsonParse(buf)); const std::string addrs = member["id"]; if (addrs.length() == 10) { nlohmann::json nullJson2; - _memberChanged(nullJson2,member,false); + _memberChanged(nullJson2, member, false); + Metrics::member_count++; } - } catch ( ... ) {} + } + catch (...) { + } } } } - } catch ( ... ) {} + } + catch (...) { + } } } } @@ -67,95 +75,135 @@ FileDB::~FileDB() _running = false; _online_l.unlock(); _onlineUpdateThread.join(); - } catch ( ... ) {} + } + catch (...) 
{ + } } -bool FileDB::waitForReady() { return true; } -bool FileDB::isReady() { return true; } - -bool FileDB::save(nlohmann::json &record,bool notifyListeners) +bool FileDB::waitForReady() { - char p1[4096],p2[4096],pb[4096]; + return true; +} +bool FileDB::isReady() +{ + return true; +} + +bool FileDB::save(nlohmann::json& record, bool notifyListeners) +{ + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("filedb"); + auto span = tracer->StartSpan("filedb::save"); + auto scope = tracer->WithActiveSpan(span); + + char p1[4096], p2[4096], pb[4096]; bool modified = false; try { const std::string objtype = record["objtype"]; if (objtype == "network") { + auto span = tracer->StartSpan("filedb::save::network"); + auto scope = tracer->WithActiveSpan(span); - const uint64_t nwid = OSUtils::jsonIntHex(record["id"],0ULL); + const uint64_t nwid = OSUtils::jsonIntHex(record["id"], 0ULL); if (nwid) { nlohmann::json old; - get(nwid,old); - if ((!old.is_object())||(!_compareRecords(old,record))) { - record["revision"] = OSUtils::jsonInt(record["revision"],0ULL) + 1ULL; - OSUtils::ztsnprintf(p1,sizeof(p1),"%s" ZT_PATH_SEPARATOR_S "%.16llx.json",_networksPath.c_str(),nwid); - if (!OSUtils::writeFile(p1,OSUtils::jsonDump(record,-1))) - fprintf(stderr,"WARNING: controller unable to write to path: %s" ZT_EOL_S,p1); - _networkChanged(old,record,notifyListeners); + get(nwid, old); + if ((! old.is_object()) || (! _compareRecords(old, record))) { + record["revision"] = OSUtils::jsonInt(record["revision"], 0ULL) + 1ULL; + OSUtils::ztsnprintf(p1, sizeof(p1), "%s" ZT_PATH_SEPARATOR_S "%.16llx.json", _networksPath.c_str(), nwid); + if (! OSUtils::writeFile(p1, OSUtils::jsonDump(record, -1))) { + fprintf(stderr, "WARNING: controller unable to write to path: %s" ZT_EOL_S, p1); + } + _networkChanged(old, record, notifyListeners); modified = true; } } + } + else if (objtype == "member") { + auto span = tracer->StartSpan("filedb::save::member"); + auto scope = tracer->WithActiveSpan(span); - } else if (objtype == "member") { - - const uint64_t id = OSUtils::jsonIntHex(record["id"],0ULL); - const uint64_t nwid = OSUtils::jsonIntHex(record["nwid"],0ULL); - if ((id)&&(nwid)) { - nlohmann::json network,old; - get(nwid,network,id,old); - if ((!old.is_object())||(!_compareRecords(old,record))) { - record["revision"] = OSUtils::jsonInt(record["revision"],0ULL) + 1ULL; - OSUtils::ztsnprintf(pb,sizeof(pb),"%s" ZT_PATH_SEPARATOR_S "%.16llx" ZT_PATH_SEPARATOR_S "member",_networksPath.c_str(),(unsigned long long)nwid); - OSUtils::ztsnprintf(p1,sizeof(p1),"%s" ZT_PATH_SEPARATOR_S "%.10llx.json",pb,(unsigned long long)id); - if (!OSUtils::writeFile(p1,OSUtils::jsonDump(record,-1))) { - OSUtils::ztsnprintf(p2,sizeof(p2),"%s" ZT_PATH_SEPARATOR_S "%.16llx",_networksPath.c_str(),(unsigned long long)nwid); + const uint64_t id = OSUtils::jsonIntHex(record["id"], 0ULL); + const uint64_t nwid = OSUtils::jsonIntHex(record["nwid"], 0ULL); + if ((id) && (nwid)) { + nlohmann::json network, old; + get(nwid, network, id, old); + if ((! old.is_object()) || (! _compareRecords(old, record))) { + record["revision"] = OSUtils::jsonInt(record["revision"], 0ULL) + 1ULL; + OSUtils::ztsnprintf(pb, sizeof(pb), "%s" ZT_PATH_SEPARATOR_S "%.16llx" ZT_PATH_SEPARATOR_S "member", _networksPath.c_str(), (unsigned long long)nwid); + OSUtils::ztsnprintf(p1, sizeof(p1), "%s" ZT_PATH_SEPARATOR_S "%.10llx.json", pb, (unsigned long long)id); + if (! 
OSUtils::writeFile(p1, OSUtils::jsonDump(record, -1))) { + OSUtils::ztsnprintf(p2, sizeof(p2), "%s" ZT_PATH_SEPARATOR_S "%.16llx", _networksPath.c_str(), (unsigned long long)nwid); OSUtils::mkdir(p2); OSUtils::mkdir(pb); - if (!OSUtils::writeFile(p1,OSUtils::jsonDump(record,-1))) - fprintf(stderr,"WARNING: controller unable to write to path: %s" ZT_EOL_S,p1); + if (! OSUtils::writeFile(p1, OSUtils::jsonDump(record, -1))) { + fprintf(stderr, "WARNING: controller unable to write to path: %s" ZT_EOL_S, p1); + } } - _memberChanged(old,record,notifyListeners); + _memberChanged(old, record, notifyListeners); modified = true; } } - } - } catch ( ... ) {} // drop invalid records missing fields + } + catch (...) { + } // drop invalid records missing fields return modified; } void FileDB::eraseNetwork(const uint64_t networkId) { - nlohmann::json network,nullJson; - get(networkId,network); + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("filedb"); + auto span = tracer->StartSpan("filedb::eraseNetwork"); + auto scope = tracer->WithActiveSpan(span); + + nlohmann::json network, nullJson; + get(networkId, network); char p[16384]; - OSUtils::ztsnprintf(p,sizeof(p),"%s" ZT_PATH_SEPARATOR_S "%.16llx.json",_networksPath.c_str(),networkId); + OSUtils::ztsnprintf(p, sizeof(p), "%s" ZT_PATH_SEPARATOR_S "%.16llx.json", _networksPath.c_str(), networkId); OSUtils::rm(p); - OSUtils::ztsnprintf(p,sizeof(p),"%s" ZT_PATH_SEPARATOR_S "%.16llx" ZT_PATH_SEPARATOR_S "member",_networksPath.c_str(),(unsigned long long)networkId); + OSUtils::ztsnprintf(p, sizeof(p), "%s" ZT_PATH_SEPARATOR_S "%.16llx", _networksPath.c_str(), (unsigned long long)networkId); OSUtils::rmDashRf(p); - _networkChanged(network,nullJson,true); + _networkChanged(network, nullJson, true); std::lock_guard l(this->_online_l); this->_online.erase(networkId); } -void FileDB::eraseMember(const uint64_t networkId,const uint64_t memberId) +void FileDB::eraseMember(const uint64_t networkId, const uint64_t memberId) { - nlohmann::json network,member,nullJson; - get(networkId,network,memberId,member); + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("filedb"); + auto span = tracer->StartSpan("filedb::eraseMember"); + auto scope = tracer->WithActiveSpan(span); + + nlohmann::json network, member, nullJson; + get(networkId, network, memberId, member); char p[4096]; - OSUtils::ztsnprintf(p,sizeof(p),"%s" ZT_PATH_SEPARATOR_S "%.16llx" ZT_PATH_SEPARATOR_S "member" ZT_PATH_SEPARATOR_S "%.10llx.json",_networksPath.c_str(),networkId,memberId); + OSUtils::ztsnprintf(p, sizeof(p), "%s" ZT_PATH_SEPARATOR_S "%.16llx" ZT_PATH_SEPARATOR_S "member" ZT_PATH_SEPARATOR_S "%.10llx.json", _networksPath.c_str(), networkId, memberId); OSUtils::rm(p); - _memberChanged(member,nullJson,true); + _memberChanged(member, nullJson, true); std::lock_guard l(this->_online_l); this->_online[networkId].erase(memberId); } -void FileDB::nodeIsOnline(const uint64_t networkId,const uint64_t memberId,const InetAddress &physicalAddress) +void FileDB::nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress, const char* osArch) { - char mid[32],atmp[64]; - OSUtils::ztsnprintf(mid,sizeof(mid),"%.10llx",(unsigned long long)memberId); + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("filedb"); + auto span = tracer->StartSpan("filedb::nodeIsOnline"); + auto scope = tracer->WithActiveSpan(span); 
+ + char mid[32], atmp[64]; + OSUtils::ztsnprintf(mid, sizeof(mid), "%.10llx", (unsigned long long)memberId); physicalAddress.toString(atmp); std::lock_guard l(this->_online_l); this->_online[networkId][memberId][OSUtils::now()] = physicalAddress; } -} // namespace ZeroTier +void FileDB::nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress) +{ + this->nodeIsOnline(networkId, memberId, physicalAddress, "unknown/unknown"); +} + +} // namespace ZeroTier diff --git a/controller/FileDB.hpp b/controller/FileDB.hpp index 545d26e3d..e10753223 100644 --- a/controller/FileDB.hpp +++ b/controller/FileDB.hpp @@ -4,7 +4,7 @@ * Use of this software is governed by the Business Source License included * in the LICENSE.TXT file in the project's root directory. * - * Change Date: 2025-01-01 + * Change Date: 2026-01-01 * * On the date above, in accordance with the Business Source License, use * of this software will be governed by version 2.0 of the Apache License. @@ -16,32 +16,31 @@ #include "DB.hpp" -namespace ZeroTier -{ +namespace ZeroTier { -class FileDB : public DB -{ -public: - FileDB(const char *path); +class FileDB : public DB { + public: + FileDB(const char* path); virtual ~FileDB(); virtual bool waitForReady(); virtual bool isReady(); - virtual bool save(nlohmann::json &record,bool notifyListeners); + virtual bool save(nlohmann::json& record, bool notifyListeners); virtual void eraseNetwork(const uint64_t networkId); - virtual void eraseMember(const uint64_t networkId,const uint64_t memberId); - virtual void nodeIsOnline(const uint64_t networkId,const uint64_t memberId,const InetAddress &physicalAddress); + virtual void eraseMember(const uint64_t networkId, const uint64_t memberId); + virtual void nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress); + virtual void nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress, const char* osArch); -protected: + protected: std::string _path; std::string _networksPath; std::string _tracePath; std::thread _onlineUpdateThread; - std::map< uint64_t,std::map > > _online; + std::map > > _online; std::mutex _online_l; bool _running; }; -} // namespace ZeroTier +} // namespace ZeroTier #endif diff --git a/controller/LFDB.cpp b/controller/LFDB.cpp index b935ecf5d..77fa8ffe1 100644 --- a/controller/LFDB.cpp +++ b/controller/LFDB.cpp @@ -4,7 +4,7 @@ * Use of this software is governed by the Business Source License included * in the LICENSE.TXT file in the project's root directory. * - * Change Date: 2025-01-01 + * Change Date: 2026-01-01 * * On the date above, in accordance with the Business Source License, use * of this software will be governed by version 2.0 of the Apache License. @@ -13,51 +13,52 @@ #include "LFDB.hpp" -#include +#include "../ext/cpp-httplib/httplib.h" +#include "../osdep/OSUtils.hpp" + #include #include #include +#include -#include "../osdep/OSUtils.hpp" -#include "../ext/cpp-httplib/httplib.h" +namespace ZeroTier { -namespace ZeroTier -{ - -LFDB::LFDB(const Identity &myId,const char *path,const char *lfOwnerPrivate,const char *lfOwnerPublic,const char *lfNodeHost,int lfNodePort,bool storeOnlineState) : - DB(), - _myId(myId), - _lfOwnerPrivate((lfOwnerPrivate) ? lfOwnerPrivate : ""), - _lfOwnerPublic((lfOwnerPublic) ? lfOwnerPublic : ""), - _lfNodeHost((lfNodeHost) ? lfNodeHost : "127.0.0.1"), - _lfNodePort(((lfNodePort > 0)&&(lfNodePort < 65536)) ? 
lfNodePort : 9980), - _running(true), - _ready(false), - _storeOnlineState(storeOnlineState) +LFDB::LFDB(const Identity& myId, const char* path, const char* lfOwnerPrivate, const char* lfOwnerPublic, const char* lfNodeHost, int lfNodePort, bool storeOnlineState) + : DB() + , _myId(myId) + , _lfOwnerPrivate((lfOwnerPrivate) ? lfOwnerPrivate : "") + , _lfOwnerPublic((lfOwnerPublic) ? lfOwnerPublic : "") + , _lfNodeHost((lfNodeHost) ? lfNodeHost : "127.0.0.1") + , _lfNodePort(((lfNodePort > 0) && (lfNodePort < 65536)) ? lfNodePort : 9980) + , _running(true) + , _ready(false) + , _storeOnlineState(storeOnlineState) { _syncThread = std::thread([this]() { char controllerAddress[24]; const uint64_t controllerAddressInt = _myId.address().toInt(); _myId.address().toString(controllerAddress); - std::string networksSelectorName("com.zerotier.controller.lfdb:"); networksSelectorName.append(controllerAddress); networksSelectorName.append("/network"); + std::string networksSelectorName("com.zerotier.controller.lfdb:"); + networksSelectorName.append(controllerAddress); + networksSelectorName.append("/network"); // LF record masking key is the first 32 bytes of SHA512(controller private key) in hex, // hiding record values from anything but the controller or someone who has its key. uint8_t sha512pk[64]; _myId.sha512PrivateKey(sha512pk); - char maskingKey [128]; - Utils::hex(sha512pk,32,maskingKey); + char maskingKey[128]; + Utils::hex(sha512pk, 32, maskingKey); - httplib::Client htcli(_lfNodeHost.c_str(),_lfNodePort); + httplib::Client htcli(_lfNodeHost.c_str(), _lfNodePort); int64_t timeRangeStart = 0; while (_running.load()) { { std::lock_guard sl(_state_l); - for(auto ns=_state.begin();ns!=_state.end();++ns) { + for (auto ns = _state.begin(); ns != _state.end(); ++ns) { if (ns->second.dirty) { nlohmann::json network; - if (get(ns->first,network)) { - nlohmann::json newrec,selector0; + if (get(ns->first, network)) { + nlohmann::json newrec, selector0; selector0["Name"] = networksSelectorName; selector0["Ordinal"] = ns->first; newrec["Selectors"].push_back(selector0); @@ -66,30 +67,34 @@ LFDB::LFDB(const Identity &myId,const char *path,const char *lfOwnerPrivate,cons newrec["MaskingKey"] = maskingKey; newrec["PulseIfUnchanged"] = true; try { - auto resp = htcli.Post("/makerecord",newrec.dump(),"application/json"); + auto resp = htcli.Post("/makerecord", newrec.dump(), "application/json"); if (resp) { if (resp->status == 200) { ns->second.dirty = false; - //printf("SET network %.16llx %s\n",ns->first,resp->body.c_str()); - } else { - fprintf(stderr,"ERROR: LFDB: %d from node (create/update network): %s" ZT_EOL_S,resp->status,resp->body.c_str()); + // printf("SET network %.16llx %s\n",ns->first,resp->body.c_str()); + } + else { + fprintf(stderr, "ERROR: LFDB: %d from node (create/update network): %s" ZT_EOL_S, resp->status, resp->body.c_str()); } - } else { - fprintf(stderr,"ERROR: LFDB: node is offline" ZT_EOL_S); } - } catch (std::exception &e) { - fprintf(stderr,"ERROR: LFDB: unexpected exception querying node (create/update network): %s" ZT_EOL_S,e.what()); - } catch ( ... ) { - fprintf(stderr,"ERROR: LFDB: unexpected exception querying node (create/update network): unknown exception" ZT_EOL_S); + else { + fprintf(stderr, "ERROR: LFDB: node is offline" ZT_EOL_S); + } + } + catch (std::exception& e) { + fprintf(stderr, "ERROR: LFDB: unexpected exception querying node (create/update network): %s" ZT_EOL_S, e.what()); + } + catch (...) 
{ + fprintf(stderr, "ERROR: LFDB: unexpected exception querying node (create/update network): unknown exception" ZT_EOL_S); } } } - for(auto ms=ns->second.members.begin();ms!=ns->second.members.end();++ms) { - if ((_storeOnlineState)&&(ms->second.lastOnlineDirty)&&(ms->second.lastOnlineAddress)) { - nlohmann::json newrec,selector0,selector1,selectors,ip; - char tmp[1024],tmp2[128]; - OSUtils::ztsnprintf(tmp,sizeof(tmp),"com.zerotier.controller.lfdb:%s/network/%.16llx/online",controllerAddress,(unsigned long long)ns->first); + for (auto ms = ns->second.members.begin(); ms != ns->second.members.end(); ++ms) { + if ((_storeOnlineState) && (ms->second.lastOnlineDirty) && (ms->second.lastOnlineAddress)) { + nlohmann::json newrec, selector0, selector1, selectors, ip; + char tmp[1024], tmp2[128]; + OSUtils::ztsnprintf(tmp, sizeof(tmp), "com.zerotier.controller.lfdb:%s/network/%.16llx/online", controllerAddress, (unsigned long long)ns->first); ms->second.lastOnlineAddress.toIpString(tmp2); selector0["Name"] = tmp; selector0["Ordinal"] = ms->first; @@ -98,18 +103,18 @@ LFDB::LFDB(const Identity &myId,const char *path,const char *lfOwnerPrivate,cons selectors.push_back(selector0); selectors.push_back(selector1); newrec["Selectors"] = selectors; - const uint8_t *const rawip = (const uint8_t *)ms->second.lastOnlineAddress.rawIpData(); - switch(ms->second.lastOnlineAddress.ss_family) { + const uint8_t* const rawip = (const uint8_t*)ms->second.lastOnlineAddress.rawIpData(); + switch (ms->second.lastOnlineAddress.ss_family) { case AF_INET: - for(int j=0;j<4;++j) + for (int j = 0; j < 4; ++j) ip.push_back((unsigned int)rawip[j]); break; case AF_INET6: - for(int j=0;j<16;++j) + for (int j = 0; j < 16; ++j) ip.push_back((unsigned int)rawip[j]); break; default: - ip = tmp2; // should never happen since only IP transport is currently supported + ip = tmp2; // should never happen since only IP transport is currently supported break; } newrec["Value"] = ip; @@ -118,28 +123,32 @@ LFDB::LFDB(const Identity &myId,const char *path,const char *lfOwnerPrivate,cons newrec["Timestamp"] = ms->second.lastOnlineTime; newrec["PulseIfUnchanged"] = true; try { - auto resp = htcli.Post("/makerecord",newrec.dump(),"application/json"); + auto resp = htcli.Post("/makerecord", newrec.dump(), "application/json"); if (resp) { if (resp->status == 200) { ms->second.lastOnlineDirty = false; - //printf("SET member online %.16llx %.10llx %s\n",ns->first,ms->first,resp->body.c_str()); - } else { - fprintf(stderr,"ERROR: LFDB: %d from node (create/update member online status): %s" ZT_EOL_S,resp->status,resp->body.c_str()); + // printf("SET member online %.16llx %.10llx %s\n",ns->first,ms->first,resp->body.c_str()); + } + else { + fprintf(stderr, "ERROR: LFDB: %d from node (create/update member online status): %s" ZT_EOL_S, resp->status, resp->body.c_str()); } - } else { - fprintf(stderr,"ERROR: LFDB: node is offline" ZT_EOL_S); } - } catch (std::exception &e) { - fprintf(stderr,"ERROR: LFDB: unexpected exception querying node (create/update member online status): %s" ZT_EOL_S,e.what()); - } catch ( ... ) { - fprintf(stderr,"ERROR: LFDB: unexpected exception querying node (create/update member online status): unknown exception" ZT_EOL_S); + else { + fprintf(stderr, "ERROR: LFDB: node is offline" ZT_EOL_S); + } + } + catch (std::exception& e) { + fprintf(stderr, "ERROR: LFDB: unexpected exception querying node (create/update member online status): %s" ZT_EOL_S, e.what()); + } + catch (...) 
{ + fprintf(stderr, "ERROR: LFDB: unexpected exception querying node (create/update member online status): unknown exception" ZT_EOL_S); } } if (ms->second.dirty) { - nlohmann::json network,member; - if (get(ns->first,network,ms->first,member)) { - nlohmann::json newrec,selector0,selector1,selectors; + nlohmann::json network, member; + if (get(ns->first, network, ms->first, member)) { + nlohmann::json newrec, selector0, selector1, selectors; selector0["Name"] = networksSelectorName; selector0["Ordinal"] = ns->first; selector1["Name"] = "member"; @@ -152,21 +161,25 @@ LFDB::LFDB(const Identity &myId,const char *path,const char *lfOwnerPrivate,cons newrec["MaskingKey"] = maskingKey; newrec["PulseIfUnchanged"] = true; try { - auto resp = htcli.Post("/makerecord",newrec.dump(),"application/json"); + auto resp = htcli.Post("/makerecord", newrec.dump(), "application/json"); if (resp) { if (resp->status == 200) { ms->second.dirty = false; - //printf("SET member %.16llx %.10llx %s\n",ns->first,ms->first,resp->body.c_str()); - } else { - fprintf(stderr,"ERROR: LFDB: %d from node (create/update member): %s" ZT_EOL_S,resp->status,resp->body.c_str()); + // printf("SET member %.16llx %.10llx %s\n",ns->first,ms->first,resp->body.c_str()); + } + else { + fprintf(stderr, "ERROR: LFDB: %d from node (create/update member): %s" ZT_EOL_S, resp->status, resp->body.c_str()); } - } else { - fprintf(stderr,"ERROR: LFDB: node is offline" ZT_EOL_S); } - } catch (std::exception &e) { - fprintf(stderr,"ERROR: LFDB: unexpected exception querying node (create/update member): %s" ZT_EOL_S,e.what()); - } catch ( ... ) { - fprintf(stderr,"ERROR: LFDB: unexpected exception querying node (create/update member): unknown exception" ZT_EOL_S); + else { + fprintf(stderr, "ERROR: LFDB: node is offline" ZT_EOL_S); + } + } + catch (std::exception& e) { + fprintf(stderr, "ERROR: LFDB: unexpected exception querying node (create/update member): %s" ZT_EOL_S, e.what()); + } + catch (...) 
{ + fprintf(stderr, "ERROR: LFDB: unexpected exception querying node (create/update member): unknown exception" ZT_EOL_S); } } } @@ -176,143 +189,161 @@ LFDB::LFDB(const Identity &myId,const char *path,const char *lfOwnerPrivate,cons try { std::ostringstream query; - query << - "{" - "\"Ranges\":[{" - "\"Name\":\"" << networksSelectorName << "\"," - "\"Range\":[0,18446744073709551615]" - "}]," - "\"TimeRange\":[" << timeRangeStart << ",9223372036854775807]," - "\"MaskingKey\":\"" << maskingKey << "\"," - "\"Owners\":[\"" << _lfOwnerPublic << "\"]" - "}"; - auto resp = htcli.Post("/query",query.str(),"application/json"); + query << "{" + "\"Ranges\":[{" + "\"Name\":\"" + << networksSelectorName + << "\"," + "\"Range\":[0,18446744073709551615]" + "}]," + "\"TimeRange\":[" + << timeRangeStart + << ",9223372036854775807]," + "\"MaskingKey\":\"" + << maskingKey + << "\"," + "\"Owners\":[\"" + << _lfOwnerPublic + << "\"]" + "}"; + auto resp = htcli.Post("/query", query.str(), "application/json"); if (resp) { if (resp->status == 200) { nlohmann::json results(OSUtils::jsonParse(resp->body)); - if ((results.is_array())&&(!results.empty())) { - for(std::size_t ri=0;ri> 24) == controllerAddressInt) { // sanity check + if ((id >> 24) == controllerAddressInt) { // sanity check nlohmann::json oldNetwork; - if ((timeRangeStart > 0)&&(get(id,oldNetwork))) { + if ((timeRangeStart > 0) && (get(id, oldNetwork))) { const uint64_t revision = network["revision"]; const uint64_t prevRevision = oldNetwork["revision"]; if (prevRevision < revision) { - _networkChanged(oldNetwork,network,timeRangeStart > 0); + _networkChanged(oldNetwork, network, timeRangeStart > 0); } - } else { - nlohmann::json nullJson; - _networkChanged(nullJson,network,timeRangeStart > 0); } - + else { + nlohmann::json nullJson; + _networkChanged(nullJson, network, timeRangeStart > 0); + } } } } } - } } } - } else { - fprintf(stderr,"ERROR: LFDB: %d from node (check for network updates): %s" ZT_EOL_S,resp->status,resp->body.c_str()); } - } else { - fprintf(stderr,"ERROR: LFDB: node is offline" ZT_EOL_S); + else { + fprintf(stderr, "ERROR: LFDB: %d from node (check for network updates): %s" ZT_EOL_S, resp->status, resp->body.c_str()); + } } - } catch (std::exception &e) { - fprintf(stderr,"ERROR: LFDB: unexpected exception querying node (check for network updates): %s" ZT_EOL_S,e.what()); - } catch ( ... ) { - fprintf(stderr,"ERROR: LFDB: unexpected exception querying node (check for network updates): unknown exception" ZT_EOL_S); + else { + fprintf(stderr, "ERROR: LFDB: node is offline" ZT_EOL_S); + } + } + catch (std::exception& e) { + fprintf(stderr, "ERROR: LFDB: unexpected exception querying node (check for network updates): %s" ZT_EOL_S, e.what()); + } + catch (...) 
{ + fprintf(stderr, "ERROR: LFDB: unexpected exception querying node (check for network updates): unknown exception" ZT_EOL_S); } try { std::ostringstream query; - query << - "{" - "\"Ranges\":[{" - "\"Name\":\"" << networksSelectorName << "\"," - "\"Range\":[0,18446744073709551615]" - "},{" - "\"Name\":\"member\"," - "\"Range\":[0,18446744073709551615]" - "}]," - "\"TimeRange\":[" << timeRangeStart << ",9223372036854775807]," - "\"MaskingKey\":\"" << maskingKey << "\"," - "\"Owners\":[\"" << _lfOwnerPublic << "\"]" - "}"; - auto resp = htcli.Post("/query",query.str(),"application/json"); + query << "{" + "\"Ranges\":[{" + "\"Name\":\"" + << networksSelectorName + << "\"," + "\"Range\":[0,18446744073709551615]" + "},{" + "\"Name\":\"member\"," + "\"Range\":[0,18446744073709551615]" + "}]," + "\"TimeRange\":[" + << timeRangeStart + << ",9223372036854775807]," + "\"MaskingKey\":\"" + << maskingKey + << "\"," + "\"Owners\":[\"" + << _lfOwnerPublic + << "\"]" + "}"; + auto resp = htcli.Post("/query", query.str(), "application/json"); if (resp) { if (resp->status == 200) { nlohmann::json results(OSUtils::jsonParse(resp->body)); - if ((results.is_array())&&(!results.empty())) { - for(std::size_t ri=0;ri> 24) == controllerAddressInt)) { // sanity check + if ((id) && ((nwid >> 24) == controllerAddressInt)) { // sanity check - nlohmann::json network,oldMember; - if ((timeRangeStart > 0)&&(get(nwid,network,id,oldMember))) { + nlohmann::json network, oldMember; + if ((timeRangeStart > 0) && (get(nwid, network, id, oldMember))) { const uint64_t revision = member["revision"]; const uint64_t prevRevision = oldMember["revision"]; if (prevRevision < revision) - _memberChanged(oldMember,member,timeRangeStart > 0); - } else if (hasNetwork(nwid)) { - nlohmann::json nullJson; - _memberChanged(nullJson,member,timeRangeStart > 0); + _memberChanged(oldMember, member, timeRangeStart > 0); + } + else if (hasNetwork(nwid)) { + nlohmann::json nullJson; + _memberChanged(nullJson, member, timeRangeStart > 0); } - } } } } - } } } - } else { - fprintf(stderr,"ERROR: LFDB: %d from node (check for member updates): %s" ZT_EOL_S,resp->status,resp->body.c_str()); } - } else { - fprintf(stderr,"ERROR: LFDB: node is offline" ZT_EOL_S); + else { + fprintf(stderr, "ERROR: LFDB: %d from node (check for member updates): %s" ZT_EOL_S, resp->status, resp->body.c_str()); + } } - } catch (std::exception &e) { - fprintf(stderr,"ERROR: LFDB: unexpected exception querying node (check for member updates): %s" ZT_EOL_S,e.what()); - } catch ( ... ) { - fprintf(stderr,"ERROR: LFDB: unexpected exception querying node (check for member updates): unknown exception" ZT_EOL_S); + else { + fprintf(stderr, "ERROR: LFDB: node is offline" ZT_EOL_S); + } + } + catch (std::exception& e) { + fprintf(stderr, "ERROR: LFDB: unexpected exception querying node (check for member updates): %s" ZT_EOL_S, e.what()); + } + catch (...) { + fprintf(stderr, "ERROR: LFDB: unexpected exception querying node (check for member updates): unknown exception" ZT_EOL_S); } - timeRangeStart = time(nullptr) - 120; // start next query 2m before now to avoid losing updates + timeRangeStart = time(nullptr) - 120; // start next query 2m before now to avoid losing updates _ready.store(true); - for(int k=0;k<4;++k) { // 2s delay between queries for remotely modified networks or members - if (!_running.load()) + for (int k = 0; k < 4; ++k) { // 2s delay between queries for remotely modified networks or members + if (! 
_running.load()) return; std::this_thread::sleep_for(std::chrono::milliseconds(500)); } @@ -328,7 +359,7 @@ LFDB::~LFDB() bool LFDB::waitForReady() { - while (!_ready.load()) { + while (! _ready.load()) { std::this_thread::sleep_for(std::chrono::milliseconds(500)); } return true; @@ -339,18 +370,18 @@ bool LFDB::isReady() return (_ready.load()); } -bool LFDB::save(nlohmann::json &record,bool notifyListeners) +bool LFDB::save(nlohmann::json& record, bool notifyListeners) { bool modified = false; const std::string objtype = record["objtype"]; if (objtype == "network") { - const uint64_t nwid = OSUtils::jsonIntHex(record["id"],0ULL); + const uint64_t nwid = OSUtils::jsonIntHex(record["id"], 0ULL); if (nwid) { nlohmann::json old; - get(nwid,old); - if ((!old.is_object())||(!_compareRecords(old,record))) { - record["revision"] = OSUtils::jsonInt(record["revision"],0ULL) + 1ULL; - _networkChanged(old,record,notifyListeners); + get(nwid, old); + if ((! old.is_object()) || (! _compareRecords(old, record))) { + record["revision"] = OSUtils::jsonInt(record["revision"], 0ULL) + 1ULL; + _networkChanged(old, record, notifyListeners); { std::lock_guard l(_state_l); _state[nwid].dirty = true; @@ -358,15 +389,16 @@ bool LFDB::save(nlohmann::json &record,bool notifyListeners) modified = true; } } - } else if (objtype == "member") { - const uint64_t nwid = OSUtils::jsonIntHex(record["nwid"],0ULL); - const uint64_t id = OSUtils::jsonIntHex(record["id"],0ULL); - if ((id)&&(nwid)) { - nlohmann::json network,old; - get(nwid,network,id,old); - if ((!old.is_object())||(!_compareRecords(old,record))) { - record["revision"] = OSUtils::jsonInt(record["revision"],0ULL) + 1ULL; - _memberChanged(old,record,notifyListeners); + } + else if (objtype == "member") { + const uint64_t nwid = OSUtils::jsonIntHex(record["nwid"], 0ULL); + const uint64_t id = OSUtils::jsonIntHex(record["id"], 0ULL); + if ((id) && (nwid)) { + nlohmann::json network, old; + get(nwid, network, id, old); + if ((! old.is_object()) || (! _compareRecords(old, record))) { + record["revision"] = OSUtils::jsonInt(record["revision"], 0ULL) + 1ULL; + _memberChanged(old, record, notifyListeners); { std::lock_guard l(_state_l); _state[nwid].members[id].dirty = true; @@ -383,12 +415,12 @@ void LFDB::eraseNetwork(const uint64_t networkId) // TODO } -void LFDB::eraseMember(const uint64_t networkId,const uint64_t memberId) +void LFDB::eraseMember(const uint64_t networkId, const uint64_t memberId) { // TODO } -void LFDB::nodeIsOnline(const uint64_t networkId,const uint64_t memberId,const InetAddress &physicalAddress) +void LFDB::nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress, const char* osArch) { std::lock_guard l(_state_l); auto nw = _state.find(networkId); @@ -403,4 +435,9 @@ void LFDB::nodeIsOnline(const uint64_t networkId,const uint64_t memberId,const I } } -} // namespace ZeroTier +void LFDB::nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress) +{ + this->nodeIsOnline(networkId, memberId, physicalAddress, "unknown/unknown"); +} + +} // namespace ZeroTier diff --git a/controller/LFDB.hpp b/controller/LFDB.hpp index 9f654bfb7..3632e483f 100644 --- a/controller/LFDB.hpp +++ b/controller/LFDB.hpp @@ -4,7 +4,7 @@ * Use of this software is governed by the Business Source License included * in the LICENSE.TXT file in the project's root directory. 
* - * Change Date: 2025-01-01 + * Change Date: 2026-01-01 * * On the date above, in accordance with the Business Source License, use * of this software will be governed by version 2.0 of the Apache License. @@ -16,19 +16,18 @@ #include "DB.hpp" +#include #include #include #include -#include namespace ZeroTier { /** * DB implementation for controller that stores data in LF */ -class LFDB : public DB -{ -public: +class LFDB : public DB { + public: /** * @param myId This controller's identity * @param path Base path for ZeroTier node itself @@ -38,44 +37,41 @@ public: * @param lfNodePort LF node http (not https) port * @param storeOnlineState If true, store online/offline state and IP info in LF (a lot of data, only for private networks!) */ - LFDB(const Identity &myId,const char *path,const char *lfOwnerPrivate,const char *lfOwnerPublic,const char *lfNodeHost,int lfNodePort,bool storeOnlineState); + LFDB(const Identity& myId, const char* path, const char* lfOwnerPrivate, const char* lfOwnerPublic, const char* lfNodeHost, int lfNodePort, bool storeOnlineState); virtual ~LFDB(); virtual bool waitForReady(); virtual bool isReady(); - virtual bool save(nlohmann::json &record,bool notifyListeners); + virtual bool save(nlohmann::json& record, bool notifyListeners); virtual void eraseNetwork(const uint64_t networkId); - virtual void eraseMember(const uint64_t networkId,const uint64_t memberId); - virtual void nodeIsOnline(const uint64_t networkId,const uint64_t memberId,const InetAddress &physicalAddress); + virtual void eraseMember(const uint64_t networkId, const uint64_t memberId); + virtual void nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress); + virtual void nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress& physicalAddress, const char* osArch); -protected: + protected: const Identity _myId; - std::string _lfOwnerPrivate,_lfOwnerPublic; + std::string _lfOwnerPrivate, _lfOwnerPublic; std::string _lfNodeHost; int _lfNodePort; - struct _MemberState - { - _MemberState() : - lastOnlineAddress(), - lastOnlineTime(0), - dirty(false), - lastOnlineDirty(false) {} + struct _MemberState { + _MemberState() : lastOnlineAddress(), lastOnlineTime(0), dirty(false), lastOnlineDirty(false) + { + } InetAddress lastOnlineAddress; int64_t lastOnlineTime; bool dirty; bool lastOnlineDirty; }; - struct _NetworkState - { - _NetworkState() : - members(), - dirty(false) {} - std::unordered_map members; + struct _NetworkState { + _NetworkState() : members(), dirty(false) + { + } + std::unordered_map members; bool dirty; }; - std::unordered_map _state; + std::unordered_map _state; std::mutex _state_l; std::atomic_bool _running; @@ -84,6 +80,6 @@ protected: bool _storeOnlineState; }; -} // namespace ZeroTier +} // namespace ZeroTier #endif diff --git a/controller/PostgreSQL.cpp b/controller/PostgreSQL.cpp index f83ebc9b9..3bc864601 100644 --- a/controller/PostgreSQL.cpp +++ b/controller/PostgreSQL.cpp @@ -1,1581 +1,11 @@ -/* - * Copyright (c)2019 ZeroTier, Inc. - * - * Use of this software is governed by the Business Source License included - * in the LICENSE.TXT file in the project's root directory. - * - * Change Date: 2025-01-01 - * - * On the date above, in accordance with the Business Source License, use - * of this software will be governed by version 2.0 of the Apache License. 
- */ -/****/ +#ifdef ZT_CONTROLLER_USE_LIBPQ #include "PostgreSQL.hpp" -#ifdef ZT_CONTROLLER_USE_LIBPQ +#include -#include "../node/Constants.hpp" -#include "../node/SHA512.hpp" -#include "EmbeddedNetworkController.hpp" -#include "../version.h" -#include "Redis.hpp" - -#include -#include -#include -#include -#include - - -// #define ZT_TRACE 1 - -using json = nlohmann::json; - -namespace { - -static const int DB_MINIMUM_VERSION = 20; - -static const char *_timestr() -{ - - time_t t = time(0); - char *ts = ctime(&t); - char *p = ts; - if (!p) - return ""; - while (*p) { - if (*p == '\n') { - *p = (char)0; - break; - } - ++p; - } - return ts; -} - -/* -std::string join(const std::vector &elements, const char * const separator) -{ - switch(elements.size()) { - case 0: - return ""; - case 1: - return elements[0]; - default: - std::ostringstream os; - std::copy(elements.begin(), elements.end()-1, std::ostream_iterator(os, separator)); - os << *elements.rbegin(); - return os.str(); - } -} -*/ - -std::vector split(std::string str, char delim){ - std::istringstream iss(str); - std::vector tokens; - std::string item; - while(std::getline(iss, item, delim)) { - tokens.push_back(item); - } - return tokens; -} - -std::string url_encode(const std::string &value) { - std::ostringstream escaped; - escaped.fill('0'); - escaped << std::hex; - - for (std::string::const_iterator i = value.begin(), n = value.end(); i != n; ++i) { - std::string::value_type c = (*i); - - // Keep alphanumeric and other accepted characters intact - if (isalnum(c) || c == '-' || c == '_' || c == '.' || c == '~') { - escaped << c; - continue; - } - - // Any other characters are percent-encoded - escaped << std::uppercase; - escaped << '%' << std::setw(2) << int((unsigned char) c); - escaped << std::nouppercase; - } - - return escaped.str(); -} - -} // anonymous namespace +using namespace nlohmann; using namespace ZeroTier; - -MemberNotificationReceiver::MemberNotificationReceiver(PostgreSQL *p, pqxx::connection &c, const std::string &channel) - : pqxx::notification_receiver(c, channel) - , _psql(p) -{ - fprintf(stderr, "initialize MemberNotificaitonReceiver\n"); -} - - -void MemberNotificationReceiver::operator() (const std::string &payload, int packend_pid) { - fprintf(stderr, "Member Notification received: %s\n", payload.c_str()); - json tmp(json::parse(payload)); - json &ov = tmp["old_val"]; - json &nv = tmp["new_val"]; - json oldConfig, newConfig; - if (ov.is_object()) oldConfig = ov; - if (nv.is_object()) newConfig = nv; - if (oldConfig.is_object() || newConfig.is_object()) { - _psql->_memberChanged(oldConfig,newConfig,(_psql->_ready>=2)); - fprintf(stderr, "payload sent\n"); - } -} - - -NetworkNotificationReceiver::NetworkNotificationReceiver(PostgreSQL *p, pqxx::connection &c, const std::string &channel) - : pqxx::notification_receiver(c, channel) - , _psql(p) -{ - fprintf(stderr, "initialize NetworkNotificationReceiver\n"); -} - -void NetworkNotificationReceiver::operator() (const std::string &payload, int packend_pid) { - fprintf(stderr, "Network Notificaiton received: %s\n", payload.c_str()); - json tmp(json::parse(payload)); - json &ov = tmp["old_val"]; - json &nv = tmp["new_val"]; - json oldConfig, newConfig; - if (ov.is_object()) oldConfig = ov; - if (nv.is_object()) newConfig = nv; - if (oldConfig.is_object() || newConfig.is_object()) { - _psql->_networkChanged(oldConfig,newConfig,(_psql->_ready>=2)); - fprintf(stderr, "payload sent\n"); - } -} - -using Attrs = std::vector>; -using Item = std::pair; -using ItemStream 
= std::vector; - -PostgreSQL::PostgreSQL(const Identity &myId, const char *path, int listenPort, RedisConfig *rc) - : DB() - , _pool() - , _myId(myId) - , _myAddress(myId.address()) - , _ready(0) - , _connected(1) - , _run(1) - , _waitNoticePrinted(false) - , _listenPort(listenPort) - , _rc(rc) - , _redis(NULL) - , _cluster(NULL) -{ - char myAddress[64]; - _myAddressStr = myId.address().toString(myAddress); - _connString = std::string(path); - auto f = std::make_shared(_connString); - _pool = std::make_shared >( - 15, 5, std::static_pointer_cast(f)); - - memset(_ssoPsk, 0, sizeof(_ssoPsk)); - char *const ssoPskHex = getenv("ZT_SSO_PSK"); -#ifdef ZT_TRACE - fprintf(stderr, "ZT_SSO_PSK: %s\n", ssoPskHex); -#endif - if (ssoPskHex) { - // SECURITY: note that ssoPskHex will always be null-terminated if libc acatually - // returns something non-NULL. If the hex encodes something shorter than 48 bytes, - // it will be padded at the end with zeroes. If longer, it'll be truncated. - Utils::unhex(ssoPskHex, _ssoPsk, sizeof(_ssoPsk)); - } - - auto c = _pool->borrow(); - pqxx::work txn{*c->c}; - - pqxx::row r{txn.exec1("SELECT version FROM ztc_database")}; - int dbVersion = r[0].as(); - txn.commit(); - - if (dbVersion < DB_MINIMUM_VERSION) { - fprintf(stderr, "Central database schema version too low. This controller version requires a minimum schema version of %d. Please upgrade your Central instance", DB_MINIMUM_VERSION); - exit(1); - } - _pool->unborrow(c); - - if (_rc != NULL) { - sw::redis::ConnectionOptions opts; - sw::redis::ConnectionPoolOptions poolOpts; - opts.host = _rc->hostname; - opts.port = _rc->port; - opts.password = _rc->password; - opts.db = 0; - poolOpts.size = 10; - if (_rc->clusterMode) { - fprintf(stderr, "Using Redis in Cluster Mode\n"); - _cluster = std::make_shared(opts, poolOpts); - } else { - fprintf(stderr, "Using Redis in Standalone Mode\n"); - _redis = std::make_shared(opts, poolOpts); - } - } - - _readyLock.lock(); - - fprintf(stderr, "[%s] NOTICE: %.10llx controller PostgreSQL waiting for initial data download..." 
ZT_EOL_S, ::_timestr(), (unsigned long long)_myAddress.toInt()); - _waitNoticePrinted = true; - - initializeNetworks(); - initializeMembers(); - - _heartbeatThread = std::thread(&PostgreSQL::heartbeat, this); - _membersDbWatcher = std::thread(&PostgreSQL::membersDbWatcher, this); - _networksDbWatcher = std::thread(&PostgreSQL::networksDbWatcher, this); - for (int i = 0; i < ZT_CENTRAL_CONTROLLER_COMMIT_THREADS; ++i) { - _commitThread[i] = std::thread(&PostgreSQL::commitThread, this); - } - _onlineNotificationThread = std::thread(&PostgreSQL::onlineNotificationThread, this); -} - -PostgreSQL::~PostgreSQL() -{ - _run = 0; - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - - _heartbeatThread.join(); - _membersDbWatcher.join(); - _networksDbWatcher.join(); - _commitQueue.stop(); - for (int i = 0; i < ZT_CENTRAL_CONTROLLER_COMMIT_THREADS; ++i) { - _commitThread[i].join(); - } - _onlineNotificationThread.join(); -} - - -bool PostgreSQL::waitForReady() -{ - while (_ready < 2) { - _readyLock.lock(); - _readyLock.unlock(); - } - return true; -} - -bool PostgreSQL::isReady() -{ - return ((_ready == 2)&&(_connected)); -} - -bool PostgreSQL::save(nlohmann::json &record,bool notifyListeners) -{ - bool modified = false; - try { - if (!record.is_object()) { - fprintf(stderr, "record is not an object?!?\n"); - return false; - } - const std::string objtype = record["objtype"]; - if (objtype == "network") { - //fprintf(stderr, "network save\n"); - const uint64_t nwid = OSUtils::jsonIntHex(record["id"],0ULL); - if (nwid) { - nlohmann::json old; - get(nwid,old); - if ((!old.is_object())||(!_compareRecords(old,record))) { - record["revision"] = OSUtils::jsonInt(record["revision"],0ULL) + 1ULL; - _commitQueue.post(std::pair(record,notifyListeners)); - modified = true; - } - } - } else if (objtype == "member") { - std::string networkId = record["nwid"]; - std::string memberId = record["id"]; - const uint64_t nwid = OSUtils::jsonIntHex(record["nwid"],0ULL); - const uint64_t id = OSUtils::jsonIntHex(record["id"],0ULL); - //fprintf(stderr, "member save %s-%s\n", networkId.c_str(), memberId.c_str()); - if ((id)&&(nwid)) { - nlohmann::json network,old; - get(nwid,network,id,old); - if ((!old.is_object())||(!_compareRecords(old,record))) { - //fprintf(stderr, "commit queue post\n"); - record["revision"] = OSUtils::jsonInt(record["revision"],0ULL) + 1ULL; - _commitQueue.post(std::pair(record,notifyListeners)); - modified = true; - } else { - //fprintf(stderr, "no change\n"); - } - } - } else { - fprintf(stderr, "uhh waaat\n"); - } - } catch (std::exception &e) { - fprintf(stderr, "Error on PostgreSQL::save: %s\n", e.what()); - } catch (...) 
{ - fprintf(stderr, "Unknown error on PostgreSQL::save\n"); - } - return modified; -} - -void PostgreSQL::eraseNetwork(const uint64_t networkId) -{ - fprintf(stderr, "PostgreSQL::eraseNetwork\n"); - char tmp2[24]; - waitForReady(); - Utils::hex(networkId, tmp2); - std::pair tmp; - tmp.first["id"] = tmp2; - tmp.first["objtype"] = "_delete_network"; - tmp.second = true; - _commitQueue.post(tmp); - nlohmann::json nullJson; - _networkChanged(tmp.first, nullJson, true); -} - -void PostgreSQL::eraseMember(const uint64_t networkId, const uint64_t memberId) -{ - fprintf(stderr, "PostgreSQL::eraseMember\n"); - char tmp2[24]; - waitForReady(); - std::pair tmp, nw; - Utils::hex(networkId, tmp2); - tmp.first["nwid"] = tmp2; - Utils::hex(memberId, tmp2); - tmp.first["id"] = tmp2; - tmp.first["objtype"] = "_delete_member"; - tmp.second = true; - _commitQueue.post(tmp); - nlohmann::json nullJson; - _memberChanged(tmp.first, nullJson, true); -} - -void PostgreSQL::nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress &physicalAddress) -{ - std::lock_guard l(_lastOnline_l); - std::pair &i = _lastOnline[std::pair(networkId, memberId)]; - i.first = OSUtils::now(); - if (physicalAddress) { - i.second = physicalAddress; - } -} - -AuthInfo PostgreSQL::getSSOAuthInfo(const nlohmann::json &member, const std::string &redirectURL) -{ - // NONCE is just a random character string. no semantic meaning - // state = HMAC SHA384 of Nonce based on shared sso key - // - // need nonce timeout in database? make sure it's used within X time - // X is 5 minutes for now. Make configurable later? - // - // how do we tell when a nonce is used? if auth_expiration_time is set - std::string networkId = member["nwid"]; - std::string memberId = member["id"]; - - - char authenticationURL[4096] = {0}; - AuthInfo info; - info.enabled = true; - // fprintf(stderr, "PostgreSQL::updateMemberOnLoad: %s-%s\n", networkId.c_str(), memberId.c_str()); - try { - auto c = _pool->borrow(); - pqxx::work w(*c->c); - - char nonceBytes[16] = {0}; - std::string nonce = ""; - - // check if the member exists first. - pqxx::row count = w.exec_params1("SELECT count(id) FROM ztc_member WHERE id = $1 AND network_id = $2 AND deleted = false", memberId, networkId); - if (count[0].as() == 1) { - // get active nonce, if exists. - pqxx::result r = w.exec_params("SELECT nonce FROM ztc_sso_expiry " - "WHERE network_id = $1 AND member_id = $2 " - "AND ((NOW() AT TIME ZONE 'UTC') <= authentication_expiry_time) AND ((NOW() AT TIME ZONE 'UTC') <= nonce_expiration)", - networkId, memberId); - - if (r.size() == 0) { - // no active nonce. - // find an unused nonce, if one exists. - pqxx::result r = w.exec_params("SELECT nonce FROM ztc_sso_expiry " - "WHERE network_id = $1 AND member_id = $2 " - "AND authentication_expiry_time IS NULL AND ((NOW() AT TIME ZONE 'UTC') <= nonce_expiration)", - networkId, memberId); - - if (r.size() == 1) { - // we have an existing nonce. Use it - nonce = r.at(0)[0].as(); - Utils::unhex(nonce.c_str(), nonceBytes, sizeof(nonceBytes)); - } else if (r.empty()) { - // create a nonce - Utils::getSecureRandom(nonceBytes, 16); - char nonceBuf[64] = {0}; - Utils::hex(nonceBytes, sizeof(nonceBytes), nonceBuf); - nonce = std::string(nonceBuf); - - pqxx::result ir = w.exec_params0("INSERT INTO ztc_sso_expiry " - "(nonce, nonce_expiration, network_id, member_id) VALUES " - "($1, TO_TIMESTAMP($2::double precision/1000), $3, $4)", - nonce, OSUtils::now() + 300000, networkId, memberId); - - w.commit(); - } else { - // > 1 ?!? 
Thats an error! - fprintf(stderr, "> 1 unused nonce!\n"); - exit(6); - } - } else if (r.size() == 1) { - nonce = r.at(0)[0].as(); - Utils::unhex(nonce.c_str(), nonceBytes, sizeof(nonceBytes)); - } else { - // more than 1 nonce in use? Uhhh... - fprintf(stderr, "> 1 nonce in use for network member?!?\n"); - exit(7); - } - - r = w.exec_params("SELECT org.client_id, org.authorization_endpoint, org.issuer, org.sso_impl_version " - "FROM ztc_network AS nw, ztc_org AS org " - "WHERE nw.id = $1 AND nw.sso_enabled = true AND org.owner_id = nw.owner_id", networkId); - - std::string client_id = ""; - std::string authorization_endpoint = ""; - std::string issuer = ""; - uint64_t sso_version = 0; - - if (r.size() == 1) { - client_id = r.at(0)[0].as(); - authorization_endpoint = r.at(0)[1].as(); - issuer = r.at(0)[2].as(); - sso_version = r.at(0)[3].as(); - } else if (r.size() > 1) { - fprintf(stderr, "ERROR: More than one auth endpoint for an organization?!?!? NetworkID: %s\n", networkId.c_str()); - } else { - fprintf(stderr, "No client or auth endpoint?!?\n"); - } - - info.version = sso_version; - - // no catch all else because we don't actually care if no records exist here. just continue as normal. - if ((!client_id.empty())&&(!authorization_endpoint.empty())) { - - uint8_t state[48]; - HMACSHA384(_ssoPsk, nonceBytes, sizeof(nonceBytes), state); - char state_hex[256]; - Utils::hex(state, 48, state_hex); - - if (info.version == 0) { - char url[2048] = {0}; - OSUtils::ztsnprintf(url, sizeof(authenticationURL), - "%s?response_type=id_token&response_mode=form_post&scope=openid+email+profile&redirect_uri=%s&nonce=%s&state=%s&client_id=%s", - authorization_endpoint.c_str(), - url_encode(redirectURL).c_str(), - nonce.c_str(), - state_hex, - client_id.c_str()); - info.authenticationURL = std::string(url); - } else if (info.version == 1) { - info.ssoClientID = client_id; - info.issuerURL = issuer; - info.ssoNonce = nonce; - info.ssoState = std::string(state_hex) + "_" +networkId; - info.centralAuthURL = redirectURL; - fprintf( - stderr, - "ssoClientID: %s\nissuerURL: %s\nssoNonce: %s\nssoState: %s\ncentralAuthURL: %s\n", - info.ssoClientID.c_str(), - info.issuerURL.c_str(), - info.ssoNonce.c_str(), - info.ssoState.c_str(), - info.centralAuthURL.c_str()); - } - } else { - fprintf(stderr, "client_id: %s\nauthorization_endpoint: %s\n", client_id.c_str(), authorization_endpoint.c_str()); - } - } - - _pool->unborrow(c); - } catch (std::exception &e) { - fprintf(stderr, "ERROR: Error updating member on load: %s\n", e.what()); - } - - return info; //std::string(authenticationURL); -} - -void PostgreSQL::initializeNetworks() -{ - try { - std::string setKey = "networks:{" + _myAddressStr + "}"; - - fprintf(stderr, "Initializing Networks...\n"); - - char qbuf[2048] = {0}; - sprintf(qbuf, "SELECT n.id, (EXTRACT(EPOCH FROM n.creation_time AT TIME ZONE 'UTC')*1000)::bigint as creation_time, n.capabilities, " - "n.enable_broadcast, (EXTRACT(EPOCH FROM n.last_modified AT TIME ZONE 'UTC')*1000)::bigint AS last_modified, n.mtu, n.multicast_limit, n.name, n.private, n.remote_trace_level, " - "n.remote_trace_target, n.revision, n.rules, n.tags, n.v4_assign_mode, n.v6_assign_mode, n.sso_enabled, (CASE WHEN n.sso_enabled THEN o.client_id ELSE NULL END) as client_id, " - "(CASE WHEN n.sso_enabled THEN o.authorization_endpoint ELSE NULL END) as authorization_endpoint, d.domain, d.servers, " - "ARRAY(SELECT CONCAT(host(ip_range_start),'|', host(ip_range_end)) FROM ztc_network_assignment_pool WHERE network_id = n.id) AS 
assignment_pool, " - "ARRAY(SELECT CONCAT(host(address),'/',bits::text,'|',COALESCE(host(via), 'NULL'))FROM ztc_network_route WHERE network_id = n.id) AS routes " - "FROM ztc_network n " - "LEFT OUTER JOIN ztc_org o " - " ON o.owner_id = n.owner_id " - "LEFT OUTER JOIN ztc_network_dns d " - " ON d.network_id = n.id " - "WHERE deleted = false AND controller_id = '%s'", _myAddressStr.c_str()); - auto c = _pool->borrow(); - auto c2 = _pool->borrow(); - pqxx::work w{*c->c}; - - auto stream = pqxx::stream_from::query(w, qbuf); - - std::tuple< - std::string // network ID - , std::optional // creationTime - , std::optional // capabilities - , std::optional // enableBroadcast - , std::optional // lastModified - , std::optional // mtu - , std::optional // multicastLimit - , std::optional // name - , bool // private - , std::optional // remoteTraceLevel - , std::optional // remoteTraceTarget - , std::optional // revision - , std::optional // rules - , std::optional // tags - , std::optional // v4AssignMode - , std::optional // v6AssignMode - , std::optional // ssoEnabled - , std::optional // clientId - , std::optional // authorizationEndpoint - , std::optional // domain - , std::optional // servers - , std::string // assignmentPoolString - , std::string // routeString - > row; - - uint64_t count = 0; - auto tmp = std::chrono::high_resolution_clock::now(); - uint64_t total = 0; - while (stream >> row) { - auto start = std::chrono::high_resolution_clock::now(); - - json empty; - json config; - - initNetwork(config); - - std::string nwid = std::get<0>(row); - std::optional creationTime = std::get<1>(row); - std::optional capabilities = std::get<2>(row); - std::optional enableBroadcast = std::get<3>(row); - std::optional lastModified = std::get<4>(row); - std::optional mtu = std::get<5>(row); - std::optional multicastLimit = std::get<6>(row); - std::optional name = std::get<7>(row); - bool isPrivate = std::get<8>(row); - std::optional remoteTraceLevel = std::get<9>(row); - std::optional remoteTraceTarget = std::get<10>(row); - std::optional revision = std::get<11>(row); - std::optional rules = std::get<12>(row); - std::optional tags = std::get<13>(row); - std::optional v4AssignMode = std::get<14>(row); - std::optional v6AssignMode = std::get<15>(row); - std::optional ssoEnabled = std::get<16>(row); - std::optional clientId = std::get<17>(row); - std::optional authorizationEndpoint = std::get<18>(row); - std::optional dnsDomain = std::get<19>(row); - std::optional dnsServers = std::get<20>(row); - std::string assignmentPoolString = std::get<21>(row); - std::string routesString = std::get<22>(row); - - config["id"] = nwid; - config["nwid"] = nwid; - config["creationTime"] = creationTime.value_or(0); - config["capabilities"] = json::parse(capabilities.value_or("[]")); - config["enableBroadcast"] = enableBroadcast.value_or(false); - config["lastModified"] = lastModified.value_or(0); - config["mtu"] = mtu.value_or(2800); - config["multicastLimit"] = multicastLimit.value_or(64); - config["name"] = name.value_or(""); - config["private"] = isPrivate; - config["remoteTraceLevel"] = remoteTraceLevel.value_or(0); - config["remoteTraceTarget"] = remoteTraceTarget.value_or(""); - config["revision"] = revision.value_or(0); - config["rules"] = json::parse(rules.value_or("[]")); - config["tags"] = json::parse(tags.value_or("[]")); - config["v4AssignMode"] = json::parse(v4AssignMode.value_or("{}")); - config["v6AssignMode"] = json::parse(v6AssignMode.value_or("{}")); - config["ssoEnabled"] = ssoEnabled.value_or(false); 
- config["objtype"] = "network"; - config["ipAssignmentPools"] = json::array(); - config["routes"] = json::array(); - config["clientId"] = clientId.value_or(""); - config["authorizationEndpoint"] = authorizationEndpoint.value_or(""); - - if (dnsDomain.has_value()) { - std::string serverList = dnsServers.value(); - json obj; - auto servers = json::array(); - if (serverList.rfind("{",0) != std::string::npos) { - serverList = serverList.substr(1, serverList.size()-2); - std::stringstream ss(serverList); - while(ss.good()) { - std::string server; - std::getline(ss, server, ','); - servers.push_back(server); - } - } - obj["domain"] = dnsDomain.value(); - obj["servers"] = servers; - config["dns"] = obj; - } - - config["ipAssignmentPools"] = json::array(); - if (assignmentPoolString != "{}") { - std::string tmp = assignmentPoolString.substr(1, assignmentPoolString.size()-2); - std::vector assignmentPools = split(tmp, ','); - for (auto it = assignmentPools.begin(); it != assignmentPools.end(); ++it) { - std::vector r = split(*it, '|'); - json ip; - ip["ipRangeStart"] = r[0]; - ip["ipRangeEnd"] = r[1]; - config["ipAssignmentPools"].push_back(ip); - } - } - - config["routes"] = json::array(); - if (routesString != "{}") { - std::string tmp = routesString.substr(1, routesString.size()-2); - std::vector routes = split(tmp, ','); - for (auto it = routes.begin(); it != routes.end(); ++it) { - std::vector r = split(*it, '|'); - json route; - route["target"] = r[0]; - route["via"] = ((route["via"] == "NULL")? nullptr : r[1]); - config["routes"].push_back(route); - } - } - - _networkChanged(empty, config, false); - - auto end = std::chrono::high_resolution_clock::now(); - auto dur = std::chrono::duration_cast(end - start);; - total += dur.count(); - ++count; - if (count > 0 && count % 10000 == 0) { - fprintf(stderr, "Averaging %llu us per network\n", (total/count)); - } - } - - if (count > 0) { - fprintf(stderr, "Took %llu us per network to load\n", (total/count)); - } - stream.complete(); - - w.commit(); - _pool->unborrow(c2); - _pool->unborrow(c); - - if (++this->_ready == 2) { - if (_waitNoticePrinted) { - fprintf(stderr,"[%s] NOTICE: %.10llx controller PostgreSQL data download complete." 
ZT_EOL_S,_timestr(),(unsigned long long)_myAddress.toInt()); - } - _readyLock.unlock(); - } - } catch (sw::redis::Error &e) { - fprintf(stderr, "ERROR: Error initializing networks in Redis: %s\n", e.what()); - exit(-1); - } catch (std::exception &e) { - fprintf(stderr, "ERROR: Error initializing networks: %s\n", e.what()); - exit(-1); - } -} - -void PostgreSQL::initializeMembers() -{ - std::string memberId; - std::string networkId; - try { - std::unordered_map networkMembers; - fprintf(stderr, "Initializing Members...\n"); - - char qbuf[2048]; - sprintf(qbuf, "SELECT m.id, m.network_id, m.active_bridge, m.authorized, m.capabilities, (EXTRACT(EPOCH FROM m.creation_time AT TIME ZONE 'UTC')*1000)::bigint, m.identity, " - " (EXTRACT(EPOCH FROM m.last_authorized_time AT TIME ZONE 'UTC')*1000)::bigint, " - " (EXTRACT(EPOCH FROM m.last_deauthorized_time AT TIME ZONE 'UTC')*1000)::bigint, " - " m.remote_trace_level, m.remote_trace_target, m.tags, m.v_major, m.v_minor, m.v_rev, m.v_proto, " - " m.no_auto_assign_ips, m.revision, sso_exempt, " - " (SELECT (EXTRACT(EPOCH FROM e.authentication_expiry_time)*1000)::bigint " - " FROM ztc_sso_expiry e " - " INNER JOIN ztc_network n1 " - " ON n.id = e.network_id " - " WHERE e.network_id = m.network_id AND e.member_id = m.id AND n.sso_enabled = TRUE AND e.authentication_expiry_time IS NOT NULL " - " ORDER BY e.authentication_expiry_time DESC LIMIT 1) AS authentication_expiry_time, " - " ARRAY(SELECT DISTINCT address FROM ztc_member_ip_assignment WHERE member_id = m.id AND network_id = m.network_id) AS assigned_addresses " - "FROM ztc_member m " - "INNER JOIN ztc_network n " - " ON n.id = m.network_id " - "WHERE n.controller_id = '%s' AND m.deleted = false", _myAddressStr.c_str()); - auto c = _pool->borrow(); - auto c2 = _pool->borrow(); - pqxx::work w{*c->c}; - - auto stream = pqxx::stream_from::query(w, qbuf); - - std::tuple< - std::string // memberId - , std::string // memberId - , std::optional // activeBridge - , std::optional // authorized - , std::optional // capabilities - , std::optional // creationTime - , std::optional // identity - , std::optional // lastAuthorizedTime - , std::optional // lastDeauthorizedTime - , std::optional // remoteTraceLevel - , std::optional // remoteTraceTarget - , std::optional // tags - , std::optional // vMajor - , std::optional // vMinor - , std::optional // vRev - , std::optional // vProto - , std::optional // noAutoAssignIps - , std::optional // revision - , std::optional // ssoExempt - , std::optional // authenticationExpiryTime - , std::string // assignedAddresses - > row; - - uint64_t count = 0; - auto tmp = std::chrono::high_resolution_clock::now(); - uint64_t total = 0; - while (stream >> row) { - auto start = std::chrono::high_resolution_clock::now(); - json empty; - json config; - - initMember(config); - - memberId = std::get<0>(row); - networkId = std::get<1>(row); - std::optional activeBridge = std::get<2>(row); - std::optional authorized = std::get<3>(row); - std::optional capabilities = std::get<4>(row); - std::optional creationTime = std::get<5>(row); - std::optional identity = std::get<6>(row); - std::optional lastAuthorizedTime = std::get<7>(row); - std::optional lastDeauthorizedTime = std::get<8>(row); - std::optional remoteTraceLevel = std::get<9>(row); - std::optional remoteTraceTarget = std::get<10>(row); - std::optional tags = std::get<11>(row); - std::optional vMajor = std::get<12>(row); - std::optional vMinor = std::get<13>(row); - std::optional vRev = std::get<14>(row); - std::optional vProto = 
std::get<15>(row); - std::optional noAutoAssignIps = std::get<16>(row); - std::optional revision = std::get<17>(row); - std::optional ssoExempt = std::get<18>(row); - std::optional authenticationExpiryTime = std::get<19>(row); - std::string assignedAddresses = std::get<20>(row); - - config["id"] = memberId; - config["address"] = memberId; - config["nwid"] = networkId; - config["activeBridge"] = activeBridge.value_or(false); - config["authorized"] = authorized.value_or(false); - config["capabilities"] = json::parse(capabilities.value_or("[]")); - config["creationTime"] = creationTime.value_or(0); - config["identity"] = identity.value_or(""); - config["lastAuthorizedTime"] = lastAuthorizedTime.value_or(0); - config["lastDeauthorizedTime"] = lastDeauthorizedTime.value_or(0); - config["remoteTraceLevel"] = remoteTraceLevel.value_or(0); - config["remoteTraceTarget"] = remoteTraceTarget.value_or(""); - config["tags"] = json::parse(tags.value_or("[]")); - config["vMajor"] = vMajor.value_or(-1); - config["vMinor"] = vMinor.value_or(-1); - config["vRev"] = vRev.value_or(-1); - config["vProto"] = vProto.value_or(-1); - config["noAutoAssignIps"] = noAutoAssignIps.value_or(false); - config["revision"] = revision.value_or(0); - config["ssoExempt"] = ssoExempt.value_or(false); - config["authenticationExpiryTime"] = authenticationExpiryTime.value_or(0); - config["objtype"] = "member"; - config["ipAssignments"] = json::array(); - - if (assignedAddresses != "{}") { - std::string tmp = assignedAddresses.substr(1, assignedAddresses.size()-2); - std::vector addrs = split(tmp, ','); - for (auto it = addrs.begin(); it != addrs.end(); ++it) { - config["ipAssignments"].push_back(*it); - } - } - - _memberChanged(empty, config, false); - - memberId = ""; - networkId = ""; - - auto end = std::chrono::high_resolution_clock::now(); - auto dur = std::chrono::duration_cast(end - start);; - total += dur.count(); - ++count; - if (count > 0 && count % 10000 == 0) { - fprintf(stderr, "Averaging %llu us per member\n", (total/count)); - } - } - if (count > 0) { - fprintf(stderr, "Took %llu us per member to load\n", (total/count)); - } - - stream.complete(); - - w.commit(); - _pool->unborrow(c2); - _pool->unborrow(c); - - if (++this->_ready == 2) { - if (_waitNoticePrinted) { - fprintf(stderr,"[%s] NOTICE: %.10llx controller PostgreSQL data download complete." 
ZT_EOL_S,_timestr(),(unsigned long long)_myAddress.toInt()); - } - _readyLock.unlock(); - } - } catch (sw::redis::Error &e) { - fprintf(stderr, "ERROR: Error initializing members (redis): %s\n", e.what()); - } catch (std::exception &e) { - fprintf(stderr, "ERROR: Error initializing member: %s-%s %s\n", networkId.c_str(), memberId.c_str(), e.what()); - exit(-1); - } -} - -void PostgreSQL::heartbeat() -{ - char publicId[1024]; - char hostnameTmp[1024]; - _myId.toString(false,publicId); - if (gethostname(hostnameTmp, sizeof(hostnameTmp))!= 0) { - hostnameTmp[0] = (char)0; - } else { - for (int i = 0; i < (int)sizeof(hostnameTmp); ++i) { - if ((hostnameTmp[i] == '.')||(hostnameTmp[i] == 0)) { - hostnameTmp[i] = (char)0; - break; - } - } - } - const char *controllerId = _myAddressStr.c_str(); - const char *publicIdentity = publicId; - const char *hostname = hostnameTmp; - - while (_run == 1) { - // fprintf(stderr, "%s: heartbeat\n", controllerId); - auto c = _pool->borrow(); - int64_t ts = OSUtils::now(); - - if(c->c) { - pqxx::work w{*c->c}; - - std::string major = std::to_string(ZEROTIER_ONE_VERSION_MAJOR); - std::string minor = std::to_string(ZEROTIER_ONE_VERSION_MINOR); - std::string rev = std::to_string(ZEROTIER_ONE_VERSION_REVISION); - std::string build = std::to_string(ZEROTIER_ONE_VERSION_BUILD); - std::string now = std::to_string(ts); - std::string host_port = std::to_string(_listenPort); - std::string use_redis = (_rc != NULL) ? "true" : "false"; - - try { - pqxx::result res = w.exec0("INSERT INTO ztc_controller (id, cluster_host, last_alive, public_identity, v_major, v_minor, v_rev, v_build, host_port, use_redis) " - "VALUES ("+w.quote(controllerId)+", "+w.quote(hostname)+", TO_TIMESTAMP("+now+"::double precision/1000), "+ - w.quote(publicIdentity)+", "+major+", "+minor+", "+rev+", "+build+", "+host_port+", "+use_redis+") " - "ON CONFLICT (id) DO UPDATE SET cluster_host = EXCLUDED.cluster_host, last_alive = EXCLUDED.last_alive, " - "public_identity = EXCLUDED.public_identity, v_major = EXCLUDED.v_major, v_minor = EXCLUDED.v_minor, " - "v_rev = EXCLUDED.v_rev, v_build = EXCLUDED.v_rev, host_port = EXCLUDED.host_port, " - "use_redis = EXCLUDED.use_redis"); - } catch (std::exception &e) { - fprintf(stderr, "Heartbeat update failed: %s\n", e.what()); - w.abort(); - _pool->unborrow(c); - std::this_thread::sleep_for(std::chrono::milliseconds(1000)); - continue; - } - w.commit(); - } - _pool->unborrow(c); - - std::this_thread::sleep_for(std::chrono::milliseconds(1000)); - } - fprintf(stderr, "Exited heartbeat thread\n"); -} - -void PostgreSQL::membersDbWatcher() -{ - if (_rc) { - _membersWatcher_Redis(); - } else { - _membersWatcher_Postgres(); - } - - if (_run == 1) { - fprintf(stderr, "ERROR: %s membersDbWatcher should still be running! 
Exiting Controller.\n", _myAddressStr.c_str()); - exit(9); - } - fprintf(stderr, "Exited membersDbWatcher\n"); -} - -void PostgreSQL::_membersWatcher_Postgres() { - auto c = _pool->borrow(); - - std::string stream = "member_" + _myAddressStr; - - fprintf(stderr, "Listening to member stream: %s\n", stream.c_str()); - MemberNotificationReceiver m(this, *c->c, stream); - - while(_run == 1) { - c->c->await_notification(5, 0); - } - - _pool->unborrow(c); -} - -void PostgreSQL::_membersWatcher_Redis() { - char buf[11] = {0}; - std::string key = "member-stream:{" + std::string(_myAddress.toString(buf)) + "}"; - fprintf(stderr, "Listening to member stream: %s\n", key.c_str()); - while (_run == 1) { - try { - json tmp; - std::unordered_map result; - if (_rc->clusterMode) { - _cluster->xread(key, "$", std::chrono::seconds(1), 0, std::inserter(result, result.end())); - } else { - _redis->xread(key, "$", std::chrono::seconds(1), 0, std::inserter(result, result.end())); - } - if (!result.empty()) { - for (auto element : result) { - #ifdef ZT_TRACE - fprintf(stdout, "Received notification from: %s\n", element.first.c_str()); - #endif - for (auto rec : element.second) { - std::string id = rec.first; - auto attrs = rec.second; - #ifdef ZT_TRACE - fprintf(stdout, "Record ID: %s\n", id.c_str()); - fprintf(stdout, "attrs len: %lu\n", attrs.size()); - #endif - for (auto a : attrs) { - #ifdef ZT_TRACE - fprintf(stdout, "key: %s\nvalue: %s\n", a.first.c_str(), a.second.c_str()); - #endif - try { - tmp = json::parse(a.second); - json &ov = tmp["old_val"]; - json &nv = tmp["new_val"]; - json oldConfig, newConfig; - if (ov.is_object()) oldConfig = ov; - if (nv.is_object()) newConfig = nv; - if (oldConfig.is_object()||newConfig.is_object()) { - _memberChanged(oldConfig,newConfig,(this->_ready >= 2)); - } - } catch (...) { - fprintf(stderr, "json parse error in networkWatcher_Redis\n"); - } - } - if (_rc->clusterMode) { - _cluster->xdel(key, id); - } else { - _redis->xdel(key, id); - } - } - } - } - } catch (sw::redis::Error &e) { - fprintf(stderr, "Error in Redis members watcher: %s\n", e.what()); - } - } - fprintf(stderr, "membersWatcher ended\n"); -} - -void PostgreSQL::networksDbWatcher() -{ - if (_rc) { - _networksWatcher_Redis(); - } else { - _networksWatcher_Postgres(); - } - - if (_run == 1) { - fprintf(stderr, "ERROR: %s networksDbWatcher should still be running! 
Exiting Controller.\n", _myAddressStr.c_str()); - exit(8); - } - fprintf(stderr, "Exited networksDbWatcher\n"); -} - -void PostgreSQL::_networksWatcher_Postgres() { - std::string stream = "network_" + _myAddressStr; - - fprintf(stderr, "Listening to member stream: %s\n", stream.c_str()); - - auto c = _pool->borrow(); - - NetworkNotificationReceiver n(this, *c->c, stream); - - while(_run == 1) { - c->c->await_notification(5,0); - } -} - -void PostgreSQL::_networksWatcher_Redis() { - char buf[11] = {0}; - std::string key = "network-stream:{" + std::string(_myAddress.toString(buf)) + "}"; - - while (_run == 1) { - try { - json tmp; - std::unordered_map result; - if (_rc->clusterMode) { - _cluster->xread(key, "$", std::chrono::seconds(1), 0, std::inserter(result, result.end())); - } else { - _redis->xread(key, "$", std::chrono::seconds(1), 0, std::inserter(result, result.end())); - } - - if (!result.empty()) { - for (auto element : result) { -#ifdef ZT_TRACE - fprintf(stdout, "Received notification from: %s\n", element.first.c_str()); -#endif - for (auto rec : element.second) { - std::string id = rec.first; - auto attrs = rec.second; -#ifdef ZT_TRACE - fprintf(stdout, "Record ID: %s\n", id.c_str()); - fprintf(stdout, "attrs len: %lu\n", attrs.size()); -#endif - for (auto a : attrs) { -#ifdef ZT_TRACE - fprintf(stdout, "key: %s\nvalue: %s\n", a.first.c_str(), a.second.c_str()); -#endif - try { - tmp = json::parse(a.second); - json &ov = tmp["old_val"]; - json &nv = tmp["new_val"]; - json oldConfig, newConfig; - if (ov.is_object()) oldConfig = ov; - if (nv.is_object()) newConfig = nv; - if (oldConfig.is_object()||newConfig.is_object()) { - _networkChanged(oldConfig,newConfig,(this->_ready >= 2)); - } - } catch (...) { - fprintf(stderr, "json parse error in networkWatcher_Redis\n"); - } - } - if (_rc->clusterMode) { - _cluster->xdel(key, id); - } else { - _redis->xdel(key, id); - } - } - } - } - } catch (sw::redis::Error &e) { - fprintf(stderr, "Error in Redis networks watcher: %s\n", e.what()); - } - } - fprintf(stderr, "networksWatcher ended\n"); -} - -void PostgreSQL::commitThread() -{ - fprintf(stderr, "%s: commitThread start\n", _myAddressStr.c_str()); - std::pair qitem; - while(_commitQueue.get(qitem)&(_run == 1)) { - //fprintf(stderr, "commitThread tick\n"); - if (!qitem.first.is_object()) { - fprintf(stderr, "not an object\n"); - continue; - } - - std::shared_ptr c; - try { - c = _pool->borrow(); - } catch (std::exception &e) { - fprintf(stderr, "ERROR: %s\n", e.what()); - continue; - } - - if (!c) { - fprintf(stderr, "Error getting database connection\n"); - continue; - } - - try { - nlohmann::json &config = (qitem.first); - const std::string objtype = config["objtype"]; - if (objtype == "member") { - // fprintf(stderr, "%s: commitThread: member\n", _myAddressStr.c_str()); - std::string memberId; - std::string networkId; - try { - pqxx::work w(*c->c); - - memberId = config["id"]; - networkId = config["nwid"]; - - std::string target = "NULL"; - if (!config["remoteTraceTarget"].is_null()) { - target = config["remoteTraceTarget"]; - } - - pqxx::row nwrow = w.exec_params1("SELECT COUNT(id) FROM ztc_network WHERE id = $1", networkId); - int nwcount = nwrow[0].as(); - - if (nwcount != 1) { - fprintf(stderr, "network %s does not exist. 
skipping member upsert\n", networkId.c_str()); - w.abort(); - _pool->unborrow(c); - continue; - } - - pqxx::result res = w.exec_params0( - "INSERT INTO ztc_member (id, network_id, active_bridge, authorized, capabilities, " - "identity, last_authorized_time, last_deauthorized_time, no_auto_assign_ips, " - "remote_trace_level, remote_trace_target, revision, tags, v_major, v_minor, v_rev, v_proto) " - "VALUES ($1, $2, $3, $4, $5, $6, " - "TO_TIMESTAMP($7::double precision/1000), TO_TIMESTAMP($8::double precision/1000), " - "$9, $10, $11, $12, $13, $14, $15, $16, $17) ON CONFLICT (network_id, id) DO UPDATE SET " - "active_bridge = EXCLUDED.active_bridge, authorized = EXCLUDED.authorized, capabilities = EXCLUDED.capabilities, " - "identity = EXCLUDED.identity, last_authorized_time = EXCLUDED.last_authorized_time, " - "last_deauthorized_time = EXCLUDED.last_deauthorized_time, no_auto_assign_ips = EXCLUDED.no_auto_assign_ips, " - "remote_trace_level = EXCLUDED.remote_trace_level, remote_trace_target = EXCLUDED.remote_trace_target, " - "revision = EXCLUDED.revision+1, tags = EXCLUDED.tags, v_major = EXCLUDED.v_major, " - "v_minor = EXCLUDED.v_minor, v_rev = EXCLUDED.v_rev, v_proto = EXCLUDED.v_proto", - memberId, - networkId, - (bool)config["activeBridge"], - (bool)config["authorized"], - OSUtils::jsonDump(config["capabilities"], -1), - OSUtils::jsonString(config["identity"], ""), - (uint64_t)config["lastAuthorizedTime"], - (uint64_t)config["lastDeauthorizedTime"], - (bool)config["noAutoAssignIps"], - (int)config["remoteTraceLevel"], - target, - (uint64_t)config["revision"], - OSUtils::jsonDump(config["tags"], -1), - (int)config["vMajor"], - (int)config["vMinor"], - (int)config["vRev"], - (int)config["vProto"]); - - - res = w.exec_params0("DELETE FROM ztc_member_ip_assignment WHERE member_id = $1 AND network_id = $2", - memberId, networkId); - - std::vector assignments; - bool ipAssignError = false; - for (auto i = config["ipAssignments"].begin(); i != config["ipAssignments"].end(); ++i) { - std::string addr = *i; - - if (std::find(assignments.begin(), assignments.end(), addr) != assignments.end()) { - continue; - } - - res = w.exec_params0( - "INSERT INTO ztc_member_ip_assignment (member_id, network_id, address) VALUES ($1, $2, $3) ON CONFLICT (network_id, member_id, address) DO NOTHING", - memberId, networkId, addr); - - assignments.push_back(addr); - } - if (ipAssignError) { - fprintf(stderr, "%s: ipAssignError\n", _myAddressStr.c_str()); - w.abort(); - _pool->unborrow(c); - c.reset(); - continue; - } - - w.commit(); - - const uint64_t nwidInt = OSUtils::jsonIntHex(config["nwid"], 0ULL); - const uint64_t memberidInt = OSUtils::jsonIntHex(config["id"], 0ULL); - if (nwidInt && memberidInt) { - nlohmann::json nwOrig; - nlohmann::json memOrig; - - nlohmann::json memNew(config); - - get(nwidInt, nwOrig, memberidInt, memOrig); - - _memberChanged(memOrig, memNew, qitem.second); - } else { - fprintf(stderr, "%s: Can't notify of change. 
Error parsing nwid or memberid: %llu-%llu\n", _myAddressStr.c_str(), (unsigned long long)nwidInt, (unsigned long long)memberidInt); - } - } catch (std::exception &e) { - fprintf(stderr, "%s ERROR: Error updating member %s-%s: %s\n", _myAddressStr.c_str(), networkId.c_str(), memberId.c_str(), e.what()); - } - } else if (objtype == "network") { - try { - // fprintf(stderr, "%s: commitThread: network\n", _myAddressStr.c_str()); - pqxx::work w(*c->c); - - std::string id = config["id"]; - std::string remoteTraceTarget = ""; - if(!config["remoteTraceTarget"].is_null()) { - remoteTraceTarget = config["remoteTraceTarget"]; - } - std::string rulesSource = ""; - if (config["rulesSource"].is_string()) { - rulesSource = config["rulesSource"]; - } - - // This ugly query exists because when we want to mirror networks to/from - // another data store (e.g. FileDB or LFDB) it is possible to get a network - // that doesn't exist in Central's database. This does an upsert and sets - // the owner_id to the "first" global admin in the user DB if the record - // did not previously exist. If the record already exists owner_id is left - // unchanged, so owner_id should be left out of the update clause. - pqxx::result res = w.exec_params0( - "INSERT INTO ztc_network (id, creation_time, owner_id, controller_id, capabilities, enable_broadcast, " - "last_modified, mtu, multicast_limit, name, private, " - "remote_trace_level, remote_trace_target, rules, rules_source, " - "tags, v4_assign_mode, v6_assign_mode, sso_enabled) VALUES (" - "$1, TO_TIMESTAMP($5::double precision/1000), " - "(SELECT user_id AS owner_id FROM ztc_global_permissions WHERE authorize = true AND del = true AND modify = true AND read = true LIMIT 1)," - "$2, $3, $4, TO_TIMESTAMP($5::double precision/1000), " - "$6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, 17) " - "ON CONFLICT (id) DO UPDATE set controller_id = EXCLUDED.controller_id, " - "capabilities = EXCLUDED.capabilities, enable_broadcast = EXCLUDED.enable_broadcast, " - "last_modified = EXCLUDED.last_modified, mtu = EXCLUDED.mtu, " - "multicast_limit = EXCLUDED.multicast_limit, name = EXCLUDED.name, " - "private = EXCLUDED.private, remote_trace_level = EXCLUDED.remote_trace_level, " - "remote_trace_target = EXCLUDED.remote_trace_target, rules = EXCLUDED.rules, " - "rules_source = EXCLUDED.rules_source, tags = EXCLUDED.tags, " - "v4_assign_mode = EXCLUDED.v4_assign_mode, v6_assign_mode = EXCLUDED.v6_assign_mode, " - "sso_enabled = EXCLUDED.sso_enabled", - id, - _myAddressStr, - OSUtils::jsonDump(config["capabilitles"], -1), - (bool)config["enableBroadcast"], - OSUtils::now(), - (int)config["mtu"], - (int)config["multicastLimit"], - OSUtils::jsonString(config["name"],""), - (bool)config["private"], - (int)config["remoteTraceLevel"], - remoteTraceTarget, - OSUtils::jsonDump(config["rules"], -1), - rulesSource, - OSUtils::jsonDump(config["tags"], -1), - OSUtils::jsonDump(config["v4AssignMode"],-1), - OSUtils::jsonDump(config["v6AssignMode"], -1), - OSUtils::jsonBool(config["ssoEnabled"], false)); - - res = w.exec_params0("DELETE FROM ztc_network_assignment_pool WHERE network_id = $1", 0); - - auto pool = config["ipAssignmentPools"]; - bool err = false; - for (auto i = pool.begin(); i != pool.end(); ++i) { - std::string start = (*i)["ipRangeStart"]; - std::string end = (*i)["ipRangeEnd"]; - - res = w.exec_params0( - "INSERT INTO ztc_network_assignment_pool (network_id, ip_range_start, ip_range_end) " - "VALUES ($1, $2, $3)", id, start, end); - } - - res = w.exec_params0("DELETE FROM 
ztc_network_route WHERE network_id = $1", id); - - auto routes = config["routes"]; - err = false; - for (auto i = routes.begin(); i != routes.end(); ++i) { - std::string t = (*i)["target"]; - std::vector target; - std::istringstream f(t); - std::string s; - while(std::getline(f, s, '/')) { - target.push_back(s); - } - if (target.empty() || target.size() != 2) { - continue; - } - std::string targetAddr = target[0]; - std::string targetBits = target[1]; - std::string via = "NULL"; - if (!(*i)["via"].is_null()) { - via = (*i)["via"]; - } - - res = w.exec_params0("INSERT INTO ztc_network_route (network_id, address, bits, via) VALUES ($1, $2, $3, $4)", - id, targetAddr, targetBits, (via == "NULL" ? NULL : via.c_str())); - } - if (err) { - fprintf(stderr, "%s: route add error\n", _myAddressStr.c_str()); - w.abort(); - _pool->unborrow(c); - continue; - } - - auto dns = config["dns"]; - std::string domain = dns["domain"]; - std::stringstream servers; - servers << "{"; - for (auto j = dns["servers"].begin(); j < dns["servers"].end(); ++j) { - servers << *j; - if ( (j+1) != dns["servers"].end()) { - servers << ","; - } - } - servers << "}"; - - std::string s = servers.str(); - - res = w.exec_params0("INSERT INTO ztc_network_dns (network_id, domain, servers) VALUES ($1, $2, $3) ON CONFLICT (network_id) DO UPDATE SET domain = EXCLUDED.domain, servers = EXCLUDED.servers", - id, domain, s); - - w.commit(); - - const uint64_t nwidInt = OSUtils::jsonIntHex(config["nwid"], 0ULL); - if (nwidInt) { - nlohmann::json nwOrig; - nlohmann::json nwNew(config); - - get(nwidInt, nwOrig); - - _networkChanged(nwOrig, nwNew, qitem.second); - } else { - fprintf(stderr, "%s: Can't notify network changed: %llu\n", _myAddressStr.c_str(), (unsigned long long)nwidInt); - } - } catch (std::exception &e) { - fprintf(stderr, "%s ERROR: Error updating network: %s\n", _myAddressStr.c_str(), e.what()); - } - } else if (objtype == "_delete_network") { - // fprintf(stderr, "%s: commitThread: delete network\n", _myAddressStr.c_str()); - try { - pqxx::work w(*c->c); - - std::string networkId = config["nwid"]; - - pqxx::result res = w.exec_params0("UPDATE ztc_network SET deleted = true WHERE id = $1", - networkId); - - w.commit(); - } catch (std::exception &e) { - fprintf(stderr, "%s ERROR: Error deleting network: %s\n", _myAddressStr.c_str(), e.what()); - } - - } else if (objtype == "_delete_member") { - // fprintf(stderr, "%s commitThread: delete member\n", _myAddressStr.c_str()); - try { - pqxx::work w(*c->c); - - std::string memberId = config["id"]; - std::string networkId = config["nwid"]; - - pqxx::result res = w.exec_params0( - "UPDATE ztc_member SET hidden = true, deleted = true WHERE id = $1 AND network_id = $2", - memberId, networkId); - - w.commit(); - } catch (std::exception &e) { - fprintf(stderr, "%s ERROR: Error deleting member: %s\n", _myAddressStr.c_str(), e.what()); - } - } else { - fprintf(stderr, "%s ERROR: unknown objtype\n", _myAddressStr.c_str()); - } - } catch (std::exception &e) { - fprintf(stderr, "%s ERROR: Error getting objtype: %s\n", _myAddressStr.c_str(), e.what()); - } - _pool->unborrow(c); - c.reset(); - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - } - - fprintf(stderr, "%s commitThread finished\n", _myAddressStr.c_str()); -} - -void PostgreSQL::onlineNotificationThread() -{ - waitForReady(); - onlineNotification_Postgres(); -} - -void PostgreSQL::onlineNotification_Postgres() -{ - _connected = 1; - - nlohmann::json jtmp1, jtmp2; - while (_run == 1) { - auto c = _pool->borrow(); - auto 
c2 = _pool->borrow(); - try { - fprintf(stderr, "%s onlineNotification_Postgres\n", _myAddressStr.c_str()); - std::unordered_map< std::pair,std::pair,_PairHasher > lastOnline; - { - std::lock_guard l(_lastOnline_l); - lastOnline.swap(_lastOnline); - } - - pqxx::work w(*c->c); - pqxx::work w2(*c2->c); - - fprintf(stderr, "online notification tick\n"); - - bool firstRun = true; - bool memberAdded = false; - int updateCount = 0; - - pqxx::pipeline pipe(w); - - for (auto i=lastOnline.begin(); i != lastOnline.end(); ++i) { - updateCount += 1; - uint64_t nwid_i = i->first.first; - char nwidTmp[64]; - char memTmp[64]; - char ipTmp[64]; - OSUtils::ztsnprintf(nwidTmp,sizeof(nwidTmp), "%.16llx", nwid_i); - OSUtils::ztsnprintf(memTmp,sizeof(memTmp), "%.10llx", i->first.second); - - if(!get(nwid_i, jtmp1, i->first.second, jtmp2)) { - continue; // skip non existent networks/members - } - - std::string networkId(nwidTmp); - std::string memberId(memTmp); - - try { - pqxx::row r = w2.exec_params1("SELECT id, network_id FROM ztc_member WHERE network_id = $1 AND id = $2", - networkId, memberId); - } catch (pqxx::unexpected_rows &e) { - continue; - } - - int64_t ts = i->second.first; - std::string ipAddr = i->second.second.toIpString(ipTmp); - std::string timestamp = std::to_string(ts); - - std::stringstream memberUpdate; - memberUpdate << "INSERT INTO ztc_member_status (network_id, member_id, address, last_updated) VALUES " - << "('" << networkId << "', '" << memberId << "', "; - if (ipAddr.empty()) { - memberUpdate << "NULL, "; - } else { - memberUpdate << "'" << ipAddr << "', "; - } - memberUpdate << "TO_TIMESTAMP(" << timestamp << "::double precision/1000)) " - << " ON CONFLICT (network_id, member_id) DO UPDATE SET address = EXCLUDED.address, last_updated = EXCLUDED.last_updated"; - - pipe.insert(memberUpdate.str()); - } - while(!pipe.empty()) { - pipe.retrieve(); - } - - pipe.complete(); - w.commit(); - fprintf(stderr, "%s: Updated online status of %d members\n", _myAddressStr.c_str(), updateCount); - } catch (std::exception &e) { - fprintf(stderr, "%s: error in onlinenotification thread: %s\n", _myAddressStr.c_str(), e.what()); - } - _pool->unborrow(c2); - _pool->unborrow(c); - - ConnectionPoolStats stats = _pool->get_stats(); - fprintf(stderr, "%s pool stats: in use size: %llu, available size: %llu, total: %llu\n", - _myAddressStr.c_str(), stats.borrowed_size, stats.pool_size, (stats.borrowed_size + stats.pool_size)); - - std::this_thread::sleep_for(std::chrono::seconds(10)); - } - fprintf(stderr, "%s: Fell out of run loop in onlineNotificationThread\n", _myAddressStr.c_str()); - if (_run == 1) { - fprintf(stderr, "ERROR: %s onlineNotificationThread should still be running! 
Exiting Controller.\n", _myAddressStr.c_str()); - exit(6); - } -} - -void PostgreSQL::onlineNotification_Redis() -{ - _connected = 1; - - char buf[11] = {0}; - std::string controllerId = std::string(_myAddress.toString(buf)); - - while (_run == 1) { - std::unordered_map< std::pair,std::pair,_PairHasher > lastOnline; - { - std::lock_guard l(_lastOnline_l); - lastOnline.swap(_lastOnline); - } - try { - if (!lastOnline.empty()) { - if (_rc->clusterMode) { - auto tx = _cluster->transaction(controllerId, true); - _doRedisUpdate(tx, controllerId, lastOnline); - } else { - auto tx = _redis->transaction(true); - _doRedisUpdate(tx, controllerId, lastOnline); - } - } - } catch (sw::redis::Error &e) { -#ifdef ZT_TRACE - fprintf(stderr, "Error in online notification thread (redis): %s\n", e.what()); -#endif - } - std::this_thread::sleep_for(std::chrono::seconds(10)); - } -} - -void PostgreSQL::_doRedisUpdate(sw::redis::Transaction &tx, std::string &controllerId, - std::unordered_map< std::pair,std::pair,_PairHasher > &lastOnline) - -{ - nlohmann::json jtmp1, jtmp2; - for (auto i=lastOnline.begin(); i != lastOnline.end(); ++i) { - uint64_t nwid_i = i->first.first; - uint64_t memberid_i = i->first.second; - char nwidTmp[64]; - char memTmp[64]; - char ipTmp[64]; - OSUtils::ztsnprintf(nwidTmp,sizeof(nwidTmp), "%.16llx", nwid_i); - OSUtils::ztsnprintf(memTmp,sizeof(memTmp), "%.10llx", memberid_i); - - if (!get(nwid_i, jtmp1, memberid_i, jtmp2)){ - continue; // skip non existent members/networks - } - - std::string networkId(nwidTmp); - std::string memberId(memTmp); - - int64_t ts = i->second.first; - std::string ipAddr = i->second.second.toIpString(ipTmp); - std::string timestamp = std::to_string(ts); - - std::unordered_map record = { - {"id", memberId}, - {"address", ipAddr}, - {"last_updated", std::to_string(ts)} - }; - tx.zadd("nodes-online:{"+controllerId+"}", memberId, ts) - .zadd("nodes-online2:{"+controllerId+"}", networkId+"-"+memberId, ts) - .zadd("network-nodes-online:{"+controllerId+"}:"+networkId, memberId, ts) - .zadd("active-networks:{"+controllerId+"}", networkId, ts) - .sadd("network-nodes-all:{"+controllerId+"}:"+networkId, memberId) - .hmset("member:{"+controllerId+"}:"+networkId+":"+memberId, record.begin(), record.end()); - } - - // expire records from all-nodes and network-nodes member list - uint64_t expireOld = OSUtils::now() - 300000; - - tx.zremrangebyscore("nodes-online:{"+controllerId+"}", sw::redis::RightBoundedInterval(expireOld, sw::redis::BoundType::LEFT_OPEN)); - tx.zremrangebyscore("nodes-online2:{"+controllerId+"}", sw::redis::RightBoundedInterval(expireOld, sw::redis::BoundType::LEFT_OPEN)); - tx.zremrangebyscore("active-networks:{"+controllerId+"}", sw::redis::RightBoundedInterval(expireOld, sw::redis::BoundType::LEFT_OPEN)); - { - std::lock_guard l(_networks_l); - for (const auto &it : _networks) { - uint64_t nwid_i = it.first; - char nwidTmp[64]; - OSUtils::ztsnprintf(nwidTmp,sizeof(nwidTmp), "%.16llx", nwid_i); - tx.zremrangebyscore("network-nodes-online:{"+controllerId+"}:"+nwidTmp, - sw::redis::RightBoundedInterval(expireOld, sw::redis::BoundType::LEFT_OPEN)); - } - } - tx.exec(); -} - - -#endif //ZT_CONTROLLER_USE_LIBPQ +#endif \ No newline at end of file diff --git a/controller/PostgreSQL.hpp b/controller/PostgreSQL.hpp index acbc65a67..41f36e3c5 100644 --- a/controller/PostgreSQL.hpp +++ b/controller/PostgreSQL.hpp @@ -1,183 +1,195 @@ /* - * Copyright (c)2019 ZeroTier, Inc. + * Copyright (c)2025 ZeroTier, Inc. 
* * Use of this software is governed by the Business Source License included * in the LICENSE.TXT file in the project's root directory. * - * Change Date: 2025-01-01 + * Change Date: 2026-01-01 * * On the date above, in accordance with the Business Source License, use * of this software will be governed by version 2.0 of the Apache License. */ /****/ -#include "DB.hpp" - #ifdef ZT_CONTROLLER_USE_LIBPQ -#ifndef ZT_CONTROLLER_LIBPQ_HPP -#define ZT_CONTROLLER_LIBPQ_HPP - -#define ZT_CENTRAL_CONTROLLER_COMMIT_THREADS 4 +#ifndef ZT_CONTROLLER_POSTGRESQL_HPP +#define ZT_CONTROLLER_POSTGRESQL_HPP #include "ConnectionPool.hpp" -#include +#include "DB.hpp" +#include "opentelemetry/trace/provider.h" #include -#include +#include +#include + +namespace ZeroTier { extern "C" { typedef struct pg_conn PGconn; } -namespace ZeroTier { - -struct RedisConfig; - - class PostgresConnection : public Connection { -public: - virtual ~PostgresConnection() { + public: + virtual ~PostgresConnection() + { } std::shared_ptr<pqxx::connection> c; int a; }; - class PostgresConnFactory : public ConnectionFactory { -public: - PostgresConnFactory(std::string &connString) - : m_connString(connString) + public: + PostgresConnFactory(std::string& connString) : m_connString(connString) { } - virtual std::shared_ptr<Connection> create() { + virtual std::shared_ptr<Connection> create() + { + Metrics::conn_counter++; auto c = std::shared_ptr<PostgresConnection>(new PostgresConnection()); c->c = std::make_shared<pqxx::connection>(m_connString); return std::static_pointer_cast<Connection>(c); } -private: + + private: std::string m_connString; }; -class PostgreSQL; +template <typename T> class MemberNotificationReceiver : public pqxx::notification_receiver { + public: + MemberNotificationReceiver(T* p, pqxx::connection& c, const std::string& channel) : pqxx::notification_receiver(c, channel), _psql(p) + { + fprintf(stderr, "initialize MemberNotificationReceiver\n"); + } -class MemberNotificationReceiver : public pqxx::notification_receiver { -public: - MemberNotificationReceiver(PostgreSQL *p, pqxx::connection &c, const std::string &channel); - virtual ~MemberNotificationReceiver() { + virtual ~MemberNotificationReceiver() + { fprintf(stderr, "MemberNotificationReceiver destroyed\n"); } - virtual void operator() (const std::string &payload, int backendPid); -private: - PostgreSQL *_psql; + virtual void operator()(const std::string& payload, int backendPid) + { + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db_member_notification"); + auto span = tracer->StartSpan("db_member_notification::operator()"); + auto scope = tracer->WithActiveSpan(span); + span->SetAttribute("payload", payload); + span->SetAttribute("psqlReady", _psql->isReady()); + + fprintf(stderr, "Member Notification received: %s\n", payload.c_str()); + Metrics::pgsql_mem_notification++; + nlohmann::json tmp(nlohmann::json::parse(payload)); + nlohmann::json& ov = tmp["old_val"]; + nlohmann::json& nv = tmp["new_val"]; + nlohmann::json oldConfig, newConfig; + if (ov.is_object()) + oldConfig = ov; + if (nv.is_object()) + newConfig = nv; + + if (oldConfig.is_object() && newConfig.is_object()) { + _psql->save(newConfig, _psql->isReady()); + fprintf(stderr, "payload sent\n"); + } + else if (newConfig.is_object() && ! oldConfig.is_object()) { + // new member + Metrics::member_count++; + _psql->save(newConfig, _psql->isReady()); + fprintf(stderr, "new member payload sent\n"); + } + else if (!
newConfig.is_object() && oldConfig.is_object()) { + // member delete + uint64_t networkId = OSUtils::jsonIntHex(oldConfig["nwid"], 0ULL); + uint64_t memberId = OSUtils::jsonIntHex(oldConfig["id"], 0ULL); + if (memberId && networkId) { + _psql->eraseMember(networkId, memberId); + fprintf(stderr, "member delete payload sent\n"); + } + } + } + + private: + T* _psql; }; -class NetworkNotificationReceiver : public pqxx::notification_receiver { -public: - NetworkNotificationReceiver(PostgreSQL *p, pqxx::connection &c, const std::string &channel); - virtual ~NetworkNotificationReceiver() { +template <typename T> class NetworkNotificationReceiver : public pqxx::notification_receiver { + public: + NetworkNotificationReceiver(T* p, pqxx::connection& c, const std::string& channel) : pqxx::notification_receiver(c, channel), _psql(p) + { + fprintf(stderr, "initialize NetworkNotificationReceiver\n"); + } + + virtual ~NetworkNotificationReceiver() + { fprintf(stderr, "NetworkNotificationReceiver destroyed\n"); }; - virtual void operator() (const std::string &payload, int packend_pid); -private: - PostgreSQL *_psql; -}; - -/** - * A controller database driver that talks to PostgreSQL - * - * This is for use with ZeroTier Central. Others are free to build and use it - * but be aware that we might change it at any time. - */ -class PostgreSQL : public DB -{ - friend class MemberNotificationReceiver; - friend class NetworkNotificationReceiver; -public: - PostgreSQL(const Identity &myId, const char *path, int listenPort, RedisConfig *rc); - virtual ~PostgreSQL(); - - virtual bool waitForReady(); - virtual bool isReady(); - virtual bool save(nlohmann::json &record,bool notifyListeners); - virtual void eraseNetwork(const uint64_t networkId); - virtual void eraseMember(const uint64_t networkId, const uint64_t memberId); - virtual void nodeIsOnline(const uint64_t networkId, const uint64_t memberId, const InetAddress &physicalAddress); - virtual AuthInfo getSSOAuthInfo(const nlohmann::json &member, const std::string &redirectURL); - -protected: - struct _PairHasher + virtual void operator()(const std::string& payload, int packend_pid) { - inline std::size_t operator()(const std::pair<uint64_t,uint64_t> &p) const { return (std::size_t)(p.first ^ p.second); } - }; - virtual void _memberChanged(nlohmann::json &old,nlohmann::json &memberConfig,bool notifyListeners) { - DB::_memberChanged(old, memberConfig, notifyListeners); + auto provider = opentelemetry::trace::Provider::GetTracerProvider(); + auto tracer = provider->GetTracer("db_network_notification"); + auto span = tracer->StartSpan("db_network_notification::operator()"); + auto scope = tracer->WithActiveSpan(span); + span->SetAttribute("payload", payload); + span->SetAttribute("psqlReady", _psql->isReady()); + + fprintf(stderr, "Network Notification received: %s\n", payload.c_str()); + Metrics::pgsql_net_notification++; + nlohmann::json tmp(nlohmann::json::parse(payload)); + + nlohmann::json& ov = tmp["old_val"]; + nlohmann::json& nv = tmp["new_val"]; + nlohmann::json oldConfig, newConfig; + + if (ov.is_object()) + oldConfig = ov; + if (nv.is_object()) + newConfig = nv; + + if (oldConfig.is_object() && newConfig.is_object()) { + std::string nwid = oldConfig["id"]; + span->SetAttribute("action", "network_change"); + span->SetAttribute("network_id", nwid); + _psql->save(newConfig, _psql->isReady()); + fprintf(stderr, "payload sent\n"); + } + else if (newConfig.is_object() && !
oldConfig.is_object()) { + std::string nwid = newConfig["id"]; + span->SetAttribute("network_id", nwid); + span->SetAttribute("action", "new_network"); + // new network + _psql->save(newConfig, _psql->isReady()); + fprintf(stderr, "new network payload sent\n"); + } + else if (! newConfig.is_object() && oldConfig.is_object()) { + // network delete + span->SetAttribute("action", "delete_network"); + std::string nwid = oldConfig["id"]; + span->SetAttribute("network_id", nwid); + uint64_t networkId = Utils::hexStrToU64(nwid.c_str()); + span->SetAttribute("network_id_int", networkId); + if (networkId) { + _psql->eraseNetwork(networkId); + fprintf(stderr, "network delete payload sent\n"); + } + } } - virtual void _networkChanged(nlohmann::json &old,nlohmann::json &networkConfig,bool notifyListeners) { - DB::_networkChanged(old, networkConfig, notifyListeners); - } - -private: - void initializeNetworks(); - void initializeMembers(); - void heartbeat(); - void membersDbWatcher(); - void _membersWatcher_Postgres(); - void networksDbWatcher(); - void _networksWatcher_Postgres(); - - void _membersWatcher_Redis(); - void _networksWatcher_Redis(); - - void commitThread(); - void onlineNotificationThread(); - void onlineNotification_Postgres(); - void onlineNotification_Redis(); - void _doRedisUpdate(sw::redis::Transaction &tx, std::string &controllerId, - std::unordered_map< std::pair,std::pair,_PairHasher > &lastOnline); - - enum OverrideMode { - ALLOW_PGBOUNCER_OVERRIDE = 0, - NO_OVERRIDE = 1 - }; - - std::shared_ptr > _pool; - - const Identity _myId; - const Address _myAddress; - std::string _myAddressStr; - std::string _connString; - - BlockingQueue< std::pair > _commitQueue; - - std::thread _heartbeatThread; - std::thread _membersDbWatcher; - std::thread _networksDbWatcher; - std::thread _commitThread[ZT_CENTRAL_CONTROLLER_COMMIT_THREADS]; - std::thread _onlineNotificationThread; - - std::unordered_map< std::pair,std::pair,_PairHasher > _lastOnline; - - mutable std::mutex _lastOnline_l; - mutable std::mutex _readyLock; - std::atomic _ready, _connected, _run; - mutable volatile bool _waitNoticePrinted; - - int _listenPort; - uint8_t _ssoPsk[48]; - - RedisConfig *_rc; - std::shared_ptr _redis; - std::shared_ptr _cluster; + private: + T* _psql; }; -} // namespace ZeroTier +struct NodeOnlineRecord { + uint64_t lastSeen; + InetAddress physicalAddress; + std::string osArch; +}; -#endif // ZT_CONTROLLER_LIBPQ_HPP +} // namespace ZeroTier -#endif // ZT_CONTROLLER_USE_LIBPQ +#endif // ZT_CONTROLLER_POSTGRESQL_HPP + +#endif // ZT_CONTROLLER_USE_LIBPQ \ No newline at end of file diff --git a/controller/README.md b/controller/README.md index 0a76ebbc7..41cfd3ff3 100644 --- a/controller/README.md +++ b/controller/README.md @@ -3,7 +3,7 @@ Network Controller Microservice Every ZeroTier virtual network has a *network controller* responsible for admitting members to the network, issuing certificates, and issuing default configuration information. -This is our reference controller implementation and is the same one we use to power our own hosted services at [my.zerotier.com](https://my.zerotier.com/). As of ZeroTier One version 1.2.0 this code is included in normal builds for desktop, laptop, and server (Linux, etc.) targets. +This is our reference controller implementation and is almost the same as the one we use to power our own hosted services at [my.zerotier.com](https://my.zerotier.com/). The only difference is the database backend used. 
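For context on the templated notification receivers added to `controller/PostgreSQL.hpp` above, a minimal usage sketch follows. It is illustrative only and assumes it is compiled inside the controller tree (so `PostgreSQL.hpp` and its dependencies are available): `ExampleDB` is a hypothetical stand-in for the controller's database class, providing only the members the templates actually call (`save`, `isReady`, `eraseMember`, `eraseNetwork`), and the `"member_" + controllerAddress` channel name mirrors the stream the Postgres watcher thread in `PostgreSQL.cpp` listens on.

    #include <pqxx/pqxx>
    #include <nlohmann/json.hpp>
    #include <string>
    #include <cstdint>

    // Hypothetical stand-in for the controller DB; only the members the
    // receiver templates call are defined here.
    struct ExampleDB {
        bool isReady() const { return true; }
        bool save(nlohmann::json& record, bool notifyListeners) { return true; }
        void eraseMember(uint64_t networkId, uint64_t memberId) {}
        void eraseNetwork(uint64_t networkId) {}
    };

    void watchMemberStream(ExampleDB* db, pqxx::connection& conn, const std::string& controllerAddress)
    {
        // Attach the receiver to the "member_<controller address>" channel.
        MemberNotificationReceiver<ExampleDB> receiver(db, conn, "member_" + controllerAddress);

        // Block for up to 5 seconds per iteration; the receiver's operator()
        // runs for each NOTIFY payload delivered on the channel.
        for (;;) {
            conn.await_notification(5, 0);
        }
    }
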
Controller data is stored in JSON format under `controller.d` in the ZeroTier working directory. It can be copied, rsync'd, placed in `git`, etc. The files under `controller.d` should not be modified in place while the controller is running or data loss may result, and if they are edited directly take care not to save corrupt JSON since that can also lead to data loss when the controller is restarted. Going through the API is strongly preferred to directly modifying these files. @@ -19,10 +19,6 @@ Since ZeroTier nodes are mobile and do not need static IPs, implementing high av ZeroTier network controllers can easily be run in Docker or other container systems. Since containers do not need to actually join networks, extra privilege options like "--device=/dev/net/tun --privileged" are not needed. You'll just need to map the local JSON API port of the running controller and allow it to access the Internet (over UDP/9993 at a minimum) so things can reach and query it. -### PostgreSQL Database Implementation - -The default controller stores its data in the filesystem in `controller.d` under ZeroTier's home folder. There's an alternative implementation that stores data in PostgreSQL that can be built with `make central-controller`. Right now this is only guaranteed to build and run on Centos 7 Linux with PostgreSQL 10 installed via the [PostgreSQL Yum Repository](https://www.postgresql.org/download/linux/redhat/) and is designed for use with [ZeroTier Central](https://my.zerotier.com/). You're welcome to use it but we don't "officially" support it for end-user use and it could change at any time. - ### Upgrading from Older (1.1.14 or earlier) Versions Older versions of this code used a SQLite database instead of in-filesystem JSON. A migration utility called `migrate-sqlite` is included here and *must* be used to migrate this data to the new format. If the controller is started with an old `controller.db` in its working directory it will terminate after printing an error to *stderr*. This is done to prevent "surprises" for those running DIY controllers using the old code. @@ -43,210 +39,17 @@ While networks with any valid ID can be added to the controller's database, it w The controller JSON API is *very* sensitive about types. Integers must be integers and strings strings, etc. Incorrect types may be ignored, set to default values, or set to undefined values. -#### `/controller` +Full documentation of the Controller API can be found on our [documentation site](https://docs.zerotier.com/service/v1#tag/controller) - * Purpose: Check for controller function and return controller status - * Methods: GET - * Returns: { object } +### Prometheus Metrics -| Field | Type | Description | Writable | -| ------------------ | ----------- | ------------------------------------------------- | -------- | -| controller | boolean | Always 'true' | no | -| apiVersion | integer | Controller API version, currently 3 | no | -| clock | integer | Current clock on controller, ms since epoch | no | - -#### `/controller/network` - - * Purpose: List all networks hosted by this controller - * Methods: GET - * Returns: [ string, ... ] - -This returns an array of 16-digit hexadecimal network IDs. - -#### `/controller/network/` - - * Purpose: Create, configure, and delete hosted networks - * Methods: GET, POST, DELETE - * Returns: { object } - -By making queries to this path you can create, configure, and delete networks. DELETE is final, so don't do it unless you really mean it. 
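For example, an existing network could be removed with a DELETE request (an illustrative command that assumes the default local API port and the same `X-ZT1-Auth` token convention as the POST example below; substitute a real 16-digit network ID):

`curl -X DELETE --header "X-ZT1-Auth: secret" http://localhost:9993/controller/network/<network ID>`
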
- -When POSTing new networks take care that their IDs are not in use, otherwise you may overwrite an existing one. To create a new network with a random unused ID, POST to `/controller/network/##########______`. The #'s are the controller's 10-digit ZeroTier address and they're followed by six underscores. Check the `nwid` field of the returned JSON object for your network's newly allocated ID. Subsequent POSTs to this network must refer to its actual path. - -Example: - -`curl -X POST --header "X-ZT1-Auth: secret" -d '{"name":"my network"}' http://localhost:9993/controller/network/305f406058______` - -**Network object format:** - -| Field | Type | Description | Writable | -| --------------------- | ------------- | ------------------------------------------------- | -------- | -| id | string | 16-digit network ID | no | -| nwid | string | 16-digit network ID (legacy) | no | -| objtype | string | Always "network" | no | -| name | string | A short name for this network | YES | -| creationTime | integer | Time network record was created (ms since epoch) | no | -| private | boolean | Is access control enabled? | YES | -| enableBroadcast | boolean | Ethernet ff:ff:ff:ff:ff:ff allowed? | YES | -| v4AssignMode | object | IPv4 management and assign options (see below) | YES | -| v6AssignMode | object | IPv6 management and assign options (see below) | YES | -| mtu | integer | Network MTU (default: 2800) | YES | -| multicastLimit | integer | Maximum recipients for a multicast packet | YES | -| revision | integer | Network config revision counter | no | -| routes | array[object] | Managed IPv4 and IPv6 routes; see below | YES | -| ipAssignmentPools | array[object] | IP auto-assign ranges; see below | YES | -| rules | array[object] | Traffic rules; see below | YES | -| capabilities | array[object] | Array of capability objects (see below) | YES | -| tags | array[object] | Array of tag objects (see below) | YES | -| remoteTraceTarget | string | 10-digit ZeroTier ID of remote trace target | YES | -| remoteTraceLevel | integer | Remote trace verbosity level | YES | - - * Networks without rules won't carry any traffic. If you don't specify any on network creation an "accept anything" rule set will automatically be added. - * Managed IP address assignments and IP assignment pools that do not fall within a route configured in `routes` are ignored and won't be used or sent to members. - * The default for `private` is `true` and this is probably what you want. Turning `private` off means *anyone* can join your network with only its 16-digit network ID. It's also impossible to de-authorize a member as these networks don't issue or enforce certificates. Such "party line" networks are used for decentralized app backplanes, gaming, and testing but are otherwise not common. - * Changing the MTU can be disruptive and on some operating systems may require a leave/rejoin of the network or a restart of the ZeroTier service. - -**Auto-Assign Modes:** - -Auto assign modes (`v4AssignMode` and `v6AssignMode`) contain objects that map assignment modes to booleans. - -For IPv4 the only valid setting is `zt` which, if true, causes IPv4 addresses to be auto-assigned from `ipAssignmentPools` to members that do not have an IPv4 assignment. Note that active bridges are exempt and will not get auto-assigned IPs since this can interfere with bridging. (You can still manually assign one if you want.) - -IPv6 includes this option and two others: `6plane` and `rfc4193`. 
These assign private IPv6 addresses to each member based on a deterministic assignment scheme that allows members to emulate IPv6 NDP to skip multicast for better performance and scalability. The `rfc4193` mode gives every member a /128 on a /88 network, while `6plane` gives every member a /80 within a /40 network but uses NDP emulation to route *all* IPs under that /80 to its owner. The `6plane` mode is great for use cases like Docker since it allows every member to assign IPv6 addresses within its /80 that just work instantly and globally across the network. - -**IP assignment pool object format:** - -| Field | Type | Description | -| --------------------- | ------------- | ------------------------------------------------- | -| ipRangeStart | string | Starting IP address in range | -| ipRangeEnd | string | Ending IP address in range (inclusive) | - -Pools are only used if auto-assignment is on for the given address type (IPv4 or IPv6) and if the entire range falls within a managed route. - -IPv6 ranges work just like IPv4 ranges and look like this: - - { - "ipRangeStart": "fd00:feed:feed:beef:0000:0000:0000:0000", - "ipRangeEnd": "fd00:feed:feed:beef:ffff:ffff:ffff:ffff" - } - -(You can POST a shortened-form IPv6 address but the API will always report back un-shortened canonical form addresses.) - -That defines a range within network `fd00:feed:feed:beef::/64` that contains up to 2^64 addresses. If an IPv6 range is large enough, the controller will assign addresses by placing each member's device ID into the address in a manner similar to the RFC4193 and 6PLANE modes. Otherwise it will assign addresses at random. - -**Managed Route object format:** - -| Field | Type | Description | -| --------------------- | ------------- | ------------------------------------------------- | -| target | string | Subnet in CIDR notation | -| via | string/null | Next hop router IP address | - -Managed Route objects look like this: - - { - "target": "10.147.20.0/24" - } - -or - - { - "target": "192.168.168.0/24", - "via": "10.147.20.1" - } - -**Rule object format:** - -Each rule is actually a sequence of zero or more `MATCH_` entries in the rule array followed by an `ACTION_` entry that describes what to do if all the preceding entries match. An `ACTION_` without any preceding `MATCH_` entries is always taken, so setting a single `ACTION_ACCEPT` rule yields a network that allows all traffic. If no rules are present the default action is `ACTION_DROP`. - -Rules are evaluated in the order in which they appear in the array. There is currently a limit of 256 entries per network. Capabilities should be used if a larger and more complex rule set is needed since they allow rules to be grouped by purpose and only shipped to members that need them. - -Each rule table entry has two common fields. - -| Field | Type | Description | -| --------------------- | ------------- | ------------------------------------------------- | -| type | string | Entry type (all caps, case sensitive) | -| not | boolean | If true, MATCHes match if they don't match | - -The following fields may or may not be present depending on rule type: - -| Field | Type | Description | -| --------------------- | ------------- | ------------------------------------------------- | -| zt | string | 10-digit hex ZeroTier address | -| etherType | integer | Ethernet frame type | -| mac | string | Hex MAC address (with or without :'s) | -| ip | string | IPv4 or IPv6 address | -| ipTos | integer | IP type of service | -| ipProtocol | integer | IP protocol (e.g. 
TCP) | -| start | integer | Start of an integer range (e.g. port range) | -| end | integer | End of an integer range (inclusive) | -| id | integer | Tag ID | -| value | integer | Tag value or comparison value | -| mask | integer | Bit mask (for characteristics flags) | - -The entry types and their additional fields are: - -| Entry type | Description | Fields | -| ------------------------------- | ----------------------------------------------------------------- | -------------- | -| `ACTION_DROP` | Drop any packets matching this rule | (none) | -| `ACTION_ACCEPT` | Accept any packets matching this rule | (none) | -| `ACTION_TEE` | Send a copy of this packet to a node (rule parsing continues) | `zt` | -| `ACTION_REDIRECT` | Redirect this packet to another node | `zt` | -| `ACTION_DEBUG_LOG` | Output debug info on match (if built with rules engine debug) | (none) | -| `MATCH_SOURCE_ZEROTIER_ADDRESS` | Match VL1 ZeroTier address of packet sender. | `zt` | -| `MATCH_DEST_ZEROTIER_ADDRESS` | Match VL1 ZeroTier address of recipient | `zt` | -| `MATCH_ETHERTYPE` | Match Ethernet frame type | `etherType` | -| `MATCH_MAC_SOURCE` | Match source Ethernet MAC address | `mac` | -| `MATCH_MAC_DEST` | Match destination Ethernet MAC address | `mac` | -| `MATCH_IPV4_SOURCE` | Match source IPv4 address | `ip` | -| `MATCH_IPV4_DEST` | Match destination IPv4 address | `ip` | -| `MATCH_IPV6_SOURCE` | Match source IPv6 address | `ip` | -| `MATCH_IPV6_DEST` | Match destination IPv6 address | `ip` | -| `MATCH_IP_TOS` | Match IP TOS field | `ipTos` | -| `MATCH_IP_PROTOCOL` | Match IP protocol field | `ipProtocol` | -| `MATCH_IP_SOURCE_PORT_RANGE` | Match a source IP port range | `start`,`end` | -| `MATCH_IP_DEST_PORT_RANGE` | Match a destination IP port range | `start`,`end` | -| `MATCH_CHARACTERISTICS` | Match on characteristics flags | `mask`,`value` | -| `MATCH_FRAME_SIZE_RANGE` | Match a range of Ethernet frame sizes | `start`,`end` | -| `MATCH_TAGS_SAMENESS` | Match if both sides' tags differ by no more than value | `id`,`value` | -| `MATCH_TAGS_BITWISE_AND` | Match if both sides' tags AND to value | `id`,`value` | -| `MATCH_TAGS_BITWISE_OR` | Match if both sides' tags OR to value | `id`,`value` | -| `MATCH_TAGS_BITWISE_XOR` | Match if both sides' tags XOR to value | `id`,`value` | - -Important notes about rules engine behavior: - - * IPv4 and IPv6 IP address rules do not match for frames that are not IPv4 or IPv6 respectively. - * `ACTION_DEBUG_LOG` is a no-op on nodes not built with `ZT_RULES_ENGINE_DEBUGGING` enabled (see Network.cpp). If that is enabled nodes will dump a trace of rule evaluation results to *stdout* when this action is encountered but will otherwise keep evaluating rules. This is used for basic "smoke testing" of the rules engine. - * Multicast packets and packets destined for bridged devices treated a little differently. They are matched more than once. They are matched at the point of send with a NULL ZeroTier destination address, meaning that `MATCH_DEST_ZEROTIER_ADDRESS` is useless. That's because the true VL1 destination is not yet known. Then they are matched again for each true VL1 destination. On these later subsequent matches TEE actions are ignored and REDIRECT rules are interpreted as DROPs. This prevents multiple TEE or REDIRECT packets from being sent to third party devices. - * Rules in capabilities are always matched as if the current device is the sender (inbound == false). A capability specifies sender side rules that can be enforced on both sides. 
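As an illustrative example (not part of the tables above), the following rule set accepts IPv4 frames (Ethernet frame type 0x0800, i.e. 2048 decimal) and drops everything else; the final `ACTION_DROP` has no preceding `MATCH_` entries, so it is always taken if reached:

    [
      { "type": "MATCH_ETHERTYPE", "not": false, "etherType": 2048 },
      { "type": "ACTION_ACCEPT" },
      { "type": "ACTION_DROP" }
    ]
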
- -#### `/controller/network//member` - - * Purpose: Get a set of all members on this network - * Methods: GET - * Returns: { object } - -This returns a JSON object containing all member IDs as keys and their `memberRevisionCounter` values as values. - -#### `/controller/network//member/
` - - * Purpose: Create, authorize, or remove a network member - * Methods: GET, POST, DELETE - * Returns: { object } - -| Field | Type | Description | Writable | -| --------------------- | ------------- | ------------------------------------------------- | -------- | -| id | string | Member's 10-digit ZeroTier address | no | -| address | string | Member's 10-digit ZeroTier address | no | -| nwid | string | 16-digit network ID | no | -| authorized | boolean | Is member authorized? (for private networks) | YES | -| activeBridge | boolean | Member is able to bridge to other Ethernet nets | YES | -| identity | string | Member's public ZeroTier identity (if known) | no | -| ipAssignments | array[string] | Managed IP address assignments | YES | -| revision | integer | Member revision counter | no | -| vMajor | integer | Most recently known major version | no | -| vMinor | integer | Most recently known minor version | no | -| vRev | integer | Most recently known revision | no | -| vProto | integer | Most recently known protocl version | no | - -Note that managed IP assignments are only used if they fall within a managed route. Otherwise they are ignored. +Controller specific metrics are available from the `/metrics` endpoint. +| Metric Name | Type | Description | +| --- | --- | --- | +| controller_network_count | Gauge | number of networks the controller is serving | +| controller_member_count | Gauge | number of network members the controller is serving | +| controller_network_change_count | Counter | number of times a network configuration is changed | +| controller_member_change_count | Counter | number of times a network member configuration is changed | +| controller_member_auth_count | Counter | number of network member auths | +| controller_member_deauth_count | Counter | number of network member deauths| diff --git a/controller/Redis.hpp b/controller/Redis.hpp index 095419b01..c6845d517 100644 --- a/controller/Redis.hpp +++ b/controller/Redis.hpp @@ -5,11 +5,11 @@ namespace ZeroTier { struct RedisConfig { - std::string hostname; - int port; - std::string password; - bool clusterMode; + std::string hostname; + int port; + std::string password; + bool clusterMode; }; -} +} // namespace ZeroTier #endif \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 271f580e7..fdc85f860 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,87 @@ +zerotier-one (1.14.2) unstable; urgency=medium + + * See RELEASE-NOTES.md for release notes. + + -- Adam Ierymenko Wed, 23 Oct 2024 01:00:00 -0700 + +zerotier-one (1.14.1) unstable; urgency=medium + + * See RELEASE-NOTES.md for release notes. + + -- Adam Ierymenko Wed, 11 Sep 2024 01:00:00 -0700 + +zerotier-one (1.14.0) unstable; urgency=medium + + * See RELEASE-NOTES.md for release notes. + + -- Adam Ierymenko Tue, 19 Mar 2024 01:00:00 -0700 + +zerotier-one (1.12.2) unstable; urgency=medium + + * See RELEASE-NOTES.md for release notes. + + -- Adam Ierymenko Tue, 12 Sep 2023 01:00:00 -0700 + +zerotier-one (1.12.1) unstable; urgency=medium + + * See RELEASE-NOTES.md for release notes. + + -- Adam Ierymenko Fri, 25 Aug 2023 01:00:00 -0700 + +zerotier-one (1.12.0) unstable; urgency=medium + + * See RELEASE-NOTES.md for release notes. + + -- Adam Ierymenko Thu, 17 Aug 2023 01:00:00 -0700 + +zerotier-one (1.10.6) unstable; urgency=medium + + * See RELEASE-NOTES.md for release notes. 
+ + -- Adam Ierymenko Tue, 21 Mar 2023 01:00:00 -0700 + +zerotier-one (1.10.5) unstable; urgency=medium + + * See RELEASE-NOTES.md for release notes. + + -- Adam Ierymenko Fri, 10 Mar 2023 01:00:00 -0700 + +zerotier-one (1.10.4) unstable; urgency=medium + + * See RELEASE-NOTES.md for release notes. + + -- Adam Ierymenko Mon, 06 Mar 2023 01:00:00 -0700 + +zerotier-one (1.10.3) unstable; urgency=medium + + * See RELEASE-NOTES.md for release notes. + + -- Adam Ierymenko Sat, 21 Jan 2023 01:00:00 -0700 + +zerotier-one (1.10.2) unstable; urgency=medium + + * See RELEASE-NOTES.md for release notes. + + -- Adam Ierymenko Thu, 13 Oct 2022 01:00:00 -0700 + +zerotier-one (1.10.1) unstable; urgency=medium + + * See RELEASE-NOTES.md for release notes. + + -- Adam Ierymenko Mon, 27 Jun 2022 01:00:00 -0700 + +zerotier-one (1.10.0) unstable; urgency=medium + + * See RELEASE-NOTES.md for release notes. + + -- Adam Ierymenko Fri, 03 Jun 2022 01:00:00 -0700 + +zerotier-one (1.8.10) unstable; urgency=medium + + * See RELEASE-NOTES.md for release notes. + + -- Adam Ierymenko Tue, 10 May 2022 01:00:00 -0700 + zerotier-one (1.8.9) unstable; urgency=medium * See RELEASE-NOTES.md for release notes. diff --git a/debian/compat b/debian/compat index 301160a93..9a037142a 100644 --- a/debian/compat +++ b/debian/compat @@ -1 +1 @@ -8 \ No newline at end of file +10 \ No newline at end of file diff --git a/debian/control b/debian/control index c7bfa47ab..b0c178ab1 100644 --- a/debian/control +++ b/debian/control @@ -3,14 +3,14 @@ Maintainer: Adam Ierymenko Section: net Priority: optional Standards-Version: 3.9.6 -Build-Depends: debhelper (>= 9) +Build-Depends: debhelper Vcs-Git: git://github.com/zerotier/ZeroTierOne Vcs-Browser: https://github.com/zerotier/ZeroTierOne Homepage: https://www.zerotier.com/ Package: zerotier-one Architecture: any -Depends: iproute2, adduser, libstdc++6 (>= 5), openssl +Depends: adduser, libstdc++6 (>= 5), openssl Homepage: https://www.zerotier.com/ Description: ZeroTier network virtualization service ZeroTier One lets you join ZeroTier virtual networks and diff --git a/debian/control.wheezy b/debian/control.wheezy index a8e240847..fbbc40d93 100644 --- a/debian/control.wheezy +++ b/debian/control.wheezy @@ -10,7 +10,7 @@ Homepage: https://www.zerotier.com/ Package: zerotier-one Architecture: any -Depends: ${shlibs:Depends}, ${misc:Depends}, iproute, libstdc++6 +Depends: ${shlibs:Depends}, ${misc:Depends}, libstdc++6 Homepage: https://www.zerotier.com/ Description: ZeroTier network virtualization service ZeroTier One lets you join ZeroTier virtual networks and diff --git a/debian/copyright b/debian/copyright index d22affa29..da3678d52 100644 --- a/debian/copyright +++ b/debian/copyright @@ -12,7 +12,7 @@ License: ZeroTier BSL 1.1 Use of this software is governed by the Business Source License included in the LICENSE.TXT file in the project's root directory. - Change Date: 2025-01-01 + Change Date: 2026-01-01 On the date above, in accordance with the Business Source License, use of this software will be governed by version 2.0 of the Apache License. diff --git a/debian/rules b/debian/rules old mode 100755 new mode 100644 diff --git a/debian/rules.wheezy b/debian/rules.wheezy old mode 100755 new mode 100644 diff --git a/doc/README.md b/doc/README.md index 707c64a9a..681954cc5 100644 --- a/doc/README.md +++ b/doc/README.md @@ -3,4 +3,4 @@ Manual Pages and Other Documentation Use "./build.sh" to build the manual pages. 
-You'll need either NodeJS/npm installed (script will then automatically install the npm *marked-man* package) or */usr/bin/ronn*. The latter is a Ruby program packaged on some distributions as *rubygem-ronn* or *ruby-ronn* or installable as *gem install ronn*. The Node *marked-man* package and *ronn* from rubygems are two roughly equivalent alternatives for compiling MarkDown into roff/man format. +You'll need either Node.js/npm installed (script will then automatically install the npm *marked-man* package) or */usr/bin/ronn*. The latter is a Ruby program packaged on some distributions as *rubygem-ronn* or *ruby-ronn* or installable as *gem install ronn*. The Node *marked-man* package and *ronn* from RubyGems are two roughly equivalent alternatives for compiling Markdown into roff/man format. diff --git a/doc/zerotier-one.8.md b/doc/zerotier-one.8.md index bd31d5c80..4f4655076 100644 --- a/doc/zerotier-one.8.md +++ b/doc/zerotier-one.8.md @@ -81,7 +81,7 @@ These are found in the service's working directory. If the ZeroTier One service is built with the network controller enabled, it periodically backs up its controller.db database in this file (currently every 5 minutes if there have been changes). Since this file is not a currently in use SQLite3 database it's safer to back up without corruption. On new backups the file is rotated out rather than being rewritten in place. * `iddb.d/` (directory): - Caches the public identity of every peer ZeroTier has spoken with in the last 60 days. This directory and its contents can be deleted, but this may result in slower connection initations since it will require that we go out and re-fetch full identities for peers we're speaking to. + Caches the public identity of every peer ZeroTier has spoken with in the last 60 days. This directory and its contents can be deleted, but this may result in slower connection initiations since it will require that we go out and re-fetch full identities for peers we're speaking to. * `networks.d` (directory): This caches network configurations and certificate information for networks you belong to. ZeroTier scans this directory for .conf files on startup to recall its networks, so "touch"ing an empty .conf file in this directory is a way of pre-configuring ZeroTier to join a specific network on startup without using the API. If the config file is empty ZeroTIer will just fetch it from the network's controller. 
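As a concrete illustration of the `networks.d` behavior described in the man page hunk above, pre-seeding a join before the service ever starts can be done by creating an empty `.conf` file named after the network. This is a minimal sketch, assuming the default Linux working directory `/var/lib/zerotier-one` and a purely hypothetical 16-digit network ID `1234567890abcdef`:

    # Run as root before the service starts; the directory may not exist yet.
    mkdir -p /var/lib/zerotier-one/networks.d
    # An empty .conf file is enough: on startup the service treats the file
    # name as a network ID and fetches the full config from that network's
    # controller (hypothetical network ID used here for illustration).
    touch /var/lib/zerotier-one/networks.d/1234567890abcdef.conf

At runtime the same effect is normally achieved with `zerotier-cli join <network ID>`; the file-based approach is mainly useful for provisioning images or containers ahead of time, which is exactly what the Docker entrypoint changes elsewhere in this patch do.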
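Relatedly, the controller README hunk earlier in this patch documents the gauges and counters exposed on the `/metrics` endpoint. The request below is a rough sketch of scraping them by hand; it assumes the service is listening on the default local port 9993 and that the token written to `metricstoken.secret` (or `authtoken.secret`) is accepted via the `X-ZT1-Auth` header, which may differ in your deployment:

    # Hypothetical manual scrape of the local metrics endpoint.
    TOKEN="$(cat /var/lib/zerotier-one/metricstoken.secret)"
    curl -s -H "X-ZT1-Auth: ${TOKEN}" http://localhost:9993/metrics | grep '^controller_'

A Prometheus server would normally be pointed at the same URL instead of curl; the final grep simply filters for the `controller_*` metrics listed in the table.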
diff --git a/dockerbuild/Dockerfile.alpine b/dockerbuild/Dockerfile.alpine deleted file mode 100644 index 04c298d15..000000000 --- a/dockerbuild/Dockerfile.alpine +++ /dev/null @@ -1,23 +0,0 @@ -FROM alpine:3.15 - -ARG go_pkg_url - -RUN apk add --update alpine-sdk linux-headers cmake openssh curl - - -RUN adduser -D -s /bin/ash jenkins && \ - passwd -u jenkins && \ - ssh-keygen -A && \ - mkdir /home/jenkins/.ssh && \ - chown -R jenkins:jenkins /home/jenkins - -RUN curl -s $go_pkg_url -o go.tar.gz && \ - tar -C /usr/local -xzf go.tar.gz - -COPY authorized_keys /home/jenkins/.ssh/authorized_keys -RUN chown -R jenkins:jenkins /home/jenkins/.ssh && \ - chmod 600 /home/jenkins/.ssh/authorized_keys - -EXPOSE 22 -CMD ["/usr/sbin/sshd", "-D"] - diff --git a/dockerbuild/Dockerfile.centos6 b/dockerbuild/Dockerfile.centos6 deleted file mode 100644 index 6b8f023ed..000000000 --- a/dockerbuild/Dockerfile.centos6 +++ /dev/null @@ -1,20 +0,0 @@ -FROM centos:6 - -ARG go_pkg_url - -RUN yum update -y -RUN yum install -y curl git wget openssh-server sudo make rpmdevtools && yum clean all - -RUN curl -s $go_pkg_url -o go.tar.gz && \ - tar -C /usr/local -xzf go.tar.gz && \ - rm go.tar.gz - -RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build - -RUN echo $'\n\ - export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin\n'\ - >> ~/.bash_profile - -RUN mkdir /rpmbuild && chmod 777 /rpmbuild - -CMD ["/usr/sbin/sshd", "-D"] diff --git a/dockerbuild/Dockerfile.centos6-i386 b/dockerbuild/Dockerfile.centos6-i386 deleted file mode 100644 index c6a47072b..000000000 --- a/dockerbuild/Dockerfile.centos6-i386 +++ /dev/null @@ -1,21 +0,0 @@ -FROM i386/centos:6 - -ARG go_pkg_url - -RUN echo i386 > /etc/yum/vars/basearch && echo i686 > /etc/yum/vars/arch - -RUN yum install -y curl git wget openssh-server sudo make rpmdevtools && yum clean all - -RUN curl -s $go_pkg_url -o go.tar.gz && \ - tar -C /usr/local -xzf go.tar.gz && \ - rm go.tar.gz - -RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build - -RUN echo $'\n\ - export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin\n'\ - >> ~/.bash_profile - -RUN mkdir /rpmbuild && chmod 777 /rpmbuild - -CMD ["/usr/sbin/sshd", "-D"] diff --git a/dockerbuild/Dockerfile.centos7 b/dockerbuild/Dockerfile.centos7 deleted file mode 100644 index 751d02c0c..000000000 --- a/dockerbuild/Dockerfile.centos7 +++ /dev/null @@ -1,25 +0,0 @@ -FROM centos:7 - -ARG go_pkg_url - -RUN yum install -y epel-release -RUN yum install -y curl git wget openssh-server sudo make development-tools rpmdevtools clang gcc-c++ ruby ruby-devel centos-release-scl devtoolset-8 llvm-toolset-7 && yum clean all - -RUN curl -s $go_pkg_url -o go.tar.gz && \ - tar -C /usr/local -xzf go.tar.gz && \ - rm go.tar.gz - -RUN wget -qO- "https://cmake.org/files/v3.15/cmake-3.15.1-Linux-x86_64.tar.gz" | tar --strip-components=1 -xz -C /usr/local - -RUN /usr/bin/ssh-keygen -A -RUN useradd jenkins-build - -RUN echo $'\n\ - export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin\n\ - source scl_source enable devtoolset-8 llvm-toolset-7\n'\ - >> ~/.bash_profile - -RUN mkdir /rpmbuild && chmod 777 /rpmbuild - -CMD ["/usr/sbin/sshd", "-D"] - diff --git a/dockerbuild/Dockerfile.centos7-i386 b/dockerbuild/Dockerfile.centos7-i386 deleted file mode 100644 index f7ee9a0c4..000000000 --- a/dockerbuild/Dockerfile.centos7-i386 +++ /dev/null @@ -1,22 +0,0 @@ -FROM centos:7 - -ARG go_pkg_url - -RUN yum install -y curl git wget openssh-server sudo make development-tools rpmdevtools clang gcc-c++ ruby ruby-devel && yum clean all - 
-RUN curl -s $go_pkg_url -o go.tar.gz && \ - tar -C /usr/local -xzf go.tar.gz && \ - rm go.tar.gz - -RUN /usr/bin/ssh-keygen -A - -RUN useradd jenkins-build - -RUN echo $'\n\ - export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin\n'\ - >> ~/.bash_profile - -RUN mkdir /rpmbuild && chmod 777 /rpmbuild - -CMD ["/usr/sbin/sshd", "-D"] - diff --git a/dockerbuild/Dockerfile.centos8 b/dockerbuild/Dockerfile.centos8 deleted file mode 100644 index 106ab5b44..000000000 --- a/dockerbuild/Dockerfile.centos8 +++ /dev/null @@ -1,25 +0,0 @@ -FROM centos:8 - -ARG go_pkg_url - -RUN yum install -y epel-release -RUN yum install -y curl git wget openssh-server sudo make rpmdevtools clang gcc-c++ ruby ruby-devel && yum clean all - -RUN curl -s $go_pkg_url -o go.tar.gz && \ - tar -C /usr/local -xzf go.tar.gz && \ - rm go.tar.gz - -RUN wget -qO- "https://cmake.org/files/v3.15/cmake-3.15.1-Linux-x86_64.tar.gz" | tar --strip-components=1 -xz -C /usr/local - -RUN /usr/bin/ssh-keygen -A -RUN useradd jenkins-build - -RUN echo $'\n\ - export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin\n\ - source scl_source enable devtoolset-8 llvm-toolset-7\n'\ - >> ~/.bash_profile - -RUN mkdir /rpmbuild && chmod 777 /rpmbuild - -CMD ["/usr/sbin/sshd", "-D"] - diff --git a/dockerbuild/Dockerfile.clefos-s390x b/dockerbuild/Dockerfile.clefos-s390x deleted file mode 100644 index 135f70abc..000000000 --- a/dockerbuild/Dockerfile.clefos-s390x +++ /dev/null @@ -1,20 +0,0 @@ -FROM s390x/clefos:7 - -ARG go_pkg_url - -RUN yum install -y curl git wget openssh-server sudo make development-tools rpmdevtools clang gcc-c++ ruby ruby-devel && yum clean all - -RUN curl -s $go_pkg_url -o go.tar.gz && \ - tar -C /usr/local -xzf go.tar.gz && \ - rm go.tar.gz - -RUN /usr/bin/ssh-keygen -A - -RUN echo $'\n\ - export PATH=$PATH:/usr/local/go/bin:$HOME/go/bin\n'\ - >> ~/.bash_profile - -RUN mkdir /rpmbuild && chmod 777 /rpmbuild - -CMD ["/usr/sbin/sshd", "-D"] - diff --git a/dockerbuild/Dockerfile.debian-bullseye b/dockerbuild/Dockerfile.debian-bullseye deleted file mode 100644 index 518a08ae2..000000000 --- a/dockerbuild/Dockerfile.debian-bullseye +++ /dev/null @@ -1,15 +0,0 @@ -FROM debian:bullseye-20191224 - -ARG go_pkg_url - -RUN apt-get update && apt-get upgrade -y && apt-get -y install build-essential curl ca-certificates devscripts dh-systemd - -RUN curl -s -k $go_pkg_url -o go.tar.gz && \ - tar -C /usr/local -xzf go.tar.gz && \ - rm go.tar.gz - -RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build -RUN chmod 777 /home - -CMD ["/usr/bin/sshd", "-D"] - diff --git a/dockerbuild/Dockerfile.debian-buster b/dockerbuild/Dockerfile.debian-buster deleted file mode 100644 index 6504178bc..000000000 --- a/dockerbuild/Dockerfile.debian-buster +++ /dev/null @@ -1,15 +0,0 @@ -FROM debian:buster-20191224 - -ARG go_pkg_url - -RUN apt-get update && apt-get -y install build-essential curl ca-certificates devscripts dh-systemd - -RUN curl -s -k $go_pkg_url -o go.tar.gz && \ - tar -C /usr/local -xzf go.tar.gz && \ - rm go.tar.gz - -RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build -RUN chmod 777 /home - -CMD ["/usr/bin/sshd", "-D"] - diff --git a/dockerbuild/Dockerfile.debian-jessie b/dockerbuild/Dockerfile.debian-jessie deleted file mode 100644 index 31b0a904b..000000000 --- a/dockerbuild/Dockerfile.debian-jessie +++ /dev/null @@ -1,15 +0,0 @@ -FROM debian:jessie-20191224 - -ARG go_pkg_url - -RUN apt-get update && apt-get -y install build-essential curl ca-certificates devscripts dh-systemd - -RUN curl -s -k $go_pkg_url -o 
go.tar.gz && \ - tar -C /usr/local -xzf go.tar.gz && \ - rm go.tar.gz - -RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build -RUN chmod 777 /home - -CMD ["/usr/bin/sshd", "-D"] - diff --git a/dockerbuild/Dockerfile.debian-sid b/dockerbuild/Dockerfile.debian-sid deleted file mode 100644 index 5fde10d5d..000000000 --- a/dockerbuild/Dockerfile.debian-sid +++ /dev/null @@ -1,15 +0,0 @@ -FROM debian:sid-20191224 - -ARG go_pkg_url - -RUN apt-get update && apt-get upgrade -y && apt-get -y install build-essential curl ca-certificates devscripts dh-systemd - -RUN curl -s -k $go_pkg_url -o go.tar.gz && \ - tar -C /usr/local -xzf go.tar.gz && \ - rm go.tar.gz - -RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build -RUN chmod 777 /home - -CMD ["/usr/bin/sshd", "-D"] - diff --git a/dockerbuild/Dockerfile.debian-stretch b/dockerbuild/Dockerfile.debian-stretch deleted file mode 100644 index 76342fb8d..000000000 --- a/dockerbuild/Dockerfile.debian-stretch +++ /dev/null @@ -1,15 +0,0 @@ -FROM debian:stretch-20191224 - -ARG go_pkg_url - -RUN apt-get update && apt-get -y install build-essential curl ca-certificates devscripts dh-systemd - -RUN curl -s -k $go_pkg_url -o go.tar.gz && \ - tar -C /usr/local -xzf go.tar.gz && \ - rm go.tar.gz - -RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build -RUN chmod 777 /home - -CMD ["/usr/bin/sshd", "-D"] - diff --git a/dockerbuild/Dockerfile.debian-wheezy b/dockerbuild/Dockerfile.debian-wheezy deleted file mode 100644 index 7322959e6..000000000 --- a/dockerbuild/Dockerfile.debian-wheezy +++ /dev/null @@ -1,23 +0,0 @@ -FROM debian:wheezy-20190228 - -ARG go_pkg_url - -RUN echo "deb http://archive.debian.org/debian/ wheezy contrib main non-free" > /etc/apt/sources.list && \ - echo "deb-src http://archive.debian.org/debian/ wheezy contrib main non-free" >> /etc/apt/sources.list && \ - apt-get update && apt-get install -y apt-utils && \ - apt-get install -y --force-yes \ - curl gcc make sudo expect gnupg fakeroot perl-base=5.14.2-21+deb7u3 perl \ - libc-bin=2.13-38+deb7u10 libc6=2.13-38+deb7u10 libc6-dev build-essential \ - cdbs devscripts equivs automake autoconf libtool libaudit-dev selinux-basics \ - libdb5.1=5.1.29-5 libdb5.1-dev libssl1.0.0=1.0.1e-2+deb7u20 procps gawk libsigsegv2 \ - curl ca-certificates devscripts - -RUN curl -s -k $go_pkg_url -o go.tar.gz && \ - tar -C /usr/local -xzf go.tar.gz && \ - rm go.tar.gz - -RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build -RUN chmod 777 /home - -CMD ["/usr/bin/sshd", "-D"] - diff --git a/dockerbuild/Dockerfile.kali-rolling b/dockerbuild/Dockerfile.kali-rolling deleted file mode 100644 index 2825dffe4..000000000 --- a/dockerbuild/Dockerfile.kali-rolling +++ /dev/null @@ -1,15 +0,0 @@ -FROM kalilinux/kali-rolling:latest - -ARG go_pkg_url - -RUN apt-get update && apt-get upgrade -y && apt-get -y install build-essential curl ca-certificates devscripts dh-systemd cmake - -RUN curl -s -k $go_pkg_url -o go.tar.gz && \ - tar -C /usr/local -xzf go.tar.gz && \ - rm go.tar.gz - -RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build -RUN chmod 777 /home - -CMD ["/usr/bin/sshd", "-D"] - diff --git a/dockerbuild/Dockerfile.ubuntu-bionic b/dockerbuild/Dockerfile.ubuntu-bionic deleted file mode 100644 index 2bbc5b02e..000000000 --- a/dockerbuild/Dockerfile.ubuntu-bionic +++ /dev/null @@ -1,15 +0,0 @@ -FROM ubuntu:bionic-20200112 - -ARG go_pkg_url - -RUN apt-get update && apt-get upgrade -y && apt-get -y install 
build-essential curl ca-certificates devscripts dh-systemd - -RUN curl -s -k $go_pkg_url -o go.tar.gz && \ - tar -C /usr/local -xzf go.tar.gz && \ - rm go.tar.gz - -RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build -RUN chmod 777 /home - -CMD ["/usr/bin/sshd", "-D"] - diff --git a/dockerbuild/Dockerfile.ubuntu-eoan b/dockerbuild/Dockerfile.ubuntu-eoan deleted file mode 100644 index 0fd5e33ca..000000000 --- a/dockerbuild/Dockerfile.ubuntu-eoan +++ /dev/null @@ -1,15 +0,0 @@ -FROM ubuntu:eoan-20200114 - -ARG go_pkg_url - -RUN apt-get update && apt-get upgrade -y && apt-get -y install build-essential curl ca-certificates devscripts dh-systemd - -RUN curl -s -k $go_pkg_url -o go.tar.gz && \ - tar -C /usr/local -xzf go.tar.gz && \ - rm go.tar.gz - -RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build -RUN chmod 777 /home - -CMD ["/usr/bin/sshd", "-D"] - diff --git a/dockerbuild/Dockerfile.ubuntu-trusty b/dockerbuild/Dockerfile.ubuntu-trusty deleted file mode 100644 index 52a80c949..000000000 --- a/dockerbuild/Dockerfile.ubuntu-trusty +++ /dev/null @@ -1,15 +0,0 @@ -FROM ubuntu:trusty-20191217 - -ARG go_pkg_url - -RUN apt-get update && apt-get upgrade -y && apt-get -y install build-essential curl ca-certificates devscripts dh-systemd - -RUN curl -s -k $go_pkg_url -o go.tar.gz && \ - tar -C /usr/local -xzf go.tar.gz && \ - rm go.tar.gz - -RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build -RUN chmod 777 /home - -CMD ["/usr/bin/sshd", "-D"] - diff --git a/dockerbuild/Dockerfile.ubuntu-xenial b/dockerbuild/Dockerfile.ubuntu-xenial deleted file mode 100644 index bda74f477..000000000 --- a/dockerbuild/Dockerfile.ubuntu-xenial +++ /dev/null @@ -1,15 +0,0 @@ -FROM ubuntu:xenial-20200114 - -ARG go_pkg_url - -RUN apt-get update && apt-get -y install build-essential curl ca-certificates devscripts dh-systemd - -RUN curl -s -k $go_pkg_url -o go.tar.gz && \ - tar -C /usr/local -xzf go.tar.gz && \ - rm go.tar.gz - -RUN groupadd -g 1000 jenkins-build && useradd -u 1000 -g 1000 jenkins-build -RUN chmod 777 /home - -CMD ["/usr/bin/sshd", "-D"] - diff --git a/dockerbuild/Makefile b/dockerbuild/Makefile deleted file mode 100644 index 543d45d5a..000000000 --- a/dockerbuild/Makefile +++ /dev/null @@ -1,108 +0,0 @@ -.PHONY: all - -all: alpine centos debian ubuntu kali-rolling - -alpine: - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-arm64.tar.gz" --platform linux/arm64 -f Dockerfile.alpine . -t ztbuild/alpine-arm64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.alpine . -t ztbuild/alpine-i386 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.alpine . -t ztbuild/alpine-amd64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v6 -f Dockerfile.alpine . -t ztbuild/alpine-armel --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.alpine . -t ztbuild/alpine-armhf --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-ppc64le.tar.gz" --platform linux/ppc64le -f Dockerfile.alpine . 
-t ztbuild/alpine-ppc64le --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-s390x.tar.gz" --platform linux/s390x -f Dockerfile.alpine . -t ztbuild/alpine-s390x --load - -centos: - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.centos7 . -t ztbuild/centos7-amd64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.centos7-i386 . -t ztbuild/centos7-i386 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.centos6 . -t ztbuild/centos6-amd64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.centos6-i386 . -t ztbuild/centos6-i386 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.centos8 . -t ztbuild/centos8-amd64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-arm64.tar.gz" --platform linux/arm64 -f Dockerfile.centos8 . -t ztbuild/centos8-arm64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-ppc64le.tar.gz" --platform linux/ppc64le -f Dockerfile.centos8 . -t ztbuild/centos8-ppc64le --load - -debian: debian-wheezy debian-jessie debian-buster debian-stretch debian-bullseye debian-sid - -debian-wheezy: - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.debian-wheezy . -t ztbuild/debian-wheezy-amd64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.debian-wheezy . -t ztbuild/debian-wheezy-armhf --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v6 -f Dockerfile.debian-wheezy . -t ztbuild/debian-wheezy-armel --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.debian-wheezy . -t ztbuild/debian-wheezy-i386 --load - -debian-jessie: - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.debian-jessie . -t ztbuild/debian-jessie-amd64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.debian-jessie . -t ztbuild/debian-jessie-armhf --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v6 -f Dockerfile.debian-jessie . -t ztbuild/debian-jessie-armel --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.debian-jessie . -t ztbuild/debian-jessie-i386 --load - -debian-buster: - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.debian-buster . -t ztbuild/debian-buster-amd64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-arm64.tar.gz" --platform linux/arm64 -f Dockerfile.debian-buster . 
-t ztbuild/debian-buster-arm64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v6 -f Dockerfile.debian-buster . -t ztbuild/debian-buster-armel --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.debian-buster . -t ztbuild/debian-buster-armhf --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.debian-buster . -t ztbuild/debian-buster-i386 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-ppc64le.tar.gz" --platform linux/ppc64le -f Dockerfile.debian-buster . -t ztbuild/debian-buster-ppc64le --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-s390x.tar.gz" --platform linux/s390x -f Dockerfile.debian-buster . -t ztbuild/debian-buster-s390x --load - -debian-stretch: - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.debian-stretch . -t ztbuild/debian-stretch-amd64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-arm64.tar.gz" --platform linux/arm64 -f Dockerfile.debian-stretch . -t ztbuild/debian-stretch-arm64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v6 -f Dockerfile.debian-stretch . -t ztbuild/debian-stretch-armel --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.debian-stretch . -t ztbuild/debian-stretch-armhf --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.debian-stretch . -t ztbuild/debian-stretch-i386 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-ppc64le.tar.gz" --platform linux/ppc64le -f Dockerfile.debian-stretch . -t ztbuild/debian-stretch-ppc64le --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-s390x.tar.gz" --platform linux/s390x -f Dockerfile.debian-stretch . -t ztbuild/debian-stretch-s390x --load - -debian-bullseye: - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.debian-bullseye . -t ztbuild/debian-bullseye-amd64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-arm64.tar.gz" --platform linux/arm64 -f Dockerfile.debian-bullseye . -t ztbuild/debian-bullseye-arm64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v6 -f Dockerfile.debian-bullseye . -t ztbuild/debian-bullseye-armel --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.debian-bullseye . -t ztbuild/debian-bullseye-armhf --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.debian-bullseye . -t ztbuild/debian-bullseye-i386 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-ppc64le.tar.gz" --platform linux/ppc64le -f Dockerfile.debian-bullseye . 
-t ztbuild/debian-bullseye-ppc64le --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-s390x.tar.gz" --platform linux/s390x -f Dockerfile.debian-bullseye . -t ztbuild/debian-bullseye-s390x --load - -debian-sid: - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.debian-sid . -t ztbuild/debian-sid-amd64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-arm64.tar.gz" --platform linux/arm64 -f Dockerfile.debian-sid . -t ztbuild/debian-sid-arm64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v6 -f Dockerfile.debian-sid . -t ztbuild/debian-sid-armel --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.debian-sid . -t ztbuild/debian-sid-armhf --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.debian-sid . -t ztbuild/debian-sid-i386 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-ppc64le.tar.gz" --platform linux/ppc64le -f Dockerfile.debian-sid . -t ztbuild/debian-sid-ppc64le --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-s390x.tar.gz" --platform linux/s390x -f Dockerfile.debian-sid . -t ztbuild/debian-sid-s390x --load - -ubuntu: ubuntu-trusty ubuntu-xenial ubuntu-bionic ubuntu-eoan - -ubuntu-trusty: - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.ubuntu-trusty . -t ztbuild/ubuntu-trusty-amd64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-arm64.tar.gz" --platform linux/arm64 -f Dockerfile.ubuntu-trusty . -t ztbuild/ubuntu-trusty-arm64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.ubuntu-trusty . -t ztbuild/ubuntu-trusty-armhf --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.ubuntu-trusty . -t ztbuild/ubuntu-trusty-i386 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-ppc64le.tar.gz" --platform linux/ppc64le -f Dockerfile.ubuntu-trusty . -t ztbuild/ubuntu-trusty-ppc64le --load - -ubuntu-xenial: - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.ubuntu-xenial . -t ztbuild/ubuntu-xenial-amd64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-arm64.tar.gz" --platform linux/arm64 -f Dockerfile.ubuntu-xenial . -t ztbuild/ubuntu-xenial-arm64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.ubuntu-xenial . -t ztbuild/ubuntu-xenial-armhf --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.ubuntu-xenial . -t ztbuild/ubuntu-xenial-i386 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-ppc64le.tar.gz" --platform linux/ppc64le -f Dockerfile.ubuntu-xenial . 
-t ztbuild/ubuntu-xenial-ppc64le --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-s390x.tar.gz" --platform linux/s390x -f Dockerfile.ubuntu-xenial . -t ztbuild/ubuntu-xenial-s390x --load - -ubuntu-bionic: - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.ubuntu-bionic . -t ztbuild/ubuntu-bionic-amd64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-arm64.tar.gz" --platform linux/arm64 -f Dockerfile.ubuntu-bionic . -t ztbuild/ubuntu-bionic-arm64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.ubuntu-bionic . -t ztbuild/ubuntu-bionic-armhf --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.ubuntu-bionic . -t ztbuild/ubuntu-bionic-i386 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-ppc64le.tar.gz" --platform linux/ppc64le -f Dockerfile.ubuntu-bionic . -t ztbuild/ubuntu-bionic-ppc64le --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-s390x.tar.gz" --platform linux/s390x -f Dockerfile.ubuntu-bionic . -t ztbuild/ubuntu-bionic-s390x --load - -ubuntu-eoan: - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.ubuntu-eoan . -t ztbuild/ubuntu-eoan-amd64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-arm64.tar.gz" --platform linux/arm64 -f Dockerfile.ubuntu-eoan . -t ztbuild/ubuntu-eoan-arm64 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz" --platform linux/arm/v7 -f Dockerfile.ubuntu-eoan . -t ztbuild/ubuntu-eoan-armhf --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-386.tar.gz" --platform linux/386 -f Dockerfile.ubuntu-eoan . -t ztbuild/ubuntu-eoan-i386 --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-ppc64le.tar.gz" --platform linux/ppc64le -f Dockerfile.ubuntu-eoan . -t ztbuild/ubuntu-eoan-ppc64le --load - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-s390x.tar.gz" --platform linux/s390x -f Dockerfile.ubuntu-eoan . -t ztbuild/ubuntu-eoan-s390x --load - -kali-rolling: - @docker buildx build --build-arg go_pkg_url="https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz" --platform linux/amd64 -f Dockerfile.kali-rolling . 
-t ztbuild/kali-rolling-amd64 --load - diff --git a/dockerbuild/authorized_keys b/dockerbuild/authorized_keys deleted file mode 100644 index 0dd35c7b7..000000000 --- a/dockerbuild/authorized_keys +++ /dev/null @@ -1,2 +0,0 @@ -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8hgysbj2Luu3aN/Ya2wr4Y9LpUGqWWfn3k+UhIwOIE/Kd7/YpLjxHpseUA1hLnj9kHFShH8eiqoY0S6EDIYrTUwbXMMu8454lX/LcJOCJ9RlSeMMf7vpkxcI7cVRgOA430a3FR7M0Q8vKlyJzxxAEjMIxMyuVyinknfanNt+sQFiDUvOXoacqgZAHBWMlO7wOPyHWHNOzy7g8N0dHiJveKZqX/UUwuqJuS6UBq7MBMSU6TcMvJwHr+AbNvfyIUWCqlTByqFL9cmviRbIvQanxoRxi/5fVUGhtVBXUYvbCdFxDw5W2Svo9fDMm4Z5xWAD7rY1J3AM15RVyRTTtYvgD - diff --git a/dockerbuild/pipelint.sh b/dockerbuild/pipelint.sh deleted file mode 100644 index 7fbd0de82..000000000 --- a/dockerbuild/pipelint.sh +++ /dev/null @@ -1,13 +0,0 @@ -# curl (REST API) -# User -JENKINS_USER=grant - -# Api key from "/me/configure" on my Jenkins instance -JENKINS_USER_KEY=11edf2d49321321119712c46c6349eaad7 - -# Url for my local Jenkins instance. -JENKINS_URL=http://$JENKINS_USER:$JENKINS_USER_KEY@jenkins.int.zerotier.com - -# JENKINS_CRUMB is needed if your Jenkins master has CRSF protection enabled (which it should) -JENKINS_CRUMB=`curl "$JENKINS_URL/crumbIssuer/api/xml?xpath=concat(//crumbRequestField,\":\",//crumb)"` -curl -X POST -H $JENKINS_CRUMB -F "jenkinsfile= "/var/lib/zerotier-one/$file" + echo -n "$content" > "/var/lib/zerotier-one/$file" chmod "$mode" "/var/lib/zerotier-one/$file" } if [ "x$ZEROTIER_API_SECRET" != "x" ] then mkztfile authtoken.secret 0600 "$ZEROTIER_API_SECRET" + mkztfile metricstoken.secret 0600 "$ZEROTIER_API_SECRET" fi if [ "x$ZEROTIER_IDENTITY_PUBLIC" != "x" ] @@ -30,6 +31,11 @@ then mkztfile identity.secret 0600 "$ZEROTIER_IDENTITY_SECRET" fi +if [ "x$ZEROTIER_LOCAL_CONF" != "x" ] +then + mkztfile local.conf 0644 "$ZEROTIER_LOCAL_CONF" +fi + mkztfile zerotier-one.port 0600 "9993" killzerotier() { @@ -71,13 +77,23 @@ trap killzerotier INT TERM log "Configuring networks to join" mkdir -p /var/lib/zerotier-one/networks.d -log_params "Joining networks:" $@ +log_params "Joining networks from command line:" $@ for i in "$@" do log_detail_params "Configuring join:" "$i" touch "/var/lib/zerotier-one/networks.d/${i}.conf" done +if [ "x$ZEROTIER_JOIN_NETWORKS" != "x" ] +then + log_params "Joining networks from environment:" $ZEROTIER_JOIN_NETWORKS + for i in $ZEROTIER_JOIN_NETWORKS + do + log_detail_params "Configuring join:" "$i" + touch "/var/lib/zerotier-one/networks.d/${i}.conf" + done +fi + log "Starting ZeroTier" nohup /usr/sbin/zerotier-one & @@ -97,7 +113,7 @@ log_params "Writing healthcheck for networks:" $@ cat >/healthcheck.sh < - - - - CFBundleDevelopmentRegion - English - CFBundleExecutable - tap - CFBundleIdentifier - com.zerotier.tap - CFBundleInfoDictionaryVersion - 6.0 - CFBundleName - tap - CFBundlePackageType - KEXT - CFBundleShortVersionString - 20150118 - CFBundleSignature - ???? 
- CFBundleVersion - 1.0 - OSBundleLibraries - - com.apple.kpi.mach - 8.0 - com.apple.kpi.bsd - 8.0 - com.apple.kpi.libkern - 8.0 - com.apple.kpi.unsupported - 8.0 - - - - diff --git a/ext/bin/tap-mac/tap.kext/Contents/MacOS/tap b/ext/bin/tap-mac/tap.kext/Contents/MacOS/tap deleted file mode 100755 index 48bf96255..000000000 Binary files a/ext/bin/tap-mac/tap.kext/Contents/MacOS/tap and /dev/null differ diff --git a/ext/bin/tap-mac/tap.kext/Contents/_CodeSignature/CodeResources b/ext/bin/tap-mac/tap.kext/Contents/_CodeSignature/CodeResources deleted file mode 100644 index 0710b4008..000000000 --- a/ext/bin/tap-mac/tap.kext/Contents/_CodeSignature/CodeResources +++ /dev/null @@ -1,105 +0,0 @@ - - - - - files - - files2 - - rules - - ^Resources/ - - ^Resources/.*\.lproj/ - - optional - - weight - 1000 - - ^Resources/.*\.lproj/locversion.plist$ - - omit - - weight - 1100 - - ^version.plist$ - - - rules2 - - .*\.dSYM($|/) - - weight - 11 - - ^(.*/)?\.DS_Store$ - - omit - - weight - 2000 - - ^(Frameworks|SharedFrameworks|PlugIns|Plug-ins|XPCServices|Helpers|MacOS|Library/(Automator|Spotlight|LoginItems))/ - - nested - - weight - 10 - - ^.* - - ^Info\.plist$ - - omit - - weight - 20 - - ^PkgInfo$ - - omit - - weight - 20 - - ^Resources/ - - weight - 20 - - ^Resources/.*\.lproj/ - - optional - - weight - 1000 - - ^Resources/.*\.lproj/locversion.plist$ - - omit - - weight - 1100 - - ^[^/]+$ - - nested - - weight - 10 - - ^embedded\.provisionprofile$ - - weight - 20 - - ^version\.plist$ - - weight - 20 - - - - diff --git a/ext/bin/tap-windows-ndis6/arm64/zttap300.cat b/ext/bin/tap-windows-ndis6/arm64/zttap300.cat new file mode 100644 index 000000000..906a15a4b Binary files /dev/null and b/ext/bin/tap-windows-ndis6/arm64/zttap300.cat differ diff --git a/ext/bin/tap-windows-ndis6/x64.old/zttap300.inf b/ext/bin/tap-windows-ndis6/arm64/zttap300.inf similarity index 91% rename from ext/bin/tap-windows-ndis6/x64.old/zttap300.inf rename to ext/bin/tap-windows-ndis6/arm64/zttap300.inf index 453797b38..8e74daab1 100644 --- a/ext/bin/tap-windows-ndis6/x64.old/zttap300.inf +++ b/ext/bin/tap-windows-ndis6/arm64/zttap300.inf @@ -1,10 +1,15 @@ +; +; **************************************************************************** +; * Copyright (C) 2002-2014 OpenVPN Technologies, Inc. * +; * This program is free software; you can redistribute it and/or modify * +; * it under the terms of the GNU General Public License version 2 * +; * as published by the Free Software Foundation. * +; **************************************************************************** +; + ; ; ZeroTier One Virtual Network Port NDIS6 Driver ; -; Based on the OpenVPN tap-windows6 driver version 9.21.1 git -; commit 48f027cfca52b16b5fd23d82e6016ed8a91fc4d3. -; See: https://github.com/OpenVPN/tap-windows6 -; ; Modified by ZeroTier, Inc. - https://www.zerotier.com/ ; ; (1) Comment out 'tun' functionality and related features such as DHCP @@ -15,14 +20,6 @@ ; (5) Rename/rebrand driver as ZeroTier network port driver. ; ; Original copyright below. Modifications released under GPLv2 as well. -; -; **************************************************************************** -; * Copyright (C) 2002-2014 OpenVPN Technologies, Inc. * -; * This program is free software; you can redistribute it and/or modify * -; * it under the terms of the GNU General Public License version 2 * -; * as published by the Free Software Foundation. 
* -; **************************************************************************** -; [Version] Signature = "$Windows NT$" @@ -30,17 +27,18 @@ CatalogFile = zttap300.cat ClassGUID = {4d36e972-e325-11ce-bfc1-08002be10318} Provider = %Provider% Class = Net -DriverVer=08/13/2015,6.2.9200.20557 +DriverVer=11/24/2020,3.00.00.1 [Strings] -DeviceDescription = "ZeroTier One Virtual Port" -Provider = "ZeroTier Networks LLC" ; We're ZeroTier, Inc. now but kernel mode certs are $300+ so fuqdat. +DeviceDescription = "ZeroTier Virtual Port" +Provider = "ZeroTier" -; To build for x86, take NTamd64 off this and off the named section manually, build, then put it back! [Manufacturer] +%Provider%=zttap300,NTx86 %Provider%=zttap300,NTamd64 +%Provider%=zttap300,NTarm64 -[zttap300] +[zttap300.NTx86] %DeviceDescription% = zttap300.ndi, root\zttap300 ; Root enumerated %DeviceDescription% = zttap300.ndi, zttap300 ; Legacy @@ -48,6 +46,10 @@ Provider = "ZeroTier Networks LLC" ; We're ZeroTier, Inc. now but kernel mode ce %DeviceDescription% = zttap300.ndi, root\zttap300 ; Root enumerated %DeviceDescription% = zttap300.ndi, zttap300 ; Legacy +[zttap300.NTarm64] +%DeviceDescription% = zttap300.ndi, root\zttap300 ; Root enumerated +%DeviceDescription% = zttap300.ndi, zttap300 ; Legacy + ;----------------- Characteristics ------------ ; NCF_PHYSICAL = 0x04 ; NCF_VIRTUAL = 0x01 diff --git a/ext/bin/tap-windows-ndis6/arm64/zttap300.sys b/ext/bin/tap-windows-ndis6/arm64/zttap300.sys new file mode 100644 index 000000000..ae1a16f4b Binary files /dev/null and b/ext/bin/tap-windows-ndis6/arm64/zttap300.sys differ diff --git a/ext/bin/tap-windows-ndis6/certutil.exe b/ext/bin/tap-windows-ndis6/certutil.exe deleted file mode 100644 index b9a0a09c5..000000000 Binary files a/ext/bin/tap-windows-ndis6/certutil.exe and /dev/null differ diff --git a/ext/bin/tap-windows-ndis6/x64.old/ZeroTierOne_NDIS6_x64.msi b/ext/bin/tap-windows-ndis6/x64.old/ZeroTierOne_NDIS6_x64.msi deleted file mode 100644 index 17fe08c24..000000000 Binary files a/ext/bin/tap-windows-ndis6/x64.old/ZeroTierOne_NDIS6_x64.msi and /dev/null differ diff --git a/ext/bin/tap-windows-ndis6/x64.old/zttap300.cat b/ext/bin/tap-windows-ndis6/x64.old/zttap300.cat deleted file mode 100644 index 8b9114c71..000000000 Binary files a/ext/bin/tap-windows-ndis6/x64.old/zttap300.cat and /dev/null differ diff --git a/ext/bin/tap-windows-ndis6/x64.old/zttap300.sys b/ext/bin/tap-windows-ndis6/x64.old/zttap300.sys deleted file mode 100644 index 3d846a53a..000000000 Binary files a/ext/bin/tap-windows-ndis6/x64.old/zttap300.sys and /dev/null differ diff --git a/ext/bin/tap-windows-ndis6/x64/ZeroTierOne_NDIS6_x64.msi b/ext/bin/tap-windows-ndis6/x64/ZeroTierOne_NDIS6_x64.msi deleted file mode 100644 index e08388d8f..000000000 Binary files a/ext/bin/tap-windows-ndis6/x64/ZeroTierOne_NDIS6_x64.msi and /dev/null differ diff --git a/ext/bin/tap-windows-ndis6/x86.old/ZeroTierOne_NDIS6_x86.msi b/ext/bin/tap-windows-ndis6/x86.old/ZeroTierOne_NDIS6_x86.msi deleted file mode 100644 index 415774c9d..000000000 Binary files a/ext/bin/tap-windows-ndis6/x86.old/ZeroTierOne_NDIS6_x86.msi and /dev/null differ diff --git a/ext/bin/tap-windows-ndis6/x86.old/zttap300.cat b/ext/bin/tap-windows-ndis6/x86.old/zttap300.cat deleted file mode 100644 index 44347f54f..000000000 Binary files a/ext/bin/tap-windows-ndis6/x86.old/zttap300.cat and /dev/null differ diff --git a/ext/bin/tap-windows-ndis6/x86.old/zttap300.inf b/ext/bin/tap-windows-ndis6/x86.old/zttap300.inf deleted file mode 100644 index 453797b38..000000000 
--- a/ext/bin/tap-windows-ndis6/x86.old/zttap300.inf +++ /dev/null @@ -1,143 +0,0 @@ -; -; ZeroTier One Virtual Network Port NDIS6 Driver -; -; Based on the OpenVPN tap-windows6 driver version 9.21.1 git -; commit 48f027cfca52b16b5fd23d82e6016ed8a91fc4d3. -; See: https://github.com/OpenVPN/tap-windows6 -; -; Modified by ZeroTier, Inc. - https://www.zerotier.com/ -; -; (1) Comment out 'tun' functionality and related features such as DHCP -; emulation, since we don't use any of that. Just want straight 'tap'. -; (2) Added custom IOCTL to enumerate L2 multicast memberships. -; (3) Increase maximum number of multicast memberships to 128. -; (4) Set default and max device MTU to 2800. -; (5) Rename/rebrand driver as ZeroTier network port driver. -; -; Original copyright below. Modifications released under GPLv2 as well. -; -; **************************************************************************** -; * Copyright (C) 2002-2014 OpenVPN Technologies, Inc. * -; * This program is free software; you can redistribute it and/or modify * -; * it under the terms of the GNU General Public License version 2 * -; * as published by the Free Software Foundation. * -; **************************************************************************** -; - -[Version] -Signature = "$Windows NT$" -CatalogFile = zttap300.cat -ClassGUID = {4d36e972-e325-11ce-bfc1-08002be10318} -Provider = %Provider% -Class = Net -DriverVer=08/13/2015,6.2.9200.20557 - -[Strings] -DeviceDescription = "ZeroTier One Virtual Port" -Provider = "ZeroTier Networks LLC" ; We're ZeroTier, Inc. now but kernel mode certs are $300+ so fuqdat. - -; To build for x86, take NTamd64 off this and off the named section manually, build, then put it back! -[Manufacturer] -%Provider%=zttap300,NTamd64 - -[zttap300] -%DeviceDescription% = zttap300.ndi, root\zttap300 ; Root enumerated -%DeviceDescription% = zttap300.ndi, zttap300 ; Legacy - -[zttap300.NTamd64] -%DeviceDescription% = zttap300.ndi, root\zttap300 ; Root enumerated -%DeviceDescription% = zttap300.ndi, zttap300 ; Legacy - -;----------------- Characteristics ------------ -; NCF_PHYSICAL = 0x04 -; NCF_VIRTUAL = 0x01 -; NCF_SOFTWARE_ENUMERATED = 0x02 -; NCF_HIDDEN = 0x08 -; NCF_NO_SERVICE = 0x10 -; NCF_HAS_UI = 0x80 -;----------------- Characteristics ------------ -[zttap300.ndi] -CopyFiles = zttap300.driver, zttap300.files -AddReg = zttap300.reg -AddReg = zttap300.params.reg -Characteristics = 0x81 -*IfType = 0x6 ; IF_TYPE_ETHERNET_CSMACD -*MediaType = 0x0 ; NdisMedium802_3 -*PhysicalMediaType = 14 ; NdisPhysicalMedium802_3 - -[zttap300.ndi.Services] -AddService = zttap300, 2, zttap300.service - -[zttap300.reg] -HKR, Ndi, Service, 0, "zttap300" -HKR, Ndi\Interfaces, UpperRange, 0, "ndis5" ; yes, 'ndis5' is correct... yup, Windows. 
-HKR, Ndi\Interfaces, LowerRange, 0, "ethernet" -HKR, , Manufacturer, 0, "%Provider%" -HKR, , ProductName, 0, "%DeviceDescription%" - -[zttap300.params.reg] -HKR, Ndi\params\MTU, ParamDesc, 0, "MTU" -HKR, Ndi\params\MTU, Type, 0, "int" -HKR, Ndi\params\MTU, Default, 0, "2800" -HKR, Ndi\params\MTU, Optional, 0, "0" -HKR, Ndi\params\MTU, Min, 0, "100" -HKR, Ndi\params\MTU, Max, 0, "2800" -HKR, Ndi\params\MTU, Step, 0, "1" -HKR, Ndi\params\MediaStatus, ParamDesc, 0, "Media Status" -HKR, Ndi\params\MediaStatus, Type, 0, "enum" -HKR, Ndi\params\MediaStatus, Default, 0, "0" -HKR, Ndi\params\MediaStatus, Optional, 0, "0" -HKR, Ndi\params\MediaStatus\enum, "0", 0, "Application Controlled" -HKR, Ndi\params\MediaStatus\enum, "1", 0, "Always Connected" -HKR, Ndi\params\MAC, ParamDesc, 0, "MAC Address" -HKR, Ndi\params\MAC, Type, 0, "edit" -HKR, Ndi\params\MAC, Optional, 0, "1" -HKR, Ndi\params\AllowNonAdmin, ParamDesc, 0, "Non-Admin Access" -HKR, Ndi\params\AllowNonAdmin, Type, 0, "enum" -HKR, Ndi\params\AllowNonAdmin, Default, 0, "0" -HKR, Ndi\params\AllowNonAdmin, Optional, 0, "0" -HKR, Ndi\params\AllowNonAdmin\enum, "0", 0, "Not Allowed" -HKR, Ndi\params\AllowNonAdmin\enum, "1", 0, "Allowed" - -;---------- Service Type ------------- -; SERVICE_KERNEL_DRIVER = 0x01 -; SERVICE_WIN32_OWN_PROCESS = 0x10 -;---------- Service Type ------------- - -;---------- Start Mode --------------- -; SERVICE_BOOT_START = 0x0 -; SERVICE_SYSTEM_START = 0x1 -; SERVICE_AUTO_START = 0x2 -; SERVICE_DEMAND_START = 0x3 -; SERVICE_DISABLED = 0x4 -;---------- Start Mode --------------- - -[zttap300.service] -DisplayName = %DeviceDescription% -ServiceType = 1 -StartType = 3 -ErrorControl = 1 -LoadOrderGroup = NDIS -ServiceBinary = %12%\zttap300.sys - -;----------------- Copy Flags ------------ -; COPYFLG_NOSKIP = 0x02 -; COPYFLG_NOVERSIONCHECK = 0x04 -;----------------- Copy Flags ------------ - -[SourceDisksNames] -1 = %DeviceDescription%, zttap300.sys - -[SourceDisksFiles] -zttap300.sys = 1 - -[DestinationDirs] -zttap300.files = 11 -zttap300.driver = 12 - -[zttap300.files] -; - -[zttap300.driver] -zttap300.sys,,,6 ; COPYFLG_NOSKIP | COPYFLG_NOVERSIONCHECK - diff --git a/ext/bin/tap-windows-ndis6/x86.old/zttap300.sys b/ext/bin/tap-windows-ndis6/x86.old/zttap300.sys deleted file mode 100644 index 664398e93..000000000 Binary files a/ext/bin/tap-windows-ndis6/x86.old/zttap300.sys and /dev/null differ diff --git a/ext/bin/tap-windows-ndis6/x86/ZeroTierOne_NDIS6_x86.msi b/ext/bin/tap-windows-ndis6/x86/ZeroTierOne_NDIS6_x86.msi deleted file mode 100644 index 8a8ab2e29..000000000 Binary files a/ext/bin/tap-windows-ndis6/x86/ZeroTierOne_NDIS6_x86.msi and /dev/null differ diff --git a/ext/bin/tap-windows-ndis6/zttap300.cer b/ext/bin/tap-windows-ndis6/zttap300.cer deleted file mode 100644 index ef74e041c..000000000 Binary files a/ext/bin/tap-windows-ndis6/zttap300.cer and /dev/null differ diff --git a/ext/central-controller-docker/Dockerfile b/ext/central-controller-docker/Dockerfile index c59aa8f5d..29670ec68 100644 --- a/ext/central-controller-docker/Dockerfile +++ b/ext/central-controller-docker/Dockerfile @@ -1,11 +1,17 @@ # Dockerfile for ZeroTier Central Controllers -FROM registry.zerotier.com/zerotier/controller-builder:latest as builder -MAINTAINER Adam Ierymekno , Grant Limberg +FROM registry.zerotier.com/zerotier/ctlbuild:2025-07-14 AS builder ADD . 
/ZeroTierOne RUN export PATH=$PATH:~/.cargo/bin && cd ZeroTierOne && make clean && make central-controller -j8 -FROM registry.zerotier.com/zerotier/controller-run:latest +FROM golang:bookworm AS go_base +RUN go install -tags 'postgres' github.com/golang-migrate/migrate/v4/cmd/migrate@latest + +FROM registry.zerotier.com/zerotier/ctlrun:2025-07-14 AS run_base COPY --from=builder /ZeroTierOne/zerotier-one /usr/local/bin/zerotier-one +COPY --from=builder /ZeroTierOne/ext/opentelemetry-cpp-1.21.0/localinstall/lib/*.so /usr/local/lib64/ +COPY --from=go_base /go/bin/migrate /usr/local/bin/migrate +COPY ext/central-controller-docker/migrations /migrations + RUN chmod a+x /usr/local/bin/zerotier-one RUN echo "/usr/local/lib64" > /etc/ld.so.conf.d/usr-local-lib64.conf && ldconfig diff --git a/ext/central-controller-docker/Dockerfile.builder b/ext/central-controller-docker/Dockerfile.builder index 5c2787570..6e10db887 100644 --- a/ext/central-controller-docker/Dockerfile.builder +++ b/ext/central-controller-docker/Dockerfile.builder @@ -1,13 +1,34 @@ # Dockerfile for building ZeroTier Central Controllers -FROM centos:8 as builder -MAINTAINER Adam Ierymekno , Grant Limberg +FROM debian:bookworm + +RUN apt update && apt upgrade -y +RUN apt -y install \ + build-essential \ + pkg-config \ + bash \ + clang \ + libjemalloc2 \ + libjemalloc-dev \ + libpq5 \ + libpq-dev \ + openssl \ + libssl-dev \ + postgresql-client \ + postgresql-client-common \ + curl \ + libcurl4-openssl-dev \ + google-perftools \ + libgoogle-perftools-dev \ + protobuf-compiler \ + protobuf-compiler-grpc \ + protobuf-c-compiler \ + grpc-proto \ + libgrpc++1.51 \ + libgrpc++-dev \ + libgrpc-dev \ + libgrpc29 \ + cmake \ + git -ARG git_branch=master -RUN yum update -y -RUN yum install -y https://download.postgresql.org/pub/repos/yum/reporpms/EL-8-x86_64/pgdg-redhat-repo-latest.noarch.rpm -RUN dnf -qy module disable postgresql -RUN yum -y install epel-release && yum -y update && yum clean all -RUN yum groupinstall -y "Development Tools" && yum clean all -RUN yum install -y bash cmake postgresql10 postgresql10-devel clang jemalloc jemalloc-devel libpqxx libpqxx-devel openssl-devel && yum clean all RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y diff --git a/ext/central-controller-docker/Dockerfile.run_base b/ext/central-controller-docker/Dockerfile.run_base index 9a08f5f72..fb5d0e2d5 100644 --- a/ext/central-controller-docker/Dockerfile.run_base +++ b/ext/central-controller-docker/Dockerfile.run_base @@ -1,5 +1,19 @@ -FROM centos:8 -RUN yum install -y https://download.postgresql.org/pub/repos/yum/reporpms/EL-8-x86_64/pgdg-redhat-repo-latest.noarch.rpm -RUN dnf -qy module disable postgresql -RUN yum -y install epel-release && yum -y update && yum clean all -RUN yum install -y jemalloc jemalloc-devel postgresql10 libpqxx libpqxx-devel && yum clean all +FROM debian:bookworm + + + +RUN apt update && apt upgrade -y +RUN apt -y install \ + netcat-traditional \ + postgresql-client \ + postgresql-client-common \ + libjemalloc2 \ + libpq5 \ + curl \ + binutils \ + perf-tools-unstable \ + google-perftools \ + gnupg \ + libgrpc++1.51 \ + libgrpc29 + diff --git a/ext/central-controller-docker/Makefile b/ext/central-controller-docker/Makefile new file mode 100644 index 000000000..2318d2903 --- /dev/null +++ b/ext/central-controller-docker/Makefile @@ -0,0 +1,16 @@ +registry = registry.zerotier.com/zerotier + +all: controller-builder controller-runbase + +buildx: + @echo "docker buildx create" + # docker run --rm --privileged 
multiarch/qemu-user-static --reset -p yes + docker run --privileged --rm tonistiigi/binfmt --install all + @echo docker buildx create --name multiarch --driver docker-container --use + @echo docker buildx inspect --bootstrap + +controller-builder: buildx + docker buildx build --no-cache --platform linux/amd64,linux/arm64 -t $(registry)/ctlbuild:$(shell date +"%Y-%m-%d") -f Dockerfile.builder . --push + +controller-runbase: buildx + docker buildx build --no-cache --platform linux/amd64,linux/arm64 -t $(registry)/ctlrun:$(shell date +"%Y-%m-%d") -f Dockerfile.run_base . --push diff --git a/ext/central-controller-docker/main.sh b/ext/central-controller-docker/main.sh index 2938ba11c..b1c4b4cbc 100755 --- a/ext/central-controller-docker/main.sh +++ b/ext/central-controller-docker/main.sh @@ -1,9 +1,5 @@ #!/bin/bash -if [ -z "$ZT_IDENTITY_PATH" ]; then - echo '*** FAILED: ZT_IDENTITY_PATH environment variable is not defined' - exit 1 -fi if [ -z "$ZT_DB_HOST" ]; then echo '*** FAILED: ZT_DB_HOST environment variable not defined' exit 1 @@ -24,6 +20,9 @@ if [ -z "$ZT_DB_PASSWORD" ]; then echo '*** FAILED: ZT_DB_PASSWORD environment variable not defined' exit 1 fi +if [ -z "$ZT_DB_TYPE" ]; then + ZT_DB_TYPE="postgres" +fi REDIS="" if [ "$ZT_USE_REDIS" == "true" ]; then @@ -56,35 +55,77 @@ fi mkdir -p /var/lib/zerotier-one pushd /var/lib/zerotier-one -ln -s $ZT_IDENTITY_PATH/identity.public identity.public -ln -s $ZT_IDENTITY_PATH/identity.secret identity.secret +if [ -d "$ZT_IDENTITY_PATH" ]; then + echo '*** Using existing ZT identity from path $ZT_IDENTITY_PATH' + + ln -s $ZT_IDENTITY_PATH/identity.public identity.public + ln -s $ZT_IDENTITY_PATH/identity.secret identity.secret + if [ -f "$ZT_IDENTITY_PATH/authtoken.secret" ]; then + ln -s $ZT_IDENTITY_PATH/authtoken.secret authtoken.secret + fi +fi popd DEFAULT_PORT=9993 +DEFAULT_LB_MODE=false APP_NAME="controller-$(cat /var/lib/zerotier-one/identity.public | cut -d ':' -f 1)" echo "{ \"settings\": { - \"controllerDbPath\": \"postgres:host=${ZT_DB_HOST} port=${ZT_DB_PORT} dbname=${ZT_DB_NAME} user=${ZT_DB_USER} password=${ZT_DB_PASSWORD} application_name=${APP_NAME} sslmode=prefer sslcert=${DB_CLIENT_CERT} sslkey=${DB_CLIENT_KEY} sslrootcert=${DB_SERVER_CA}\", + \"controllerDbPath\": \"${ZT_DB_TYPE}:host=${ZT_DB_HOST} port=${ZT_DB_PORT} dbname=${ZT_DB_NAME} user=${ZT_DB_USER} password=${ZT_DB_PASSWORD} application_name=${APP_NAME} sslmode=prefer sslcert=${DB_CLIENT_CERT} sslkey=${DB_CLIENT_KEY} sslrootcert=${DB_SERVER_CA}\", \"portMappingEnabled\": true, \"softwareUpdate\": \"disable\", \"interfacePrefixBlacklist\": [ \"inot\", \"nat64\" ], + \"lowBandwidthMode\": ${ZT_LB_MODE:-$DEFAULT_LB_MODE}, \"ssoRedirectURL\": \"${ZT_SSO_REDIRECT_URL}\", + \"allowManagementFrom\": [\"127.0.0.1\", \"::1\", \"10.0.0.0/8\"], + \"otel\": { + \"exporterEndpoint\": \"${ZT_EXPORTER_ENDPOINT}\", + \"exporterSampleRate\": ${ZT_EXPORTER_SAMPLE_RATE:-0} + }, ${REDIS} } } " > /var/lib/zerotier-one/local.conf -until /usr/pgsql-10/bin/pg_isready -h ${ZT_DB_HOST} -p ${ZT_DB_PORT}; do - echo "Waiting for PostgreSQL..."; - sleep 2; -done +if [ -n "$DB_SERVER_CA" ]; then + echo "secret list" + chmod 600 /secrets/db/*.pem + ls -l /secrets/db/ + until /usr/bin/pg_isready -h ${ZT_DB_HOST} -p ${ZT_DB_PORT} -d "sslmode=prefer sslcert=${DB_CLIENT_CERT} sslkey=${DB_CLIENT_KEY} sslrootcert=${DB_SERVER_CA}"; do + echo "Waiting for PostgreSQL..."; + sleep 2; + done +else + until /usr/bin/pg_isready -h ${ZT_DB_HOST} -p ${ZT_DB_PORT}; do + echo "Waiting for PostgreSQL..."; + sleep 2; 
+ done +fi + +if [ "$ZT_DB_TYPE" == "cv2" ]; then + echo "Migrating database (if needed)..." + if [ -n "$DB_SERVER_CA" ]; then + /usr/local/bin/migrate -source file:///migrations -database "postgres://$ZT_DB_USER:$ZT_DB_PASSWORD@$ZT_DB_HOST:$ZT_DB_PORT/$ZT_DB_NAME?x-migrations-table=controller_migrations&sslmode=verify-full&sslrootcert=$DB_SERVER_CA&sslcert=$DB_CLIENT_CERT&sslkey=$DB_CLIENT_KEY" up + else + /usr/local/bin/migrate -source file:///migrations -database "postgres://$ZT_DB_USER:$ZT_DB_PASSWORD@$ZT_DB_HOST:$ZT_DB_PORT/$ZT_DB_NAME?x-migrations-table=controller_migrations&sslmode=disable" up + fi +fi + +if [ -n "$ZT_TEMPORAL_HOST" ] && [ -n "$ZT_TEMPORAL_PORT" ]; then + echo "waiting for temporal..." + while ! nc -z ${ZT_TEMPORAL_HOST} ${ZT_TEMPORAL_PORT}; do + echo "waiting..."; + sleep 1; + done + echo "Temporal is up" +fi export GLIBCXX_FORCE_NEW=1 export GLIBCPP_FORCE_NEW=1 -export LD_PRELOAD="/usr/lib64/libjemalloc.so" +export LD_PRELOAD="/usr/lib/x86_64-linux-gnu/libjemalloc.so.2" exec /usr/local/bin/zerotier-one -p${ZT_CONTROLLER_PORT:-$DEFAULT_PORT} /var/lib/zerotier-one diff --git a/ext/central-controller-docker/migrations/0001_init.down.sql b/ext/central-controller-docker/migrations/0001_init.down.sql new file mode 100644 index 000000000..03dc63c81 --- /dev/null +++ b/ext/central-controller-docker/migrations/0001_init.down.sql @@ -0,0 +1,3 @@ +DROP TABLE IF EXISTS network_memberships_ctl; +DROP TABLE IF EXISTS networks_ctl; +DROP TABLE IF EXISTS controllers_ctl; \ No newline at end of file diff --git a/ext/central-controller-docker/migrations/0001_init.up.sql b/ext/central-controller-docker/migrations/0001_init.up.sql new file mode 100644 index 000000000..90d29e889 --- /dev/null +++ b/ext/central-controller-docker/migrations/0001_init.up.sql @@ -0,0 +1,47 @@ +-- inits controller db schema + +CREATE TABLE IF NOT EXISTS controllers_ctl ( + id text NOT NULL PRIMARY KEY, + hostname text, + last_heartbeat timestamp with time zone, + public_identity text NOT NULL, + version text +); + +CREATE TABLE IF NOT EXISTS networks_ctl ( + id character varying(22) NOT NULL PRIMARY KEY, + name text NOT NULL, + configuration jsonb DEFAULT '{}'::jsonb NOT NULL, + controller_id text REFERENCES controllers_ctl(id), + revision integer DEFAULT 0 NOT NULL, + last_modified timestamp with time zone DEFAULT now(), + creation_time timestamp with time zone DEFAULT now() +); + +CREATE TABLE IF NOT EXISTS network_memberships_ctl ( + device_id character varying(22) NOT NULL, + network_id character varying(22) NOT NULL REFERENCES networks_ctl(id), + authorized boolean, + active_bridge boolean, + ip_assignments text[], + no_auto_assign_ips boolean, + sso_exempt boolean, + authentication_expiry_time timestamp with time zone, + capabilities jsonb, + creation_time timestamp with time zone DEFAULT now(), + last_modified timestamp with time zone DEFAULT now(), + identity text DEFAULT ''::text, + last_authorized_credential text, + last_authorized_time timestamp with time zone, + last_deauthorized_time timestamp with time zone, + last_seen jsonb DEFAULT '{}'::jsonb NOT NULL, -- in the context of the network + remote_trace_level integer DEFAULT 0 NOT NULL, + remote_trace_target text DEFAULT ''::text NOT NULL, + revision integer DEFAULT 0 NOT NULL, + tags jsonb, + version_major integer DEFAULT 0 NOT NULL, + version_minor integer DEFAULT 0 NOT NULL, + version_revision integer DEFAULT 0 NOT NULL, + version_protocol integer DEFAULT 0 NOT NULL, + PRIMARY KEY (device_id, network_id) +); diff --git 
a/ext/central-controller-docker/migrations/0002_os_arch.down.sql b/ext/central-controller-docker/migrations/0002_os_arch.down.sql new file mode 100644 index 000000000..3c90f0a2e --- /dev/null +++ b/ext/central-controller-docker/migrations/0002_os_arch.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE network_memberships_ctl + DROP COLUMN os, + DROP COLUMN arch; \ No newline at end of file diff --git a/ext/central-controller-docker/migrations/0002_os_arch.up.sql b/ext/central-controller-docker/migrations/0002_os_arch.up.sql new file mode 100644 index 000000000..095f7c698 --- /dev/null +++ b/ext/central-controller-docker/migrations/0002_os_arch.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE network_memberships_ctl + ADD COLUMN os TEXT NOT NULL DEFAULT 'unknown', + ADD COLUMN arch TEXT NOT NULL DEFAULT 'unknown'; \ No newline at end of file diff --git a/ext/cpp-httplib/httplib.h b/ext/cpp-httplib/httplib.h index 3947df060..862e97e9e 100644 --- a/ext/cpp-httplib/httplib.h +++ b/ext/cpp-httplib/httplib.h @@ -1,13 +1,15 @@ // // httplib.h // -// Copyright (c) 2020 Yuji Hirose. All rights reserved. +// Copyright (c) 2024 Yuji Hirose. All rights reserved. // MIT License // #ifndef CPPHTTPLIB_HTTPLIB_H #define CPPHTTPLIB_HTTPLIB_H +#define CPPHTTPLIB_VERSION "0.15.3" + /* * Configuration */ @@ -60,14 +62,30 @@ #define CPPHTTPLIB_REQUEST_URI_MAX_LENGTH 8192 #endif +#ifndef CPPHTTPLIB_HEADER_MAX_LENGTH +#define CPPHTTPLIB_HEADER_MAX_LENGTH 8192 +#endif + #ifndef CPPHTTPLIB_REDIRECT_MAX_COUNT #define CPPHTTPLIB_REDIRECT_MAX_COUNT 20 #endif +#ifndef CPPHTTPLIB_MULTIPART_FORM_DATA_FILE_MAX_COUNT +#define CPPHTTPLIB_MULTIPART_FORM_DATA_FILE_MAX_COUNT 1024 +#endif + #ifndef CPPHTTPLIB_PAYLOAD_MAX_LENGTH #define CPPHTTPLIB_PAYLOAD_MAX_LENGTH ((std::numeric_limits::max)()) #endif +#ifndef CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH +#define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 8192 +#endif + +#ifndef CPPHTTPLIB_RANGE_MAX_COUNT +#define CPPHTTPLIB_RANGE_MAX_COUNT 1024 +#endif + #ifndef CPPHTTPLIB_TCP_NODELAY #define CPPHTTPLIB_TCP_NODELAY false #endif @@ -87,6 +105,18 @@ : 0)) #endif +#ifndef CPPHTTPLIB_RECV_FLAGS +#define CPPHTTPLIB_RECV_FLAGS 0 +#endif + +#ifndef CPPHTTPLIB_SEND_FLAGS +#define CPPHTTPLIB_SEND_FLAGS 0 +#endif + +#ifndef CPPHTTPLIB_LISTEN_BACKLOG +#define CPPHTTPLIB_LISTEN_BACKLOG 5 +#endif + /* * Headers */ @@ -101,23 +131,25 @@ #endif //_CRT_NONSTDC_NO_DEPRECATE #if defined(_MSC_VER) +#if _MSC_VER < 1900 +#error Sorry, Visual Studio versions prior to 2015 are not supported +#endif + +#pragma comment(lib, "ws2_32.lib") + #ifdef _WIN64 using ssize_t = __int64; #else -using ssize_t = int; -#endif - -#if _MSC_VER < 1900 -#define snprintf _snprintf_s +using ssize_t = long; #endif #endif // _MSC_VER #ifndef S_ISREG -#define S_ISREG(m) (((m)&S_IFREG) == S_IFREG) +#define S_ISREG(m) (((m) & S_IFREG) == S_IFREG) #endif // S_ISREG #ifndef S_ISDIR -#define S_ISDIR(m) (((m)&S_IFDIR) == S_IFDIR) +#define S_ISDIR(m) (((m) & S_IFDIR) == S_IFDIR) #endif // S_ISDIR #ifndef NOMINMAX @@ -126,24 +158,12 @@ using ssize_t = int; #include #include - -#include #include #ifndef WSA_FLAG_NO_HANDLE_INHERIT #define WSA_FLAG_NO_HANDLE_INHERIT 0x80 #endif -#ifdef _MSC_VER -#pragma comment(lib, "ws2_32.lib") -#pragma comment(lib, "crypt32.lib") -#pragma comment(lib, "cryptui.lib") -#endif - -#ifndef strcasecmp -#define strcasecmp _stricmp -#endif // strcasecmp - using socket_t = SOCKET; #ifdef CPPHTTPLIB_USE_POLL #define poll(fds, nfds, timeout) WSAPoll(fds, nfds, timeout) @@ -152,8 +172,16 @@ using socket_t = SOCKET; #else // 
not _WIN32 #include -#include +#if !defined(_AIX) && !defined(__MVS__) #include +#endif +#ifdef __MVS__ +#include +#ifndef NI_MAXHOST +#define NI_MAXHOST 1025 +#endif +#endif +#include #include #include #ifdef __linux__ @@ -165,24 +193,32 @@ using socket_t = SOCKET; #endif #include #include +#include #include #include +#include #include using socket_t = int; +#ifndef INVALID_SOCKET #define INVALID_SOCKET (-1) +#endif #endif //_WIN32 +#include #include #include #include #include #include #include +#include #include +#include #include #include #include +#include #include #include #include @@ -190,14 +226,39 @@ using socket_t = int; #include #include #include +#include #include #include #include #include +#include +#include +#include #ifdef CPPHTTPLIB_OPENSSL_SUPPORT +#ifdef _WIN32 +#include + +// these are defined in wincrypt.h and it breaks compilation if BoringSSL is +// used +#undef X509_NAME +#undef X509_CERT_PAIR +#undef X509_EXTENSIONS +#undef PKCS7_SIGNER_INFO + +#ifdef _MSC_VER +#pragma comment(lib, "crypt32.lib") +#endif +#elif defined(CPPHTTPLIB_USE_CERTS_FROM_MACOSX_KEYCHAIN) && defined(__APPLE__) +#include +#if TARGET_OS_OSX +#include +#include +#endif // TARGET_OS_OSX +#endif // _WIN32 + #include -#include +#include #include #include @@ -205,20 +266,13 @@ using socket_t = int; #include #endif -#include #include #include -#if OPENSSL_VERSION_NUMBER < 0x1010100fL -#error Sorry, OpenSSL versions prior to 1.1.1 are not supported +#if OPENSSL_VERSION_NUMBER < 0x30000000L +#error Sorry, OpenSSL versions prior to 3.0.0 are not supported #endif -#if OPENSSL_VERSION_NUMBER < 0x10100000L -#include -inline const unsigned char *ASN1_STRING_get0_data(const ASN1_STRING *asn1) { - return M_ASN1_STRING_data(asn1); -} -#endif #endif #ifdef CPPHTTPLIB_ZLIB_SUPPORT @@ -237,16 +291,142 @@ namespace httplib { namespace detail { +/* + * Backport std::make_unique from C++14. + * + * NOTE: This code came up with the following stackoverflow post: + * https://stackoverflow.com/questions/10149840/c-arrays-and-make-unique + * + */ + +template +typename std::enable_if::value, std::unique_ptr>::type +make_unique(Args &&...args) { + return std::unique_ptr(new T(std::forward(args)...)); +} + +template +typename std::enable_if::value, std::unique_ptr>::type +make_unique(std::size_t n) { + typedef typename std::remove_extent::type RT; + return std::unique_ptr(new RT[n]); +} + struct ci { bool operator()(const std::string &s1, const std::string &s2) const { - return std::lexicographical_compare( - s1.begin(), s1.end(), s2.begin(), s2.end(), - [](char c1, char c2) { return ::tolower(c1) < ::tolower(c2); }); + return std::lexicographical_compare(s1.begin(), s1.end(), s2.begin(), + s2.end(), + [](unsigned char c1, unsigned char c2) { + return ::tolower(c1) < ::tolower(c2); + }); } }; +// This is based on +// "http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2014/n4189". 
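+//
+// Illustrative usage only (a minimal sketch assuming the scope_exit type
+// defined just below; `sock` and `close_socket` are placeholder names, not
+// identifiers taken from this header):
+//
+//   auto guard = detail::scope_exit([&]() { close_socket(sock); });
+//   // ... work that may throw or return early; the callback runs on unwind
+//   guard.release(); // success path: cancel the deferred cleanup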
+ +struct scope_exit { + explicit scope_exit(std::function &&f) + : exit_function(std::move(f)), execute_on_destruction{true} {} + + scope_exit(scope_exit &&rhs) noexcept + : exit_function(std::move(rhs.exit_function)), + execute_on_destruction{rhs.execute_on_destruction} { + rhs.release(); + } + + ~scope_exit() { + if (execute_on_destruction) { this->exit_function(); } + } + + void release() { this->execute_on_destruction = false; } + +private: + scope_exit(const scope_exit &) = delete; + void operator=(const scope_exit &) = delete; + scope_exit &operator=(scope_exit &&) = delete; + + std::function exit_function; + bool execute_on_destruction; +}; + } // namespace detail +enum StatusCode { + // Information responses + Continue_100 = 100, + SwitchingProtocol_101 = 101, + Processing_102 = 102, + EarlyHints_103 = 103, + + // Successful responses + OK_200 = 200, + Created_201 = 201, + Accepted_202 = 202, + NonAuthoritativeInformation_203 = 203, + NoContent_204 = 204, + ResetContent_205 = 205, + PartialContent_206 = 206, + MultiStatus_207 = 207, + AlreadyReported_208 = 208, + IMUsed_226 = 226, + + // Redirection messages + MultipleChoices_300 = 300, + MovedPermanently_301 = 301, + Found_302 = 302, + SeeOther_303 = 303, + NotModified_304 = 304, + UseProxy_305 = 305, + unused_306 = 306, + TemporaryRedirect_307 = 307, + PermanentRedirect_308 = 308, + + // Client error responses + BadRequest_400 = 400, + Unauthorized_401 = 401, + PaymentRequired_402 = 402, + Forbidden_403 = 403, + NotFound_404 = 404, + MethodNotAllowed_405 = 405, + NotAcceptable_406 = 406, + ProxyAuthenticationRequired_407 = 407, + RequestTimeout_408 = 408, + Conflict_409 = 409, + Gone_410 = 410, + LengthRequired_411 = 411, + PreconditionFailed_412 = 412, + PayloadTooLarge_413 = 413, + UriTooLong_414 = 414, + UnsupportedMediaType_415 = 415, + RangeNotSatisfiable_416 = 416, + ExpectationFailed_417 = 417, + ImATeapot_418 = 418, + MisdirectedRequest_421 = 421, + UnprocessableContent_422 = 422, + Locked_423 = 423, + FailedDependency_424 = 424, + TooEarly_425 = 425, + UpgradeRequired_426 = 426, + PreconditionRequired_428 = 428, + TooManyRequests_429 = 429, + RequestHeaderFieldsTooLarge_431 = 431, + UnavailableForLegalReasons_451 = 451, + + // Server error responses + InternalServerError_500 = 500, + NotImplemented_501 = 501, + BadGateway_502 = 502, + ServiceUnavailable_503 = 503, + GatewayTimeout_504 = 504, + HttpVersionNotSupported_505 = 505, + VariantAlsoNegotiates_506 = 506, + InsufficientStorage_507 = 507, + LoopDetected_508 = 508, + NotExtended_510 = 510, + NetworkAuthenticationRequired_511 = 511, +}; + using Headers = std::multimap; using Params = std::multimap; @@ -275,9 +455,10 @@ public: DataSink(DataSink &&) = delete; DataSink &operator=(DataSink &&) = delete; - std::function write; - std::function done; + std::function write; std::function is_writable; + std::function done; + std::function done_with_trailer; std::ostream os; private: @@ -286,7 +467,7 @@ private: explicit data_sink_streambuf(DataSink &sink) : sink_(sink) {} protected: - std::streamsize xsputn(const char *s, std::streamsize n) { + std::streamsize xsputn(const char *s, std::streamsize n) override { sink_.write(s, static_cast(n)); return n; } @@ -304,6 +485,20 @@ using ContentProvider = using ContentProviderWithoutLength = std::function; +using ContentProviderResourceReleaser = std::function; + +struct MultipartFormDataProvider { + std::string name; + ContentProviderWithoutLength provider; + std::string filename; + std::string content_type; +}; +using 
MultipartFormDataProviderItems = std::vector; + +using ContentReceiverWithProgress = + std::function; + using ContentReceiver = std::function; @@ -317,14 +512,17 @@ public: ContentReceiver receiver)>; ContentReader(Reader reader, MultipartReader multipart_reader) - : reader_(reader), multipart_reader_(multipart_reader) {} + : reader_(std::move(reader)), + multipart_reader_(std::move(multipart_reader)) {} bool operator()(MultipartContentHeader header, ContentReceiver receiver) const { - return multipart_reader_(header, receiver); + return multipart_reader_(std::move(header), std::move(receiver)); } - bool operator()(ContentReceiver receiver) const { return reader_(receiver); } + bool operator()(ContentReceiver receiver) const { + return reader_(std::move(receiver)); + } Reader reader_; MultipartReader multipart_reader_; @@ -341,6 +539,8 @@ struct Request { std::string remote_addr; int remote_port = -1; + std::string local_addr; + int local_port = -1; // for server std::string version; @@ -349,37 +549,37 @@ struct Request { MultipartFormDataMap files; Ranges ranges; Match matches; + std::unordered_map path_params; // for client - size_t redirect_count = CPPHTTPLIB_REDIRECT_MAX_COUNT; ResponseHandler response_handler; - ContentReceiver content_receiver; - size_t content_length = 0; - ContentProvider content_provider; + ContentReceiverWithProgress content_receiver; Progress progress; - #ifdef CPPHTTPLIB_OPENSSL_SUPPORT - const SSL *ssl; + const SSL *ssl = nullptr; #endif - bool has_header(const char *key) const; - std::string get_header_value(const char *key, size_t id = 0) const; - template - T get_header_value(const char *key, size_t id = 0) const; - size_t get_header_value_count(const char *key) const; - void set_header(const char *key, const char *val); - void set_header(const char *key, const std::string &val); + bool has_header(const std::string &key) const; + std::string get_header_value(const std::string &key, size_t id = 0) const; + uint64_t get_header_value_u64(const std::string &key, size_t id = 0) const; + size_t get_header_value_count(const std::string &key) const; + void set_header(const std::string &key, const std::string &val); - bool has_param(const char *key) const; - std::string get_param_value(const char *key, size_t id = 0) const; - size_t get_param_value_count(const char *key) const; + bool has_param(const std::string &key) const; + std::string get_param_value(const std::string &key, size_t id = 0) const; + size_t get_param_value_count(const std::string &key) const; bool is_multipart_form_data() const; - bool has_file(const char *key) const; - MultipartFormData get_file_value(const char *key) const; + bool has_file(const std::string &key) const; + MultipartFormData get_file_value(const std::string &key) const; + std::vector get_file_values(const std::string &key) const; // private members... 
+ size_t redirect_count_ = CPPHTTPLIB_REDIRECT_MAX_COUNT; + size_t content_length_ = 0; + ContentProvider content_provider_; + bool is_chunked_content_provider_ = false; size_t authorization_count_ = 0; }; @@ -389,31 +589,30 @@ struct Response { std::string reason; Headers headers; std::string body; + std::string location; // Redirect location - bool has_header(const char *key) const; - std::string get_header_value(const char *key, size_t id = 0) const; - template - T get_header_value(const char *key, size_t id = 0) const; - size_t get_header_value_count(const char *key) const; - void set_header(const char *key, const char *val); - void set_header(const char *key, const std::string &val); + bool has_header(const std::string &key) const; + std::string get_header_value(const std::string &key, size_t id = 0) const; + uint64_t get_header_value_u64(const std::string &key, size_t id = 0) const; + size_t get_header_value_count(const std::string &key) const; + void set_header(const std::string &key, const std::string &val); - void set_redirect(const char *url, int status = 302); - void set_redirect(const std::string &url, int status = 302); - void set_content(const char *s, size_t n, const char *content_type); - void set_content(std::string s, const char *content_type); + void set_redirect(const std::string &url, int status = StatusCode::Found_302); + void set_content(const char *s, size_t n, const std::string &content_type); + void set_content(const std::string &s, const std::string &content_type); + void set_content(std::string &&s, const std::string &content_type); void set_content_provider( - size_t length, const char *content_type, ContentProvider provider, - const std::function &resource_releaser = nullptr); + size_t length, const std::string &content_type, ContentProvider provider, + ContentProviderResourceReleaser resource_releaser = nullptr); void set_content_provider( - const char *content_type, ContentProviderWithoutLength provider, - const std::function &resource_releaser = nullptr); + const std::string &content_type, ContentProviderWithoutLength provider, + ContentProviderResourceReleaser resource_releaser = nullptr); void set_chunked_content_provider( - const char *content_type, ContentProviderWithoutLength provider, - const std::function &resource_releaser = nullptr); + const std::string &content_type, ContentProviderWithoutLength provider, + ContentProviderResourceReleaser resource_releaser = nullptr); Response() = default; Response(const Response &) = default; @@ -422,15 +621,16 @@ struct Response { Response &operator=(Response &&) = default; ~Response() { if (content_provider_resource_releaser_) { - content_provider_resource_releaser_(); + content_provider_resource_releaser_(content_provider_success_); } } // private members... size_t content_length_ = 0; ContentProvider content_provider_; - std::function content_provider_resource_releaser_; - bool is_chunked_content_provider = false; + ContentProviderResourceReleaser content_provider_resource_releaser_; + bool is_chunked_content_provider_ = false; + bool content_provider_success_ = false; }; class Stream { @@ -443,9 +643,11 @@ public: virtual ssize_t read(char *ptr, size_t size) = 0; virtual ssize_t write(const char *ptr, size_t size) = 0; virtual void get_remote_ip_and_port(std::string &ip, int &port) const = 0; + virtual void get_local_ip_and_port(std::string &ip, int &port) const = 0; + virtual socket_t socket() const = 0; template - ssize_t write_format(const char *fmt, const Args &... 
args); + ssize_t write_format(const char *fmt, const Args &...args); ssize_t write(const char *ptr); ssize_t write(const std::string &s); }; @@ -455,15 +657,16 @@ public: TaskQueue() = default; virtual ~TaskQueue() = default; - virtual void enqueue(std::function fn) = 0; + virtual bool enqueue(std::function fn) = 0; virtual void shutdown() = 0; - virtual void on_idle(){}; + virtual void on_idle() {} }; class ThreadPool : public TaskQueue { public: - explicit ThreadPool(size_t n) : shutdown_(false) { + explicit ThreadPool(size_t n, size_t mqr = 0) + : shutdown_(false), max_queued_requests_(mqr) { while (n) { threads_.emplace_back(worker(*this)); n--; @@ -473,10 +676,17 @@ public: ThreadPool(const ThreadPool &) = delete; ~ThreadPool() override = default; - void enqueue(std::function fn) override { - std::unique_lock lock(mutex_); - jobs_.push_back(fn); + bool enqueue(std::function fn) override { + { + std::unique_lock lock(mutex_); + if (max_queued_requests_ > 0 && jobs_.size() >= max_queued_requests_) { + return false; + } + jobs_.push_back(std::move(fn)); + } + cond_.notify_one(); + return true; } void shutdown() override { @@ -509,7 +719,7 @@ private: if (pool_.shutdown_ && pool_.jobs_.empty()) { break; } - fn = pool_.jobs_.front(); + fn = std::move(pool_.jobs_.front()); pool_.jobs_.pop_front(); } @@ -526,6 +736,7 @@ private: std::list> jobs_; bool shutdown_; + size_t max_queued_requests_ = 0; std::condition_variable cond_; std::mutex mutex_; @@ -535,29 +746,101 @@ using Logger = std::function; using SocketOptions = std::function; -inline void default_socket_options(socket_t sock) { - int yes = 1; -#ifdef _WIN32 - setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, reinterpret_cast(&yes), - sizeof(yes)); - setsockopt(sock, SOL_SOCKET, SO_EXCLUSIVEADDRUSE, - reinterpret_cast(&yes), sizeof(yes)); -#else -#ifdef SO_REUSEPORT - setsockopt(sock, SOL_SOCKET, SO_REUSEPORT, reinterpret_cast(&yes), - sizeof(yes)); -#else - setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, reinterpret_cast(&yes), - sizeof(yes)); -#endif -#endif -} +void default_socket_options(socket_t sock); + +const char *status_message(int status); + +std::string get_bearer_token_auth(const Request &req); + +namespace detail { + +class MatcherBase { +public: + virtual ~MatcherBase() = default; + + // Match request path and populate its matches and + virtual bool match(Request &request) const = 0; +}; + +/** + * Captures parameters in request path and stores them in Request::path_params + * + * Capture name is a substring of a pattern from : to /. + * The rest of the pattern is matched agains the request path directly + * Parameters are captured starting from the next character after + * the end of the last matched static pattern fragment until the next /. 
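+ *
+ * Illustrative handler-side usage (a sketch only; `svr` and "user_id" are
+ * placeholder names, not taken from this header):
+ *
+ *   svr.Get("/users/:user_id", [](const Request &req, Response &res) {
+ *     auto id = req.path_params.at("user_id"); // captured path parameter
+ *     res.set_content(id, "text/plain");
+ *   });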
+ * + * Example pattern: + * "/path/fragments/:capture/more/fragments/:second_capture" + * Static fragments: + * "/path/fragments/", "more/fragments/" + * + * Given the following request path: + * "/path/fragments/:1/more/fragments/:2" + * the resulting capture will be + * {{"capture", "1"}, {"second_capture", "2"}} + */ +class PathParamsMatcher : public MatcherBase { +public: + PathParamsMatcher(const std::string &pattern); + + bool match(Request &request) const override; + +private: + static constexpr char marker = ':'; + // Treat segment separators as the end of path parameter capture + // Does not need to handle query parameters as they are parsed before path + // matching + static constexpr char separator = '/'; + + // Contains static path fragments to match against, excluding the '/' after + // path params + // Fragments are separated by path params + std::vector static_fragments_; + // Stores the names of the path parameters to be used as keys in the + // Request::path_params map + std::vector param_names_; +}; + +/** + * Performs std::regex_match on request path + * and stores the result in Request::matches + * + * Note that regex match is performed directly on the whole request. + * This means that wildcard patterns may match multiple path segments with /: + * "/begin/(.*)/end" will match both "/begin/middle/end" and "/begin/1/2/end". + */ +class RegexMatcher : public MatcherBase { +public: + RegexMatcher(const std::string &pattern) : regex_(pattern) {} + + bool match(Request &request) const override; + +private: + std::regex regex_; +}; + +ssize_t write_headers(Stream &strm, const Headers &headers); + +} // namespace detail class Server { public: using Handler = std::function; + + using ExceptionHandler = + std::function; + + enum class HandlerResponse { + Handled, + Unhandled, + }; + using HandlerWithResponse = + std::function; + using HandlerWithContentReader = std::function; + using Expect100ContinueHandler = std::function; @@ -567,56 +850,81 @@ public: virtual bool is_valid() const; - Server &Get(const char *pattern, Handler handler); - Server &Post(const char *pattern, Handler handler); - Server &Post(const char *pattern, HandlerWithContentReader handler); - Server &Put(const char *pattern, Handler handler); - Server &Put(const char *pattern, HandlerWithContentReader handler); - Server &Patch(const char *pattern, Handler handler); - Server &Patch(const char *pattern, HandlerWithContentReader handler); - Server &Delete(const char *pattern, Handler handler); - Server &Delete(const char *pattern, HandlerWithContentReader handler); - Server &Options(const char *pattern, Handler handler); + Server &Get(const std::string &pattern, Handler handler); + Server &Post(const std::string &pattern, Handler handler); + Server &Post(const std::string &pattern, HandlerWithContentReader handler); + Server &Put(const std::string &pattern, Handler handler); + Server &Put(const std::string &pattern, HandlerWithContentReader handler); + Server &Patch(const std::string &pattern, Handler handler); + Server &Patch(const std::string &pattern, HandlerWithContentReader handler); + Server &Delete(const std::string &pattern, Handler handler); + Server &Delete(const std::string &pattern, HandlerWithContentReader handler); + Server &Options(const std::string &pattern, Handler handler); - bool set_base_dir(const char *dir, const char *mount_point = nullptr); - bool set_mount_point(const char *mount_point, const char *dir); - bool remove_mount_point(const char *mount_point); - void 
set_file_extension_and_mimetype_mapping(const char *ext, - const char *mime); - void set_file_request_handler(Handler handler); + bool set_base_dir(const std::string &dir, + const std::string &mount_point = std::string()); + bool set_mount_point(const std::string &mount_point, const std::string &dir, + Headers headers = Headers()); + bool remove_mount_point(const std::string &mount_point); + Server &set_file_extension_and_mimetype_mapping(const std::string &ext, + const std::string &mime); + Server &set_default_file_mimetype(const std::string &mime); + Server &set_file_request_handler(Handler handler); - void set_error_handler(Handler handler); - void set_expect_100_continue_handler(Expect100ContinueHandler handler); - void set_logger(Logger logger); + Server &set_error_handler(HandlerWithResponse handler); + Server &set_error_handler(Handler handler); + Server &set_exception_handler(ExceptionHandler handler); + Server &set_pre_routing_handler(HandlerWithResponse handler); + Server &set_post_routing_handler(Handler handler); - void set_tcp_nodelay(bool on); - void set_socket_options(SocketOptions socket_options); + Server &set_expect_100_continue_handler(Expect100ContinueHandler handler); + Server &set_logger(Logger logger); - void set_keep_alive_max_count(size_t count); - void set_keep_alive_timeout(time_t sec); - void set_read_timeout(time_t sec, time_t usec = 0); - void set_write_timeout(time_t sec, time_t usec = 0); - void set_idle_interval(time_t sec, time_t usec = 0); + Server &set_address_family(int family); + Server &set_tcp_nodelay(bool on); + Server &set_socket_options(SocketOptions socket_options); - void set_payload_max_length(size_t length); + Server &set_default_headers(Headers headers); + Server & + set_header_writer(std::function const &writer); - bool bind_to_port(const char *host, int port, int socket_flags = 0); - int bind_to_any_port(const char *host, int socket_flags = 0); + Server &set_keep_alive_max_count(size_t count); + Server &set_keep_alive_timeout(time_t sec); + + Server &set_read_timeout(time_t sec, time_t usec = 0); + template + Server &set_read_timeout(const std::chrono::duration &duration); + + Server &set_write_timeout(time_t sec, time_t usec = 0); + template + Server &set_write_timeout(const std::chrono::duration &duration); + + Server &set_idle_interval(time_t sec, time_t usec = 0); + template + Server &set_idle_interval(const std::chrono::duration &duration); + + Server &set_payload_max_length(size_t length); + + bool bind_to_port(const std::string &host, int port, int socket_flags = 0); + int bind_to_any_port(const std::string &host, int socket_flags = 0); bool listen_after_bind(); - bool listen(const char *host, int port, int socket_flags = 0); + bool listen(const std::string &host, int port, int socket_flags = 0); bool is_running() const; + void wait_until_ready() const; void stop(); std::function new_task_queue; + bool routing(Request &req, Response &res, Stream &strm); + protected: bool process_request(Stream &strm, bool close_connection, bool &connection_closed, const std::function &setup_request); - std::atomic svr_sock_; + std::atomic svr_sock_{INVALID_SOCKET}; size_t keep_alive_max_count_ = CPPHTTPLIB_KEEPALIVE_MAX_COUNT; time_t keep_alive_timeout_sec_ = CPPHTTPLIB_KEEPALIVE_TIMEOUT_SECOND; time_t read_timeout_sec_ = CPPHTTPLIB_READ_TIMEOUT_SECOND; @@ -628,26 +936,39 @@ protected: size_t payload_max_length_ = CPPHTTPLIB_PAYLOAD_MAX_LENGTH; private: - using Handlers = std::vector>; + using Handlers = + std::vector, Handler>>; using 
HandlersForContentReader = - std::vector>; + std::vector, + HandlerWithContentReader>>; - socket_t create_server_socket(const char *host, int port, int socket_flags, + static std::unique_ptr + make_matcher(const std::string &pattern); + + socket_t create_server_socket(const std::string &host, int port, + int socket_flags, SocketOptions socket_options) const; - int bind_internal(const char *host, int port, int socket_flags); + int bind_internal(const std::string &host, int port, int socket_flags); bool listen_internal(); - bool routing(Request &req, Response &res, Stream &strm); - bool handle_file_request(Request &req, Response &res, bool head = false); - bool dispatch_request(Request &req, Response &res, const Handlers &handlers); - bool - dispatch_request_for_content_reader(Request &req, Response &res, - ContentReader content_reader, - const HandlersForContentReader &handlers); + bool handle_file_request(const Request &req, Response &res, + bool head = false); + bool dispatch_request(Request &req, Response &res, + const Handlers &handlers) const; + bool dispatch_request_for_content_reader( + Request &req, Response &res, ContentReader content_reader, + const HandlersForContentReader &handlers) const; - bool parse_request_line(const char *s, Request &req); - bool write_response(Stream &strm, bool close_connection, const Request &req, + bool parse_request_line(const char *s, Request &req) const; + void apply_ranges(const Request &req, Response &res, + std::string &content_type, std::string &boundary) const; + bool write_response(Stream &strm, bool close_connection, Request &req, Response &res); + bool write_response_with_content(Stream &strm, bool close_connection, + const Request &req, Response &res); + bool write_response_core(Stream &strm, bool close_connection, + const Request &req, Response &res, + bool need_apply_ranges); bool write_content_with_provider(Stream &strm, const Request &req, Response &res, const std::string &boundary, const std::string &content_type); @@ -659,15 +980,24 @@ private: ContentReceiver multipart_receiver); bool read_content_core(Stream &strm, Request &req, Response &res, ContentReceiver receiver, - MultipartContentHeader mulitpart_header, - ContentReceiver multipart_receiver); + MultipartContentHeader multipart_header, + ContentReceiver multipart_receiver) const; virtual bool process_and_close_socket(socket_t sock); - std::atomic is_running_; - std::vector> base_dirs_; + std::atomic is_running_{false}; + std::atomic done_{false}; + + struct MountPointEntry { + std::string mount_point; + std::string base_dir; + Headers headers; + }; + std::vector base_dirs_; std::map file_extension_and_mimetype_map_; + std::string default_file_mimetype_ = "application/octet-stream"; Handler file_request_handler_; + Handlers get_handlers_; Handlers post_handlers_; HandlersForContentReader post_handlers_for_content_reader_; @@ -678,15 +1008,25 @@ private: Handlers delete_handlers_; HandlersForContentReader delete_handlers_for_content_reader_; Handlers options_handlers_; - Handler error_handler_; - Logger logger_; + + HandlerWithResponse error_handler_; + ExceptionHandler exception_handler_; + HandlerWithResponse pre_routing_handler_; + Handler post_routing_handler_; Expect100ContinueHandler expect_100_continue_handler_; + Logger logger_; + + int address_family_ = AF_UNSPEC; bool tcp_nodelay_ = CPPHTTPLIB_TCP_NODELAY; SocketOptions socket_options_ = default_socket_options; + + Headers default_headers_; + std::function header_writer_ = + detail::write_headers; }; -enum Error { +enum 
class Error { Success = 0, Unknown, Connection, @@ -697,24 +1037,53 @@ enum Error { Canceled, SSLConnection, SSLLoadingCerts, - SSLServerVerification + SSLServerVerification, + UnsupportedMultipartBoundaryChars, + Compression, + ConnectionTimeout, + ProxyConnection, + + // For internal use only + SSLPeerCouldBeClosed_, }; +std::string to_string(Error error); + +std::ostream &operator<<(std::ostream &os, const Error &obj); + class Result { public: - Result(const std::shared_ptr &res, Error err) - : res_(res), err_(err) {} + Result() = default; + Result(std::unique_ptr &&res, Error err, + Headers &&request_headers = Headers{}) + : res_(std::move(res)), err_(err), + request_headers_(std::move(request_headers)) {} + // Response operator bool() const { return res_ != nullptr; } bool operator==(std::nullptr_t) const { return res_ == nullptr; } bool operator!=(std::nullptr_t) const { return res_ != nullptr; } const Response &value() const { return *res_; } + Response &value() { return *res_; } const Response &operator*() const { return *res_; } + Response &operator*() { return *res_; } const Response *operator->() const { return res_.get(); } + Response *operator->() { return res_.get(); } + + // Error Error error() const { return err_; } + // Request Headers + bool has_request_header(const std::string &key) const; + std::string get_request_header_value(const std::string &key, + size_t id = 0) const; + uint64_t get_request_header_value_u64(const std::string &key, + size_t id = 0) const; + size_t get_request_header_value_count(const std::string &key) const; + private: - std::shared_ptr res_; - Error err_; + std::unique_ptr res_; + Error err_ = Error::Unknown; + Headers request_headers_; }; class ClientImpl { @@ -731,112 +1100,212 @@ public: virtual bool is_valid() const; - Result Get(const char *path); - Result Get(const char *path, const Headers &headers); - Result Get(const char *path, Progress progress); - Result Get(const char *path, const Headers &headers, Progress progress); - Result Get(const char *path, ContentReceiver content_receiver); - Result Get(const char *path, const Headers &headers, - ContentReceiver content_receiver); - Result Get(const char *path, ContentReceiver content_receiver, + Result Get(const std::string &path); + Result Get(const std::string &path, const Headers &headers); + Result Get(const std::string &path, Progress progress); + Result Get(const std::string &path, const Headers &headers, Progress progress); - Result Get(const char *path, const Headers &headers, - ContentReceiver content_receiver, Progress progress); - Result Get(const char *path, ResponseHandler response_handler, + Result Get(const std::string &path, ContentReceiver content_receiver); + Result Get(const std::string &path, const Headers &headers, ContentReceiver content_receiver); - Result Get(const char *path, const Headers &headers, + Result Get(const std::string &path, ContentReceiver content_receiver, + Progress progress); + Result Get(const std::string &path, const Headers &headers, + ContentReceiver content_receiver, Progress progress); + Result Get(const std::string &path, ResponseHandler response_handler, + ContentReceiver content_receiver); + Result Get(const std::string &path, const Headers &headers, ResponseHandler response_handler, ContentReceiver content_receiver); - Result Get(const char *path, ResponseHandler response_handler, + Result Get(const std::string &path, ResponseHandler response_handler, ContentReceiver content_receiver, Progress progress); - Result Get(const char *path, const 
Headers &headers, + Result Get(const std::string &path, const Headers &headers, ResponseHandler response_handler, ContentReceiver content_receiver, Progress progress); - Result Head(const char *path); - Result Head(const char *path, const Headers &headers); + Result Get(const std::string &path, const Params ¶ms, + const Headers &headers, Progress progress = nullptr); + Result Get(const std::string &path, const Params ¶ms, + const Headers &headers, ContentReceiver content_receiver, + Progress progress = nullptr); + Result Get(const std::string &path, const Params ¶ms, + const Headers &headers, ResponseHandler response_handler, + ContentReceiver content_receiver, Progress progress = nullptr); - Result Post(const char *path); - Result Post(const char *path, const std::string &body, - const char *content_type); - Result Post(const char *path, const Headers &headers, const std::string &body, - const char *content_type); - Result Post(const char *path, size_t content_length, - ContentProvider content_provider, const char *content_type); - Result Post(const char *path, const Headers &headers, size_t content_length, - ContentProvider content_provider, const char *content_type); - Result Post(const char *path, const Params ¶ms); - Result Post(const char *path, const Headers &headers, const Params ¶ms); - Result Post(const char *path, const MultipartFormDataItems &items); - Result Post(const char *path, const Headers &headers, + Result Head(const std::string &path); + Result Head(const std::string &path, const Headers &headers); + + Result Post(const std::string &path); + Result Post(const std::string &path, const Headers &headers); + Result Post(const std::string &path, const char *body, size_t content_length, + const std::string &content_type); + Result Post(const std::string &path, const Headers &headers, const char *body, + size_t content_length, const std::string &content_type); + Result Post(const std::string &path, const std::string &body, + const std::string &content_type); + Result Post(const std::string &path, const Headers &headers, + const std::string &body, const std::string &content_type); + Result Post(const std::string &path, size_t content_length, + ContentProvider content_provider, + const std::string &content_type); + Result Post(const std::string &path, + ContentProviderWithoutLength content_provider, + const std::string &content_type); + Result Post(const std::string &path, const Headers &headers, + size_t content_length, ContentProvider content_provider, + const std::string &content_type); + Result Post(const std::string &path, const Headers &headers, + ContentProviderWithoutLength content_provider, + const std::string &content_type); + Result Post(const std::string &path, const Params ¶ms); + Result Post(const std::string &path, const Headers &headers, + const Params ¶ms); + Result Post(const std::string &path, const MultipartFormDataItems &items); + Result Post(const std::string &path, const Headers &headers, const MultipartFormDataItems &items); + Result Post(const std::string &path, const Headers &headers, + const MultipartFormDataItems &items, const std::string &boundary); + Result Post(const std::string &path, const Headers &headers, + const MultipartFormDataItems &items, + const MultipartFormDataProviderItems &provider_items); - Result Put(const char *path); - Result Put(const char *path, const std::string &body, - const char *content_type); - Result Put(const char *path, const Headers &headers, const std::string &body, - const char *content_type); - Result Put(const 
char *path, size_t content_length, - ContentProvider content_provider, const char *content_type); - Result Put(const char *path, const Headers &headers, size_t content_length, - ContentProvider content_provider, const char *content_type); - Result Put(const char *path, const Params ¶ms); - Result Put(const char *path, const Headers &headers, const Params ¶ms); + Result Put(const std::string &path); + Result Put(const std::string &path, const char *body, size_t content_length, + const std::string &content_type); + Result Put(const std::string &path, const Headers &headers, const char *body, + size_t content_length, const std::string &content_type); + Result Put(const std::string &path, const std::string &body, + const std::string &content_type); + Result Put(const std::string &path, const Headers &headers, + const std::string &body, const std::string &content_type); + Result Put(const std::string &path, size_t content_length, + ContentProvider content_provider, const std::string &content_type); + Result Put(const std::string &path, + ContentProviderWithoutLength content_provider, + const std::string &content_type); + Result Put(const std::string &path, const Headers &headers, + size_t content_length, ContentProvider content_provider, + const std::string &content_type); + Result Put(const std::string &path, const Headers &headers, + ContentProviderWithoutLength content_provider, + const std::string &content_type); + Result Put(const std::string &path, const Params ¶ms); + Result Put(const std::string &path, const Headers &headers, + const Params ¶ms); + Result Put(const std::string &path, const MultipartFormDataItems &items); + Result Put(const std::string &path, const Headers &headers, + const MultipartFormDataItems &items); + Result Put(const std::string &path, const Headers &headers, + const MultipartFormDataItems &items, const std::string &boundary); + Result Put(const std::string &path, const Headers &headers, + const MultipartFormDataItems &items, + const MultipartFormDataProviderItems &provider_items); - Result Patch(const char *path, const std::string &body, - const char *content_type); - Result Patch(const char *path, const Headers &headers, - const std::string &body, const char *content_type); - Result Patch(const char *path, size_t content_length, - ContentProvider content_provider, const char *content_type); - Result Patch(const char *path, const Headers &headers, size_t content_length, - ContentProvider content_provider, const char *content_type); + Result Patch(const std::string &path); + Result Patch(const std::string &path, const char *body, size_t content_length, + const std::string &content_type); + Result Patch(const std::string &path, const Headers &headers, + const char *body, size_t content_length, + const std::string &content_type); + Result Patch(const std::string &path, const std::string &body, + const std::string &content_type); + Result Patch(const std::string &path, const Headers &headers, + const std::string &body, const std::string &content_type); + Result Patch(const std::string &path, size_t content_length, + ContentProvider content_provider, + const std::string &content_type); + Result Patch(const std::string &path, + ContentProviderWithoutLength content_provider, + const std::string &content_type); + Result Patch(const std::string &path, const Headers &headers, + size_t content_length, ContentProvider content_provider, + const std::string &content_type); + Result Patch(const std::string &path, const Headers &headers, + ContentProviderWithoutLength 
content_provider, + const std::string &content_type); - Result Delete(const char *path); - Result Delete(const char *path, const std::string &body, - const char *content_type); - Result Delete(const char *path, const Headers &headers); - Result Delete(const char *path, const Headers &headers, - const std::string &body, const char *content_type); + Result Delete(const std::string &path); + Result Delete(const std::string &path, const Headers &headers); + Result Delete(const std::string &path, const char *body, + size_t content_length, const std::string &content_type); + Result Delete(const std::string &path, const Headers &headers, + const char *body, size_t content_length, + const std::string &content_type); + Result Delete(const std::string &path, const std::string &body, + const std::string &content_type); + Result Delete(const std::string &path, const Headers &headers, + const std::string &body, const std::string &content_type); - Result Options(const char *path); - Result Options(const char *path, const Headers &headers); + Result Options(const std::string &path); + Result Options(const std::string &path, const Headers &headers); - bool send(const Request &req, Response &res); - - size_t is_socket_open() const; + bool send(Request &req, Response &res, Error &error); + Result send(const Request &req); void stop(); + std::string host() const; + int port() const; + + size_t is_socket_open() const; + socket_t socket() const; + + void set_hostname_addr_map(std::map addr_map); + void set_default_headers(Headers headers); + void + set_header_writer(std::function const &writer); + + void set_address_family(int family); void set_tcp_nodelay(bool on); void set_socket_options(SocketOptions socket_options); void set_connection_timeout(time_t sec, time_t usec = 0); - void set_read_timeout(time_t sec, time_t usec = 0); - void set_write_timeout(time_t sec, time_t usec = 0); + template + void + set_connection_timeout(const std::chrono::duration &duration); - void set_basic_auth(const char *username, const char *password); - void set_bearer_token_auth(const char *token); + void set_read_timeout(time_t sec, time_t usec = 0); + template + void set_read_timeout(const std::chrono::duration &duration); + + void set_write_timeout(time_t sec, time_t usec = 0); + template + void set_write_timeout(const std::chrono::duration &duration); + + void set_basic_auth(const std::string &username, const std::string &password); + void set_bearer_token_auth(const std::string &token); #ifdef CPPHTTPLIB_OPENSSL_SUPPORT - void set_digest_auth(const char *username, const char *password); + void set_digest_auth(const std::string &username, + const std::string &password); #endif void set_keep_alive(bool on); void set_follow_location(bool on); + void set_url_encode(bool on); + void set_compress(bool on); void set_decompress(bool on); - void set_interface(const char *intf); + void set_interface(const std::string &intf); - void set_proxy(const char *host, int port); - void set_proxy_basic_auth(const char *username, const char *password); - void set_proxy_bearer_token_auth(const char *token); + void set_proxy(const std::string &host, int port); + void set_proxy_basic_auth(const std::string &username, + const std::string &password); + void set_proxy_bearer_token_auth(const std::string &token); #ifdef CPPHTTPLIB_OPENSSL_SUPPORT - void set_proxy_digest_auth(const char *username, const char *password); + void set_proxy_digest_auth(const std::string &username, + const std::string &password); +#endif + +#ifdef CPPHTTPLIB_OPENSSL_SUPPORT 
+ void set_ca_cert_path(const std::string &ca_cert_file_path, + const std::string &ca_cert_dir_path = std::string()); + void set_ca_cert_store(X509_STORE *ca_cert_store); + X509_STORE *create_ca_cert_store(const char *ca_cert, std::size_t size) const; #endif #ifdef CPPHTTPLIB_OPENSSL_SUPPORT @@ -855,20 +1324,28 @@ protected: bool is_open() const { return sock != INVALID_SOCKET; } }; - virtual bool create_and_connect_socket(Socket &socket); - virtual void close_socket(Socket &socket, bool process_socket_ret); + virtual bool create_and_connect_socket(Socket &socket, Error &error); - bool process_request(Stream &strm, const Request &req, Response &res, - bool close_connection); + // All of: + // shutdown_ssl + // shutdown_socket + // close_socket + // should ONLY be called when socket_mutex_ is locked. + // Also, shutdown_ssl and close_socket should also NOT be called concurrently + // with a DIFFERENT thread sending requests using that socket. + virtual void shutdown_ssl(Socket &socket, bool shutdown_gracefully); + void shutdown_socket(Socket &socket) const; + void close_socket(Socket &socket); - Error get_last_error() const; + bool process_request(Stream &strm, Request &req, Response &res, + bool close_connection, Error &error); + + bool write_content_with_provider(Stream &strm, const Request &req, + Error &error) const; void copy_settings(const ClientImpl &rhs); - // Error state - mutable Error error_ = Error::Success; - - // Socket endoint information + // Socket endpoint information const std::string host_; const int port_; const std::string host_and_port_; @@ -878,9 +1355,21 @@ protected: mutable std::mutex socket_mutex_; std::recursive_mutex request_mutex_; + // These are all protected under socket_mutex + size_t socket_requests_in_flight_ = 0; + std::thread::id socket_requests_are_from_thread_ = std::thread::id(); + bool socket_should_be_closed_when_request_is_done_ = false; + + // Hostname-IP map + std::map addr_map_; + // Default headers Headers default_headers_; + // Header writer + std::function header_writer_ = + detail::write_headers; + // Settings std::string client_cert_path_; std::string client_key_path_; @@ -903,6 +1392,9 @@ protected: bool keep_alive_ = false; bool follow_location_ = false; + bool url_encode_ = true; + + int address_family_ = AF_UNSPEC; bool tcp_nodelay_ = CPPHTTPLIB_TCP_NODELAY; SocketOptions socket_options_ = nullptr; @@ -922,6 +1414,13 @@ protected: std::string proxy_digest_auth_password_; #endif +#ifdef CPPHTTPLIB_OPENSSL_SUPPORT + std::string ca_cert_file_path_; + std::string ca_cert_dir_path_; + + X509_STORE *ca_cert_store_ = nullptr; +#endif + #ifdef CPPHTTPLIB_OPENSSL_SUPPORT bool server_certificate_verification_ = true; #endif @@ -929,19 +1428,35 @@ protected: Logger logger_; private: - socket_t create_client_socket() const; - bool read_response_line(Stream &strm, Response &res); - bool write_request(Stream &strm, const Request &req, bool close_connection); - bool redirect(const Request &req, Response &res); - bool handle_request(Stream &strm, const Request &req, Response &res, - bool close_connection); - void stop_core(); - std::shared_ptr send_with_content_provider( - const char *method, const char *path, const Headers &headers, - const std::string &body, size_t content_length, - ContentProvider content_provider, const char *content_type); + bool send_(Request &req, Response &res, Error &error); + Result send_(Request &&req); - virtual bool process_socket(Socket &socket, + socket_t create_client_socket(Error &error) const; + bool 
read_response_line(Stream &strm, const Request &req, + Response &res) const; + bool write_request(Stream &strm, Request &req, bool close_connection, + Error &error); + bool redirect(Request &req, Response &res, Error &error); + bool handle_request(Stream &strm, Request &req, Response &res, + bool close_connection, Error &error); + std::unique_ptr send_with_content_provider( + Request &req, const char *body, size_t content_length, + ContentProvider content_provider, + ContentProviderWithoutLength content_provider_without_length, + const std::string &content_type, Error &error); + Result send_with_content_provider( + const std::string &method, const std::string &path, + const Headers &headers, const char *body, size_t content_length, + ContentProvider content_provider, + ContentProviderWithoutLength content_provider_without_length, + const std::string &content_type); + ContentProviderWithoutLength get_multipart_content_provider( + const std::string &boundary, const MultipartFormDataItems &items, + const MultipartFormDataProviderItems &provider_items) const; + + std::string adjust_host_string(const std::string &host) const; + + virtual bool process_socket(const Socket &socket, std::function callback); virtual bool is_ssl() const; }; @@ -949,9 +1464,9 @@ private: class Client { public: // Universal interface - explicit Client(const char *scheme_host_port); + explicit Client(const std::string &scheme_host_port); - explicit Client(const char *scheme_host_port, + explicit Client(const std::string &scheme_host_port, const std::string &client_cert_path, const std::string &client_key_path); @@ -962,114 +1477,211 @@ public: const std::string &client_cert_path, const std::string &client_key_path); + Client(Client &&) = default; + ~Client(); bool is_valid() const; - Result Get(const char *path); - Result Get(const char *path, const Headers &headers); - Result Get(const char *path, Progress progress); - Result Get(const char *path, const Headers &headers, Progress progress); - Result Get(const char *path, ContentReceiver content_receiver); - Result Get(const char *path, const Headers &headers, - ContentReceiver content_receiver); - Result Get(const char *path, ContentReceiver content_receiver, + Result Get(const std::string &path); + Result Get(const std::string &path, const Headers &headers); + Result Get(const std::string &path, Progress progress); + Result Get(const std::string &path, const Headers &headers, Progress progress); - Result Get(const char *path, const Headers &headers, - ContentReceiver content_receiver, Progress progress); - Result Get(const char *path, ResponseHandler response_handler, + Result Get(const std::string &path, ContentReceiver content_receiver); + Result Get(const std::string &path, const Headers &headers, ContentReceiver content_receiver); - Result Get(const char *path, const Headers &headers, + Result Get(const std::string &path, ContentReceiver content_receiver, + Progress progress); + Result Get(const std::string &path, const Headers &headers, + ContentReceiver content_receiver, Progress progress); + Result Get(const std::string &path, ResponseHandler response_handler, + ContentReceiver content_receiver); + Result Get(const std::string &path, const Headers &headers, ResponseHandler response_handler, ContentReceiver content_receiver); - Result Get(const char *path, const Headers &headers, + Result Get(const std::string &path, const Headers &headers, ResponseHandler response_handler, ContentReceiver content_receiver, Progress progress); - Result Get(const char *path, 
ResponseHandler response_handler, + Result Get(const std::string &path, ResponseHandler response_handler, ContentReceiver content_receiver, Progress progress); - Result Head(const char *path); - Result Head(const char *path, const Headers &headers); + Result Get(const std::string &path, const Params ¶ms, + const Headers &headers, Progress progress = nullptr); + Result Get(const std::string &path, const Params ¶ms, + const Headers &headers, ContentReceiver content_receiver, + Progress progress = nullptr); + Result Get(const std::string &path, const Params ¶ms, + const Headers &headers, ResponseHandler response_handler, + ContentReceiver content_receiver, Progress progress = nullptr); - Result Post(const char *path); - Result Post(const char *path, const std::string &body, - const char *content_type); - Result Post(const char *path, const Headers &headers, const std::string &body, - const char *content_type); - Result Post(const char *path, size_t content_length, - ContentProvider content_provider, const char *content_type); - Result Post(const char *path, const Headers &headers, size_t content_length, - ContentProvider content_provider, const char *content_type); - Result Post(const char *path, const Params ¶ms); - Result Post(const char *path, const Headers &headers, const Params ¶ms); - Result Post(const char *path, const MultipartFormDataItems &items); - Result Post(const char *path, const Headers &headers, + Result Head(const std::string &path); + Result Head(const std::string &path, const Headers &headers); + + Result Post(const std::string &path); + Result Post(const std::string &path, const Headers &headers); + Result Post(const std::string &path, const char *body, size_t content_length, + const std::string &content_type); + Result Post(const std::string &path, const Headers &headers, const char *body, + size_t content_length, const std::string &content_type); + Result Post(const std::string &path, const std::string &body, + const std::string &content_type); + Result Post(const std::string &path, const Headers &headers, + const std::string &body, const std::string &content_type); + Result Post(const std::string &path, size_t content_length, + ContentProvider content_provider, + const std::string &content_type); + Result Post(const std::string &path, + ContentProviderWithoutLength content_provider, + const std::string &content_type); + Result Post(const std::string &path, const Headers &headers, + size_t content_length, ContentProvider content_provider, + const std::string &content_type); + Result Post(const std::string &path, const Headers &headers, + ContentProviderWithoutLength content_provider, + const std::string &content_type); + Result Post(const std::string &path, const Params ¶ms); + Result Post(const std::string &path, const Headers &headers, + const Params ¶ms); + Result Post(const std::string &path, const MultipartFormDataItems &items); + Result Post(const std::string &path, const Headers &headers, const MultipartFormDataItems &items); - Result Put(const char *path); - Result Put(const char *path, const std::string &body, - const char *content_type); - Result Put(const char *path, const Headers &headers, const std::string &body, - const char *content_type); - Result Put(const char *path, size_t content_length, - ContentProvider content_provider, const char *content_type); - Result Put(const char *path, const Headers &headers, size_t content_length, - ContentProvider content_provider, const char *content_type); - Result Put(const char *path, const Params ¶ms); - Result 
Put(const char *path, const Headers &headers, const Params ¶ms); - Result Patch(const char *path, const std::string &body, - const char *content_type); - Result Patch(const char *path, const Headers &headers, - const std::string &body, const char *content_type); - Result Patch(const char *path, size_t content_length, - ContentProvider content_provider, const char *content_type); - Result Patch(const char *path, const Headers &headers, size_t content_length, - ContentProvider content_provider, const char *content_type); + Result Post(const std::string &path, const Headers &headers, + const MultipartFormDataItems &items, const std::string &boundary); + Result Post(const std::string &path, const Headers &headers, + const MultipartFormDataItems &items, + const MultipartFormDataProviderItems &provider_items); - Result Delete(const char *path); - Result Delete(const char *path, const std::string &body, - const char *content_type); - Result Delete(const char *path, const Headers &headers); - Result Delete(const char *path, const Headers &headers, - const std::string &body, const char *content_type); + Result Put(const std::string &path); + Result Put(const std::string &path, const char *body, size_t content_length, + const std::string &content_type); + Result Put(const std::string &path, const Headers &headers, const char *body, + size_t content_length, const std::string &content_type); + Result Put(const std::string &path, const std::string &body, + const std::string &content_type); + Result Put(const std::string &path, const Headers &headers, + const std::string &body, const std::string &content_type); + Result Put(const std::string &path, size_t content_length, + ContentProvider content_provider, const std::string &content_type); + Result Put(const std::string &path, + ContentProviderWithoutLength content_provider, + const std::string &content_type); + Result Put(const std::string &path, const Headers &headers, + size_t content_length, ContentProvider content_provider, + const std::string &content_type); + Result Put(const std::string &path, const Headers &headers, + ContentProviderWithoutLength content_provider, + const std::string &content_type); + Result Put(const std::string &path, const Params ¶ms); + Result Put(const std::string &path, const Headers &headers, + const Params ¶ms); + Result Put(const std::string &path, const MultipartFormDataItems &items); + Result Put(const std::string &path, const Headers &headers, + const MultipartFormDataItems &items); + Result Put(const std::string &path, const Headers &headers, + const MultipartFormDataItems &items, const std::string &boundary); + Result Put(const std::string &path, const Headers &headers, + const MultipartFormDataItems &items, + const MultipartFormDataProviderItems &provider_items); - Result Options(const char *path); - Result Options(const char *path, const Headers &headers); + Result Patch(const std::string &path); + Result Patch(const std::string &path, const char *body, size_t content_length, + const std::string &content_type); + Result Patch(const std::string &path, const Headers &headers, + const char *body, size_t content_length, + const std::string &content_type); + Result Patch(const std::string &path, const std::string &body, + const std::string &content_type); + Result Patch(const std::string &path, const Headers &headers, + const std::string &body, const std::string &content_type); + Result Patch(const std::string &path, size_t content_length, + ContentProvider content_provider, + const std::string &content_type); + 
Result Patch(const std::string &path, + ContentProviderWithoutLength content_provider, + const std::string &content_type); + Result Patch(const std::string &path, const Headers &headers, + size_t content_length, ContentProvider content_provider, + const std::string &content_type); + Result Patch(const std::string &path, const Headers &headers, + ContentProviderWithoutLength content_provider, + const std::string &content_type); - bool send(const Request &req, Response &res); + Result Delete(const std::string &path); + Result Delete(const std::string &path, const Headers &headers); + Result Delete(const std::string &path, const char *body, + size_t content_length, const std::string &content_type); + Result Delete(const std::string &path, const Headers &headers, + const char *body, size_t content_length, + const std::string &content_type); + Result Delete(const std::string &path, const std::string &body, + const std::string &content_type); + Result Delete(const std::string &path, const Headers &headers, + const std::string &body, const std::string &content_type); - size_t is_socket_open() const; + Result Options(const std::string &path); + Result Options(const std::string &path, const Headers &headers); + + bool send(Request &req, Response &res, Error &error); + Result send(const Request &req); void stop(); + std::string host() const; + int port() const; + + size_t is_socket_open() const; + socket_t socket() const; + + void set_hostname_addr_map(std::map addr_map); + void set_default_headers(Headers headers); + void + set_header_writer(std::function const &writer); + + void set_address_family(int family); void set_tcp_nodelay(bool on); void set_socket_options(SocketOptions socket_options); void set_connection_timeout(time_t sec, time_t usec = 0); - void set_read_timeout(time_t sec, time_t usec = 0); - void set_write_timeout(time_t sec, time_t usec = 0); + template + void + set_connection_timeout(const std::chrono::duration &duration); - void set_basic_auth(const char *username, const char *password); - void set_bearer_token_auth(const char *token); + void set_read_timeout(time_t sec, time_t usec = 0); + template + void set_read_timeout(const std::chrono::duration &duration); + + void set_write_timeout(time_t sec, time_t usec = 0); + template + void set_write_timeout(const std::chrono::duration &duration); + + void set_basic_auth(const std::string &username, const std::string &password); + void set_bearer_token_auth(const std::string &token); #ifdef CPPHTTPLIB_OPENSSL_SUPPORT - void set_digest_auth(const char *username, const char *password); + void set_digest_auth(const std::string &username, + const std::string &password); #endif void set_keep_alive(bool on); void set_follow_location(bool on); + void set_url_encode(bool on); + void set_compress(bool on); void set_decompress(bool on); - void set_interface(const char *intf); + void set_interface(const std::string &intf); - void set_proxy(const char *host, int port); - void set_proxy_basic_auth(const char *username, const char *password); - void set_proxy_bearer_token_auth(const char *token); + void set_proxy(const std::string &host, int port); + void set_proxy_basic_auth(const std::string &username, + const std::string &password); + void set_proxy_bearer_token_auth(const std::string &token); #ifdef CPPHTTPLIB_OPENSSL_SUPPORT - void set_proxy_digest_auth(const char *username, const char *password); + void set_proxy_digest_auth(const std::string &username, + const std::string &password); #endif #ifdef CPPHTTPLIB_OPENSSL_SUPPORT @@ -1080,10 
+1692,11 @@ public: // SSL #ifdef CPPHTTPLIB_OPENSSL_SUPPORT - void set_ca_cert_path(const char *ca_cert_file_path, - const char *ca_cert_dir_path = nullptr); + void set_ca_cert_path(const std::string &ca_cert_file_path, + const std::string &ca_cert_dir_path = std::string()); void set_ca_cert_store(X509_STORE *ca_cert_store); + void load_ca_cert_store(const char *ca_cert, std::size_t size); long get_openssl_verify_result() const; @@ -1091,27 +1704,33 @@ public: #endif private: - std::shared_ptr cli_; + std::unique_ptr cli_; #ifdef CPPHTTPLIB_OPENSSL_SUPPORT bool is_ssl_ = false; #endif -}; // namespace httplib +}; #ifdef CPPHTTPLIB_OPENSSL_SUPPORT class SSLServer : public Server { public: SSLServer(const char *cert_path, const char *private_key_path, const char *client_ca_cert_file_path = nullptr, - const char *client_ca_cert_dir_path = nullptr); + const char *client_ca_cert_dir_path = nullptr, + const char *private_key_password = nullptr); SSLServer(X509 *cert, EVP_PKEY *private_key, X509_STORE *client_ca_cert_store = nullptr); + SSLServer( + const std::function &setup_ssl_ctx_callback); + ~SSLServer() override; bool is_valid() const override; + SSL_CTX *ssl_context() const; + private: bool process_and_close_socket(socket_t sock) override; @@ -1136,25 +1755,25 @@ public: bool is_valid() const override; - void set_ca_cert_path(const char *ca_cert_file_path, - const char *ca_cert_dir_path = nullptr); - void set_ca_cert_store(X509_STORE *ca_cert_store); + void load_ca_cert_store(const char *ca_cert, std::size_t size); long get_openssl_verify_result() const; SSL_CTX *ssl_context() const; private: - bool create_and_connect_socket(Socket &socket) override; - void close_socket(Socket &socket, bool process_socket_ret) override; + bool create_and_connect_socket(Socket &socket, Error &error) override; + void shutdown_ssl(Socket &socket, bool shutdown_gracefully) override; + void shutdown_ssl_impl(Socket &socket, bool shutdown_gracefully); - bool process_socket(Socket &socket, + bool process_socket(const Socket &socket, std::function callback) override; bool is_ssl() const override; - bool connect_with_proxy(Socket &sock, Response &res, bool &success); - bool initialize_ssl(Socket &socket); + bool connect_with_proxy(Socket &sock, Response &res, bool &success, + Error &error); + bool initialize_ssl(Socket &socket, Error &error); bool load_certs(); @@ -1169,19 +1788,502 @@ private: std::vector host_components_; - std::string ca_cert_file_path_; - std::string ca_cert_dir_path_; - X509_STORE *ca_cert_store_ = nullptr; long verify_result_ = 0; friend class ClientImpl; }; #endif +/* + * Implementation of template methods. 
+ */ + +namespace detail { + +template +inline void duration_to_sec_and_usec(const T &duration, U callback) { + auto sec = std::chrono::duration_cast(duration).count(); + auto usec = std::chrono::duration_cast( + duration - std::chrono::seconds(sec)) + .count(); + callback(static_cast(sec), static_cast(usec)); +} + +inline uint64_t get_header_value_u64(const Headers &headers, + const std::string &key, size_t id, + uint64_t def) { + auto rng = headers.equal_range(key); + auto it = rng.first; + std::advance(it, static_cast(id)); + if (it != rng.second) { + return std::strtoull(it->second.data(), nullptr, 10); + } + return def; +} + +} // namespace detail + +inline uint64_t Request::get_header_value_u64(const std::string &key, + size_t id) const { + return detail::get_header_value_u64(headers, key, id, 0); +} + +inline uint64_t Response::get_header_value_u64(const std::string &key, + size_t id) const { + return detail::get_header_value_u64(headers, key, id, 0); +} + +template +inline ssize_t Stream::write_format(const char *fmt, const Args &...args) { + const auto bufsiz = 2048; + std::array buf{}; + + auto sn = snprintf(buf.data(), buf.size() - 1, fmt, args...); + if (sn <= 0) { return sn; } + + auto n = static_cast(sn); + + if (n >= buf.size() - 1) { + std::vector glowable_buf(buf.size()); + + while (n >= glowable_buf.size() - 1) { + glowable_buf.resize(glowable_buf.size() * 2); + n = static_cast( + snprintf(&glowable_buf[0], glowable_buf.size() - 1, fmt, args...)); + } + return write(&glowable_buf[0], n); + } else { + return write(buf.data(), n); + } +} + +inline void default_socket_options(socket_t sock) { + int yes = 1; +#ifdef _WIN32 + setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, + reinterpret_cast(&yes), sizeof(yes)); + setsockopt(sock, SOL_SOCKET, SO_EXCLUSIVEADDRUSE, + reinterpret_cast(&yes), sizeof(yes)); +#else +#ifdef SO_REUSEPORT + setsockopt(sock, SOL_SOCKET, SO_REUSEPORT, + reinterpret_cast(&yes), sizeof(yes)); +#else + setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, + reinterpret_cast(&yes), sizeof(yes)); +#endif +#endif +} + +inline const char *status_message(int status) { + switch (status) { + case StatusCode::Continue_100: return "Continue"; + case StatusCode::SwitchingProtocol_101: return "Switching Protocol"; + case StatusCode::Processing_102: return "Processing"; + case StatusCode::EarlyHints_103: return "Early Hints"; + case StatusCode::OK_200: return "OK"; + case StatusCode::Created_201: return "Created"; + case StatusCode::Accepted_202: return "Accepted"; + case StatusCode::NonAuthoritativeInformation_203: + return "Non-Authoritative Information"; + case StatusCode::NoContent_204: return "No Content"; + case StatusCode::ResetContent_205: return "Reset Content"; + case StatusCode::PartialContent_206: return "Partial Content"; + case StatusCode::MultiStatus_207: return "Multi-Status"; + case StatusCode::AlreadyReported_208: return "Already Reported"; + case StatusCode::IMUsed_226: return "IM Used"; + case StatusCode::MultipleChoices_300: return "Multiple Choices"; + case StatusCode::MovedPermanently_301: return "Moved Permanently"; + case StatusCode::Found_302: return "Found"; + case StatusCode::SeeOther_303: return "See Other"; + case StatusCode::NotModified_304: return "Not Modified"; + case StatusCode::UseProxy_305: return "Use Proxy"; + case StatusCode::unused_306: return "unused"; + case StatusCode::TemporaryRedirect_307: return "Temporary Redirect"; + case StatusCode::PermanentRedirect_308: return "Permanent Redirect"; + case StatusCode::BadRequest_400: return "Bad 
Request"; + case StatusCode::Unauthorized_401: return "Unauthorized"; + case StatusCode::PaymentRequired_402: return "Payment Required"; + case StatusCode::Forbidden_403: return "Forbidden"; + case StatusCode::NotFound_404: return "Not Found"; + case StatusCode::MethodNotAllowed_405: return "Method Not Allowed"; + case StatusCode::NotAcceptable_406: return "Not Acceptable"; + case StatusCode::ProxyAuthenticationRequired_407: + return "Proxy Authentication Required"; + case StatusCode::RequestTimeout_408: return "Request Timeout"; + case StatusCode::Conflict_409: return "Conflict"; + case StatusCode::Gone_410: return "Gone"; + case StatusCode::LengthRequired_411: return "Length Required"; + case StatusCode::PreconditionFailed_412: return "Precondition Failed"; + case StatusCode::PayloadTooLarge_413: return "Payload Too Large"; + case StatusCode::UriTooLong_414: return "URI Too Long"; + case StatusCode::UnsupportedMediaType_415: return "Unsupported Media Type"; + case StatusCode::RangeNotSatisfiable_416: return "Range Not Satisfiable"; + case StatusCode::ExpectationFailed_417: return "Expectation Failed"; + case StatusCode::ImATeapot_418: return "I'm a teapot"; + case StatusCode::MisdirectedRequest_421: return "Misdirected Request"; + case StatusCode::UnprocessableContent_422: return "Unprocessable Content"; + case StatusCode::Locked_423: return "Locked"; + case StatusCode::FailedDependency_424: return "Failed Dependency"; + case StatusCode::TooEarly_425: return "Too Early"; + case StatusCode::UpgradeRequired_426: return "Upgrade Required"; + case StatusCode::PreconditionRequired_428: return "Precondition Required"; + case StatusCode::TooManyRequests_429: return "Too Many Requests"; + case StatusCode::RequestHeaderFieldsTooLarge_431: + return "Request Header Fields Too Large"; + case StatusCode::UnavailableForLegalReasons_451: + return "Unavailable For Legal Reasons"; + case StatusCode::NotImplemented_501: return "Not Implemented"; + case StatusCode::BadGateway_502: return "Bad Gateway"; + case StatusCode::ServiceUnavailable_503: return "Service Unavailable"; + case StatusCode::GatewayTimeout_504: return "Gateway Timeout"; + case StatusCode::HttpVersionNotSupported_505: + return "HTTP Version Not Supported"; + case StatusCode::VariantAlsoNegotiates_506: return "Variant Also Negotiates"; + case StatusCode::InsufficientStorage_507: return "Insufficient Storage"; + case StatusCode::LoopDetected_508: return "Loop Detected"; + case StatusCode::NotExtended_510: return "Not Extended"; + case StatusCode::NetworkAuthenticationRequired_511: + return "Network Authentication Required"; + + default: + case StatusCode::InternalServerError_500: return "Internal Server Error"; + } +} + +inline std::string get_bearer_token_auth(const Request &req) { + if (req.has_header("Authorization")) { + static std::string BearerHeaderPrefix = "Bearer "; + return req.get_header_value("Authorization") + .substr(BearerHeaderPrefix.length()); + } + return ""; +} + +template +inline Server & +Server::set_read_timeout(const std::chrono::duration &duration) { + detail::duration_to_sec_and_usec( + duration, [&](time_t sec, time_t usec) { set_read_timeout(sec, usec); }); + return *this; +} + +template +inline Server & +Server::set_write_timeout(const std::chrono::duration &duration) { + detail::duration_to_sec_and_usec( + duration, [&](time_t sec, time_t usec) { set_write_timeout(sec, usec); }); + return *this; +} + +template +inline Server & +Server::set_idle_interval(const std::chrono::duration &duration) { + 
detail::duration_to_sec_and_usec( + duration, [&](time_t sec, time_t usec) { set_idle_interval(sec, usec); }); + return *this; +} + +inline std::string to_string(const Error error) { + switch (error) { + case Error::Success: return "Success (no error)"; + case Error::Connection: return "Could not establish connection"; + case Error::BindIPAddress: return "Failed to bind IP address"; + case Error::Read: return "Failed to read connection"; + case Error::Write: return "Failed to write connection"; + case Error::ExceedRedirectCount: return "Maximum redirect count exceeded"; + case Error::Canceled: return "Connection handling canceled"; + case Error::SSLConnection: return "SSL connection failed"; + case Error::SSLLoadingCerts: return "SSL certificate loading failed"; + case Error::SSLServerVerification: return "SSL server verification failed"; + case Error::UnsupportedMultipartBoundaryChars: + return "Unsupported HTTP multipart boundary characters"; + case Error::Compression: return "Compression failed"; + case Error::ConnectionTimeout: return "Connection timed out"; + case Error::ProxyConnection: return "Proxy connection failed"; + case Error::Unknown: return "Unknown"; + default: break; + } + + return "Invalid"; +} + +inline std::ostream &operator<<(std::ostream &os, const Error &obj) { + os << to_string(obj); + os << " (" << static_cast::type>(obj) << ')'; + return os; +} + +inline uint64_t Result::get_request_header_value_u64(const std::string &key, + size_t id) const { + return detail::get_header_value_u64(request_headers_, key, id, 0); +} + +template +inline void ClientImpl::set_connection_timeout( + const std::chrono::duration &duration) { + detail::duration_to_sec_and_usec(duration, [&](time_t sec, time_t usec) { + set_connection_timeout(sec, usec); + }); +} + +template +inline void ClientImpl::set_read_timeout( + const std::chrono::duration &duration) { + detail::duration_to_sec_and_usec( + duration, [&](time_t sec, time_t usec) { set_read_timeout(sec, usec); }); +} + +template +inline void ClientImpl::set_write_timeout( + const std::chrono::duration &duration) { + detail::duration_to_sec_and_usec( + duration, [&](time_t sec, time_t usec) { set_write_timeout(sec, usec); }); +} + +template +inline void Client::set_connection_timeout( + const std::chrono::duration &duration) { + cli_->set_connection_timeout(duration); +} + +template +inline void +Client::set_read_timeout(const std::chrono::duration &duration) { + cli_->set_read_timeout(duration); +} + +template +inline void +Client::set_write_timeout(const std::chrono::duration &duration) { + cli_->set_write_timeout(duration); +} + +/* + * Forward declarations and types that will be part of the .h file if split into + * .h + .cc. 
+ */ + +std::string hosted_at(const std::string &hostname); + +void hosted_at(const std::string &hostname, std::vector &addrs); + +std::string append_query_params(const std::string &path, const Params ¶ms); + +std::pair make_range_header(const Ranges &ranges); + +std::pair +make_basic_authentication_header(const std::string &username, + const std::string &password, + bool is_proxy = false); + +namespace detail { + +std::string encode_query_param(const std::string &value); + +std::string decode_url(const std::string &s, bool convert_plus_to_space); + +void read_file(const std::string &path, std::string &out); + +std::string trim_copy(const std::string &s); + +void split(const char *b, const char *e, char d, + std::function fn); + +void split(const char *b, const char *e, char d, size_t m, + std::function fn); + +bool process_client_socket(socket_t sock, time_t read_timeout_sec, + time_t read_timeout_usec, time_t write_timeout_sec, + time_t write_timeout_usec, + std::function callback); + +socket_t create_client_socket( + const std::string &host, const std::string &ip, int port, + int address_family, bool tcp_nodelay, SocketOptions socket_options, + time_t connection_timeout_sec, time_t connection_timeout_usec, + time_t read_timeout_sec, time_t read_timeout_usec, time_t write_timeout_sec, + time_t write_timeout_usec, const std::string &intf, Error &error); + +const char *get_header_value(const Headers &headers, const std::string &key, + size_t id = 0, const char *def = nullptr); + +std::string params_to_query_str(const Params ¶ms); + +void parse_query_text(const std::string &s, Params ¶ms); + +bool parse_multipart_boundary(const std::string &content_type, + std::string &boundary); + +bool parse_range_header(const std::string &s, Ranges &ranges); + +int close_socket(socket_t sock); + +ssize_t send_socket(socket_t sock, const void *ptr, size_t size, int flags); + +ssize_t read_socket(socket_t sock, void *ptr, size_t size, int flags); + +enum class EncodingType { None = 0, Gzip, Brotli }; + +EncodingType encoding_type(const Request &req, const Response &res); + +class BufferStream : public Stream { +public: + BufferStream() = default; + ~BufferStream() override = default; + + bool is_readable() const override; + bool is_writable() const override; + ssize_t read(char *ptr, size_t size) override; + ssize_t write(const char *ptr, size_t size) override; + void get_remote_ip_and_port(std::string &ip, int &port) const override; + void get_local_ip_and_port(std::string &ip, int &port) const override; + socket_t socket() const override; + + const std::string &get_buffer() const; + +private: + std::string buffer; + size_t position = 0; +}; + +class compressor { +public: + virtual ~compressor() = default; + + typedef std::function Callback; + virtual bool compress(const char *data, size_t data_length, bool last, + Callback callback) = 0; +}; + +class decompressor { +public: + virtual ~decompressor() = default; + + virtual bool is_valid() const = 0; + + typedef std::function Callback; + virtual bool decompress(const char *data, size_t data_length, + Callback callback) = 0; +}; + +class nocompressor : public compressor { +public: + ~nocompressor() override = default; + + bool compress(const char *data, size_t data_length, bool /*last*/, + Callback callback) override; +}; + +#ifdef CPPHTTPLIB_ZLIB_SUPPORT +class gzip_compressor : public compressor { +public: + gzip_compressor(); + ~gzip_compressor() override; + + bool compress(const char *data, size_t data_length, bool last, + Callback callback) override; + 
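The compressor/decompressor interfaces push output to the caller through a bool-returning Callback instead of returning a buffer, so one compress() call may invoke the callback several times. A small sketch of that protocol, assuming CPPHTTPLIB_ZLIB_SUPPORT is defined before including the header; the detail:: types are internal helpers, so the helper function name here is hypothetical:

#include <string>
#include <httplib.h>  // built with CPPHTTPLIB_ZLIB_SUPPORT

// Gzip-compress a whole buffer; `last = true` tells the compressor to flush
// and finish the stream in this single call.
inline std::string gzip_whole_buffer(const std::string &in) {
  std::string out;
  httplib::detail::gzip_compressor compressor;
  auto ok = compressor.compress(
      in.data(), in.size(), /*last=*/true,
      [&](const char *data, size_t len) {
        out.append(data, len);  // collect each output chunk
        return true;            // returning false aborts compression
      });
  return ok ? out : std::string();
}
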
+private: + bool is_valid_ = false; + z_stream strm_; +}; + +class gzip_decompressor : public decompressor { +public: + gzip_decompressor(); + ~gzip_decompressor() override; + + bool is_valid() const override; + + bool decompress(const char *data, size_t data_length, + Callback callback) override; + +private: + bool is_valid_ = false; + z_stream strm_; +}; +#endif + +#ifdef CPPHTTPLIB_BROTLI_SUPPORT +class brotli_compressor : public compressor { +public: + brotli_compressor(); + ~brotli_compressor(); + + bool compress(const char *data, size_t data_length, bool last, + Callback callback) override; + +private: + BrotliEncoderState *state_ = nullptr; +}; + +class brotli_decompressor : public decompressor { +public: + brotli_decompressor(); + ~brotli_decompressor(); + + bool is_valid() const override; + + bool decompress(const char *data, size_t data_length, + Callback callback) override; + +private: + BrotliDecoderResult decoder_r; + BrotliDecoderState *decoder_s = nullptr; +}; +#endif + +// NOTE: until the read size reaches `fixed_buffer_size`, use `fixed_buffer` +// to store data. The call can set memory on stack for performance. +class stream_line_reader { +public: + stream_line_reader(Stream &strm, char *fixed_buffer, + size_t fixed_buffer_size); + const char *ptr() const; + size_t size() const; + bool end_with_crlf() const; + bool getline(); + +private: + void append(char c); + + Stream &strm_; + char *fixed_buffer_; + const size_t fixed_buffer_size_; + size_t fixed_buffer_used_size_ = 0; + std::string glowable_buffer_; +}; + +class mmap { +public: + mmap(const char *path); + ~mmap(); + + bool open(const char *path); + void close(); + + bool is_open() const; + size_t size() const; + const char *data() const; + +private: +#if defined(_WIN32) + HANDLE hFile_; + HANDLE hMapping_; +#else + int fd_; +#endif + size_t size_; + void *addr_; +}; + +} // namespace detail + // ---------------------------------------------------------------------------- /* - * Implementation + * Implementation that will be part of the .cc file if split into .h + .cc. 
*/ namespace detail { @@ -1207,7 +2309,7 @@ inline bool from_hex_to_i(const std::string &s, size_t i, size_t cnt, val = 0; for (; cnt; i++, cnt--) { if (!s[i]) { return false; } - int v = 0; + auto v = 0; if (is_hex(s[i], v)) { val = val * 16 + v; } else { @@ -1218,7 +2320,7 @@ inline bool from_hex_to_i(const std::string &s, size_t i, size_t cnt, } inline std::string from_i_to_hex(size_t n) { - const char *charset = "0123456789abcdef"; + static const auto charset = "0123456789abcdef"; std::string ret; do { ret = charset[n & 15] + ret; @@ -1227,17 +2329,9 @@ inline std::string from_i_to_hex(size_t n) { return ret; } -inline bool start_with(const std::string &a, const std::string &b) { - if (a.size() < b.size()) { return false; } - for (size_t i = 0; i < b.size(); i++) { - if (std::tolower(a[i]) != std::tolower(b[i])) { return false; } - } - return true; -} - inline size_t to_utf8(int code, char *buff) { if (code < 0x0080) { - buff[0] = (code & 0x7F); + buff[0] = static_cast(code & 0x7F); return 1; } else if (code < 0x0800) { buff[0] = static_cast(0xC0 | ((code >> 6) & 0x1F)); @@ -1276,8 +2370,8 @@ inline std::string base64_encode(const std::string &in) { std::string out; out.reserve(in.size()); - int val = 0; - int valb = -6; + auto val = 0; + auto valb = -6; for (auto c : in) { val = (val << 8) + static_cast(c); @@ -1298,8 +2392,12 @@ inline std::string base64_encode(const std::string &in) { } inline bool is_file(const std::string &path) { +#ifdef _WIN32 + return _access_s(path.c_str(), 0) == 0; +#else struct stat st; return stat(path.c_str(), &st) >= 0 && S_ISREG(st.st_mode); +#endif } inline bool is_dir(const std::string &path) { @@ -1320,6 +2418,11 @@ inline bool is_valid_path(const std::string &path) { // Read component auto beg = i; while (i < path.size() && path[i] != '/') { + if (path[i] == '\0') { + return false; + } else if (path[i] == '\\') { + return false; + } i++; } @@ -1344,8 +2447,30 @@ inline bool is_valid_path(const std::string &path) { return true; } +inline std::string encode_query_param(const std::string &value) { + std::ostringstream escaped; + escaped.fill('0'); + escaped << std::hex; + + for (auto c : value) { + if (std::isalnum(static_cast(c)) || c == '-' || c == '_' || + c == '.' || c == '!' 
|| c == '~' || c == '*' || c == '\'' || c == '(' || + c == ')') { + escaped << c; + } else { + escaped << std::uppercase; + escaped << '%' << std::setw(2) + << static_cast(static_cast(c)); + escaped << std::nouppercase; + } + } + + return escaped.str(); +} + inline std::string encode_url(const std::string &s) { std::string result; + result.reserve(s.size()); for (size_t i = 0; s[i]; i++) { switch (s[i]) { @@ -1382,7 +2507,7 @@ inline std::string decode_url(const std::string &s, for (size_t i = 0; i < s.size(); i++) { if (s[i] == '%' && i + 1 < s.size()) { if (s[i + 1] == 'u') { - int val = 0; + auto val = 0; if (from_hex_to_i(s, i + 2, 4, val)) { // 4 digits Unicode codes char buff[4]; @@ -1393,7 +2518,7 @@ inline std::string decode_url(const std::string &s, result += s[i]; } } else { - int val = 0; + auto val = 0; if (from_hex_to_i(s, i + 1, 2, val)) { // 2 digits hex codes result += static_cast(val); @@ -1446,15 +2571,30 @@ inline std::string trim_copy(const std::string &s) { return s.substr(r.first, r.second - r.first); } -template void split(const char *b, const char *e, char d, Fn fn) { +inline std::string trim_double_quotes_copy(const std::string &s) { + if (s.length() >= 2 && s.front() == '"' && s.back() == '"') { + return s.substr(1, s.size() - 2); + } + return s; +} + +inline void split(const char *b, const char *e, char d, + std::function fn) { + return split(b, e, d, (std::numeric_limits::max)(), std::move(fn)); +} + +inline void split(const char *b, const char *e, char d, size_t m, + std::function fn) { size_t i = 0; size_t beg = 0; + size_t count = 1; while (e ? (b + i < e) : (b[i] != '\0')) { - if (b[i] == d) { + if (b[i] == d && count < m) { auto r = trim(b, e, beg, i); if (r.first < r.second) { fn(&b[r.first], &b[r.second]); } beg = i + 1; + count++; } i++; } @@ -1465,82 +2605,162 @@ template void split(const char *b, const char *e, char d, Fn fn) { } } -// NOTE: until the read size reaches `fixed_buffer_size`, use `fixed_buffer` -// to store data. The call can set memory on stack for performance. 
-class stream_line_reader { -public: - stream_line_reader(Stream &strm, char *fixed_buffer, size_t fixed_buffer_size) - : strm_(strm), fixed_buffer_(fixed_buffer), - fixed_buffer_size_(fixed_buffer_size) {} +inline stream_line_reader::stream_line_reader(Stream &strm, char *fixed_buffer, + size_t fixed_buffer_size) + : strm_(strm), fixed_buffer_(fixed_buffer), + fixed_buffer_size_(fixed_buffer_size) {} - const char *ptr() const { - if (glowable_buffer_.empty()) { - return fixed_buffer_; - } else { - return glowable_buffer_.data(); - } +inline const char *stream_line_reader::ptr() const { + if (glowable_buffer_.empty()) { + return fixed_buffer_; + } else { + return glowable_buffer_.data(); } +} - size_t size() const { - if (glowable_buffer_.empty()) { - return fixed_buffer_used_size_; - } else { - return glowable_buffer_.size(); - } +inline size_t stream_line_reader::size() const { + if (glowable_buffer_.empty()) { + return fixed_buffer_used_size_; + } else { + return glowable_buffer_.size(); } +} - bool end_with_crlf() const { - auto end = ptr() + size(); - return size() >= 2 && end[-2] == '\r' && end[-1] == '\n'; - } +inline bool stream_line_reader::end_with_crlf() const { + auto end = ptr() + size(); + return size() >= 2 && end[-2] == '\r' && end[-1] == '\n'; +} - bool getline() { - fixed_buffer_used_size_ = 0; - glowable_buffer_.clear(); +inline bool stream_line_reader::getline() { + fixed_buffer_used_size_ = 0; + glowable_buffer_.clear(); - for (size_t i = 0;; i++) { - char byte; - auto n = strm_.read(&byte, 1); + for (size_t i = 0;; i++) { + char byte; + auto n = strm_.read(&byte, 1); - if (n < 0) { + if (n < 0) { + return false; + } else if (n == 0) { + if (i == 0) { return false; - } else if (n == 0) { - if (i == 0) { - return false; - } else { - break; - } + } else { + break; } - - append(byte); - - if (byte == '\n') { break; } } - return true; + append(byte); + + if (byte == '\n') { break; } } -private: - void append(char c) { - if (fixed_buffer_used_size_ < fixed_buffer_size_ - 1) { - fixed_buffer_[fixed_buffer_used_size_++] = c; - fixed_buffer_[fixed_buffer_used_size_] = '\0'; - } else { - if (glowable_buffer_.empty()) { - assert(fixed_buffer_[fixed_buffer_used_size_] == '\0'); - glowable_buffer_.assign(fixed_buffer_, fixed_buffer_used_size_); - } - glowable_buffer_ += c; + return true; +} + +inline void stream_line_reader::append(char c) { + if (fixed_buffer_used_size_ < fixed_buffer_size_ - 1) { + fixed_buffer_[fixed_buffer_used_size_++] = c; + fixed_buffer_[fixed_buffer_used_size_] = '\0'; + } else { + if (glowable_buffer_.empty()) { + assert(fixed_buffer_[fixed_buffer_used_size_] == '\0'); + glowable_buffer_.assign(fixed_buffer_, fixed_buffer_used_size_); } + glowable_buffer_ += c; + } +} + +inline mmap::mmap(const char *path) +#if defined(_WIN32) + : hFile_(NULL), hMapping_(NULL) +#else + : fd_(-1) +#endif + , + size_(0), addr_(nullptr) { + open(path); +} + +inline mmap::~mmap() { close(); } + +inline bool mmap::open(const char *path) { + close(); + +#if defined(_WIN32) + hFile_ = ::CreateFileA(path, GENERIC_READ, FILE_SHARE_READ, NULL, + OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); + + if (hFile_ == INVALID_HANDLE_VALUE) { return false; } + + size_ = ::GetFileSize(hFile_, NULL); + + hMapping_ = ::CreateFileMapping(hFile_, NULL, PAGE_READONLY, 0, 0, NULL); + + if (hMapping_ == NULL) { + close(); + return false; } - Stream &strm_; - char *fixed_buffer_; - const size_t fixed_buffer_size_; - size_t fixed_buffer_used_size_ = 0; - std::string glowable_buffer_; -}; + addr_ = 
::MapViewOfFile(hMapping_, FILE_MAP_READ, 0, 0, 0); +#else + fd_ = ::open(path, O_RDONLY); + if (fd_ == -1) { return false; } + struct stat sb; + if (fstat(fd_, &sb) == -1) { + close(); + return false; + } + size_ = static_cast(sb.st_size); + + addr_ = ::mmap(NULL, size_, PROT_READ, MAP_PRIVATE, fd_, 0); +#endif + + if (addr_ == nullptr) { + close(); + return false; + } + + return true; +} + +inline bool mmap::is_open() const { return addr_ != nullptr; } + +inline size_t mmap::size() const { return size_; } + +inline const char *mmap::data() const { + return static_cast(addr_); +} + +inline void mmap::close() { +#if defined(_WIN32) + if (addr_) { + ::UnmapViewOfFile(addr_); + addr_ = nullptr; + } + + if (hMapping_) { + ::CloseHandle(hMapping_); + hMapping_ = NULL; + } + + if (hFile_ != INVALID_HANDLE_VALUE) { + ::CloseHandle(hFile_); + hFile_ = INVALID_HANDLE_VALUE; + } +#else + if (addr_ != nullptr) { + munmap(addr_, size_); + addr_ = nullptr; + } + + if (fd_ != -1) { + ::close(fd_); + fd_ = -1; + } +#endif + size_ = 0; +} inline int close_socket(socket_t sock) { #ifdef _WIN32 return closesocket(sock); @@ -1550,7 +2770,7 @@ inline int close_socket(socket_t sock) { } template inline ssize_t handle_EINTR(T fn) { - ssize_t res = false; + ssize_t res = 0; while (true) { res = fn(); if (res < 0 && errno == EINTR) { continue; } @@ -1559,6 +2779,31 @@ template inline ssize_t handle_EINTR(T fn) { return res; } +inline ssize_t read_socket(socket_t sock, void *ptr, size_t size, int flags) { + return handle_EINTR([&]() { + return recv(sock, +#ifdef _WIN32 + static_cast(ptr), static_cast(size), +#else + ptr, size, +#endif + flags); + }); +} + +inline ssize_t send_socket(socket_t sock, const void *ptr, size_t size, + int flags) { + return handle_EINTR([&]() { + return send(sock, +#ifdef _WIN32 + static_cast(ptr), static_cast(size), +#else + ptr, size, +#endif + flags); + }); +} + inline ssize_t select_read(socket_t sock, time_t sec, time_t usec) { #ifdef CPPHTTPLIB_USE_POLL struct pollfd pfd_read; @@ -1569,6 +2814,10 @@ inline ssize_t select_read(socket_t sock, time_t sec, time_t usec) { return handle_EINTR([&]() { return poll(&pfd_read, 1, timeout); }); #else +#ifndef _WIN32 + if (sock >= FD_SETSIZE) { return -1; } +#endif + fd_set fds; FD_ZERO(&fds); FD_SET(sock, &fds); @@ -1593,6 +2842,10 @@ inline ssize_t select_write(socket_t sock, time_t sec, time_t usec) { return handle_EINTR([&]() { return poll(&pfd_read, 1, timeout); }); #else +#ifndef _WIN32 + if (sock >= FD_SETSIZE) { return -1; } +#endif + fd_set fds; FD_ZERO(&fds); FD_SET(sock, &fds); @@ -1607,7 +2860,8 @@ inline ssize_t select_write(socket_t sock, time_t sec, time_t usec) { #endif } -inline bool wait_until_socket_is_ready(socket_t sock, time_t sec, time_t usec) { +inline Error wait_until_socket_is_ready(socket_t sock, time_t sec, + time_t usec) { #ifdef CPPHTTPLIB_USE_POLL struct pollfd pfd_read; pfd_read.fd = sock; @@ -1617,15 +2871,23 @@ inline bool wait_until_socket_is_ready(socket_t sock, time_t sec, time_t usec) { auto poll_res = handle_EINTR([&]() { return poll(&pfd_read, 1, timeout); }); + if (poll_res == 0) { return Error::ConnectionTimeout; } + if (poll_res > 0 && pfd_read.revents & (POLLIN | POLLOUT)) { - int error = 0; + auto error = 0; socklen_t len = sizeof(error); auto res = getsockopt(sock, SOL_SOCKET, SO_ERROR, reinterpret_cast(&error), &len); - return res >= 0 && !error; + auto successful = res >= 0 && !error; + return successful ? 
Error::Success : Error::Connection; } - return false; + + return Error::Connection; #else +#ifndef _WIN32 + if (sock >= FD_SETSIZE) { return Error::Connection; } +#endif + fd_set fdsr; FD_ZERO(&fdsr); FD_SET(sock, &fdsr); @@ -1641,17 +2903,31 @@ inline bool wait_until_socket_is_ready(socket_t sock, time_t sec, time_t usec) { return select(static_cast(sock + 1), &fdsr, &fdsw, &fdse, &tv); }); + if (ret == 0) { return Error::ConnectionTimeout; } + if (ret > 0 && (FD_ISSET(sock, &fdsr) || FD_ISSET(sock, &fdsw))) { - int error = 0; + auto error = 0; socklen_t len = sizeof(error); - return getsockopt(sock, SOL_SOCKET, SO_ERROR, - reinterpret_cast(&error), &len) >= 0 && - !error; + auto res = getsockopt(sock, SOL_SOCKET, SO_ERROR, + reinterpret_cast(&error), &len); + auto successful = res >= 0 && !error; + return successful ? Error::Success : Error::Connection; } - return false; + return Error::Connection; #endif } +inline bool is_socket_alive(socket_t sock) { + const auto val = detail::select_read(sock, 0, 0); + if (val == 0) { + return true; + } else if (val < 0 && errno == EBADF) { + return false; + } + char buf[1]; + return detail::read_socket(sock, &buf[0], sizeof(buf), MSG_PEEK) > 0; +} + class SocketStream : public Stream { public: SocketStream(socket_t sock, time_t read_timeout_sec, time_t read_timeout_usec, @@ -1663,6 +2939,8 @@ public: ssize_t read(char *ptr, size_t size) override; ssize_t write(const char *ptr, size_t size) override; void get_remote_ip_and_port(std::string &ip, int &port) const override; + void get_local_ip_and_port(std::string &ip, int &port) const override; + socket_t socket() const override; private: socket_t sock_; @@ -1670,6 +2948,12 @@ private: time_t read_timeout_usec_; time_t write_timeout_sec_; time_t write_timeout_usec_; + + std::vector read_buff_; + size_t read_buff_off_ = 0; + size_t read_buff_content_size_ = 0; + + static const size_t read_buff_size_ = 1024l * 4; }; #ifdef CPPHTTPLIB_OPENSSL_SUPPORT @@ -1685,6 +2969,8 @@ public: ssize_t read(char *ptr, size_t size) override; ssize_t write(const char *ptr, size_t size) override; void get_remote_ip_and_port(std::string &ip, int &port) const override; + void get_local_ip_and_port(std::string &ip, int &port) const override; + socket_t socket() const override; private: socket_t sock_; @@ -1696,24 +2982,6 @@ private: }; #endif -class BufferStream : public Stream { -public: - BufferStream() = default; - ~BufferStream() override = default; - - bool is_readable() const override; - bool is_writable() const override; - ssize_t read(char *ptr, size_t size) override; - ssize_t write(const char *ptr, size_t size) override; - void get_remote_ip_and_port(std::string &ip, int &port) const override; - - const std::string &get_buffer() const; - -private: - std::string buffer; - size_t position = 0; -}; - inline bool keep_alive(socket_t sock, time_t keep_alive_timeout_sec) { using namespace std::chrono; auto start = steady_clock::now(); @@ -1735,12 +3003,14 @@ inline bool keep_alive(socket_t sock, time_t keep_alive_timeout_sec) { template inline bool -process_server_socket_core(socket_t sock, size_t keep_alive_max_count, +process_server_socket_core(const std::atomic &svr_sock, socket_t sock, + size_t keep_alive_max_count, time_t keep_alive_timeout_sec, T callback) { assert(keep_alive_max_count > 0); auto ret = false; auto count = keep_alive_max_count; - while (count > 0 && keep_alive(sock, keep_alive_timeout_sec)) { + while (svr_sock != INVALID_SOCKET && count > 0 && + keep_alive(sock, keep_alive_timeout_sec)) { auto 
close_connection = count == 1; auto connection_closed = false; ret = callback(close_connection, connection_closed); @@ -1752,12 +3022,13 @@ process_server_socket_core(socket_t sock, size_t keep_alive_max_count, template inline bool -process_server_socket(socket_t sock, size_t keep_alive_max_count, +process_server_socket(const std::atomic &svr_sock, socket_t sock, + size_t keep_alive_max_count, time_t keep_alive_timeout_sec, time_t read_timeout_sec, time_t read_timeout_usec, time_t write_timeout_sec, time_t write_timeout_usec, T callback) { return process_server_socket_core( - sock, keep_alive_max_count, keep_alive_timeout_sec, + svr_sock, sock, keep_alive_max_count, keep_alive_timeout_sec, [&](bool close_connection, bool &connection_closed) { SocketStream strm(sock, read_timeout_sec, read_timeout_usec, write_timeout_sec, write_timeout_usec); @@ -1765,11 +3036,11 @@ process_server_socket(socket_t sock, size_t keep_alive_max_count, }); } -template inline bool process_client_socket(socket_t sock, time_t read_timeout_sec, time_t read_timeout_usec, time_t write_timeout_sec, - time_t write_timeout_usec, T callback) { + time_t write_timeout_usec, + std::function callback) { SocketStream strm(sock, read_timeout_sec, read_timeout_usec, write_timeout_sec, write_timeout_usec); return callback(strm); @@ -1784,23 +3055,61 @@ inline int shutdown_socket(socket_t sock) { } template -socket_t create_socket(const char *host, int port, int socket_flags, - bool tcp_nodelay, SocketOptions socket_options, +socket_t create_socket(const std::string &host, const std::string &ip, int port, + int address_family, int socket_flags, bool tcp_nodelay, + SocketOptions socket_options, BindOrConnect bind_or_connect) { // Get address info + const char *node = nullptr; struct addrinfo hints; struct addrinfo *result; memset(&hints, 0, sizeof(struct addrinfo)); - hints.ai_family = AF_UNSPEC; hints.ai_socktype = SOCK_STREAM; - hints.ai_flags = socket_flags; hints.ai_protocol = 0; + if (!ip.empty()) { + node = ip.c_str(); + // Ask getaddrinfo to convert IP in c-string to address + hints.ai_family = AF_UNSPEC; + hints.ai_flags = AI_NUMERICHOST; + } else { + if (!host.empty()) { node = host.c_str(); } + hints.ai_family = address_family; + hints.ai_flags = socket_flags; + } + +#ifndef _WIN32 + if (hints.ai_family == AF_UNIX) { + const auto addrlen = host.length(); + if (addrlen > sizeof(sockaddr_un::sun_path)) { return INVALID_SOCKET; } + + auto sock = socket(hints.ai_family, hints.ai_socktype, hints.ai_protocol); + if (sock != INVALID_SOCKET) { + sockaddr_un addr{}; + addr.sun_family = AF_UNIX; + std::copy(host.begin(), host.end(), addr.sun_path); + + hints.ai_addr = reinterpret_cast(&addr); + hints.ai_addrlen = static_cast( + sizeof(addr) - sizeof(addr.sun_path) + addrlen); + + fcntl(sock, F_SETFD, FD_CLOEXEC); + if (socket_options) { socket_options(sock); } + + if (!bind_or_connect(sock, hints)) { + close_socket(sock); + sock = INVALID_SOCKET; + } + } + return sock; + } +#endif + auto service = std::to_string(port); - if (getaddrinfo(host, service.c_str(), &hints, &result)) { -#ifdef __linux__ + if (getaddrinfo(node, service.c_str(), &hints, &result)) { +#if defined __linux__ && !defined __ANDROID__ res_init(); #endif return INVALID_SOCKET; @@ -1809,8 +3118,9 @@ socket_t create_socket(const char *host, int port, int socket_flags, for (auto rp = result; rp; rp = rp->ai_next) { // Create a socket #ifdef _WIN32 - auto sock = WSASocketW(rp->ai_family, rp->ai_socktype, rp->ai_protocol, - nullptr, 0, WSA_FLAG_NO_HANDLE_INHERIT); + 
auto sock = + WSASocketW(rp->ai_family, rp->ai_socktype, rp->ai_protocol, nullptr, 0, + WSA_FLAG_NO_HANDLE_INHERIT | WSA_FLAG_OVERLAPPED); /** * Since the WSA_FLAG_NO_HANDLE_INHERIT is only supported on Windows 7 SP1 * and above the socket creation fails on older Windows Systems. @@ -1834,21 +3144,34 @@ socket_t create_socket(const char *host, int port, int socket_flags, if (sock == INVALID_SOCKET) { continue; } #ifndef _WIN32 - if (fcntl(sock, F_SETFD, FD_CLOEXEC) == -1) { continue; } + if (fcntl(sock, F_SETFD, FD_CLOEXEC) == -1) { + close_socket(sock); + continue; + } #endif if (tcp_nodelay) { - int yes = 1; - setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, reinterpret_cast(&yes), - sizeof(yes)); + auto yes = 1; +#ifdef _WIN32 + setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, + reinterpret_cast(&yes), sizeof(yes)); +#else + setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, + reinterpret_cast(&yes), sizeof(yes)); +#endif } if (socket_options) { socket_options(sock); } if (rp->ai_family == AF_INET6) { - int no = 0; - setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, reinterpret_cast(&no), - sizeof(no)); + auto no = 0; +#ifdef _WIN32 + setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, + reinterpret_cast(&no), sizeof(no)); +#else + setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, + reinterpret_cast(&no), sizeof(no)); +#endif } // bind or connect @@ -1883,7 +3206,7 @@ inline bool is_connection_error() { #endif } -inline bool bind_ip_address(socket_t sock, const char *host) { +inline bool bind_ip_address(socket_t sock, const std::string &host) { struct addrinfo hints; struct addrinfo *result; @@ -1892,7 +3215,7 @@ inline bool bind_ip_address(socket_t sock, const char *host) { hints.ai_socktype = SOCK_STREAM; hints.ai_protocol = 0; - if (getaddrinfo(host, "0", &hints, &result)) { return false; } + if (getaddrinfo(host.c_str(), "0", &hints, &result)) { return false; } auto ret = false; for (auto rp = result; rp; rp = rp->ai_next) { @@ -1907,16 +3230,19 @@ inline bool bind_ip_address(socket_t sock, const char *host) { return ret; } -#if !defined _WIN32 && !defined ANDROID +#if !defined _WIN32 && !defined ANDROID && !defined _AIX && !defined __MVS__ #define USE_IF2IP #endif #ifdef USE_IF2IP -inline std::string if2ip(const std::string &ifn) { +inline std::string if2ip(int address_family, const std::string &ifn) { struct ifaddrs *ifap; getifaddrs(&ifap); + std::string addr_candidate; for (auto ifa = ifap; ifa; ifa = ifa->ifa_next) { - if (ifa->ifa_addr && ifn == ifa->ifa_name) { + if (ifa->ifa_addr && ifn == ifa->ifa_name && + (AF_UNSPEC == address_family || + ifa->ifa_addr->sa_family == address_family)) { if (ifa->ifa_addr->sa_family == AF_INET) { auto sa = reinterpret_cast(ifa->ifa_addr); char buf[INET_ADDRSTRLEN]; @@ -1924,48 +3250,96 @@ inline std::string if2ip(const std::string &ifn) { freeifaddrs(ifap); return std::string(buf, INET_ADDRSTRLEN); } + } else if (ifa->ifa_addr->sa_family == AF_INET6) { + auto sa = reinterpret_cast(ifa->ifa_addr); + if (!IN6_IS_ADDR_LINKLOCAL(&sa->sin6_addr)) { + char buf[INET6_ADDRSTRLEN] = {}; + if (inet_ntop(AF_INET6, &sa->sin6_addr, buf, INET6_ADDRSTRLEN)) { + // equivalent to mac's IN6_IS_ADDR_UNIQUE_LOCAL + auto s6_addr_head = sa->sin6_addr.s6_addr[0]; + if (s6_addr_head == 0xfc || s6_addr_head == 0xfd) { + addr_candidate = std::string(buf, INET6_ADDRSTRLEN); + } else { + freeifaddrs(ifap); + return std::string(buf, INET6_ADDRSTRLEN); + } + } + } } } } freeifaddrs(ifap); - return std::string(); + return addr_candidate; } #endif -inline socket_t create_client_socket(const char *host, int port, 
- bool tcp_nodelay, - SocketOptions socket_options, - time_t timeout_sec, time_t timeout_usec, - const std::string &intf, Error &error) { +inline socket_t create_client_socket( + const std::string &host, const std::string &ip, int port, + int address_family, bool tcp_nodelay, SocketOptions socket_options, + time_t connection_timeout_sec, time_t connection_timeout_usec, + time_t read_timeout_sec, time_t read_timeout_usec, time_t write_timeout_sec, + time_t write_timeout_usec, const std::string &intf, Error &error) { auto sock = create_socket( - host, port, 0, tcp_nodelay, socket_options, - [&](socket_t sock, struct addrinfo &ai) -> bool { + host, ip, port, address_family, 0, tcp_nodelay, std::move(socket_options), + [&](socket_t sock2, struct addrinfo &ai) -> bool { if (!intf.empty()) { #ifdef USE_IF2IP - auto ip = if2ip(intf); - if (ip.empty()) { ip = intf; } - if (!bind_ip_address(sock, ip.c_str())) { + auto ip_from_if = if2ip(address_family, intf); + if (ip_from_if.empty()) { ip_from_if = intf; } + if (!bind_ip_address(sock2, ip_from_if)) { error = Error::BindIPAddress; return false; } #endif } - set_nonblocking(sock, true); + set_nonblocking(sock2, true); auto ret = - ::connect(sock, ai.ai_addr, static_cast(ai.ai_addrlen)); + ::connect(sock2, ai.ai_addr, static_cast(ai.ai_addrlen)); if (ret < 0) { - if (is_connection_error() || - !wait_until_socket_is_ready(sock, timeout_sec, timeout_usec)) { - close_socket(sock); + if (is_connection_error()) { error = Error::Connection; return false; } + error = wait_until_socket_is_ready(sock2, connection_timeout_sec, + connection_timeout_usec); + if (error != Error::Success) { return false; } + } + + set_nonblocking(sock2, false); + + { +#ifdef _WIN32 + auto timeout = static_cast(read_timeout_sec * 1000 + + read_timeout_usec / 1000); + setsockopt(sock2, SOL_SOCKET, SO_RCVTIMEO, + reinterpret_cast(&timeout), sizeof(timeout)); +#else + timeval tv; + tv.tv_sec = static_cast(read_timeout_sec); + tv.tv_usec = static_cast(read_timeout_usec); + setsockopt(sock2, SOL_SOCKET, SO_RCVTIMEO, + reinterpret_cast(&tv), sizeof(tv)); +#endif + } + { + +#ifdef _WIN32 + auto timeout = static_cast(write_timeout_sec * 1000 + + write_timeout_usec / 1000); + setsockopt(sock2, SOL_SOCKET, SO_SNDTIMEO, + reinterpret_cast(&timeout), sizeof(timeout)); +#else + timeval tv; + tv.tv_sec = static_cast(write_timeout_sec); + tv.tv_usec = static_cast(write_timeout_usec); + setsockopt(sock2, SOL_SOCKET, SO_SNDTIMEO, + reinterpret_cast(&tv), sizeof(tv)); +#endif } - set_nonblocking(sock, false); error = Error::Success; return true; }); @@ -1979,21 +3353,34 @@ inline socket_t create_client_socket(const char *host, int port, return sock; } -inline void get_remote_ip_and_port(const struct sockaddr_storage &addr, - socklen_t addr_len, std::string &ip, - int &port) { +inline bool get_ip_and_port(const struct sockaddr_storage &addr, + socklen_t addr_len, std::string &ip, int &port) { if (addr.ss_family == AF_INET) { port = ntohs(reinterpret_cast(&addr)->sin_port); } else if (addr.ss_family == AF_INET6) { port = ntohs(reinterpret_cast(&addr)->sin6_port); + } else { + return false; } std::array ipstr{}; - if (!getnameinfo(reinterpret_cast(&addr), addr_len, - ipstr.data(), static_cast(ipstr.size()), nullptr, - 0, NI_NUMERICHOST)) { - ip = ipstr.data(); + if (getnameinfo(reinterpret_cast(&addr), addr_len, + ipstr.data(), static_cast(ipstr.size()), nullptr, + 0, NI_NUMERICHOST)) { + return false; + } + + ip = ipstr.data(); + return true; +} + +inline void get_local_ip_and_port(socket_t sock, 
std::string &ip, int &port) { + struct sockaddr_storage addr; + socklen_t addr_len = sizeof(addr); + if (!getsockname(sock, reinterpret_cast(&addr), + &addr_len)) { + get_ip_and_port(addr, addr_len, ip, port); } } @@ -2003,130 +3390,135 @@ inline void get_remote_ip_and_port(socket_t sock, std::string &ip, int &port) { if (!getpeername(sock, reinterpret_cast(&addr), &addr_len)) { - get_remote_ip_and_port(addr, addr_len, ip, port); +#ifndef _WIN32 + if (addr.ss_family == AF_UNIX) { +#if defined(__linux__) + struct ucred ucred; + socklen_t len = sizeof(ucred); + if (getsockopt(sock, SOL_SOCKET, SO_PEERCRED, &ucred, &len) == 0) { + port = ucred.pid; + } +#elif defined(SOL_LOCAL) && defined(SO_PEERPID) // __APPLE__ + pid_t pid; + socklen_t len = sizeof(pid); + if (getsockopt(sock, SOL_LOCAL, SO_PEERPID, &pid, &len) == 0) { + port = pid; + } +#endif + return; + } +#endif + get_ip_and_port(addr, addr_len, ip, port); } } -inline const char * +inline constexpr unsigned int str2tag_core(const char *s, size_t l, + unsigned int h) { + return (l == 0) + ? h + : str2tag_core( + s + 1, l - 1, + // Unsets the 6 high bits of h, therefore no overflow happens + (((std::numeric_limits::max)() >> 6) & + h * 33) ^ + static_cast(*s)); +} + +inline unsigned int str2tag(const std::string &s) { + return str2tag_core(s.data(), s.size(), 0); +} + +namespace udl { + +inline constexpr unsigned int operator"" _t(const char *s, size_t l) { + return str2tag_core(s, l, 0); +} + +} // namespace udl + +inline std::string find_content_type(const std::string &path, - const std::map &user_data) { + const std::map &user_data, + const std::string &default_content_type) { auto ext = file_extension(path); auto it = user_data.find(ext); - if (it != user_data.end()) { return it->second.c_str(); } + if (it != user_data.end()) { return it->second; } - if (ext == "txt") { - return "text/plain"; - } else if (ext == "html" || ext == "htm") { - return "text/html"; - } else if (ext == "css") { - return "text/css"; - } else if (ext == "jpeg" || ext == "jpg") { - return "image/jpg"; - } else if (ext == "png") { - return "image/png"; - } else if (ext == "gif") { - return "image/gif"; - } else if (ext == "svg") { - return "image/svg+xml"; - } else if (ext == "ico") { - return "image/x-icon"; - } else if (ext == "json") { - return "application/json"; - } else if (ext == "pdf") { - return "application/pdf"; - } else if (ext == "js") { - return "application/javascript"; - } else if (ext == "wasm") { - return "application/wasm"; - } else if (ext == "xml") { - return "application/xml"; - } else if (ext == "xhtml") { - return "application/xhtml+xml"; - } - return nullptr; -} + using udl::operator""_t; -inline const char *status_message(int status) { - switch (status) { - case 100: return "Continue"; - case 101: return "Switching Protocol"; - case 102: return "Processing"; - case 103: return "Early Hints"; - case 200: return "OK"; - case 201: return "Created"; - case 202: return "Accepted"; - case 203: return "Non-Authoritative Information"; - case 204: return "No Content"; - case 205: return "Reset Content"; - case 206: return "Partial Content"; - case 207: return "Multi-Status"; - case 208: return "Already Reported"; - case 226: return "IM Used"; - case 300: return "Multiple Choice"; - case 301: return "Moved Permanently"; - case 302: return "Found"; - case 303: return "See Other"; - case 304: return "Not Modified"; - case 305: return "Use Proxy"; - case 306: return "unused"; - case 307: return "Temporary Redirect"; - case 308: return "Permanent 
Redirect"; - case 400: return "Bad Request"; - case 401: return "Unauthorized"; - case 402: return "Payment Required"; - case 403: return "Forbidden"; - case 404: return "Not Found"; - case 405: return "Method Not Allowed"; - case 406: return "Not Acceptable"; - case 407: return "Proxy Authentication Required"; - case 408: return "Request Timeout"; - case 409: return "Conflict"; - case 410: return "Gone"; - case 411: return "Length Required"; - case 412: return "Precondition Failed"; - case 413: return "Payload Too Large"; - case 414: return "URI Too Long"; - case 415: return "Unsupported Media Type"; - case 416: return "Range Not Satisfiable"; - case 417: return "Expectation Failed"; - case 418: return "I'm a teapot"; - case 421: return "Misdirected Request"; - case 422: return "Unprocessable Entity"; - case 423: return "Locked"; - case 424: return "Failed Dependency"; - case 425: return "Too Early"; - case 426: return "Upgrade Required"; - case 428: return "Precondition Required"; - case 429: return "Too Many Requests"; - case 431: return "Request Header Fields Too Large"; - case 451: return "Unavailable For Legal Reasons"; - case 501: return "Not Implemented"; - case 502: return "Bad Gateway"; - case 503: return "Service Unavailable"; - case 504: return "Gateway Timeout"; - case 505: return "HTTP Version Not Supported"; - case 506: return "Variant Also Negotiates"; - case 507: return "Insufficient Storage"; - case 508: return "Loop Detected"; - case 510: return "Not Extended"; - case 511: return "Network Authentication Required"; + switch (str2tag(ext)) { + default: return default_content_type; - default: - case 500: return "Internal Server Error"; + case "css"_t: return "text/css"; + case "csv"_t: return "text/csv"; + case "htm"_t: + case "html"_t: return "text/html"; + case "js"_t: + case "mjs"_t: return "text/javascript"; + case "txt"_t: return "text/plain"; + case "vtt"_t: return "text/vtt"; + + case "apng"_t: return "image/apng"; + case "avif"_t: return "image/avif"; + case "bmp"_t: return "image/bmp"; + case "gif"_t: return "image/gif"; + case "png"_t: return "image/png"; + case "svg"_t: return "image/svg+xml"; + case "webp"_t: return "image/webp"; + case "ico"_t: return "image/x-icon"; + case "tif"_t: return "image/tiff"; + case "tiff"_t: return "image/tiff"; + case "jpg"_t: + case "jpeg"_t: return "image/jpeg"; + + case "mp4"_t: return "video/mp4"; + case "mpeg"_t: return "video/mpeg"; + case "webm"_t: return "video/webm"; + + case "mp3"_t: return "audio/mp3"; + case "mpga"_t: return "audio/mpeg"; + case "weba"_t: return "audio/webm"; + case "wav"_t: return "audio/wave"; + + case "otf"_t: return "font/otf"; + case "ttf"_t: return "font/ttf"; + case "woff"_t: return "font/woff"; + case "woff2"_t: return "font/woff2"; + + case "7z"_t: return "application/x-7z-compressed"; + case "atom"_t: return "application/atom+xml"; + case "pdf"_t: return "application/pdf"; + case "json"_t: return "application/json"; + case "rss"_t: return "application/rss+xml"; + case "tar"_t: return "application/x-tar"; + case "xht"_t: + case "xhtml"_t: return "application/xhtml+xml"; + case "xslt"_t: return "application/xslt+xml"; + case "xml"_t: return "application/xml"; + case "gz"_t: return "application/gzip"; + case "zip"_t: return "application/zip"; + case "wasm"_t: return "application/wasm"; } } inline bool can_compress_content_type(const std::string &content_type) { - return (!content_type.find("text/") && content_type != "text/event-stream") || - content_type == "image/svg+xml" || - content_type == 
"application/javascript" || - content_type == "application/json" || - content_type == "application/xml" || - content_type == "application/xhtml+xml"; -} + using udl::operator""_t; -enum class EncodingType { None = 0, Gzip, Brotli }; + auto tag = str2tag(content_type); + + switch (tag) { + case "image/svg+xml"_t: + case "application/javascript"_t: + case "application/json"_t: + case "application/xml"_t: + case "application/protobuf"_t: + case "application/xhtml+xml"_t: return true; + + default: + return !content_type.rfind("text/", 0) && tag != "text/event-stream"_t; + } +} inline EncodingType encoding_type(const Request &req, const Response &res) { auto ret = @@ -2151,120 +3543,105 @@ inline EncodingType encoding_type(const Request &req, const Response &res) { return EncodingType::None; } -class compressor { -public: - virtual ~compressor(){}; - - typedef std::function Callback; - virtual bool compress(const char *data, size_t data_length, bool last, - Callback callback) = 0; -}; - -class decompressor { -public: - virtual ~decompressor() {} - - virtual bool is_valid() const = 0; - - typedef std::function Callback; - virtual bool decompress(const char *data, size_t data_length, - Callback callback) = 0; -}; - -class nocompressor : public compressor { -public: - ~nocompressor(){}; - - bool compress(const char *data, size_t data_length, bool /*last*/, - Callback callback) override { - if (!data_length) { return true; } - return callback(data, data_length); - } -}; +inline bool nocompressor::compress(const char *data, size_t data_length, + bool /*last*/, Callback callback) { + if (!data_length) { return true; } + return callback(data, data_length); +} #ifdef CPPHTTPLIB_ZLIB_SUPPORT -class gzip_compressor : public compressor { -public: - gzip_compressor() { - std::memset(&strm_, 0, sizeof(strm_)); - strm_.zalloc = Z_NULL; - strm_.zfree = Z_NULL; - strm_.opaque = Z_NULL; +inline gzip_compressor::gzip_compressor() { + std::memset(&strm_, 0, sizeof(strm_)); + strm_.zalloc = Z_NULL; + strm_.zfree = Z_NULL; + strm_.opaque = Z_NULL; - is_valid_ = deflateInit2(&strm_, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 31, 8, - Z_DEFAULT_STRATEGY) == Z_OK; - } + is_valid_ = deflateInit2(&strm_, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 31, 8, + Z_DEFAULT_STRATEGY) == Z_OK; +} - ~gzip_compressor() { deflateEnd(&strm_); } +inline gzip_compressor::~gzip_compressor() { deflateEnd(&strm_); } - bool compress(const char *data, size_t data_length, bool last, - Callback callback) override { - assert(is_valid_); +inline bool gzip_compressor::compress(const char *data, size_t data_length, + bool last, Callback callback) { + assert(is_valid_); - auto flush = last ? Z_FINISH : Z_NO_FLUSH; + do { + constexpr size_t max_avail_in = + (std::numeric_limits::max)(); - strm_.avail_in = static_cast(data_length); + strm_.avail_in = static_cast( + (std::min)(data_length, max_avail_in)); strm_.next_in = const_cast(reinterpret_cast(data)); - int ret = Z_OK; + data_length -= strm_.avail_in; + data += strm_.avail_in; + + auto flush = (last && data_length == 0) ? 
Z_FINISH : Z_NO_FLUSH; + auto ret = Z_OK; std::array buff{}; do { - strm_.avail_out = buff.size(); + strm_.avail_out = static_cast(buff.size()); strm_.next_out = reinterpret_cast(buff.data()); ret = deflate(&strm_, flush); - assert(ret != Z_STREAM_ERROR); + if (ret == Z_STREAM_ERROR) { return false; } if (!callback(buff.data(), buff.size() - strm_.avail_out)) { return false; } } while (strm_.avail_out == 0); - assert((last && ret == Z_STREAM_END) || (!last && ret == Z_OK)); + assert((flush == Z_FINISH && ret == Z_STREAM_END) || + (flush == Z_NO_FLUSH && ret == Z_OK)); assert(strm_.avail_in == 0); - return true; - } + } while (data_length > 0); -private: - bool is_valid_ = false; - z_stream strm_; -}; + return true; +} -class gzip_decompressor : public decompressor { -public: - gzip_decompressor() { - std::memset(&strm_, 0, sizeof(strm_)); - strm_.zalloc = Z_NULL; - strm_.zfree = Z_NULL; - strm_.opaque = Z_NULL; +inline gzip_decompressor::gzip_decompressor() { + std::memset(&strm_, 0, sizeof(strm_)); + strm_.zalloc = Z_NULL; + strm_.zfree = Z_NULL; + strm_.opaque = Z_NULL; - // 15 is the value of wbits, which should be at the maximum possible value - // to ensure that any gzip stream can be decoded. The offset of 32 specifies - // that the stream type should be automatically detected either gzip or - // deflate. - is_valid_ = inflateInit2(&strm_, 32 + 15) == Z_OK; - } + // 15 is the value of wbits, which should be at the maximum possible value + // to ensure that any gzip stream can be decoded. The offset of 32 specifies + // that the stream type should be automatically detected either gzip or + // deflate. + is_valid_ = inflateInit2(&strm_, 32 + 15) == Z_OK; +} - ~gzip_decompressor() { inflateEnd(&strm_); } +inline gzip_decompressor::~gzip_decompressor() { inflateEnd(&strm_); } - bool is_valid() const override { return is_valid_; } +inline bool gzip_decompressor::is_valid() const { return is_valid_; } - bool decompress(const char *data, size_t data_length, - Callback callback) override { - assert(is_valid_); +inline bool gzip_decompressor::decompress(const char *data, size_t data_length, + Callback callback) { + assert(is_valid_); - int ret = Z_OK; + auto ret = Z_OK; - strm_.avail_in = static_cast(data_length); + do { + constexpr size_t max_avail_in = + (std::numeric_limits::max)(); + + strm_.avail_in = static_cast( + (std::min)(data_length, max_avail_in)); strm_.next_in = const_cast(reinterpret_cast(data)); + data_length -= strm_.avail_in; + data += strm_.avail_in; + std::array buff{}; - while (strm_.avail_in > 0) { - strm_.avail_out = buff.size(); + while (strm_.avail_in > 0 && ret == Z_OK) { + strm_.avail_out = static_cast(buff.size()); strm_.next_out = reinterpret_cast(buff.data()); ret = inflate(&strm_, Z_NO_FLUSH); + assert(ret != Z_STREAM_ERROR); switch (ret) { case Z_NEED_DICT: @@ -2277,118 +3654,107 @@ public: } } - return ret == Z_OK || ret == Z_STREAM_END; - } + if (ret != Z_OK && ret != Z_STREAM_END) { return false; } -private: - bool is_valid_ = false; - z_stream strm_; -}; + } while (data_length > 0); + + return true; +} #endif #ifdef CPPHTTPLIB_BROTLI_SUPPORT -class brotli_compressor : public compressor { -public: - brotli_compressor() { - state_ = BrotliEncoderCreateInstance(nullptr, nullptr, nullptr); - } +inline brotli_compressor::brotli_compressor() { + state_ = BrotliEncoderCreateInstance(nullptr, nullptr, nullptr); +} - ~brotli_compressor() { BrotliEncoderDestroyInstance(state_); } +inline brotli_compressor::~brotli_compressor() { + 
BrotliEncoderDestroyInstance(state_); +} - bool compress(const char *data, size_t data_length, bool last, - Callback callback) override { - std::array buff{}; +inline bool brotli_compressor::compress(const char *data, size_t data_length, + bool last, Callback callback) { + std::array buff{}; - auto operation = last ? BROTLI_OPERATION_FINISH : BROTLI_OPERATION_PROCESS; - auto available_in = data_length; - auto next_in = reinterpret_cast(data); + auto operation = last ? BROTLI_OPERATION_FINISH : BROTLI_OPERATION_PROCESS; + auto available_in = data_length; + auto next_in = reinterpret_cast(data); - for (;;) { - if (last) { - if (BrotliEncoderIsFinished(state_)) { break; } - } else { - if (!available_in) { break; } - } - - auto available_out = buff.size(); - auto next_out = buff.data(); - - if (!BrotliEncoderCompressStream(state_, operation, &available_in, - &next_in, &available_out, &next_out, - nullptr)) { - return false; - } - - auto output_bytes = buff.size() - available_out; - if (output_bytes) { - callback(reinterpret_cast(buff.data()), output_bytes); - } + for (;;) { + if (last) { + if (BrotliEncoderIsFinished(state_)) { break; } + } else { + if (!available_in) { break; } } - return true; - } + auto available_out = buff.size(); + auto next_out = buff.data(); -private: - BrotliEncoderState *state_ = nullptr; -}; - -class brotli_decompressor : public decompressor { -public: - brotli_decompressor() { - decoder_s = BrotliDecoderCreateInstance(0, 0, 0); - decoder_r = decoder_s ? BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT - : BROTLI_DECODER_RESULT_ERROR; - } - - ~brotli_decompressor() { - if (decoder_s) { BrotliDecoderDestroyInstance(decoder_s); } - } - - bool is_valid() const override { return decoder_s; } - - bool decompress(const char *data, size_t data_length, - Callback callback) override { - if (decoder_r == BROTLI_DECODER_RESULT_SUCCESS || - decoder_r == BROTLI_DECODER_RESULT_ERROR) { - return 0; + if (!BrotliEncoderCompressStream(state_, operation, &available_in, &next_in, + &available_out, &next_out, nullptr)) { + return false; } - const uint8_t *next_in = (const uint8_t *)data; - size_t avail_in = data_length; - size_t total_out; - - decoder_r = BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT; - - std::array buff{}; - while (decoder_r == BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT) { - char *next_out = buff.data(); - size_t avail_out = buff.size(); - - decoder_r = BrotliDecoderDecompressStream( - decoder_s, &avail_in, &next_in, &avail_out, - reinterpret_cast(&next_out), &total_out); - - if (decoder_r == BROTLI_DECODER_RESULT_ERROR) { return false; } - - if (!callback(buff.data(), buff.size() - avail_out)) { return false; } + auto output_bytes = buff.size() - available_out; + if (output_bytes) { + callback(reinterpret_cast(buff.data()), output_bytes); } - - return decoder_r == BROTLI_DECODER_RESULT_SUCCESS || - decoder_r == BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT; } -private: - BrotliDecoderResult decoder_r; - BrotliDecoderState *decoder_s = nullptr; -}; + return true; +} + +inline brotli_decompressor::brotli_decompressor() { + decoder_s = BrotliDecoderCreateInstance(0, 0, 0); + decoder_r = decoder_s ? 
BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT + : BROTLI_DECODER_RESULT_ERROR; +} + +inline brotli_decompressor::~brotli_decompressor() { + if (decoder_s) { BrotliDecoderDestroyInstance(decoder_s); } +} + +inline bool brotli_decompressor::is_valid() const { return decoder_s; } + +inline bool brotli_decompressor::decompress(const char *data, + size_t data_length, + Callback callback) { + if (decoder_r == BROTLI_DECODER_RESULT_SUCCESS || + decoder_r == BROTLI_DECODER_RESULT_ERROR) { + return 0; + } + + auto next_in = reinterpret_cast(data); + size_t avail_in = data_length; + size_t total_out; + + decoder_r = BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT; + + std::array buff{}; + while (decoder_r == BROTLI_DECODER_RESULT_NEEDS_MORE_OUTPUT) { + char *next_out = buff.data(); + size_t avail_out = buff.size(); + + decoder_r = BrotliDecoderDecompressStream( + decoder_s, &avail_in, &next_in, &avail_out, + reinterpret_cast(&next_out), &total_out); + + if (decoder_r == BROTLI_DECODER_RESULT_ERROR) { return false; } + + if (!callback(buff.data(), buff.size() - avail_out)) { return false; } + } + + return decoder_r == BROTLI_DECODER_RESULT_SUCCESS || + decoder_r == BROTLI_DECODER_RESULT_NEEDS_MORE_INPUT; +} #endif -inline bool has_header(const Headers &headers, const char *key) { +inline bool has_header(const Headers &headers, const std::string &key) { return headers.find(key) != headers.end(); } -inline const char *get_header_value(const Headers &headers, const char *key, - size_t id = 0, const char *def = nullptr) { +inline const char *get_header_value(const Headers &headers, + const std::string &key, size_t id, + const char *def) { auto rng = headers.equal_range(key); auto it = rng.first; std::advance(it, static_cast(id)); @@ -2396,21 +3762,12 @@ inline const char *get_header_value(const Headers &headers, const char *key, return def; } -template -inline T get_header_value(const Headers & /*headers*/, const char * /*key*/, - size_t /*id*/ = 0, uint64_t /*def*/ = 0) {} - -template <> -inline uint64_t get_header_value(const Headers &headers, - const char *key, size_t id, - uint64_t def) { - auto rng = headers.equal_range(key); - auto it = rng.first; - std::advance(it, static_cast(id)); - if (it != rng.second) { - return std::strtoull(it->second.data(), nullptr, 10); +inline bool compare_case_ignore(const std::string &a, const std::string &b) { + if (a.size() != b.size()) { return false; } + for (size_t i = 0; i < b.size(); i++) { + if (::tolower(a[i]) != ::tolower(b[i])) { return false; } } - return def; + return true; } template @@ -2436,7 +3793,14 @@ inline bool parse_header(const char *beg, const char *end, T fn) { } if (p < end) { - fn(std::string(beg, key_end), decode_url(std::string(p, end), false)); + auto key_len = key_end - beg; + if (!key_len) { return false; } + + auto key = std::string(beg, key_end); + auto val = compare_case_ignore(key, "Location") + ? std::string(p, end) + : decode_url(std::string(p, end), false); + fn(std::move(key), std::move(val)); return true; } @@ -2452,15 +3816,26 @@ inline bool read_headers(Stream &strm, Headers &headers) { if (!line_reader.getline()) { return false; } // Check if the line ends with CRLF. + auto line_terminator_len = 2; if (line_reader.end_with_crlf()) { // Blank line indicates end of headers. if (line_reader.size() == 2) { break; } +#ifdef CPPHTTPLIB_ALLOW_LF_AS_LINE_TERMINATOR + } else { + // Blank line indicates end of headers. + if (line_reader.size() == 1) { break; } + line_terminator_len = 1; + } +#else } else { continue; // Skip invalid line. 
} +#endif - // Exclude CRLF - auto end = line_reader.ptr() + line_reader.size() - 2; + if (line_reader.size() > CPPHTTPLIB_HEADER_MAX_LENGTH) { return false; } + + // Exclude line terminator + auto end = line_reader.ptr() + line_reader.size() - line_terminator_len; parse_header(line_reader.ptr(), end, [&](std::string &&key, std::string &&val) { @@ -2472,7 +3847,8 @@ inline bool read_headers(Stream &strm, Headers &headers) { } inline bool read_content_with_length(Stream &strm, uint64_t len, - Progress progress, ContentReceiver out) { + Progress progress, + ContentReceiverWithProgress out) { char buf[CPPHTTPLIB_RECV_BUFSIZ]; uint64_t r = 0; @@ -2481,8 +3857,7 @@ inline bool read_content_with_length(Stream &strm, uint64_t len, auto n = strm.read(buf, (std::min)(read_len, CPPHTTPLIB_RECV_BUFSIZ)); if (n <= 0) { return false; } - if (!out(buf, static_cast(n))) { return false; } - + if (!out(buf, static_cast(n), r, len)) { return false; } r += static_cast(n); if (progress) { @@ -2504,22 +3879,24 @@ inline void skip_content_with_length(Stream &strm, uint64_t len) { } } -inline bool read_content_without_length(Stream &strm, ContentReceiver out) { +inline bool read_content_without_length(Stream &strm, + ContentReceiverWithProgress out) { char buf[CPPHTTPLIB_RECV_BUFSIZ]; + uint64_t r = 0; for (;;) { auto n = strm.read(buf, CPPHTTPLIB_RECV_BUFSIZ); - if (n < 0) { - return false; - } else if (n == 0) { - return true; - } - if (!out(buf, static_cast(n))) { return false; } + if (n <= 0) { return true; } + + if (!out(buf, static_cast(n), r, 0)) { return false; } + r += static_cast(n); } return true; } -inline bool read_content_chunked(Stream &strm, ContentReceiver out) { +template +inline bool read_content_chunked(Stream &strm, T &x, + ContentReceiverWithProgress out) { const auto bufsiz = 16; char buf[bufsiz]; @@ -2544,110 +3921,122 @@ inline bool read_content_chunked(Stream &strm, ContentReceiver out) { if (!line_reader.getline()) { return false; } - if (strcmp(line_reader.ptr(), "\r\n")) { break; } + if (strcmp(line_reader.ptr(), "\r\n") != 0) { return false; } if (!line_reader.getline()) { return false; } } - if (chunk_len == 0) { - // Reader terminator after chunks - if (!line_reader.getline() || strcmp(line_reader.ptr(), "\r\n")) - return false; + assert(chunk_len == 0); + + // Trailer + if (!line_reader.getline()) { return false; } + + while (strcmp(line_reader.ptr(), "\r\n") != 0) { + if (line_reader.size() > CPPHTTPLIB_HEADER_MAX_LENGTH) { return false; } + + // Exclude line terminator + constexpr auto line_terminator_len = 2; + auto end = line_reader.ptr() + line_reader.size() - line_terminator_len; + + parse_header(line_reader.ptr(), end, + [&](std::string &&key, std::string &&val) { + x.headers.emplace(std::move(key), std::move(val)); + }); + + if (!line_reader.getline()) { return false; } } return true; } inline bool is_chunked_transfer_encoding(const Headers &headers) { - return !strcasecmp(get_header_value(headers, "Transfer-Encoding", 0, ""), - "chunked"); + return compare_case_ignore( + get_header_value(headers, "Transfer-Encoding", 0, ""), "chunked"); } template -bool prepare_content_receiver(T &x, int &status, ContentReceiver receiver, +bool prepare_content_receiver(T &x, int &status, + ContentReceiverWithProgress receiver, bool decompress, U callback) { if (decompress) { std::string encoding = x.get_header_value("Content-Encoding"); - std::shared_ptr decompressor; + std::unique_ptr decompressor; - if (encoding.find("gzip") != std::string::npos || - encoding.find("deflate") != 
std::string::npos) { + if (encoding == "gzip" || encoding == "deflate") { #ifdef CPPHTTPLIB_ZLIB_SUPPORT - decompressor = std::make_shared(); + decompressor = detail::make_unique(); #else - status = 415; + status = StatusCode::UnsupportedMediaType_415; return false; #endif } else if (encoding.find("br") != std::string::npos) { #ifdef CPPHTTPLIB_BROTLI_SUPPORT - decompressor = std::make_shared(); + decompressor = detail::make_unique(); #else - status = 415; + status = StatusCode::UnsupportedMediaType_415; return false; #endif } if (decompressor) { if (decompressor->is_valid()) { - ContentReceiver out = [&](const char *buf, size_t n) { - return decompressor->decompress( - buf, n, - [&](const char *buf, size_t n) { return receiver(buf, n); }); + ContentReceiverWithProgress out = [&](const char *buf, size_t n, + uint64_t off, uint64_t len) { + return decompressor->decompress(buf, n, + [&](const char *buf2, size_t n2) { + return receiver(buf2, n2, off, len); + }); }; - return callback(out); + return callback(std::move(out)); } else { - status = 500; + status = StatusCode::InternalServerError_500; return false; } } } - ContentReceiver out = [&](const char *buf, size_t n) { - return receiver(buf, n); + ContentReceiverWithProgress out = [&](const char *buf, size_t n, uint64_t off, + uint64_t len) { + return receiver(buf, n, off, len); }; - return callback(out); + return callback(std::move(out)); } template bool read_content(Stream &strm, T &x, size_t payload_max_length, int &status, - Progress progress, ContentReceiver receiver, + Progress progress, ContentReceiverWithProgress receiver, bool decompress) { return prepare_content_receiver( - x, status, receiver, decompress, [&](const ContentReceiver &out) { + x, status, std::move(receiver), decompress, + [&](const ContentReceiverWithProgress &out) { auto ret = true; auto exceed_payload_max_length = false; if (is_chunked_transfer_encoding(x.headers)) { - ret = read_content_chunked(strm, out); + ret = read_content_chunked(strm, x, out); } else if (!has_header(x.headers, "Content-Length")) { ret = read_content_without_length(strm, out); } else { - auto len = get_header_value(x.headers, "Content-Length"); + auto len = get_header_value_u64(x.headers, "Content-Length", 0, 0); if (len > payload_max_length) { exceed_payload_max_length = true; skip_content_with_length(strm, len); ret = false; } else if (len > 0) { - ret = read_content_with_length(strm, len, progress, out); + ret = read_content_with_length(strm, len, std::move(progress), out); } } - if (!ret) { status = exceed_payload_max_length ? 413 : 400; } + if (!ret) { + status = exceed_payload_max_length ? 
StatusCode::PayloadTooLarge_413 + : StatusCode::BadRequest_400; + } return ret; }); -} +} // namespace detail -template -inline ssize_t write_headers(Stream &strm, const T &info, - const Headers &headers) { +inline ssize_t write_headers(Stream &strm, const Headers &headers) { ssize_t write_len = 0; - for (const auto &x : info.headers) { - if (x.first == "EXCEPTION_WHAT") { continue; } - auto len = - strm.write_format("%s: %s\r\n", x.first.c_str(), x.second.c_str()); - if (len < 0) { return len; } - write_len += len; - } for (const auto &x : headers) { auto len = strm.write_format("%s: %s\r\n", x.first.c_str(), x.second.c_str()); @@ -2671,99 +4060,125 @@ inline bool write_data(Stream &strm, const char *d, size_t l) { } template -inline ssize_t write_content(Stream &strm, ContentProvider content_provider, - size_t offset, size_t length, T is_shutting_down) { - size_t begin_offset = offset; +inline bool write_content(Stream &strm, const ContentProvider &content_provider, + size_t offset, size_t length, T is_shutting_down, + Error &error) { size_t end_offset = offset + length; auto ok = true; DataSink data_sink; - data_sink.write = [&](const char *d, size_t l) { + data_sink.write = [&](const char *d, size_t l) -> bool { if (ok) { - offset += l; - if (!write_data(strm, d, l)) { ok = false; } + if (strm.is_writable() && write_data(strm, d, l)) { + offset += l; + } else { + ok = false; + } } + return ok; }; - data_sink.is_writable = [&](void) { return ok && strm.is_writable(); }; + data_sink.is_writable = [&]() -> bool { return strm.is_writable(); }; while (offset < end_offset && !is_shutting_down()) { - if (!content_provider(offset, end_offset - offset, data_sink)) { - return -1; + if (!strm.is_writable()) { + error = Error::Write; + return false; + } else if (!content_provider(offset, end_offset - offset, data_sink)) { + error = Error::Canceled; + return false; + } else if (!ok) { + error = Error::Write; + return false; } - if (!ok) { return -1; } } - return static_cast(offset - begin_offset); + error = Error::Success; + return true; } template -inline ssize_t write_content_without_length(Stream &strm, - ContentProvider content_provider, - T is_shutting_down) { +inline bool write_content(Stream &strm, const ContentProvider &content_provider, + size_t offset, size_t length, + const T &is_shutting_down) { + auto error = Error::Success; + return write_content(strm, content_provider, offset, length, is_shutting_down, + error); +} + +template +inline bool +write_content_without_length(Stream &strm, + const ContentProvider &content_provider, + const T &is_shutting_down) { size_t offset = 0; auto data_available = true; auto ok = true; DataSink data_sink; - data_sink.write = [&](const char *d, size_t l) { + data_sink.write = [&](const char *d, size_t l) -> bool { if (ok) { offset += l; - if (!write_data(strm, d, l)) { ok = false; } + if (!strm.is_writable() || !write_data(strm, d, l)) { ok = false; } } + return ok; }; + data_sink.is_writable = [&]() -> bool { return strm.is_writable(); }; + data_sink.done = [&](void) { data_available = false; }; - data_sink.is_writable = [&](void) { return ok && strm.is_writable(); }; - while (data_available && !is_shutting_down()) { - if (!content_provider(offset, 0, data_sink)) { return -1; } - if (!ok) { return -1; } + if (!strm.is_writable()) { + return false; + } else if (!content_provider(offset, 0, data_sink)) { + return false; + } else if (!ok) { + return false; + } } - - return static_cast(offset); + return true; } template -inline ssize_t 
write_content_chunked(Stream &strm, - ContentProvider content_provider, - T is_shutting_down, U &compressor) { +inline bool +write_content_chunked(Stream &strm, const ContentProvider &content_provider, + const T &is_shutting_down, U &compressor, Error &error) { size_t offset = 0; auto data_available = true; - ssize_t total_written_length = 0; auto ok = true; DataSink data_sink; - data_sink.write = [&](const char *d, size_t l) { - if (!ok) { return; } + data_sink.write = [&](const char *d, size_t l) -> bool { + if (ok) { + data_available = l > 0; + offset += l; - data_available = l > 0; - offset += l; - - std::string payload; - if (!compressor.compress(d, l, false, - [&](const char *data, size_t data_len) { - payload.append(data, data_len); - return true; - })) { - ok = false; - return; - } - - if (!payload.empty()) { - // Emit chunked response header and footer for each chunk - auto chunk = from_i_to_hex(payload.size()) + "\r\n" + payload + "\r\n"; - if (write_data(strm, chunk.data(), chunk.size())) { - total_written_length += chunk.size(); + std::string payload; + if (compressor.compress(d, l, false, + [&](const char *data, size_t data_len) { + payload.append(data, data_len); + return true; + })) { + if (!payload.empty()) { + // Emit chunked response header and footer for each chunk + auto chunk = + from_i_to_hex(payload.size()) + "\r\n" + payload + "\r\n"; + if (!strm.is_writable() || + !write_data(strm, chunk.data(), chunk.size())) { + ok = false; + } + } } else { ok = false; - return; } } + return ok; }; - data_sink.done = [&](void) { + data_sink.is_writable = [&]() -> bool { return strm.is_writable(); }; + + auto done_with_trailer = [&](const Headers *trailer) { if (!ok) { return; } data_available = false; @@ -2781,40 +4196,74 @@ inline ssize_t write_content_chunked(Stream &strm, if (!payload.empty()) { // Emit chunked response header and footer for each chunk auto chunk = from_i_to_hex(payload.size()) + "\r\n" + payload + "\r\n"; - if (write_data(strm, chunk.data(), chunk.size())) { - total_written_length += chunk.size(); - } else { + if (!strm.is_writable() || + !write_data(strm, chunk.data(), chunk.size())) { ok = false; return; } } - static const std::string done_marker("0\r\n\r\n"); - if (write_data(strm, done_marker.data(), done_marker.size())) { - total_written_length += done_marker.size(); - } else { + static const std::string done_marker("0\r\n"); + if (!write_data(strm, done_marker.data(), done_marker.size())) { ok = false; } + + // Trailer + if (trailer) { + for (const auto &kv : *trailer) { + std::string field_line = kv.first + ": " + kv.second + "\r\n"; + if (!write_data(strm, field_line.data(), field_line.size())) { + ok = false; + } + } + } + + static const std::string crlf("\r\n"); + if (!write_data(strm, crlf.data(), crlf.size())) { ok = false; } }; - data_sink.is_writable = [&](void) { return ok && strm.is_writable(); }; + data_sink.done = [&](void) { done_with_trailer(nullptr); }; + + data_sink.done_with_trailer = [&](const Headers &trailer) { + done_with_trailer(&trailer); + }; while (data_available && !is_shutting_down()) { - if (!content_provider(offset, 0, data_sink)) { return -1; } - if (!ok) { return -1; } + if (!strm.is_writable()) { + error = Error::Write; + return false; + } else if (!content_provider(offset, 0, data_sink)) { + error = Error::Canceled; + return false; + } else if (!ok) { + error = Error::Write; + return false; + } } - return total_written_length; + error = Error::Success; + return true; +} + +template +inline bool 
write_content_chunked(Stream &strm, + const ContentProvider &content_provider, + const T &is_shutting_down, U &compressor) { + auto error = Error::Success; + return write_content_chunked(strm, content_provider, is_shutting_down, + compressor, error); } template -inline bool redirect(T &cli, const Request &req, Response &res, - const std::string &path) { +inline bool redirect(T &cli, Request &req, Response &res, + const std::string &path, const std::string &location, + Error &error) { Request new_req = req; new_req.path = path; - new_req.redirect_count -= 1; + new_req.redirect_count_ -= 1; - if (res.status == 303 && (req.method != "GET" && req.method != "HEAD")) { + if (res.status == StatusCode::SeeOther_303 && + (req.method != "GET" && req.method != "HEAD")) { new_req.method = "GET"; new_req.body.clear(); new_req.headers.clear(); @@ -2822,8 +4271,13 @@ inline bool redirect(T &cli, const Request &req, Response &res, Response new_res; - auto ret = cli.send(new_req, new_res); - if (ret) { res = new_res; } + auto ret = cli.send(new_req, new_res, error); + if (ret) { + req = new_req; + res = new_res; + + if (res.location.empty()) { res.location = location; } + } return ret; } @@ -2834,13 +4288,18 @@ inline std::string params_to_query_str(const Params ¶ms) { if (it != params.begin()) { query += "&"; } query += it->first; query += "="; - query += encode_url(it->second); + query += encode_query_param(it->second); } return query; } inline void parse_query_text(const std::string &s, Params ¶ms) { + std::set cache; split(s.data(), s.data() + s.size(), '&', [&](const char *b, const char *e) { + std::string kv(b, e); + if (cache.find(kv) != cache.end()) { return; } + cache.insert(kv); + std::string key; std::string val; split(b, e, '=', [&](const char *b2, const char *e2) { @@ -2859,25 +4318,52 @@ inline void parse_query_text(const std::string &s, Params ¶ms) { inline bool parse_multipart_boundary(const std::string &content_type, std::string &boundary) { - auto pos = content_type.find("boundary="); + auto boundary_keyword = "boundary="; + auto pos = content_type.find(boundary_keyword); if (pos == std::string::npos) { return false; } - boundary = content_type.substr(pos + 9); - if (boundary.length() >= 2 && boundary.front() == '"' && - boundary.back() == '"') { - boundary = boundary.substr(1, boundary.size() - 2); - } + auto end = content_type.find(';', pos); + auto beg = pos + strlen(boundary_keyword); + boundary = trim_double_quotes_copy(content_type.substr(beg, end - beg)); return !boundary.empty(); } +inline void parse_disposition_params(const std::string &s, Params ¶ms) { + std::set cache; + split(s.data(), s.data() + s.size(), ';', [&](const char *b, const char *e) { + std::string kv(b, e); + if (cache.find(kv) != cache.end()) { return; } + cache.insert(kv); + + std::string key; + std::string val; + split(b, e, '=', [&](const char *b2, const char *e2) { + if (key.empty()) { + key.assign(b2, e2); + } else { + val.assign(b2, e2); + } + }); + + if (!key.empty()) { + params.emplace(trim_double_quotes_copy((key)), + trim_double_quotes_copy((val))); + } + }); +} + +#ifdef CPPHTTPLIB_NO_EXCEPTIONS inline bool parse_range_header(const std::string &s, Ranges &ranges) { +#else +inline bool parse_range_header(const std::string &s, Ranges &ranges) try { +#endif static auto re_first_range = std::regex(R"(bytes=(\d*-\d*(?:,\s*\d*-\d*)*))"); std::smatch m; if (std::regex_match(s, m, re_first_range)) { auto pos = static_cast(m.position(1)); auto len = static_cast(m.length(1)); - bool all_valid_ranges = true; + 
auto all_valid_ranges = true; split(&s[pos], &s[pos + len], ',', [&](const char *b, const char *e) { - if (!all_valid_ranges) return; + if (!all_valid_ranges) { return; } static auto re_another_range = std::regex(R"(\s*(\d*)-(\d*))"); std::cmatch cm; if (std::regex_match(b, e, cm, re_another_range)) { @@ -2901,37 +4387,36 @@ inline bool parse_range_header(const std::string &s, Ranges &ranges) { return all_valid_ranges; } return false; +#ifdef CPPHTTPLIB_NO_EXCEPTIONS } +#else +} catch (...) { return false; } +#endif class MultipartFormDataParser { public: MultipartFormDataParser() = default; - void set_boundary(std::string &&boundary) { boundary_ = boundary; } + void set_boundary(std::string &&boundary) { + boundary_ = boundary; + dash_boundary_crlf_ = dash_ + boundary_ + crlf_; + crlf_dash_boundary_ = crlf_ + dash_ + boundary_; + } bool is_valid() const { return is_valid_; } - template - bool parse(const char *buf, size_t n, T content_callback, U header_callback) { + bool parse(const char *buf, size_t n, const ContentReceiver &content_callback, + const MultipartContentHeader &header_callback) { - static const std::regex re_content_disposition( - "^Content-Disposition:\\s*form-data;\\s*name=\"(.*?)\"(?:;\\s*filename=" - "\"(.*?)\")?\\s*$", - std::regex_constants::icase); - static const std::string dash_ = "--"; - static const std::string crlf_ = "\r\n"; + buf_append(buf, n); - buf_.append(buf, n); // TODO: performance improvement - - while (!buf_.empty()) { + while (buf_size() > 0) { switch (state_) { case 0: { // Initial boundary - auto pattern = dash_ + boundary_ + crlf_; - if (pattern.size() > buf_.size()) { return true; } - auto pos = buf_.find(pattern); - if (pos != 0) { return false; } - buf_.erase(0, pattern.size()); - off_ += pattern.size(); + buf_erase(buf_find(dash_boundary_crlf_)); + if (dash_boundary_crlf_.size() > buf_size()) { return true; } + if (!buf_start_with(dash_boundary_crlf_)) { return false; } + buf_erase(dash_boundary_crlf_.size()); state_ = 1; break; } @@ -2941,113 +4426,116 @@ public: break; } case 2: { // Headers - auto pos = buf_.find(crlf_); - while (pos != std::string::npos) { + auto pos = buf_find(crlf_); + if (pos > CPPHTTPLIB_HEADER_MAX_LENGTH) { return false; } + while (pos < buf_size()) { // Empty line if (pos == 0) { if (!header_callback(file_)) { is_valid_ = false; return false; } - buf_.erase(0, crlf_.size()); - off_ += crlf_.size(); + buf_erase(crlf_.size()); state_ = 3; break; } - static const std::string header_name = "content-type:"; - const auto header = buf_.substr(0, pos); - if (start_with(header, header_name)) { - file_.content_type = trim_copy(header.substr(header_name.size())); - } else { - std::smatch m; - if (std::regex_match(header, m, re_content_disposition)) { - file_.name = m[1]; - file_.filename = m[2]; - } + const auto header = buf_head(pos); + + if (!parse_header(header.data(), header.data() + header.size(), + [&](std::string &&, std::string &&) {})) { + is_valid_ = false; + return false; } - buf_.erase(0, pos + crlf_.size()); - off_ += pos + crlf_.size(); - pos = buf_.find(crlf_); + static const std::string header_content_type = "Content-Type:"; + + if (start_with_case_ignore(header, header_content_type)) { + file_.content_type = + trim_copy(header.substr(header_content_type.size())); + } else { + static const std::regex re_content_disposition( + R"~(^Content-Disposition:\s*form-data;\s*(.*)$)~", + std::regex_constants::icase); + + std::smatch m; + if (std::regex_match(header, m, re_content_disposition)) { + Params params; + 
parse_disposition_params(m[1], params); + + auto it = params.find("name"); + if (it != params.end()) { + file_.name = it->second; + } else { + is_valid_ = false; + return false; + } + + it = params.find("filename"); + if (it != params.end()) { file_.filename = it->second; } + + it = params.find("filename*"); + if (it != params.end()) { + // Only allow UTF-8 enconnding... + static const std::regex re_rfc5987_encoding( + R"~(^UTF-8''(.+?)$)~", std::regex_constants::icase); + + std::smatch m2; + if (std::regex_match(it->second, m2, re_rfc5987_encoding)) { + file_.filename = decode_url(m2[1], false); // override... + } else { + is_valid_ = false; + return false; + } + } + } + } + buf_erase(pos + crlf_.size()); + pos = buf_find(crlf_); } if (state_ != 3) { return true; } break; } case 3: { // Body - { - auto pattern = crlf_ + dash_; - if (pattern.size() > buf_.size()) { return true; } - - auto pos = buf_.find(pattern); - if (pos == std::string::npos) { - pos = buf_.size(); - while (pos > 0) { - auto c = buf_[pos - 1]; - if (c != '\r' && c != '\n' && c != '-') { break; } - pos--; - } - } - - if (!content_callback(buf_.data(), pos)) { + if (crlf_dash_boundary_.size() > buf_size()) { return true; } + auto pos = buf_find(crlf_dash_boundary_); + if (pos < buf_size()) { + if (!content_callback(buf_data(), pos)) { is_valid_ = false; return false; } - - off_ += pos; - buf_.erase(0, pos); - } - - { - auto pattern = crlf_ + dash_ + boundary_; - if (pattern.size() > buf_.size()) { return true; } - - auto pos = buf_.find(pattern); - if (pos != std::string::npos) { - if (!content_callback(buf_.data(), pos)) { + buf_erase(pos + crlf_dash_boundary_.size()); + state_ = 4; + } else { + auto len = buf_size() - crlf_dash_boundary_.size(); + if (len > 0) { + if (!content_callback(buf_data(), len)) { is_valid_ = false; return false; } - - off_ += pos + pattern.size(); - buf_.erase(0, pos + pattern.size()); - state_ = 4; - } else { - if (!content_callback(buf_.data(), pattern.size())) { - is_valid_ = false; - return false; - } - - off_ += pattern.size(); - buf_.erase(0, pattern.size()); + buf_erase(len); } + return true; } break; } case 4: { // Boundary - if (crlf_.size() > buf_.size()) { return true; } - if (buf_.compare(0, crlf_.size(), crlf_) == 0) { - buf_.erase(0, crlf_.size()); - off_ += crlf_.size(); + if (crlf_.size() > buf_size()) { return true; } + if (buf_start_with(crlf_)) { + buf_erase(crlf_.size()); state_ = 1; } else { - auto pattern = dash_ + crlf_; - if (pattern.size() > buf_.size()) { return true; } - if (buf_.compare(0, pattern.size(), pattern) == 0) { - buf_.erase(0, pattern.size()); - off_ += pattern.size(); + if (dash_.size() > buf_size()) { return true; } + if (buf_start_with(dash_)) { + buf_erase(dash_.size()); is_valid_ = true; - state_ = 5; + buf_erase(buf_size()); // Remove epilogue } else { return true; } } break; } - case 5: { // Done - is_valid_ = false; - return false; - } } } @@ -3061,13 +4549,91 @@ private: file_.content_type.clear(); } - std::string boundary_; + bool start_with_case_ignore(const std::string &a, + const std::string &b) const { + if (a.size() < b.size()) { return false; } + for (size_t i = 0; i < b.size(); i++) { + if (::tolower(a[i]) != ::tolower(b[i])) { return false; } + } + return true; + } + + const std::string dash_ = "--"; + const std::string crlf_ = "\r\n"; + std::string boundary_; + std::string dash_boundary_crlf_; + std::string crlf_dash_boundary_; - std::string buf_; size_t state_ = 0; bool is_valid_ = false; - size_t off_ = 0; MultipartFormData file_; + + 
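+  // The buffer helpers below treat buf_ as a sliding window: the unread
+  // region is [buf_spos_, buf_epos_), buf_erase() only advances buf_spos_,
+  // and buf_append() first compacts the remaining bytes to the front of
+  // buf_ before copying in newly received data.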
// Buffer + bool start_with(const std::string &a, size_t spos, size_t epos, + const std::string &b) const { + if (epos - spos < b.size()) { return false; } + for (size_t i = 0; i < b.size(); i++) { + if (a[i + spos] != b[i]) { return false; } + } + return true; + } + + size_t buf_size() const { return buf_epos_ - buf_spos_; } + + const char *buf_data() const { return &buf_[buf_spos_]; } + + std::string buf_head(size_t l) const { return buf_.substr(buf_spos_, l); } + + bool buf_start_with(const std::string &s) const { + return start_with(buf_, buf_spos_, buf_epos_, s); + } + + size_t buf_find(const std::string &s) const { + auto c = s.front(); + + size_t off = buf_spos_; + while (off < buf_epos_) { + auto pos = off; + while (true) { + if (pos == buf_epos_) { return buf_size(); } + if (buf_[pos] == c) { break; } + pos++; + } + + auto remaining_size = buf_epos_ - pos; + if (s.size() > remaining_size) { return buf_size(); } + + if (start_with(buf_, pos, buf_epos_, s)) { return pos - buf_spos_; } + + off = pos + 1; + } + + return buf_size(); + } + + void buf_append(const char *data, size_t n) { + auto remaining_size = buf_size(); + if (remaining_size > 0 && buf_spos_ > 0) { + for (size_t i = 0; i < remaining_size; i++) { + buf_[i] = buf_[buf_spos_ + i]; + } + } + buf_spos_ = 0; + buf_epos_ = remaining_size; + + if (remaining_size + n > buf_.size()) { buf_.resize(remaining_size + n); } + + for (size_t i = 0; i < n; i++) { + buf_[buf_epos_ + i] = data[i]; + } + buf_epos_ += n; + } + + void buf_erase(size_t size) { buf_spos_ += size; } + + std::string buf_; + size_t buf_spos_ = 0; + size_t buf_epos_ = 0; }; inline std::string to_lower(const char *beg, const char *end) { @@ -3080,60 +4646,174 @@ inline std::string to_lower(const char *beg, const char *end) { return out; } -inline std::string make_multipart_data_boundary() { +inline std::string random_string(size_t length) { static const char data[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"; - std::random_device seed_gen; - std::mt19937 engine(seed_gen()); + // std::random_device might actually be deterministic on some + // platforms, but due to lack of support in the c++ standard library, + // doing better requires either some ugly hacks or breaking portability. 
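+  // The static engine below is seeded once from std::random_device and then
+  // reused for every call; each output character is data[engine() % (sizeof(data) - 1)],
+  // i.e. drawn (modulo bias aside) from the 62 alphanumeric characters above.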
+ static std::random_device seed_gen; - std::string result = "--cpp-httplib-multipart-data-"; + // Request 128 bits of entropy for initialization + static std::seed_seq seed_sequence{seed_gen(), seed_gen(), seed_gen(), + seed_gen()}; - for (auto i = 0; i < 16; i++) { + static std::mt19937 engine(seed_sequence); + + std::string result; + for (size_t i = 0; i < length; i++) { result += data[engine() % (sizeof(data) - 1)]; } - return result; } -inline std::pair -get_range_offset_and_length(const Request &req, size_t content_length, - size_t index) { - auto r = req.ranges[index]; - - if (r.first == -1 && r.second == -1) { - return std::make_pair(0, content_length); - } - - auto slen = static_cast(content_length); - - if (r.first == -1) { - r.first = slen - r.second; - r.second = slen - 1; - } - - if (r.second == -1) { r.second = slen - 1; } - - return std::make_pair(r.first, r.second - r.first + 1); +inline std::string make_multipart_data_boundary() { + return "--cpp-httplib-multipart-data-" + detail::random_string(16); } -inline std::string make_content_range_header_field(size_t offset, size_t length, - size_t content_length) { +inline bool is_multipart_boundary_chars_valid(const std::string &boundary) { + auto valid = true; + for (size_t i = 0; i < boundary.size(); i++) { + auto c = boundary[i]; + if (!std::isalnum(c) && c != '-' && c != '_') { + valid = false; + break; + } + } + return valid; +} + +template +inline std::string +serialize_multipart_formdata_item_begin(const T &item, + const std::string &boundary) { + std::string body = "--" + boundary + "\r\n"; + body += "Content-Disposition: form-data; name=\"" + item.name + "\""; + if (!item.filename.empty()) { + body += "; filename=\"" + item.filename + "\""; + } + body += "\r\n"; + if (!item.content_type.empty()) { + body += "Content-Type: " + item.content_type + "\r\n"; + } + body += "\r\n"; + + return body; +} + +inline std::string serialize_multipart_formdata_item_end() { return "\r\n"; } + +inline std::string +serialize_multipart_formdata_finish(const std::string &boundary) { + return "--" + boundary + "--\r\n"; +} + +inline std::string +serialize_multipart_formdata_get_content_type(const std::string &boundary) { + return "multipart/form-data; boundary=" + boundary; +} + +inline std::string +serialize_multipart_formdata(const MultipartFormDataItems &items, + const std::string &boundary, bool finish = true) { + std::string body; + + for (const auto &item : items) { + body += serialize_multipart_formdata_item_begin(item, boundary); + body += item.content + serialize_multipart_formdata_item_end(); + } + + if (finish) { body += serialize_multipart_formdata_finish(boundary); } + + return body; +} + +inline bool range_error(Request &req, Response &res) { + if (!req.ranges.empty() && 200 <= res.status && res.status < 300) { + ssize_t contant_len = static_cast( + res.content_length_ ? res.content_length_ : res.body.size()); + + ssize_t prev_first_pos = -1; + ssize_t prev_last_pos = -1; + size_t overwrapping_count = 0; + + // NOTE: The following Range check is based on '14.2. Range' in RFC 9110 + // 'HTTP Semantics' to avoid potential denial-of-service attacks. 
+ // https://www.rfc-editor.org/rfc/rfc9110#section-14.2 + + // Too many ranges + if (req.ranges.size() > CPPHTTPLIB_RANGE_MAX_COUNT) { return true; } + + for (auto &r : req.ranges) { + auto &first_pos = r.first; + auto &last_pos = r.second; + + if (first_pos == -1 && last_pos == -1) { + first_pos = 0; + last_pos = contant_len; + } + + if (first_pos == -1) { + first_pos = contant_len - last_pos; + last_pos = contant_len - 1; + } + + if (last_pos == -1) { last_pos = contant_len - 1; } + + // Range must be within content length + if (!(0 <= first_pos && first_pos <= last_pos && + last_pos <= contant_len - 1)) { + return true; + } + + // Ranges must be in ascending order + if (first_pos <= prev_first_pos) { return true; } + + // Request must not have more than two overlapping ranges + if (first_pos <= prev_last_pos) { + overwrapping_count++; + if (overwrapping_count > 2) { return true; } + } + + prev_first_pos = (std::max)(prev_first_pos, first_pos); + prev_last_pos = (std::max)(prev_last_pos, last_pos); + } + } + + return false; +} + +inline std::pair +get_range_offset_and_length(Range r, size_t content_length) { + assert(r.first != -1 && r.second != -1); + assert(0 <= r.first && r.first < static_cast(content_length)); + assert(r.first <= r.second && + r.second < static_cast(content_length)); + + return std::make_pair(r.first, static_cast(r.second - r.first) + 1); +} + +inline std::string make_content_range_header_field( + const std::pair &offset_and_length, size_t content_length) { + auto st = offset_and_length.first; + auto ed = st + offset_and_length.second - 1; + std::string field = "bytes "; - field += std::to_string(offset); + field += std::to_string(st); field += "-"; - field += std::to_string(offset + length - 1); + field += std::to_string(ed); field += "/"; field += std::to_string(content_length); return field; } template -bool process_multipart_ranges_data(const Request &req, Response &res, +bool process_multipart_ranges_data(const Request &req, const std::string &boundary, const std::string &content_type, - SToken stoken, CToken ctoken, - Content content) { + size_t content_length, SToken stoken, + CToken ctoken, Content content) { for (size_t i = 0; i < req.ranges.size(); i++) { ctoken("--"); stoken(boundary); @@ -3144,52 +4824,53 @@ bool process_multipart_ranges_data(const Request &req, Response &res, ctoken("\r\n"); } - auto offsets = get_range_offset_and_length(req, res.body.size(), i); - auto offset = offsets.first; - auto length = offsets.second; + auto offset_and_length = + get_range_offset_and_length(req.ranges[i], content_length); ctoken("Content-Range: "); - stoken(make_content_range_header_field(offset, length, res.body.size())); + stoken(make_content_range_header_field(offset_and_length, content_length)); ctoken("\r\n"); ctoken("\r\n"); - if (!content(offset, length)) { return false; } + + if (!content(offset_and_length.first, offset_and_length.second)) { + return false; + } ctoken("\r\n"); } ctoken("--"); stoken(boundary); - ctoken("--\r\n"); + ctoken("--"); return true; } -inline std::string make_multipart_ranges_data(const Request &req, Response &res, - const std::string &boundary, - const std::string &content_type) { - std::string data; - +inline void make_multipart_ranges_data(const Request &req, Response &res, + const std::string &boundary, + const std::string &content_type, + size_t content_length, + std::string &data) { process_multipart_ranges_data( - req, res, boundary, content_type, + req, boundary, content_type, content_length, + [&](const std::string 
&token) { data += token; }, [&](const std::string &token) { data += token; }, - [&](const char *token) { data += token; }, [&](size_t offset, size_t length) { + assert(offset + length <= content_length); data += res.body.substr(offset, length); return true; }); - - return data; } -inline size_t -get_multipart_ranges_data_length(const Request &req, Response &res, - const std::string &boundary, - const std::string &content_type) { +inline size_t get_multipart_ranges_data_length(const Request &req, + const std::string &boundary, + const std::string &content_type, + size_t content_length) { size_t data_length = 0; process_multipart_ranges_data( - req, res, boundary, content_type, + req, boundary, content_type, content_length, + [&](const std::string &token) { data_length += token.size(); }, [&](const std::string &token) { data_length += token.size(); }, - [&](const char *token) { data_length += strlen(token); }, [&](size_t /*offset*/, size_t length) { data_length += length; return true; @@ -3199,33 +4880,21 @@ get_multipart_ranges_data_length(const Request &req, Response &res, } template -inline bool write_multipart_ranges_data(Stream &strm, const Request &req, - Response &res, - const std::string &boundary, - const std::string &content_type, - T is_shutting_down) { +inline bool +write_multipart_ranges_data(Stream &strm, const Request &req, Response &res, + const std::string &boundary, + const std::string &content_type, + size_t content_length, const T &is_shutting_down) { return process_multipart_ranges_data( - req, res, boundary, content_type, + req, boundary, content_type, content_length, + [&](const std::string &token) { strm.write(token); }, [&](const std::string &token) { strm.write(token); }, - [&](const char *token) { strm.write(token); }, [&](size_t offset, size_t length) { return write_content(strm, res.content_provider_, offset, length, - is_shutting_down) >= 0; + is_shutting_down); }); } -inline std::pair -get_range_offset_and_length(const Request &req, const Response &res, - size_t index) { - auto r = req.ranges[index]; - - if (r.second == -1) { - r.second = static_cast(res.content_length_) - 1; - } - - return std::make_pair(r.first, r.second - r.first + 1); -} - inline bool expect_content(const Request &req) { if (req.method == "POST" || req.method == "PUT" || req.method == "PATCH" || req.method == "PRI" || req.method == "DELETE") { @@ -3235,8 +4904,8 @@ inline bool expect_content(const Request &req) { return false; } -inline bool has_crlf(const char *s) { - auto p = s; +inline bool has_crlf(const std::string &s) { + auto p = s.c_str(); while (*p) { if (*p == '\r' || *p == '\n') { return true; } p++; @@ -3245,52 +4914,51 @@ inline bool has_crlf(const char *s) { } #ifdef CPPHTTPLIB_OPENSSL_SUPPORT -template -inline std::string message_digest(const std::string &s, Init init, - Update update, Final final, - size_t digest_length) { - using namespace std; +inline std::string message_digest(const std::string &s, const EVP_MD *algo) { + auto context = std::unique_ptr( + EVP_MD_CTX_new(), EVP_MD_CTX_free); - std::vector md(digest_length, 0); - CTX ctx; - init(&ctx); - update(&ctx, s.data(), s.size()); - final(md.data(), &ctx); + unsigned int hash_length = 0; + unsigned char hash[EVP_MAX_MD_SIZE]; - stringstream ss; - for (auto c : md) { - ss << setfill('0') << setw(2) << hex << (unsigned int)c; + EVP_DigestInit_ex(context.get(), algo, nullptr); + EVP_DigestUpdate(context.get(), s.c_str(), s.size()); + EVP_DigestFinal_ex(context.get(), hash, &hash_length); + + std::stringstream ss; + for 
(auto i = 0u; i < hash_length; ++i) { + ss << std::hex << std::setw(2) << std::setfill('0') + << static_cast(hash[i]); } + return ss.str(); } inline std::string MD5(const std::string &s) { - return message_digest(s, MD5_Init, MD5_Update, MD5_Final, - MD5_DIGEST_LENGTH); + return message_digest(s, EVP_md5()); } inline std::string SHA_256(const std::string &s) { - return message_digest(s, SHA256_Init, SHA256_Update, SHA256_Final, - SHA256_DIGEST_LENGTH); + return message_digest(s, EVP_sha256()); } inline std::string SHA_512(const std::string &s) { - return message_digest(s, SHA512_Init, SHA512_Update, SHA512_Final, - SHA512_DIGEST_LENGTH); + return message_digest(s, EVP_sha512()); } #endif -#ifdef _WIN32 #ifdef CPPHTTPLIB_OPENSSL_SUPPORT +#ifdef _WIN32 // NOTE: This code came up with the following stackoverflow post: // https://stackoverflow.com/questions/9507184/can-openssl-on-windows-use-the-system-certificate-store inline bool load_system_certs_on_windows(X509_STORE *store) { auto hStore = CertOpenSystemStoreW((HCRYPTPROV_LEGACY)NULL, L"ROOT"); - if (!hStore) { return false; } + auto result = false; PCCERT_CONTEXT pContext = NULL; - while (pContext = CertEnumCertificatesInStore(hStore, pContext)) { + while ((pContext = CertEnumCertificatesInStore(hStore, pContext)) != + nullptr) { auto encoded_cert = static_cast(pContext->pbCertEncoded); @@ -3298,24 +4966,121 @@ inline bool load_system_certs_on_windows(X509_STORE *store) { if (x509) { X509_STORE_add_cert(store, x509); X509_free(x509); + result = true; } } CertFreeCertificateContext(pContext); CertCloseStore(hStore, 0); + return result; +} +#elif defined(CPPHTTPLIB_USE_CERTS_FROM_MACOSX_KEYCHAIN) && defined(__APPLE__) +#if TARGET_OS_OSX +template +using CFObjectPtr = + std::unique_ptr::type, void (*)(CFTypeRef)>; + +inline void cf_object_ptr_deleter(CFTypeRef obj) { + if (obj) { CFRelease(obj); } +} + +inline bool retrieve_certs_from_keychain(CFObjectPtr &certs) { + CFStringRef keys[] = {kSecClass, kSecMatchLimit, kSecReturnRef}; + CFTypeRef values[] = {kSecClassCertificate, kSecMatchLimitAll, + kCFBooleanTrue}; + + CFObjectPtr query( + CFDictionaryCreate(nullptr, reinterpret_cast(keys), values, + sizeof(keys) / sizeof(keys[0]), + &kCFTypeDictionaryKeyCallBacks, + &kCFTypeDictionaryValueCallBacks), + cf_object_ptr_deleter); + + if (!query) { return false; } + + CFTypeRef security_items = nullptr; + if (SecItemCopyMatching(query.get(), &security_items) != errSecSuccess || + CFArrayGetTypeID() != CFGetTypeID(security_items)) { + return false; + } + + certs.reset(reinterpret_cast(security_items)); return true; } -#endif +inline bool retrieve_root_certs_from_keychain(CFObjectPtr &certs) { + CFArrayRef root_security_items = nullptr; + if (SecTrustCopyAnchorCertificates(&root_security_items) != errSecSuccess) { + return false; + } + + certs.reset(root_security_items); + return true; +} + +inline bool add_certs_to_x509_store(CFArrayRef certs, X509_STORE *store) { + auto result = false; + for (auto i = 0; i < CFArrayGetCount(certs); ++i) { + const auto cert = reinterpret_cast( + CFArrayGetValueAtIndex(certs, i)); + + if (SecCertificateGetTypeID() != CFGetTypeID(cert)) { continue; } + + CFDataRef cert_data = nullptr; + if (SecItemExport(cert, kSecFormatX509Cert, 0, nullptr, &cert_data) != + errSecSuccess) { + continue; + } + + CFObjectPtr cert_data_ptr(cert_data, cf_object_ptr_deleter); + + auto encoded_cert = static_cast( + CFDataGetBytePtr(cert_data_ptr.get())); + + auto x509 = + d2i_X509(NULL, &encoded_cert, 
CFDataGetLength(cert_data_ptr.get())); + + if (x509) { + X509_STORE_add_cert(store, x509); + X509_free(x509); + result = true; + } + } + + return result; +} + +inline bool load_system_certs_on_macos(X509_STORE *store) { + auto result = false; + CFObjectPtr certs(nullptr, cf_object_ptr_deleter); + if (retrieve_certs_from_keychain(certs) && certs) { + result = add_certs_to_x509_store(certs.get(), store); + } + + if (retrieve_root_certs_from_keychain(certs) && certs) { + result = add_certs_to_x509_store(certs.get(), store) || result; + } + + return result; +} +#endif // TARGET_OS_OSX +#endif // _WIN32 +#endif // CPPHTTPLIB_OPENSSL_SUPPORT + +#ifdef _WIN32 class WSInit { public: WSInit() { WSADATA wsaData; - WSAStartup(0x0002, &wsaData); + if (WSAStartup(0x0002, &wsaData) == 0) is_valid_ = true; } - ~WSInit() { WSACleanup(); } + ~WSInit() { + if (is_valid_) WSACleanup(); + } + + bool is_valid_ = false; }; static WSInit wsinit_; @@ -3326,45 +5091,57 @@ inline std::pair make_digest_authentication_header( const Request &req, const std::map &auth, size_t cnonce_count, const std::string &cnonce, const std::string &username, const std::string &password, bool is_proxy = false) { - using namespace std; - - string nc; + std::string nc; { - stringstream ss; - ss << setfill('0') << setw(8) << hex << cnonce_count; + std::stringstream ss; + ss << std::setfill('0') << std::setw(8) << std::hex << cnonce_count; nc = ss.str(); } - auto qop = auth.at("qop"); - if (qop.find("auth-int") != std::string::npos) { - qop = "auth-int"; - } else { - qop = "auth"; + std::string qop; + if (auth.find("qop") != auth.end()) { + qop = auth.at("qop"); + if (qop.find("auth-int") != std::string::npos) { + qop = "auth-int"; + } else if (qop.find("auth") != std::string::npos) { + qop = "auth"; + } else { + qop.clear(); + } } std::string algo = "MD5"; if (auth.find("algorithm") != auth.end()) { algo = auth.at("algorithm"); } - string response; + std::string response; { - auto H = algo == "SHA-256" - ? detail::SHA_256 - : algo == "SHA-512" ? detail::SHA_512 : detail::MD5; + auto H = algo == "SHA-256" ? detail::SHA_256 + : algo == "SHA-512" ? detail::SHA_512 + : detail::MD5; auto A1 = username + ":" + auth.at("realm") + ":" + password; auto A2 = req.method + ":" + req.path; if (qop == "auth-int") { A2 += ":" + H(req.body); } - response = H(H(A1) + ":" + auth.at("nonce") + ":" + nc + ":" + cnonce + - ":" + qop + ":" + H(A2)); + if (qop.empty()) { + response = H(H(A1) + ":" + auth.at("nonce") + ":" + H(A2)); + } else { + response = H(H(A1) + ":" + auth.at("nonce") + ":" + nc + ":" + cnonce + + ":" + qop + ":" + H(A2)); + } } + auto opaque = (auth.find("opaque") != auth.end()) ? auth.at("opaque") : ""; + auto field = "Digest username=\"" + username + "\", realm=\"" + auth.at("realm") + "\", nonce=\"" + auth.at("nonce") + "\", uri=\"" + req.path + "\", algorithm=" + algo + - ", qop=" + qop + ", nc=\"" + nc + "\", cnonce=\"" + cnonce + - "\", response=\"" + response + "\""; + (qop.empty() ? ", response=\"" + : ", qop=" + qop + ", nc=" + nc + ", cnonce=\"" + + cnonce + "\", response=\"") + + response + "\"" + + (opaque.empty() ? "" : ", opaque=\"" + opaque + "\""); auto key = is_proxy ? 
"Proxy-Authorization" : "Authorization"; return std::make_pair(key, field); @@ -3387,7 +5164,7 @@ inline bool parse_www_authenticate(const Response &res, s = s.substr(pos + 1); auto beg = std::sregex_iterator(s.begin(), s.end(), re); for (auto i = beg; i != std::sregex_iterator(); ++i) { - auto m = *i; + const auto &m = *i; auto key = s.substr(static_cast(m.position(1)), static_cast(m.length(1))); auto val = m.length(2) > 0 @@ -3404,20 +5181,6 @@ inline bool parse_www_authenticate(const Response &res, return false; } -// https://stackoverflow.com/questions/440133/how-do-i-create-a-random-alpha-numeric-string-in-c/440240#answer-440240 -inline std::string random_string(size_t length) { - auto randchar = []() -> char { - const char charset[] = "0123456789" - "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - "abcdefghijklmnopqrstuvwxyz"; - const size_t max_index = (sizeof(charset) - 1); - return charset[static_cast(rand()) % max_index]; - }; - std::string str(length, 0); - std::generate_n(str.begin(), length, randchar); - return str; -} - class ContentProviderAdapter { public: explicit ContentProviderAdapter( @@ -3434,27 +5197,74 @@ private: } // namespace detail +inline std::string hosted_at(const std::string &hostname) { + std::vector addrs; + hosted_at(hostname, addrs); + if (addrs.empty()) { return std::string(); } + return addrs[0]; +} + +inline void hosted_at(const std::string &hostname, + std::vector &addrs) { + struct addrinfo hints; + struct addrinfo *result; + + memset(&hints, 0, sizeof(struct addrinfo)); + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + hints.ai_protocol = 0; + + if (getaddrinfo(hostname.c_str(), nullptr, &hints, &result)) { +#if defined __linux__ && !defined __ANDROID__ + res_init(); +#endif + return; + } + + for (auto rp = result; rp; rp = rp->ai_next) { + const auto &addr = + *reinterpret_cast(rp->ai_addr); + std::string ip; + auto dummy = -1; + if (detail::get_ip_and_port(addr, sizeof(struct sockaddr_storage), ip, + dummy)) { + addrs.push_back(ip); + } + } + + freeaddrinfo(result); +} + +inline std::string append_query_params(const std::string &path, + const Params ¶ms) { + std::string path_with_query = path; + const static std::regex re("[^?]+\\?.*"); + auto delm = std::regex_match(path, re) ? '&' : '?'; + path_with_query += delm + detail::params_to_query_str(params); + return path_with_query; +} + // Header utilities -inline std::pair make_range_header(Ranges ranges) { +inline std::pair +make_range_header(const Ranges &ranges) { std::string field = "bytes="; auto i = 0; - for (auto r : ranges) { + for (const auto &r : ranges) { if (i != 0) { field += ", "; } if (r.first != -1) { field += std::to_string(r.first); } field += '-'; if (r.second != -1) { field += std::to_string(r.second); } i++; } - return std::make_pair("Range", field); + return std::make_pair("Range", std::move(field)); } inline std::pair make_basic_authentication_header(const std::string &username, - const std::string &password, - bool is_proxy = false) { + const std::string &password, bool is_proxy) { auto field = "Basic " + detail::base64_encode(username + ":" + password); auto key = is_proxy ? "Proxy-Authorization" : "Authorization"; - return std::make_pair(key, field); + return std::make_pair(key, std::move(field)); } inline std::pair @@ -3462,45 +5272,37 @@ make_bearer_token_authentication_header(const std::string &token, bool is_proxy = false) { auto field = "Bearer " + token; auto key = is_proxy ? 
"Proxy-Authorization" : "Authorization"; - return std::make_pair(key, field); + return std::make_pair(key, std::move(field)); } // Request implementation -inline bool Request::has_header(const char *key) const { +inline bool Request::has_header(const std::string &key) const { return detail::has_header(headers, key); } -inline std::string Request::get_header_value(const char *key, size_t id) const { +inline std::string Request::get_header_value(const std::string &key, + size_t id) const { return detail::get_header_value(headers, key, id, ""); } -template -inline T Request::get_header_value(const char *key, size_t id) const { - return detail::get_header_value(headers, key, id, 0); -} - -inline size_t Request::get_header_value_count(const char *key) const { +inline size_t Request::get_header_value_count(const std::string &key) const { auto r = headers.equal_range(key); return static_cast(std::distance(r.first, r.second)); } -inline void Request::set_header(const char *key, const char *val) { +inline void Request::set_header(const std::string &key, + const std::string &val) { if (!detail::has_crlf(key) && !detail::has_crlf(val)) { headers.emplace(key, val); } } -inline void Request::set_header(const char *key, const std::string &val) { - if (!detail::has_crlf(key) && !detail::has_crlf(val.c_str())) { - headers.emplace(key, val); - } -} - -inline bool Request::has_param(const char *key) const { +inline bool Request::has_param(const std::string &key) const { return params.find(key) != params.end(); } -inline std::string Request::get_param_value(const char *key, size_t id) const { +inline std::string Request::get_param_value(const std::string &key, + size_t id) const { auto rng = params.equal_range(key); auto it = rng.first; std::advance(it, static_cast(id)); @@ -3508,118 +5310,139 @@ inline std::string Request::get_param_value(const char *key, size_t id) const { return std::string(); } -inline size_t Request::get_param_value_count(const char *key) const { +inline size_t Request::get_param_value_count(const std::string &key) const { auto r = params.equal_range(key); return static_cast(std::distance(r.first, r.second)); } inline bool Request::is_multipart_form_data() const { const auto &content_type = get_header_value("Content-Type"); - return !content_type.find("multipart/form-data"); + return !content_type.rfind("multipart/form-data", 0); } -inline bool Request::has_file(const char *key) const { +inline bool Request::has_file(const std::string &key) const { return files.find(key) != files.end(); } -inline MultipartFormData Request::get_file_value(const char *key) const { +inline MultipartFormData Request::get_file_value(const std::string &key) const { auto it = files.find(key); if (it != files.end()) { return it->second; } return MultipartFormData(); } +inline std::vector +Request::get_file_values(const std::string &key) const { + std::vector values; + auto rng = files.equal_range(key); + for (auto it = rng.first; it != rng.second; it++) { + values.push_back(it->second); + } + return values; +} + // Response implementation -inline bool Response::has_header(const char *key) const { +inline bool Response::has_header(const std::string &key) const { return headers.find(key) != headers.end(); } -inline std::string Response::get_header_value(const char *key, +inline std::string Response::get_header_value(const std::string &key, size_t id) const { return detail::get_header_value(headers, key, id, ""); } -template -inline T Response::get_header_value(const char *key, size_t id) const { - return 
detail::get_header_value(headers, key, id, 0); -} - -inline size_t Response::get_header_value_count(const char *key) const { +inline size_t Response::get_header_value_count(const std::string &key) const { auto r = headers.equal_range(key); return static_cast(std::distance(r.first, r.second)); } -inline void Response::set_header(const char *key, const char *val) { +inline void Response::set_header(const std::string &key, + const std::string &val) { if (!detail::has_crlf(key) && !detail::has_crlf(val)) { headers.emplace(key, val); } } -inline void Response::set_header(const char *key, const std::string &val) { - if (!detail::has_crlf(key) && !detail::has_crlf(val.c_str())) { - headers.emplace(key, val); - } -} - -inline void Response::set_redirect(const char *url, int stat) { +inline void Response::set_redirect(const std::string &url, int stat) { if (!detail::has_crlf(url)) { set_header("Location", url); if (300 <= stat && stat < 400) { this->status = stat; } else { - this->status = 302; + this->status = StatusCode::Found_302; } } } -inline void Response::set_redirect(const std::string &url, int stat) { - set_redirect(url.c_str(), stat); -} - inline void Response::set_content(const char *s, size_t n, - const char *content_type) { + const std::string &content_type) { body.assign(s, n); + + auto rng = headers.equal_range("Content-Type"); + headers.erase(rng.first, rng.second); set_header("Content-Type", content_type); } -inline void Response::set_content(std::string s, const char *content_type) { +inline void Response::set_content(const std::string &s, + const std::string &content_type) { + set_content(s.data(), s.size(), content_type); +} + +inline void Response::set_content(std::string &&s, + const std::string &content_type) { body = std::move(s); + + auto rng = headers.equal_range("Content-Type"); + headers.erase(rng.first, rng.second); set_header("Content-Type", content_type); } -inline void -Response::set_content_provider(size_t in_length, const char *content_type, - ContentProvider provider, - const std::function &resource_releaser) { - assert(in_length > 0); +inline void Response::set_content_provider( + size_t in_length, const std::string &content_type, ContentProvider provider, + ContentProviderResourceReleaser resource_releaser) { set_header("Content-Type", content_type); content_length_ = in_length; - content_provider_ = std::move(provider); - content_provider_resource_releaser_ = resource_releaser; - is_chunked_content_provider = false; + if (in_length > 0) { content_provider_ = std::move(provider); } + content_provider_resource_releaser_ = std::move(resource_releaser); + is_chunked_content_provider_ = false; } -inline void -Response::set_content_provider(const char *content_type, - ContentProviderWithoutLength provider, - const std::function &resource_releaser) { +inline void Response::set_content_provider( + const std::string &content_type, ContentProviderWithoutLength provider, + ContentProviderResourceReleaser resource_releaser) { set_header("Content-Type", content_type); content_length_ = 0; content_provider_ = detail::ContentProviderAdapter(std::move(provider)); - content_provider_resource_releaser_ = resource_releaser; - is_chunked_content_provider = false; + content_provider_resource_releaser_ = std::move(resource_releaser); + is_chunked_content_provider_ = false; } inline void Response::set_chunked_content_provider( - const char *content_type, ContentProviderWithoutLength provider, - const std::function &resource_releaser) { + const std::string &content_type, 
ContentProviderWithoutLength provider, + ContentProviderResourceReleaser resource_releaser) { set_header("Content-Type", content_type); content_length_ = 0; content_provider_ = detail::ContentProviderAdapter(std::move(provider)); - content_provider_resource_releaser_ = resource_releaser; - is_chunked_content_provider = true; + content_provider_resource_releaser_ = std::move(resource_releaser); + is_chunked_content_provider_ = true; } -// Rstream implementation +// Result implementation +inline bool Result::has_request_header(const std::string &key) const { + return request_headers_.find(key) != request_headers_.end(); +} + +inline std::string Result::get_request_header_value(const std::string &key, + size_t id) const { + return detail::get_header_value(request_headers_, key, id, ""); +} + +inline size_t +Result::get_request_header_value_count(const std::string &key) const { + auto r = request_headers_.equal_range(key); + return static_cast(std::distance(r.first, r.second)); +} + +// Stream implementation inline ssize_t Stream::write(const char *ptr) { return write(ptr, strlen(ptr)); } @@ -3628,40 +5451,6 @@ inline ssize_t Stream::write(const std::string &s) { return write(s.data(), s.size()); } -template -inline ssize_t Stream::write_format(const char *fmt, const Args &... args) { - const auto bufsiz = 2048; - std::array buf; - -#if defined(_MSC_VER) && _MSC_VER < 1900 - auto sn = _snprintf_s(buf.data(), bufsiz - 1, buf.size() - 1, fmt, args...); -#else - auto sn = snprintf(buf.data(), buf.size() - 1, fmt, args...); -#endif - if (sn <= 0) { return sn; } - - auto n = static_cast(sn); - - if (n >= buf.size() - 1) { - std::vector glowable_buf(buf.size()); - - while (n >= glowable_buf.size() - 1) { - glowable_buf.resize(glowable_buf.size() * 2); -#if defined(_MSC_VER) && _MSC_VER < 1900 - n = static_cast(_snprintf_s(&glowable_buf[0], glowable_buf.size(), - glowable_buf.size() - 1, fmt, - args...)); -#else - n = static_cast( - snprintf(&glowable_buf[0], glowable_buf.size() - 1, fmt, args...)); -#endif - } - return write(&glowable_buf[0], n); - } else { - return write(buf.data(), n); - } -} - namespace detail { // Socket stream implementation @@ -3672,42 +5461,74 @@ inline SocketStream::SocketStream(socket_t sock, time_t read_timeout_sec, : sock_(sock), read_timeout_sec_(read_timeout_sec), read_timeout_usec_(read_timeout_usec), write_timeout_sec_(write_timeout_sec), - write_timeout_usec_(write_timeout_usec) {} + write_timeout_usec_(write_timeout_usec), read_buff_(read_buff_size_, 0) {} -inline SocketStream::~SocketStream() {} +inline SocketStream::~SocketStream() = default; inline bool SocketStream::is_readable() const { return select_read(sock_, read_timeout_sec_, read_timeout_usec_) > 0; } inline bool SocketStream::is_writable() const { - return select_write(sock_, write_timeout_sec_, write_timeout_usec_) > 0; + return select_write(sock_, write_timeout_sec_, write_timeout_usec_) > 0 && + is_socket_alive(sock_); } inline ssize_t SocketStream::read(char *ptr, size_t size) { +#ifdef _WIN32 + size = + (std::min)(size, static_cast((std::numeric_limits::max)())); +#else + size = (std::min)(size, + static_cast((std::numeric_limits::max)())); +#endif + + if (read_buff_off_ < read_buff_content_size_) { + auto remaining_size = read_buff_content_size_ - read_buff_off_; + if (size <= remaining_size) { + memcpy(ptr, read_buff_.data() + read_buff_off_, size); + read_buff_off_ += size; + return static_cast(size); + } else { + memcpy(ptr, read_buff_.data() + read_buff_off_, remaining_size); + read_buff_off_ 
+= remaining_size; + return static_cast(remaining_size); + } + } + if (!is_readable()) { return -1; } -#ifdef _WIN32 - if (size > static_cast((std::numeric_limits::max)())) { - return -1; + read_buff_off_ = 0; + read_buff_content_size_ = 0; + + if (size < read_buff_size_) { + auto n = read_socket(sock_, read_buff_.data(), read_buff_size_, + CPPHTTPLIB_RECV_FLAGS); + if (n <= 0) { + return n; + } else if (n <= static_cast(size)) { + memcpy(ptr, read_buff_.data(), static_cast(n)); + return n; + } else { + memcpy(ptr, read_buff_.data(), size); + read_buff_off_ = size; + read_buff_content_size_ = static_cast(n); + return static_cast(size); + } + } else { + return read_socket(sock_, ptr, size, CPPHTTPLIB_RECV_FLAGS); } - return recv(sock_, ptr, static_cast(size), 0); -#else - return handle_EINTR([&]() { return recv(sock_, ptr, size, 0); }); -#endif } inline ssize_t SocketStream::write(const char *ptr, size_t size) { if (!is_writable()) { return -1; } -#ifdef _WIN32 - if (size > static_cast((std::numeric_limits::max)())) { - return -1; - } - return send(sock_, ptr, static_cast(size), 0); -#else - return handle_EINTR([&]() { return send(sock_, ptr, size, 0); }); +#if defined(_WIN32) && !defined(_WIN64) + size = + (std::min)(size, static_cast((std::numeric_limits::max)())); #endif + + return send_socket(sock_, ptr, size, CPPHTTPLIB_SEND_FLAGS); } inline void SocketStream::get_remote_ip_and_port(std::string &ip, @@ -3715,13 +5536,20 @@ inline void SocketStream::get_remote_ip_and_port(std::string &ip, return detail::get_remote_ip_and_port(sock_, ip, port); } +inline void SocketStream::get_local_ip_and_port(std::string &ip, + int &port) const { + return detail::get_local_ip_and_port(sock_, ip, port); +} + +inline socket_t SocketStream::socket() const { return sock_; } + // Buffer stream implementation inline bool BufferStream::is_readable() const { return true; } inline bool BufferStream::is_writable() const { return true; } inline ssize_t BufferStream::read(char *ptr, size_t size) { -#if defined(_MSC_VER) && _MSC_VER <= 1900 +#if defined(_MSC_VER) && _MSC_VER < 1910 auto len_read = buffer._Copy_s(ptr, size, size, position); #else auto len_read = buffer.copy(ptr, size, position); @@ -3738,98 +5566,206 @@ inline ssize_t BufferStream::write(const char *ptr, size_t size) { inline void BufferStream::get_remote_ip_and_port(std::string & /*ip*/, int & /*port*/) const {} +inline void BufferStream::get_local_ip_and_port(std::string & /*ip*/, + int & /*port*/) const {} + +inline socket_t BufferStream::socket() const { return 0; } + inline const std::string &BufferStream::get_buffer() const { return buffer; } +inline PathParamsMatcher::PathParamsMatcher(const std::string &pattern) { + // One past the last ending position of a path param substring + std::size_t last_param_end = 0; + +#ifndef CPPHTTPLIB_NO_EXCEPTIONS + // Needed to ensure that parameter names are unique during matcher + // construction + // If exceptions are disabled, only last duplicate path + // parameter will be set + std::unordered_set param_name_set; +#endif + + while (true) { + const auto marker_pos = pattern.find(marker, last_param_end); + if (marker_pos == std::string::npos) { break; } + + static_fragments_.push_back( + pattern.substr(last_param_end, marker_pos - last_param_end)); + + const auto param_name_start = marker_pos + 1; + + auto sep_pos = pattern.find(separator, param_name_start); + if (sep_pos == std::string::npos) { sep_pos = pattern.length(); } + + auto param_name = + pattern.substr(param_name_start, sep_pos - 
param_name_start); + +#ifndef CPPHTTPLIB_NO_EXCEPTIONS + if (param_name_set.find(param_name) != param_name_set.cend()) { + std::string msg = "Encountered path parameter '" + param_name + + "' multiple times in route pattern '" + pattern + "'."; + throw std::invalid_argument(msg); + } +#endif + + param_names_.push_back(std::move(param_name)); + + last_param_end = sep_pos + 1; + } + + if (last_param_end < pattern.length()) { + static_fragments_.push_back(pattern.substr(last_param_end)); + } +} + +inline bool PathParamsMatcher::match(Request &request) const { + request.matches = std::smatch(); + request.path_params.clear(); + request.path_params.reserve(param_names_.size()); + + // One past the position at which the path matched the pattern last time + std::size_t starting_pos = 0; + for (size_t i = 0; i < static_fragments_.size(); ++i) { + const auto &fragment = static_fragments_[i]; + + if (starting_pos + fragment.length() > request.path.length()) { + return false; + } + + // Avoid unnecessary allocation by using strncmp instead of substr + + // comparison + if (std::strncmp(request.path.c_str() + starting_pos, fragment.c_str(), + fragment.length()) != 0) { + return false; + } + + starting_pos += fragment.length(); + + // Should only happen when we have a static fragment after a param + // Example: '/users/:id/subscriptions' + // The 'subscriptions' fragment here does not have a corresponding param + if (i >= param_names_.size()) { continue; } + + auto sep_pos = request.path.find(separator, starting_pos); + if (sep_pos == std::string::npos) { sep_pos = request.path.length(); } + + const auto ¶m_name = param_names_[i]; + + request.path_params.emplace( + param_name, request.path.substr(starting_pos, sep_pos - starting_pos)); + + // Mark everythin up to '/' as matched + starting_pos = sep_pos + 1; + } + // Returns false if the path is longer than the pattern + return starting_pos >= request.path.length(); +} + +inline bool RegexMatcher::match(Request &request) const { + request.path_params.clear(); + return std::regex_match(request.path, request.matches, regex_); +} + } // namespace detail // HTTP server implementation inline Server::Server() : new_task_queue( - [] { return new ThreadPool(CPPHTTPLIB_THREAD_POOL_COUNT); }), - svr_sock_(INVALID_SOCKET), is_running_(false) { + [] { return new ThreadPool(CPPHTTPLIB_THREAD_POOL_COUNT); }) { #ifndef _WIN32 signal(SIGPIPE, SIG_IGN); #endif } -inline Server::~Server() {} +inline Server::~Server() = default; -inline Server &Server::Get(const char *pattern, Handler handler) { - get_handlers_.push_back(std::make_pair(std::regex(pattern), handler)); +inline std::unique_ptr +Server::make_matcher(const std::string &pattern) { + if (pattern.find("/:") != std::string::npos) { + return detail::make_unique(pattern); + } else { + return detail::make_unique(pattern); + } +} + +inline Server &Server::Get(const std::string &pattern, Handler handler) { + get_handlers_.emplace_back(make_matcher(pattern), std::move(handler)); return *this; } -inline Server &Server::Post(const char *pattern, Handler handler) { - post_handlers_.push_back(std::make_pair(std::regex(pattern), handler)); +inline Server &Server::Post(const std::string &pattern, Handler handler) { + post_handlers_.emplace_back(make_matcher(pattern), std::move(handler)); return *this; } -inline Server &Server::Post(const char *pattern, +inline Server &Server::Post(const std::string &pattern, HandlerWithContentReader handler) { - post_handlers_for_content_reader_.push_back( - std::make_pair(std::regex(pattern), 
handler)); + post_handlers_for_content_reader_.emplace_back(make_matcher(pattern), + std::move(handler)); return *this; } -inline Server &Server::Put(const char *pattern, Handler handler) { - put_handlers_.push_back(std::make_pair(std::regex(pattern), handler)); +inline Server &Server::Put(const std::string &pattern, Handler handler) { + put_handlers_.emplace_back(make_matcher(pattern), std::move(handler)); return *this; } -inline Server &Server::Put(const char *pattern, +inline Server &Server::Put(const std::string &pattern, HandlerWithContentReader handler) { - put_handlers_for_content_reader_.push_back( - std::make_pair(std::regex(pattern), handler)); + put_handlers_for_content_reader_.emplace_back(make_matcher(pattern), + std::move(handler)); return *this; } -inline Server &Server::Patch(const char *pattern, Handler handler) { - patch_handlers_.push_back(std::make_pair(std::regex(pattern), handler)); +inline Server &Server::Patch(const std::string &pattern, Handler handler) { + patch_handlers_.emplace_back(make_matcher(pattern), std::move(handler)); return *this; } -inline Server &Server::Patch(const char *pattern, +inline Server &Server::Patch(const std::string &pattern, HandlerWithContentReader handler) { - patch_handlers_for_content_reader_.push_back( - std::make_pair(std::regex(pattern), handler)); + patch_handlers_for_content_reader_.emplace_back(make_matcher(pattern), + std::move(handler)); return *this; } -inline Server &Server::Delete(const char *pattern, Handler handler) { - delete_handlers_.push_back(std::make_pair(std::regex(pattern), handler)); +inline Server &Server::Delete(const std::string &pattern, Handler handler) { + delete_handlers_.emplace_back(make_matcher(pattern), std::move(handler)); return *this; } -inline Server &Server::Delete(const char *pattern, +inline Server &Server::Delete(const std::string &pattern, HandlerWithContentReader handler) { - delete_handlers_for_content_reader_.push_back( - std::make_pair(std::regex(pattern), handler)); + delete_handlers_for_content_reader_.emplace_back(make_matcher(pattern), + std::move(handler)); return *this; } -inline Server &Server::Options(const char *pattern, Handler handler) { - options_handlers_.push_back(std::make_pair(std::regex(pattern), handler)); +inline Server &Server::Options(const std::string &pattern, Handler handler) { + options_handlers_.emplace_back(make_matcher(pattern), std::move(handler)); return *this; } -inline bool Server::set_base_dir(const char *dir, const char *mount_point) { +inline bool Server::set_base_dir(const std::string &dir, + const std::string &mount_point) { return set_mount_point(mount_point, dir); } -inline bool Server::set_mount_point(const char *mount_point, const char *dir) { +inline bool Server::set_mount_point(const std::string &mount_point, + const std::string &dir, Headers headers) { if (detail::is_dir(dir)) { - std::string mnt = mount_point ? mount_point : "/"; + std::string mnt = !mount_point.empty() ? 
mount_point : "/"; if (!mnt.empty() && mnt[0] == '/') { - base_dirs_.emplace_back(mnt, dir); + base_dirs_.push_back({mnt, dir, std::move(headers)}); return true; } } return false; } -inline bool Server::remove_mount_point(const char *mount_point) { +inline bool Server::remove_mount_point(const std::string &mount_point) { for (auto it = base_dirs_.begin(); it != base_dirs_.end(); ++it) { - if (it->first == mount_point) { + if (it->mount_point == mount_point) { base_dirs_.erase(it); return true; } @@ -3837,75 +5773,148 @@ inline bool Server::remove_mount_point(const char *mount_point) { return false; } -inline void Server::set_file_extension_and_mimetype_mapping(const char *ext, - const char *mime) { +inline Server & +Server::set_file_extension_and_mimetype_mapping(const std::string &ext, + const std::string &mime) { file_extension_and_mimetype_map_[ext] = mime; + return *this; } -inline void Server::set_file_request_handler(Handler handler) { +inline Server &Server::set_default_file_mimetype(const std::string &mime) { + default_file_mimetype_ = mime; + return *this; +} + +inline Server &Server::set_file_request_handler(Handler handler) { file_request_handler_ = std::move(handler); + return *this; } -inline void Server::set_error_handler(Handler handler) { +inline Server &Server::set_error_handler(HandlerWithResponse handler) { error_handler_ = std::move(handler); + return *this; } -inline void Server::set_tcp_nodelay(bool on) { tcp_nodelay_ = on; } - -inline void Server::set_socket_options(SocketOptions socket_options) { - socket_options_ = socket_options; +inline Server &Server::set_error_handler(Handler handler) { + error_handler_ = [handler](const Request &req, Response &res) { + handler(req, res); + return HandlerResponse::Handled; + }; + return *this; } -inline void Server::set_logger(Logger logger) { logger_ = std::move(logger); } +inline Server &Server::set_exception_handler(ExceptionHandler handler) { + exception_handler_ = std::move(handler); + return *this; +} -inline void +inline Server &Server::set_pre_routing_handler(HandlerWithResponse handler) { + pre_routing_handler_ = std::move(handler); + return *this; +} + +inline Server &Server::set_post_routing_handler(Handler handler) { + post_routing_handler_ = std::move(handler); + return *this; +} + +inline Server &Server::set_logger(Logger logger) { + logger_ = std::move(logger); + return *this; +} + +inline Server & Server::set_expect_100_continue_handler(Expect100ContinueHandler handler) { expect_100_continue_handler_ = std::move(handler); + return *this; } -inline void Server::set_keep_alive_max_count(size_t count) { +inline Server &Server::set_address_family(int family) { + address_family_ = family; + return *this; +} + +inline Server &Server::set_tcp_nodelay(bool on) { + tcp_nodelay_ = on; + return *this; +} + +inline Server &Server::set_socket_options(SocketOptions socket_options) { + socket_options_ = std::move(socket_options); + return *this; +} + +inline Server &Server::set_default_headers(Headers headers) { + default_headers_ = std::move(headers); + return *this; +} + +inline Server &Server::set_header_writer( + std::function const &writer) { + header_writer_ = writer; + return *this; +} + +inline Server &Server::set_keep_alive_max_count(size_t count) { keep_alive_max_count_ = count; + return *this; } -inline void Server::set_keep_alive_timeout(time_t sec) { +inline Server &Server::set_keep_alive_timeout(time_t sec) { keep_alive_timeout_sec_ = sec; + return *this; } -inline void Server::set_read_timeout(time_t sec, time_t 
usec) { +inline Server &Server::set_read_timeout(time_t sec, time_t usec) { read_timeout_sec_ = sec; read_timeout_usec_ = usec; + return *this; } -inline void Server::set_write_timeout(time_t sec, time_t usec) { +inline Server &Server::set_write_timeout(time_t sec, time_t usec) { write_timeout_sec_ = sec; write_timeout_usec_ = usec; + return *this; } -inline void Server::set_idle_interval(time_t sec, time_t usec) { +inline Server &Server::set_idle_interval(time_t sec, time_t usec) { idle_interval_sec_ = sec; idle_interval_usec_ = usec; + return *this; } -inline void Server::set_payload_max_length(size_t length) { +inline Server &Server::set_payload_max_length(size_t length) { payload_max_length_ = length; + return *this; } -inline bool Server::bind_to_port(const char *host, int port, int socket_flags) { - if (bind_internal(host, port, socket_flags) < 0) return false; - return true; +inline bool Server::bind_to_port(const std::string &host, int port, + int socket_flags) { + return bind_internal(host, port, socket_flags) >= 0; } -inline int Server::bind_to_any_port(const char *host, int socket_flags) { +inline int Server::bind_to_any_port(const std::string &host, int socket_flags) { return bind_internal(host, 0, socket_flags); } -inline bool Server::listen_after_bind() { return listen_internal(); } +inline bool Server::listen_after_bind() { + auto se = detail::scope_exit([&]() { done_ = true; }); + return listen_internal(); +} -inline bool Server::listen(const char *host, int port, int socket_flags) { +inline bool Server::listen(const std::string &host, int port, + int socket_flags) { + auto se = detail::scope_exit([&]() { done_ = true; }); return bind_to_port(host, port, socket_flags) && listen_internal(); } inline bool Server::is_running() const { return is_running_; } +inline void Server::wait_until_ready() const { + while (!is_running() && !done_) { + std::this_thread::sleep_for(std::chrono::milliseconds{1}); + } +} + inline void Server::stop() { if (is_running_) { assert(svr_sock_ != INVALID_SOCKET); @@ -3915,43 +5924,99 @@ inline void Server::stop() { } } -inline bool Server::parse_request_line(const char *s, Request &req) { - const static std::regex re( - "(GET|HEAD|POST|PUT|DELETE|CONNECT|OPTIONS|TRACE|PATCH|PRI) " - "(([^?]+)(?:\\?(.*?))?) 
(HTTP/1\\.[01])\r\n"); +inline bool Server::parse_request_line(const char *s, Request &req) const { + auto len = strlen(s); + if (len < 2 || s[len - 2] != '\r' || s[len - 1] != '\n') { return false; } + len -= 2; - std::cmatch m; - if (std::regex_match(s, m, re)) { - req.version = std::string(m[5]); - req.method = std::string(m[1]); - req.target = std::string(m[2]); - req.path = detail::decode_url(m[3], false); + { + size_t count = 0; - // Parse query text - auto len = std::distance(m[4].first, m[4].second); - if (len > 0) { detail::parse_query_text(m[4], req.params); } + detail::split(s, s + len, ' ', [&](const char *b, const char *e) { + switch (count) { + case 0: req.method = std::string(b, e); break; + case 1: req.target = std::string(b, e); break; + case 2: req.version = std::string(b, e); break; + default: break; + } + count++; + }); - return true; + if (count != 3) { return false; } } - return false; + static const std::set methods{ + "GET", "HEAD", "POST", "PUT", "DELETE", + "CONNECT", "OPTIONS", "TRACE", "PATCH", "PRI"}; + + if (methods.find(req.method) == methods.end()) { return false; } + + if (req.version != "HTTP/1.1" && req.version != "HTTP/1.0") { return false; } + + { + // Skip URL fragment + for (size_t i = 0; i < req.target.size(); i++) { + if (req.target[i] == '#') { + req.target.erase(i); + break; + } + } + + size_t count = 0; + + detail::split(req.target.data(), req.target.data() + req.target.size(), '?', + 2, [&](const char *b, const char *e) { + switch (count) { + case 0: + req.path = detail::decode_url(std::string(b, e), false); + break; + case 1: { + if (e - b > 0) { + detail::parse_query_text(std::string(b, e), req.params); + } + break; + } + default: break; + } + count++; + }); + + if (count > 2) { return false; } + } + + return true; } inline bool Server::write_response(Stream &strm, bool close_connection, - const Request &req, Response &res) { + Request &req, Response &res) { + // NOTE: `req.ranges` should be empty, otherwise it will be applied + // incorrectly to the error content. 
+ req.ranges.clear(); + return write_response_core(strm, close_connection, req, res, false); +} + +inline bool Server::write_response_with_content(Stream &strm, + bool close_connection, + const Request &req, + Response &res) { + return write_response_core(strm, close_connection, req, res, true); +} + +inline bool Server::write_response_core(Stream &strm, bool close_connection, + const Request &req, Response &res, + bool need_apply_ranges) { assert(res.status != -1); - if (400 <= res.status && error_handler_) { error_handler_(req, res); } - - detail::BufferStream bstrm; - - // Response line - if (!bstrm.write_format("HTTP/1.1 %d %s\r\n", res.status, - detail::status_message(res.status))) { - return false; + if (400 <= res.status && error_handler_ && + error_handler_(req, res) == HandlerResponse::Handled) { + need_apply_ranges = true; } - // Headers + std::string content_type; + std::string boundary; + if (need_apply_ranges) { apply_ranges(req, res, content_type, boundary); } + + // Prepare additional headers if (close_connection || req.get_header_value("Connection") == "close") { res.set_header("Connection", "close"); } else { @@ -3966,125 +6031,44 @@ inline bool Server::write_response(Stream &strm, bool close_connection, res.set_header("Content-Type", "text/plain"); } + if (!res.has_header("Content-Length") && res.body.empty() && + !res.content_length_ && !res.content_provider_) { + res.set_header("Content-Length", "0"); + } + if (!res.has_header("Accept-Ranges") && req.method == "HEAD") { res.set_header("Accept-Ranges", "bytes"); } - std::string content_type; - std::string boundary; + if (post_routing_handler_) { post_routing_handler_(req, res); } - if (req.ranges.size() > 1) { - boundary = detail::make_multipart_data_boundary(); + // Response line and headers + { + detail::BufferStream bstrm; - auto it = res.headers.find("Content-Type"); - if (it != res.headers.end()) { - content_type = it->second; - res.headers.erase(it); + if (!bstrm.write_format("HTTP/1.1 %d %s\r\n", res.status, + status_message(res.status))) { + return false; } - res.headers.emplace("Content-Type", - "multipart/byteranges; boundary=" + boundary); + if (!header_writer_(bstrm, res.headers)) { return false; } + + // Flush buffer + auto &data = bstrm.get_buffer(); + detail::write_data(strm, data.data(), data.size()); } - auto type = detail::encoding_type(req, res); - - if (res.body.empty()) { - if (res.content_length_ > 0) { - size_t length = 0; - if (req.ranges.empty()) { - length = res.content_length_; - } else if (req.ranges.size() == 1) { - auto offsets = - detail::get_range_offset_and_length(req, res.content_length_, 0); - auto offset = offsets.first; - length = offsets.second; - auto content_range = detail::make_content_range_header_field( - offset, length, res.content_length_); - res.set_header("Content-Range", content_range); - } else { - length = detail::get_multipart_ranges_data_length(req, res, boundary, - content_type); - } - res.set_header("Content-Length", std::to_string(length)); - } else { - if (res.content_provider_) { - if (res.is_chunked_content_provider) { - res.set_header("Transfer-Encoding", "chunked"); - if (type == detail::EncodingType::Gzip) { - res.set_header("Content-Encoding", "gzip"); - } else if (type == detail::EncodingType::Brotli) { - res.set_header("Content-Encoding", "br"); - } - } - } else { - res.set_header("Content-Length", "0"); - } - } - } else { - if (req.ranges.empty()) { - ; - } else if (req.ranges.size() == 1) { - auto offsets = - detail::get_range_offset_and_length(req, 
res.body.size(), 0); - auto offset = offsets.first; - auto length = offsets.second; - auto content_range = detail::make_content_range_header_field( - offset, length, res.body.size()); - res.set_header("Content-Range", content_range); - res.body = res.body.substr(offset, length); - } else { - res.body = - detail::make_multipart_ranges_data(req, res, boundary, content_type); - } - - if (type != detail::EncodingType::None) { - std::shared_ptr compressor; - - if (type == detail::EncodingType::Gzip) { -#ifdef CPPHTTPLIB_ZLIB_SUPPORT - compressor = std::make_shared(); - res.set_header("Content-Encoding", "gzip"); -#endif - } else if (type == detail::EncodingType::Brotli) { -#ifdef CPPHTTPLIB_BROTLI_SUPPORT - compressor = std::make_shared(); - res.set_header("Content-Encoding", "brotli"); -#endif - } - - if (compressor) { - std::string compressed; - - if (!compressor->compress(res.body.data(), res.body.size(), true, - [&](const char *data, size_t data_len) { - compressed.append(data, data_len); - return true; - })) { - return false; - } - - res.body.swap(compressed); - } - } - - auto length = std::to_string(res.body.size()); - res.set_header("Content-Length", length); - } - - if (!detail::write_headers(bstrm, res, Headers())) { return false; } - - // Flush buffer - auto &data = bstrm.get_buffer(); - strm.write(data.data(), data.size()); - // Body auto ret = true; if (req.method != "HEAD") { if (!res.body.empty()) { - if (!strm.write(res.body)) { ret = false; } + if (!detail::write_data(strm, res.body.data(), res.body.size())) { + ret = false; + } } else if (res.content_provider_) { - if (!write_content_with_provider(strm, req, res, boundary, - content_type)) { + if (write_content_with_provider(strm, req, res, boundary, content_type)) { + res.content_provider_success_ = true; + } else { ret = false; } } @@ -4106,59 +6090,50 @@ Server::write_content_with_provider(Stream &strm, const Request &req, if (res.content_length_ > 0) { if (req.ranges.empty()) { - if (detail::write_content(strm, res.content_provider_, 0, - res.content_length_, is_shutting_down) < 0) { - return false; - } + return detail::write_content(strm, res.content_provider_, 0, + res.content_length_, is_shutting_down); } else if (req.ranges.size() == 1) { - auto offsets = - detail::get_range_offset_and_length(req, res.content_length_, 0); - auto offset = offsets.first; - auto length = offsets.second; - if (detail::write_content(strm, res.content_provider_, offset, length, - is_shutting_down) < 0) { - return false; - } + auto offset_and_length = detail::get_range_offset_and_length( + req.ranges[0], res.content_length_); + + return detail::write_content(strm, res.content_provider_, + offset_and_length.first, + offset_and_length.second, is_shutting_down); } else { - if (!detail::write_multipart_ranges_data( - strm, req, res, boundary, content_type, is_shutting_down)) { - return false; - } + return detail::write_multipart_ranges_data( + strm, req, res, boundary, content_type, res.content_length_, + is_shutting_down); } } else { - if (res.is_chunked_content_provider) { + if (res.is_chunked_content_provider_) { auto type = detail::encoding_type(req, res); - std::shared_ptr compressor; + std::unique_ptr compressor; if (type == detail::EncodingType::Gzip) { #ifdef CPPHTTPLIB_ZLIB_SUPPORT - compressor = std::make_shared(); + compressor = detail::make_unique(); #endif } else if (type == detail::EncodingType::Brotli) { #ifdef CPPHTTPLIB_BROTLI_SUPPORT - compressor = std::make_shared(); + compressor = detail::make_unique(); #endif } else { - 
compressor = std::make_shared(); + compressor = detail::make_unique(); } assert(compressor != nullptr); - if (detail::write_content_chunked(strm, res.content_provider_, - is_shutting_down, *compressor) < 0) { - return false; - } + return detail::write_content_chunked(strm, res.content_provider_, + is_shutting_down, *compressor); } else { - if (detail::write_content_without_length(strm, res.content_provider_, - is_shutting_down) < 0) { - return false; - } + return detail::write_content_without_length(strm, res.content_provider_, + is_shutting_down); } } - return true; } inline bool Server::read_content(Stream &strm, Request &req, Response &res) { MultipartFormDataMap::iterator cur; + auto file_count = 0; if (read_content_core( strm, req, res, // Regular @@ -4169,6 +6144,9 @@ inline bool Server::read_content(Stream &strm, Request &req, Response &res) { }, // Multipart [&](const MultipartFormData &file) { + if (file_count++ == CPPHTTPLIB_MULTIPART_FORM_DATA_FILE_MAX_COUNT) { + return false; + } cur = req.files.emplace(file.name, file); return true; }, @@ -4180,6 +6158,10 @@ inline bool Server::read_content(Stream &strm, Request &req, Response &res) { })) { const auto &content_type = req.get_header_value("Content-Type"); if (!content_type.find("application/x-www-form-urlencoded")) { + if (req.body.size() > CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH) { + res.status = StatusCode::PayloadTooLarge_413; // NOTE: should be 414? + return false; + } detail::parse_query_text(req.body, req.params); } return true; @@ -4191,43 +6173,46 @@ inline bool Server::read_content_with_content_receiver( Stream &strm, Request &req, Response &res, ContentReceiver receiver, MultipartContentHeader multipart_header, ContentReceiver multipart_receiver) { - return read_content_core(strm, req, res, receiver, multipart_header, - multipart_receiver); + return read_content_core(strm, req, res, std::move(receiver), + std::move(multipart_header), + std::move(multipart_receiver)); } -inline bool Server::read_content_core(Stream &strm, Request &req, Response &res, - ContentReceiver receiver, - MultipartContentHeader mulitpart_header, - ContentReceiver multipart_receiver) { +inline bool +Server::read_content_core(Stream &strm, Request &req, Response &res, + ContentReceiver receiver, + MultipartContentHeader multipart_header, + ContentReceiver multipart_receiver) const { detail::MultipartFormDataParser multipart_form_data_parser; - ContentReceiver out; + ContentReceiverWithProgress out; if (req.is_multipart_form_data()) { const auto &content_type = req.get_header_value("Content-Type"); std::string boundary; if (!detail::parse_multipart_boundary(content_type, boundary)) { - res.status = 400; + res.status = StatusCode::BadRequest_400; return false; } multipart_form_data_parser.set_boundary(std::move(boundary)); - out = [&](const char *buf, size_t n) { + out = [&](const char *buf, size_t n, uint64_t /*off*/, uint64_t /*len*/) { /* For debug size_t pos = 0; while (pos < n) { - auto read_size = std::min(1, n - pos); + auto read_size = (std::min)(1, n - pos); auto ret = multipart_form_data_parser.parse( - buf + pos, read_size, multipart_receiver, mulitpart_header); + buf + pos, read_size, multipart_receiver, multipart_header); if (!ret) { return false; } pos += read_size; } return true; */ return multipart_form_data_parser.parse(buf, n, multipart_receiver, - mulitpart_header); + multipart_header); }; } else { - out = receiver; + out = [receiver](const char *buf, size_t n, uint64_t /*off*/, + uint64_t /*len*/) { return receiver(buf, 
n); }; } if (req.method == "DELETE" && !req.has_header("Content-Length")) { @@ -4241,7 +6226,7 @@ inline bool Server::read_content_core(Stream &strm, Request &req, Response &res, if (req.is_multipart_form_data()) { if (!multipart_form_data_parser.is_valid()) { - res.status = 400; + res.status = StatusCode::BadRequest_400; return false; } } @@ -4249,28 +6234,37 @@ inline bool Server::read_content_core(Stream &strm, Request &req, Response &res, return true; } -inline bool Server::handle_file_request(Request &req, Response &res, +inline bool Server::handle_file_request(const Request &req, Response &res, bool head) { - for (const auto &kv : base_dirs_) { - const auto &mount_point = kv.first; - const auto &base_dir = kv.second; - + for (const auto &entry : base_dirs_) { // Prefix match - if (!req.path.compare(0, mount_point.size(), mount_point)) { - std::string sub_path = "/" + req.path.substr(mount_point.size()); + if (!req.path.compare(0, entry.mount_point.size(), entry.mount_point)) { + std::string sub_path = "/" + req.path.substr(entry.mount_point.size()); if (detail::is_valid_path(sub_path)) { - auto path = base_dir + sub_path; + auto path = entry.base_dir + sub_path; if (path.back() == '/') { path += "index.html"; } if (detail::is_file(path)) { - detail::read_file(path, res.body); - auto type = - detail::find_content_type(path, file_extension_and_mimetype_map_); - if (type) { res.set_header("Content-Type", type); } - res.status = 200; + for (const auto &kv : entry.headers) { + res.set_header(kv.first, kv.second); + } + + auto mm = std::make_shared(path.c_str()); + if (!mm->is_open()) { return false; } + + res.set_content_provider( + mm->size(), + detail::find_content_type(path, file_extension_and_mimetype_map_, + default_file_mimetype_), + [mm](size_t offset, size_t length, DataSink &sink) -> bool { + sink.write(mm->data() + offset, length); + return true; + }); + if (!head && file_request_handler_) { file_request_handler_(req, res); } + return true; } } @@ -4280,22 +6274,23 @@ inline bool Server::handle_file_request(Request &req, Response &res, } inline socket_t -Server::create_server_socket(const char *host, int port, int socket_flags, +Server::create_server_socket(const std::string &host, int port, + int socket_flags, SocketOptions socket_options) const { return detail::create_socket( - host, port, socket_flags, tcp_nodelay_, socket_options, + host, std::string(), port, address_family_, socket_flags, tcp_nodelay_, + std::move(socket_options), [](socket_t sock, struct addrinfo &ai) -> bool { if (::bind(sock, ai.ai_addr, static_cast(ai.ai_addrlen))) { return false; } - if (::listen(sock, 5)) { // Listen through 5 channels - return false; - } + if (::listen(sock, CPPHTTPLIB_LISTEN_BACKLOG)) { return false; } return true; }); } -inline int Server::bind_internal(const char *host, int port, int socket_flags) { +inline int Server::bind_internal(const std::string &host, int port, + int socket_flags) { if (!is_valid()) { return -1; } svr_sock_ = create_server_socket(host, port, socket_flags, socket_options_); @@ -4323,6 +6318,7 @@ inline int Server::bind_internal(const char *host, int port, int socket_flags) { inline bool Server::listen_internal() { auto ret = true; is_running_ = true; + auto se = detail::scope_exit([&]() { is_running_ = false; }); { std::unique_ptr task_queue(new_task_queue()); @@ -4348,6 +6344,8 @@ inline bool Server::listen_internal() { // Try to accept new connections after a short sleep. 
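// [Editor's note] Sketch, not part of the upstream patch: how the mount-point
// machinery used by handle_file_request() above is driven from user code.
// Directory, mount point and header values are placeholders.
void example_static_files(httplib::Server &svr) {
  // Serve ./www under /static and attach a header to every file response.
  httplib::Headers extra = {{"Cache-Control", "max-age=60"}};
  svr.set_mount_point("/static", "./www", extra);

  // Map an extra extension to a MIME type; anything unknown falls back to
  // the default set here.
  svr.set_file_extension_and_mimetype_mapping("wasm", "application/wasm");
  svr.set_default_file_mimetype("application/octet-stream");

  // Post-process every successful file hit (e.g. for logging).
  svr.set_file_request_handler(
      [](const httplib::Request &req, httplib::Response &) {
        (void)req; // inspect req.path here if desired
      });
}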
std::this_thread::sleep_for(std::chrono::milliseconds(1)); continue; + } else if (errno == EINTR || errno == EAGAIN) { + continue; } if (svr_sock_ != INVALID_SOCKET) { detail::close_socket(svr_sock_); @@ -4358,23 +6356,57 @@ inline bool Server::listen_internal() { break; } -#if __cplusplus > 201703L - task_queue->enqueue([=, this]() { process_and_close_socket(sock); }); + { +#ifdef _WIN32 + auto timeout = static_cast(read_timeout_sec_ * 1000 + + read_timeout_usec_ / 1000); + setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, + reinterpret_cast(&timeout), sizeof(timeout)); #else - task_queue->enqueue([=]() { process_and_close_socket(sock); }); + timeval tv; + tv.tv_sec = static_cast(read_timeout_sec_); + tv.tv_usec = static_cast(read_timeout_usec_); + setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, + reinterpret_cast(&tv), sizeof(tv)); #endif + } + { + +#ifdef _WIN32 + auto timeout = static_cast(write_timeout_sec_ * 1000 + + write_timeout_usec_ / 1000); + setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, + reinterpret_cast(&timeout), sizeof(timeout)); +#else + timeval tv; + tv.tv_sec = static_cast(write_timeout_sec_); + tv.tv_usec = static_cast(write_timeout_usec_); + setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, + reinterpret_cast(&tv), sizeof(tv)); +#endif + } + + if (!task_queue->enqueue( + [this, sock]() { process_and_close_socket(sock); })) { + detail::shutdown_socket(sock); + detail::close_socket(sock); + } } task_queue->shutdown(); } - is_running_ = false; return ret; } inline bool Server::routing(Request &req, Response &res, Stream &strm) { + if (pre_routing_handler_ && + pre_routing_handler_(req, res) == HandlerResponse::Handled) { + return true; + } + // File handler - bool is_head_request = req.method == "HEAD"; + auto is_head_request = req.method == "HEAD"; if ((req.method == "GET" || is_head_request) && handle_file_request(req, res, is_head_request)) { return true; @@ -4385,32 +6417,37 @@ inline bool Server::routing(Request &req, Response &res, Stream &strm) { { ContentReader reader( [&](ContentReceiver receiver) { - return read_content_with_content_receiver(strm, req, res, receiver, - nullptr, nullptr); + return read_content_with_content_receiver( + strm, req, res, std::move(receiver), nullptr, nullptr); }, [&](MultipartContentHeader header, ContentReceiver receiver) { return read_content_with_content_receiver(strm, req, res, nullptr, - header, receiver); + std::move(header), + std::move(receiver)); }); if (req.method == "POST") { if (dispatch_request_for_content_reader( - req, res, reader, post_handlers_for_content_reader_)) { + req, res, std::move(reader), + post_handlers_for_content_reader_)) { return true; } } else if (req.method == "PUT") { if (dispatch_request_for_content_reader( - req, res, reader, put_handlers_for_content_reader_)) { + req, res, std::move(reader), + put_handlers_for_content_reader_)) { return true; } } else if (req.method == "PATCH") { if (dispatch_request_for_content_reader( - req, res, reader, patch_handlers_for_content_reader_)) { + req, res, std::move(reader), + patch_handlers_for_content_reader_)) { return true; } } else if (req.method == "DELETE") { if (dispatch_request_for_content_reader( - req, res, reader, delete_handlers_for_content_reader_)) { + req, res, std::move(reader), + delete_handlers_for_content_reader_)) { return true; } } @@ -4435,41 +6472,137 @@ inline bool Server::routing(Request &req, Response &res, Stream &strm) { return dispatch_request(req, res, patch_handlers_); } - res.status = 400; + res.status = StatusCode::BadRequest_400; return false; } inline 
bool Server::dispatch_request(Request &req, Response &res, - const Handlers &handlers) { + const Handlers &handlers) const { + for (const auto &x : handlers) { + const auto &matcher = x.first; + const auto &handler = x.second; - try { - for (const auto &x : handlers) { - const auto &pattern = x.first; - const auto &handler = x.second; - - if (std::regex_match(req.path, req.matches, pattern)) { - handler(req, res); - return true; - } + if (matcher->match(req)) { + handler(req, res); + return true; } - } catch (const std::exception &ex) { - res.status = 500; - res.set_header("EXCEPTION_WHAT", ex.what()); - } catch (...) { - res.status = 500; - res.set_header("EXCEPTION_WHAT", "UNKNOWN"); } return false; } +inline void Server::apply_ranges(const Request &req, Response &res, + std::string &content_type, + std::string &boundary) const { + if (req.ranges.size() > 1) { + auto it = res.headers.find("Content-Type"); + if (it != res.headers.end()) { + content_type = it->second; + res.headers.erase(it); + } + + boundary = detail::make_multipart_data_boundary(); + + res.set_header("Content-Type", + "multipart/byteranges; boundary=" + boundary); + } + + auto type = detail::encoding_type(req, res); + + if (res.body.empty()) { + if (res.content_length_ > 0) { + size_t length = 0; + if (req.ranges.empty()) { + length = res.content_length_; + } else if (req.ranges.size() == 1) { + auto offset_and_length = detail::get_range_offset_and_length( + req.ranges[0], res.content_length_); + + length = offset_and_length.second; + + auto content_range = detail::make_content_range_header_field( + offset_and_length, res.content_length_); + res.set_header("Content-Range", content_range); + } else { + length = detail::get_multipart_ranges_data_length( + req, boundary, content_type, res.content_length_); + } + res.set_header("Content-Length", std::to_string(length)); + } else { + if (res.content_provider_) { + if (res.is_chunked_content_provider_) { + res.set_header("Transfer-Encoding", "chunked"); + if (type == detail::EncodingType::Gzip) { + res.set_header("Content-Encoding", "gzip"); + } else if (type == detail::EncodingType::Brotli) { + res.set_header("Content-Encoding", "br"); + } + } + } + } + } else { + if (req.ranges.empty()) { + ; + } else if (req.ranges.size() == 1) { + auto offset_and_length = + detail::get_range_offset_and_length(req.ranges[0], res.body.size()); + auto offset = offset_and_length.first; + auto length = offset_and_length.second; + + auto content_range = detail::make_content_range_header_field( + offset_and_length, res.body.size()); + res.set_header("Content-Range", content_range); + + assert(offset + length <= res.body.size()); + res.body = res.body.substr(offset, length); + } else { + std::string data; + detail::make_multipart_ranges_data(req, res, boundary, content_type, + res.body.size(), data); + res.body.swap(data); + } + + if (type != detail::EncodingType::None) { + std::unique_ptr compressor; + std::string content_encoding; + + if (type == detail::EncodingType::Gzip) { +#ifdef CPPHTTPLIB_ZLIB_SUPPORT + compressor = detail::make_unique(); + content_encoding = "gzip"; +#endif + } else if (type == detail::EncodingType::Brotli) { +#ifdef CPPHTTPLIB_BROTLI_SUPPORT + compressor = detail::make_unique(); + content_encoding = "br"; +#endif + } + + if (compressor) { + std::string compressed; + if (compressor->compress(res.body.data(), res.body.size(), true, + [&](const char *data, size_t data_len) { + compressed.append(data, data_len); + return true; + })) { + res.body.swap(compressed); + 
res.set_header("Content-Encoding", content_encoding); + } + } + } + + auto length = std::to_string(res.body.size()); + res.set_header("Content-Length", length); + } +} + inline bool Server::dispatch_request_for_content_reader( Request &req, Response &res, ContentReader content_reader, - const HandlersForContentReader &handlers) { + const HandlersForContentReader &handlers) const { for (const auto &x : handlers) { - const auto &pattern = x.first; + const auto &matcher = x.first; const auto &handler = x.second; - if (std::regex_match(req.path, req.matches, pattern)) { + if (matcher->match(req)) { handler(req, res, content_reader); return true; } @@ -4489,22 +6622,37 @@ Server::process_request(Stream &strm, bool close_connection, if (!line_reader.getline()) { return false; } Request req; - Response res; + Response res; res.version = "HTTP/1.1"; + res.headers = default_headers_; + +#ifdef _WIN32 + // TODO: Increase FD_SETSIZE statically (libzmq), dynamically (MySQL). +#else +#ifndef CPPHTTPLIB_USE_POLL + // Socket file descriptor exceeded FD_SETSIZE... + if (strm.socket() >= FD_SETSIZE) { + Headers dummy; + detail::read_headers(strm, dummy); + res.status = StatusCode::InternalServerError_500; + return write_response(strm, close_connection, req, res); + } +#endif +#endif // Check if the request URI doesn't exceed the limit if (line_reader.size() > CPPHTTPLIB_REQUEST_URI_MAX_LENGTH) { Headers dummy; detail::read_headers(strm, dummy); - res.status = 414; + res.status = StatusCode::UriTooLong_414; return write_response(strm, close_connection, req, res); } // Request line and headers if (!parse_request_line(line_reader.ptr(), req) || !detail::read_headers(strm, req.headers)) { - res.status = 400; + res.status = StatusCode::BadRequest_400; return write_response(strm, close_connection, req, res); } @@ -4521,46 +6669,100 @@ Server::process_request(Stream &strm, bool close_connection, req.set_header("REMOTE_ADDR", req.remote_addr); req.set_header("REMOTE_PORT", std::to_string(req.remote_port)); + strm.get_local_ip_and_port(req.local_addr, req.local_port); + req.set_header("LOCAL_ADDR", req.local_addr); + req.set_header("LOCAL_PORT", std::to_string(req.local_port)); + if (req.has_header("Range")) { const auto &range_header_value = req.get_header_value("Range"); if (!detail::parse_range_header(range_header_value, req.ranges)) { - // TODO: error + res.status = StatusCode::RangeNotSatisfiable_416; + return write_response(strm, close_connection, req, res); } } if (setup_request) { setup_request(req); } if (req.get_header_value("Expect") == "100-continue") { - auto status = 100; + int status = StatusCode::Continue_100; if (expect_100_continue_handler_) { status = expect_100_continue_handler_(req, res); } switch (status) { - case 100: - case 417: + case StatusCode::Continue_100: + case StatusCode::ExpectationFailed_417: strm.write_format("HTTP/1.1 %d %s\r\n\r\n", status, - detail::status_message(status)); + status_message(status)); break; default: return write_response(strm, close_connection, req, res); } } - // Rounting - if (routing(req, res, strm)) { - if (res.status == -1) { res.status = req.ranges.empty() ? 
200 : 206; } - } else { - if (res.status == -1) { res.status = 404; } + // Routing + auto routed = false; +#ifdef CPPHTTPLIB_NO_EXCEPTIONS + routed = routing(req, res, strm); +#else + try { + routed = routing(req, res, strm); + } catch (std::exception &e) { + if (exception_handler_) { + auto ep = std::current_exception(); + exception_handler_(req, res, ep); + routed = true; + } else { + res.status = StatusCode::InternalServerError_500; + std::string val; + auto s = e.what(); + for (size_t i = 0; s[i]; i++) { + switch (s[i]) { + case '\r': val += "\\r"; break; + case '\n': val += "\\n"; break; + default: val += s[i]; break; + } + } + res.set_header("EXCEPTION_WHAT", val); + } + } catch (...) { + if (exception_handler_) { + auto ep = std::current_exception(); + exception_handler_(req, res, ep); + routed = true; + } else { + res.status = StatusCode::InternalServerError_500; + res.set_header("EXCEPTION_WHAT", "UNKNOWN"); + } } +#endif + if (routed) { + if (res.status == -1) { + res.status = req.ranges.empty() ? StatusCode::OK_200 + : StatusCode::PartialContent_206; + } - return write_response(strm, close_connection, req, res); + if (detail::range_error(req, res)) { + res.body.clear(); + res.content_length_ = 0; + res.content_provider_ = nullptr; + res.status = StatusCode::RangeNotSatisfiable_416; + return write_response(strm, close_connection, req, res); + } + + return write_response_with_content(strm, close_connection, req, res); + } else { + if (res.status == -1) { res.status = StatusCode::NotFound_404; } + + return write_response(strm, close_connection, req, res); + } } inline bool Server::is_valid() const { return true; } inline bool Server::process_and_close_socket(socket_t sock) { auto ret = detail::process_server_socket( - sock, keep_alive_max_count_, keep_alive_timeout_sec_, read_timeout_sec_, - read_timeout_usec_, write_timeout_sec_, write_timeout_usec_, + svr_sock_, sock, keep_alive_max_count_, keep_alive_timeout_sec_, + read_timeout_sec_, read_timeout_usec_, write_timeout_sec_, + write_timeout_usec_, [this](Stream &strm, bool close_connection, bool &connection_closed) { return process_request(strm, close_connection, connection_closed, nullptr); @@ -4582,15 +6784,17 @@ inline ClientImpl::ClientImpl(const std::string &host, int port, const std::string &client_cert_path, const std::string &client_key_path) : host_(host), port_(port), - host_and_port_(host_ + ":" + std::to_string(port_)), + host_and_port_(adjust_host_string(host) + ":" + std::to_string(port)), client_cert_path_(client_cert_path), client_key_path_(client_key_path) {} -inline ClientImpl::~ClientImpl() { stop_core(); } +inline ClientImpl::~ClientImpl() { + std::lock_guard guard(socket_mutex_); + shutdown_socket(socket_); + close_socket(socket_); +} inline bool ClientImpl::is_valid() const { return true; } -inline Error ClientImpl::get_last_error() const { return error_; } - inline void ClientImpl::copy_settings(const ClientImpl &rhs) { client_cert_path_ = rhs.client_cert_path_; client_key_path_ = rhs.client_key_path_; @@ -4608,6 +6812,8 @@ inline void ClientImpl::copy_settings(const ClientImpl &rhs) { #endif keep_alive_ = rhs.keep_alive_; follow_location_ = rhs.follow_location_; + url_encode_ = rhs.url_encode_; + address_family_ = rhs.address_family_; tcp_nodelay_ = rhs.tcp_nodelay_; socket_options_ = rhs.socket_options_; compress_ = rhs.compress_; @@ -4622,56 +6828,102 @@ inline void ClientImpl::copy_settings(const ClientImpl &rhs) { proxy_digest_auth_username_ = rhs.proxy_digest_auth_username_; 
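// [Editor's note] Sketch, not part of the upstream patch, of the error and
// exception hooks consulted by process_request() above. When no exception
// handler is installed, the what() text is surfaced through the
// EXCEPTION_WHAT response header instead.
void example_error_hooks(httplib::Server &svr) {
  svr.set_error_handler([](const httplib::Request &, httplib::Response &res) {
    res.set_content("status " + std::to_string(res.status), "text/plain");
    return httplib::Server::HandlerResponse::Handled;
  });

  svr.set_exception_handler([](const httplib::Request &,
                               httplib::Response &res,
                               std::exception_ptr ep) {
    try {
      std::rethrow_exception(ep);
    } catch (const std::exception &e) {
      res.set_content(e.what(), "text/plain");
    } catch (...) {
      res.set_content("unknown exception", "text/plain");
    }
    res.status = httplib::StatusCode::InternalServerError_500;
  });
}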
proxy_digest_auth_password_ = rhs.proxy_digest_auth_password_; #endif +#ifdef CPPHTTPLIB_OPENSSL_SUPPORT + ca_cert_file_path_ = rhs.ca_cert_file_path_; + ca_cert_dir_path_ = rhs.ca_cert_dir_path_; + ca_cert_store_ = rhs.ca_cert_store_; +#endif #ifdef CPPHTTPLIB_OPENSSL_SUPPORT server_certificate_verification_ = rhs.server_certificate_verification_; #endif logger_ = rhs.logger_; } -inline socket_t ClientImpl::create_client_socket() const { +inline socket_t ClientImpl::create_client_socket(Error &error) const { if (!proxy_host_.empty() && proxy_port_ != -1) { return detail::create_client_socket( - proxy_host_.c_str(), proxy_port_, tcp_nodelay_, socket_options_, - connection_timeout_sec_, connection_timeout_usec_, interface_, error_); + proxy_host_, std::string(), proxy_port_, address_family_, tcp_nodelay_, + socket_options_, connection_timeout_sec_, connection_timeout_usec_, + read_timeout_sec_, read_timeout_usec_, write_timeout_sec_, + write_timeout_usec_, interface_, error); } + + // Check is custom IP specified for host_ + std::string ip; + auto it = addr_map_.find(host_); + if (it != addr_map_.end()) { ip = it->second; } + return detail::create_client_socket( - host_.c_str(), port_, tcp_nodelay_, socket_options_, - connection_timeout_sec_, connection_timeout_usec_, interface_, error_); + host_, ip, port_, address_family_, tcp_nodelay_, socket_options_, + connection_timeout_sec_, connection_timeout_usec_, read_timeout_sec_, + read_timeout_usec_, write_timeout_sec_, write_timeout_usec_, interface_, + error); } -inline bool ClientImpl::create_and_connect_socket(Socket &socket) { - auto sock = create_client_socket(); +inline bool ClientImpl::create_and_connect_socket(Socket &socket, + Error &error) { + auto sock = create_client_socket(error); if (sock == INVALID_SOCKET) { return false; } socket.sock = sock; return true; } -inline void ClientImpl::close_socket(Socket &socket, - bool /*process_socket_ret*/) { - detail::close_socket(socket.sock); - socket_.sock = INVALID_SOCKET; -#ifdef CPPHTTPLIB_OPENSSL_SUPPORT - socket_.ssl = nullptr; -#endif +inline void ClientImpl::shutdown_ssl(Socket & /*socket*/, + bool /*shutdown_gracefully*/) { + // If there are any requests in flight from threads other than us, then it's + // a thread-unsafe race because individual ssl* objects are not thread-safe. + assert(socket_requests_in_flight_ == 0 || + socket_requests_are_from_thread_ == std::this_thread::get_id()); } -inline bool ClientImpl::read_response_line(Stream &strm, Response &res) { - std::array buf; +inline void ClientImpl::shutdown_socket(Socket &socket) const { + if (socket.sock == INVALID_SOCKET) { return; } + detail::shutdown_socket(socket.sock); +} + +inline void ClientImpl::close_socket(Socket &socket) { + // If there are requests in flight in another thread, usually closing + // the socket will be fine and they will simply receive an error when + // using the closed socket, but it is still a bug since rarely the OS + // may reassign the socket id to be used for a new socket, and then + // suddenly they will be operating on a live socket that is different + // than the one they intended! 
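// [Editor's note] Client-side sketch, not part of the upstream patch. With
// this change errors are reported through the Error value carried by Result
// rather than a per-client get_last_error(); host, port and path below are
// placeholders.
#include <cstdio>
void example_client() {
  httplib::Client cli("localhost", 8080);
  cli.set_connection_timeout(3, 0); // 3 seconds
  cli.set_keep_alive(true);

  if (auto res = cli.Get("/users/42")) {
    // operator bool on Result is true when a response was received
    std::printf("status=%d body=%s\n", res->status, res->body.c_str());
  } else {
    std::printf("request failed, error=%d\n", static_cast<int>(res.error()));
  }
}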
+ assert(socket_requests_in_flight_ == 0 || + socket_requests_are_from_thread_ == std::this_thread::get_id()); + + // It is also a bug if this happens while SSL is still active +#ifdef CPPHTTPLIB_OPENSSL_SUPPORT + assert(socket.ssl == nullptr); +#endif + if (socket.sock == INVALID_SOCKET) { return; } + detail::close_socket(socket.sock); + socket.sock = INVALID_SOCKET; +} + +inline bool ClientImpl::read_response_line(Stream &strm, const Request &req, + Response &res) const { + std::array buf{}; detail::stream_line_reader line_reader(strm, buf.data(), buf.size()); if (!line_reader.getline()) { return false; } - const static std::regex re("(HTTP/1\\.[01]) (\\d+) (.*?)\r\n"); +#ifdef CPPHTTPLIB_ALLOW_LF_AS_LINE_TERMINATOR + const static std::regex re("(HTTP/1\\.[01]) (\\d{3})(?: (.*?))?\r?\n"); +#else + const static std::regex re("(HTTP/1\\.[01]) (\\d{3})(?: (.*?))?\r\n"); +#endif std::cmatch m; - if (!std::regex_match(line_reader.ptr(), m, re)) { return false; } + if (!std::regex_match(line_reader.ptr(), m, re)) { + return req.method == "CONNECT"; + } res.version = std::string(m[1]); res.status = std::stoi(std::string(m[2])); res.reason = std::string(m[3]); // Ignore '100 Continue' - while (res.status == 100) { + while (res.status == StatusCode::Continue_100) { if (!line_reader.getline()) { return false; } // CRLF if (!line_reader.getline()) { return false; } // next response line @@ -4684,80 +6936,166 @@ inline bool ClientImpl::read_response_line(Stream &strm, Response &res) { return true; } -inline bool ClientImpl::send(const Request &req, Response &res) { +inline bool ClientImpl::send(Request &req, Response &res, Error &error) { std::lock_guard request_mutex_guard(request_mutex_); + auto ret = send_(req, res, error); + if (error == Error::SSLPeerCouldBeClosed_) { + assert(!ret); + ret = send_(req, res, error); + } + return ret; +} +inline bool ClientImpl::send_(Request &req, Response &res, Error &error) { { std::lock_guard guard(socket_mutex_); + // Set this to false immediately - if it ever gets set to true by the end of + // the request, we know another thread instructed us to close the socket. 
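// [Editor's note] Sketch, not part of the upstream patch, of the send()
// entry points shown in this hunk: the Result returned by
// send(const Request &) also keeps a copy of the request headers, so they
// can be inspected after the call. The header name is hypothetical.
void example_send(httplib::Client &cli) {
  httplib::Request req;
  req.method = "GET";
  req.path = "/status";
  req.set_header("X-Trace-Id", "abc123");

  auto res = cli.send(req);
  if (res && res.has_request_header("X-Trace-Id")) {
    auto sent = res.get_request_header_value("X-Trace-Id");
    (void)sent; // sent == "abc123"
  }
}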
+ socket_should_be_closed_when_request_is_done_ = false; + auto is_alive = false; if (socket_.is_open()) { - is_alive = detail::select_write(socket_.sock, 0, 0) > 0; - if (!is_alive) { close_socket(socket_, false); } + is_alive = detail::is_socket_alive(socket_.sock); + if (!is_alive) { + // Attempt to avoid sigpipe by shutting down nongracefully if it seems + // like the other side has already closed the connection Also, there + // cannot be any requests in flight from other threads since we locked + // request_mutex_, so safe to close everything immediately + const bool shutdown_gracefully = false; + shutdown_ssl(socket_, shutdown_gracefully); + shutdown_socket(socket_); + close_socket(socket_); + } } if (!is_alive) { - if (!create_and_connect_socket(socket_)) { return false; } + if (!create_and_connect_socket(socket_, error)) { return false; } #ifdef CPPHTTPLIB_OPENSSL_SUPPORT // TODO: refactoring if (is_ssl()) { auto &scli = static_cast(*this); if (!proxy_host_.empty() && proxy_port_ != -1) { - bool success = false; - if (!scli.connect_with_proxy(socket_, res, success)) { + auto success = false; + if (!scli.connect_with_proxy(socket_, res, success, error)) { return success; } } - if (!scli.initialize_ssl(socket_)) { return false; } + if (!scli.initialize_ssl(socket_, error)) { return false; } } #endif } + + // Mark the current socket as being in use so that it cannot be closed by + // anyone else while this request is ongoing, even though we will be + // releasing the mutex. + if (socket_requests_in_flight_ > 1) { + assert(socket_requests_are_from_thread_ == std::this_thread::get_id()); + } + socket_requests_in_flight_ += 1; + socket_requests_are_from_thread_ = std::this_thread::get_id(); } + for (const auto &header : default_headers_) { + if (req.headers.find(header.first) == req.headers.end()) { + req.headers.insert(header); + } + } + + auto ret = false; auto close_connection = !keep_alive_; - auto ret = process_socket(socket_, [&](Stream &strm) { - return handle_request(strm, req, res, close_connection); + auto se = detail::scope_exit([&]() { + // Briefly lock mutex in order to mark that a request is no longer ongoing + std::lock_guard guard(socket_mutex_); + socket_requests_in_flight_ -= 1; + if (socket_requests_in_flight_ <= 0) { + assert(socket_requests_in_flight_ == 0); + socket_requests_are_from_thread_ = std::thread::id(); + } + + if (socket_should_be_closed_when_request_is_done_ || close_connection || + !ret) { + shutdown_ssl(socket_, true); + shutdown_socket(socket_); + close_socket(socket_); + } }); - if (close_connection || !ret) { stop_core(); } + ret = process_socket(socket_, [&](Stream &strm) { + return handle_request(strm, req, res, close_connection, error); + }); if (!ret) { - if (error_ == Error::Success) { error_ = Error::Unknown; } + if (error == Error::Success) { error = Error::Unknown; } } return ret; } -inline bool ClientImpl::handle_request(Stream &strm, const Request &req, - Response &res, bool close_connection) { +inline Result ClientImpl::send(const Request &req) { + auto req2 = req; + return send_(std::move(req2)); +} + +inline Result ClientImpl::send_(Request &&req) { + auto res = detail::make_unique(); + auto error = Error::Success; + auto ret = send(req, *res, error); + return Result{ret ? 
std::move(res) : nullptr, error, std::move(req.headers)}; +} + +inline bool ClientImpl::handle_request(Stream &strm, Request &req, + Response &res, bool close_connection, + Error &error) { if (req.path.empty()) { - error_ = Error::Connection; + error = Error::Connection; return false; } + auto req_save = req; + bool ret; if (!is_ssl() && !proxy_host_.empty() && proxy_port_ != -1) { auto req2 = req; req2.path = "http://" + host_and_port_ + req.path; - ret = process_request(strm, req2, res, close_connection); + ret = process_request(strm, req2, res, close_connection, error); + req = req2; + req.path = req_save.path; } else { - ret = process_request(strm, req, res, close_connection); + ret = process_request(strm, req, res, close_connection, error); } if (!ret) { return false; } + if (res.get_header_value("Connection") == "close" || + (res.version == "HTTP/1.0" && res.reason != "Connection established")) { + // TODO this requires a not-entirely-obvious chain of calls to be correct + // for this to be safe. + + // This is safe to call because handle_request is only called by send_ + // which locks the request mutex during the process. It would be a bug + // to call it from a different thread since it's a thread-safety issue + // to do these things to the socket if another thread is using the socket. + std::lock_guard guard(socket_mutex_); + shutdown_ssl(socket_, true); + shutdown_socket(socket_); + close_socket(socket_); + } + if (300 < res.status && res.status < 400 && follow_location_) { - ret = redirect(req, res); + req = req_save; + ret = redirect(req, res, error); } #ifdef CPPHTTPLIB_OPENSSL_SUPPORT - if ((res.status == 401 || res.status == 407) && + if ((res.status == StatusCode::Unauthorized_401 || + res.status == StatusCode::ProxyAuthenticationRequired_407) && req.authorization_count_ < 5) { - auto is_proxy = res.status == 407; + auto is_proxy = res.status == StatusCode::ProxyAuthenticationRequired_407; const auto &username = is_proxy ? proxy_digest_auth_username_ : digest_auth_username_; const auto &password = @@ -4768,15 +7106,15 @@ inline bool ClientImpl::handle_request(Stream &strm, const Request &req, if (detail::parse_www_authenticate(res, auth, is_proxy)) { Request new_req = req; new_req.authorization_count_ += 1; - auto key = is_proxy ? "Proxy-Authorization" : "Authorization"; - new_req.headers.erase(key); + new_req.headers.erase(is_proxy ? 
"Proxy-Authorization" + : "Authorization"); new_req.headers.insert(detail::make_digest_authentication_header( req, auth, new_req.authorization_count_, detail::random_string(10), username, password, is_proxy)); Response new_res; - ret = send(new_req, new_res); + ret = send(new_req, new_res, error); if (ret) { res = new_res; } } } @@ -4786,17 +7124,17 @@ inline bool ClientImpl::handle_request(Stream &strm, const Request &req, return ret; } -inline bool ClientImpl::redirect(const Request &req, Response &res) { - if (req.redirect_count == 0) { - error_ = Error::ExceedRedirectCount; +inline bool ClientImpl::redirect(Request &req, Response &res, Error &error) { + if (req.redirect_count_ == 0) { + error = Error::ExceedRedirectCount; return false; } - auto location = detail::decode_url(res.get_header_value("location"), true); + auto location = res.get_header_value("location"); if (location.empty()) { return false; } const static std::regex re( - R"(^(?:(https?):)?(?://([^:/?#]*)(?::(\d+))?)?([^?#]*(?:\?[^#]*)?)(?:#.*)?)"); + R"((?:(https?):)?(?://(?:\[([\d:]+)\]|([^:/?#]+))(?::(\d+))?)?([^?#]*)(\?[^#]*)?(?:#.*)?)"); std::smatch m; if (!std::regex_match(location, m, re)) { return false; } @@ -4805,8 +7143,10 @@ inline bool ClientImpl::redirect(const Request &req, Response &res) { auto next_scheme = m[1].str(); auto next_host = m[2].str(); - auto port_str = m[3].str(); - auto next_path = m[4].str(); + if (next_host.empty()) { next_host = m[3].str(); } + auto port_str = m[4].str(); + auto next_path = m[5].str(); + auto next_query = m[6].str(); auto next_port = port_; if (!port_str.empty()) { @@ -4819,179 +7159,200 @@ inline bool ClientImpl::redirect(const Request &req, Response &res) { if (next_host.empty()) { next_host = host_; } if (next_path.empty()) { next_path = "/"; } + auto path = detail::decode_url(next_path, true) + next_query; + if (next_scheme == scheme && next_host == host_ && next_port == port_) { - return detail::redirect(*this, req, res, next_path); + return detail::redirect(*this, req, res, path, location, error); } else { if (next_scheme == "https") { #ifdef CPPHTTPLIB_OPENSSL_SUPPORT - SSLClient cli(next_host.c_str(), next_port); + SSLClient cli(next_host, next_port); cli.copy_settings(*this); - auto ret = detail::redirect(cli, req, res, next_path); - if (!ret) { error_ = cli.get_last_error(); } - return ret; + if (ca_cert_store_) { cli.set_ca_cert_store(ca_cert_store_); } + return detail::redirect(cli, req, res, path, location, error); #else return false; #endif } else { - ClientImpl cli(next_host.c_str(), next_port); + ClientImpl cli(next_host, next_port); cli.copy_settings(*this); - auto ret = detail::redirect(cli, req, res, next_path); - if (!ret) { error_ = cli.get_last_error(); } - return ret; + return detail::redirect(cli, req, res, path, location, error); } } } -inline bool ClientImpl::write_request(Stream &strm, const Request &req, - bool close_connection) { - detail::BufferStream bstrm; +inline bool ClientImpl::write_content_with_provider(Stream &strm, + const Request &req, + Error &error) const { + auto is_shutting_down = []() { return false; }; - // Request line - const auto &path = detail::encode_url(req.path); + if (req.is_chunked_content_provider_) { + // TODO: Brotli support + std::unique_ptr compressor; +#ifdef CPPHTTPLIB_ZLIB_SUPPORT + if (compress_) { + compressor = detail::make_unique(); + } else +#endif + { + compressor = detail::make_unique(); + } - bstrm.write_format("%s %s HTTP/1.1\r\n", req.method.c_str(), path.c_str()); + return 
detail::write_content_chunked(strm, req.content_provider_, + is_shutting_down, *compressor, error); + } else { + return detail::write_content(strm, req.content_provider_, 0, + req.content_length_, is_shutting_down, error); + } +} - // Additonal headers - Headers headers; - if (close_connection) { headers.emplace("Connection", "close"); } +inline bool ClientImpl::write_request(Stream &strm, Request &req, + bool close_connection, Error &error) { + // Prepare additional headers + if (close_connection) { + if (!req.has_header("Connection")) { + req.set_header("Connection", "close"); + } + } if (!req.has_header("Host")) { if (is_ssl()) { if (port_ == 443) { - headers.emplace("Host", host_); + req.set_header("Host", host_); } else { - headers.emplace("Host", host_and_port_); + req.set_header("Host", host_and_port_); } } else { if (port_ == 80) { - headers.emplace("Host", host_); + req.set_header("Host", host_); } else { - headers.emplace("Host", host_and_port_); + req.set_header("Host", host_and_port_); } } } - if (!req.has_header("Accept")) { headers.emplace("Accept", "*/*"); } + if (!req.has_header("Accept")) { req.set_header("Accept", "*/*"); } +#ifndef CPPHTTPLIB_NO_DEFAULT_USER_AGENT if (!req.has_header("User-Agent")) { - headers.emplace("User-Agent", "cpp-httplib/0.7"); + auto agent = std::string("cpp-httplib/") + CPPHTTPLIB_VERSION; + req.set_header("User-Agent", agent); } +#endif if (req.body.empty()) { - if (req.content_provider) { - auto length = std::to_string(req.content_length); - headers.emplace("Content-Length", length); + if (req.content_provider_) { + if (!req.is_chunked_content_provider_) { + if (!req.has_header("Content-Length")) { + auto length = std::to_string(req.content_length_); + req.set_header("Content-Length", length); + } + } } else { - headers.emplace("Content-Length", "0"); + if (req.method == "POST" || req.method == "PUT" || + req.method == "PATCH") { + req.set_header("Content-Length", "0"); + } } } else { if (!req.has_header("Content-Type")) { - headers.emplace("Content-Type", "text/plain"); + req.set_header("Content-Type", "text/plain"); } if (!req.has_header("Content-Length")) { auto length = std::to_string(req.body.size()); - headers.emplace("Content-Length", length); + req.set_header("Content-Length", length); } } - if (!basic_auth_password_.empty()) { - headers.insert(make_basic_authentication_header( - basic_auth_username_, basic_auth_password_, false)); + if (!basic_auth_password_.empty() || !basic_auth_username_.empty()) { + if (!req.has_header("Authorization")) { + req.headers.insert(make_basic_authentication_header( + basic_auth_username_, basic_auth_password_, false)); + } } if (!proxy_basic_auth_username_.empty() && !proxy_basic_auth_password_.empty()) { - headers.insert(make_basic_authentication_header( - proxy_basic_auth_username_, proxy_basic_auth_password_, true)); + if (!req.has_header("Proxy-Authorization")) { + req.headers.insert(make_basic_authentication_header( + proxy_basic_auth_username_, proxy_basic_auth_password_, true)); + } } if (!bearer_token_auth_token_.empty()) { - headers.insert(make_bearer_token_authentication_header( - bearer_token_auth_token_, false)); + if (!req.has_header("Authorization")) { + req.headers.insert(make_bearer_token_authentication_header( + bearer_token_auth_token_, false)); + } } if (!proxy_bearer_token_auth_token_.empty()) { - headers.insert(make_bearer_token_authentication_header( - proxy_bearer_token_auth_token_, true)); + if (!req.has_header("Proxy-Authorization")) { + 
req.headers.insert(make_bearer_token_authentication_header( + proxy_bearer_token_auth_token_, true)); + } } - detail::write_headers(bstrm, req, headers); + // Request line and headers + { + detail::BufferStream bstrm; - // Flush buffer - auto &data = bstrm.get_buffer(); - if (!detail::write_data(strm, data.data(), data.size())) { - error_ = Error::Write; - return false; + const auto &path = url_encode_ ? detail::encode_url(req.path) : req.path; + bstrm.write_format("%s %s HTTP/1.1\r\n", req.method.c_str(), path.c_str()); + + header_writer_(bstrm, req.headers); + + // Flush buffer + auto &data = bstrm.get_buffer(); + if (!detail::write_data(strm, data.data(), data.size())) { + error = Error::Write; + return false; + } } // Body if (req.body.empty()) { - if (req.content_provider) { - size_t offset = 0; - size_t end_offset = req.content_length; + return write_content_with_provider(strm, req, error); + } - bool ok = true; - - DataSink data_sink; - data_sink.write = [&](const char *d, size_t l) { - if (ok) { - if (detail::write_data(strm, d, l)) { - offset += l; - } else { - ok = false; - } - } - }; - data_sink.is_writable = [&](void) { return ok && strm.is_writable(); }; - - while (offset < end_offset) { - if (!req.content_provider(offset, end_offset - offset, data_sink)) { - error_ = Error::Canceled; - return false; - } - if (!ok) { - error_ = Error::Write; - return false; - } - } - } - } else { - return detail::write_data(strm, req.body.data(), req.body.size()); + if (!detail::write_data(strm, req.body.data(), req.body.size())) { + error = Error::Write; + return false; } return true; } -inline std::shared_ptr ClientImpl::send_with_content_provider( - const char *method, const char *path, const Headers &headers, - const std::string &body, size_t content_length, - ContentProvider content_provider, const char *content_type) { - - Request req; - req.method = method; - req.headers = default_headers_; - req.headers.insert(headers.begin(), headers.end()); - req.path = path; - - if (content_type) { req.headers.emplace("Content-Type", content_type); } +inline std::unique_ptr ClientImpl::send_with_content_provider( + Request &req, const char *body, size_t content_length, + ContentProvider content_provider, + ContentProviderWithoutLength content_provider_without_length, + const std::string &content_type, Error &error) { + if (!content_type.empty()) { req.set_header("Content-Type", content_type); } #ifdef CPPHTTPLIB_ZLIB_SUPPORT - if (compress_) { + if (compress_) { req.set_header("Content-Encoding", "gzip"); } +#endif + +#ifdef CPPHTTPLIB_ZLIB_SUPPORT + if (compress_ && !content_provider_without_length) { + // TODO: Brotli support detail::gzip_compressor compressor; if (content_provider) { auto ok = true; size_t offset = 0; - DataSink data_sink; - data_sink.write = [&](const char *data, size_t data_len) { + + data_sink.write = [&](const char *data, size_t data_len) -> bool { if (ok) { auto last = offset + data_len == content_length; auto ret = compressor.compress( - data, data_len, last, [&](const char *data, size_t data_len) { - req.body.append(data, data_len); + data, data_len, last, + [&](const char *compressed_data, size_t compressed_data_len) { + req.body.append(compressed_data, compressed_data_len); return true; }); @@ -5001,429 +7362,712 @@ inline std::shared_ptr ClientImpl::send_with_content_provider( ok = false; } } + return ok; }; - data_sink.is_writable = [&](void) { return ok && true; }; while (ok && offset < content_length) { if (!content_provider(offset, content_length - offset, 
data_sink)) { - error_ = Error::Canceled; + error = Error::Canceled; return nullptr; } } } else { - if (!compressor.compress(body.data(), body.size(), true, + if (!compressor.compress(body, content_length, true, [&](const char *data, size_t data_len) { req.body.append(data, data_len); return true; })) { + error = Error::Compression; return nullptr; } } - - req.headers.emplace("Content-Encoding", "gzip"); } else #endif { if (content_provider) { - req.content_length = content_length; - req.content_provider = content_provider; + req.content_length_ = content_length; + req.content_provider_ = std::move(content_provider); + req.is_chunked_content_provider_ = false; + } else if (content_provider_without_length) { + req.content_length_ = 0; + req.content_provider_ = detail::ContentProviderAdapter( + std::move(content_provider_without_length)); + req.is_chunked_content_provider_ = true; + req.set_header("Transfer-Encoding", "chunked"); } else { - req.body = body; + req.body.assign(body, content_length); } } - auto res = std::make_shared(); - - return send(req, *res) ? res : nullptr; + auto res = detail::make_unique(); + return send(req, *res, error) ? std::move(res) : nullptr; } -inline bool ClientImpl::process_request(Stream &strm, const Request &req, - Response &res, bool close_connection) { +inline Result ClientImpl::send_with_content_provider( + const std::string &method, const std::string &path, const Headers &headers, + const char *body, size_t content_length, ContentProvider content_provider, + ContentProviderWithoutLength content_provider_without_length, + const std::string &content_type) { + Request req; + req.method = method; + req.headers = headers; + req.path = path; + + auto error = Error::Success; + + auto res = send_with_content_provider( + req, body, content_length, std::move(content_provider), + std::move(content_provider_without_length), content_type, error); + + return Result{std::move(res), error, std::move(req.headers)}; +} + +inline std::string +ClientImpl::adjust_host_string(const std::string &host) const { + if (host.find(':') != std::string::npos) { return "[" + host + "]"; } + return host; +} + +inline bool ClientImpl::process_request(Stream &strm, Request &req, + Response &res, bool close_connection, + Error &error) { // Send request - if (!write_request(strm, req, close_connection)) { return false; } + if (!write_request(strm, req, close_connection, error)) { return false; } + +#ifdef CPPHTTPLIB_OPENSSL_SUPPORT + if (is_ssl()) { + auto is_proxy_enabled = !proxy_host_.empty() && proxy_port_ != -1; + if (!is_proxy_enabled) { + char buf[1]; + if (SSL_peek(socket_.ssl, buf, 1) == 0 && + SSL_get_error(socket_.ssl, 0) == SSL_ERROR_ZERO_RETURN) { + error = Error::SSLPeerCouldBeClosed_; + return false; + } + } + } +#endif // Receive response and headers - if (!read_response_line(strm, res) || + if (!read_response_line(strm, req, res) || !detail::read_headers(strm, res.headers)) { - error_ = Error::Read; + error = Error::Read; return false; } - if (req.response_handler) { - if (!req.response_handler(res)) { - error_ = Error::Canceled; - return false; - } - } - // Body - if (req.method != "HEAD" && req.method != "CONNECT") { + if ((res.status != StatusCode::NoContent_204) && req.method != "HEAD" && + req.method != "CONNECT") { + auto redirect = 300 < res.status && res.status < 400 && follow_location_; + + if (req.response_handler && !redirect) { + if (!req.response_handler(res)) { + error = Error::Canceled; + return false; + } + } + auto out = req.content_receiver - ? 
static_cast([&](const char *buf, size_t n) { - auto ret = req.content_receiver(buf, n); - if (!ret) { error_ = Error::Canceled; } - return ret; - }) - : static_cast([&](const char *buf, size_t n) { - if (res.body.size() + n > res.body.max_size()) { return false; } - res.body.append(buf, n); - return true; - }); + ? static_cast( + [&](const char *buf, size_t n, uint64_t off, uint64_t len) { + if (redirect) { return true; } + auto ret = req.content_receiver(buf, n, off, len); + if (!ret) { error = Error::Canceled; } + return ret; + }) + : static_cast( + [&](const char *buf, size_t n, uint64_t /*off*/, + uint64_t /*len*/) { + if (res.body.size() + n > res.body.max_size()) { + return false; + } + res.body.append(buf, n); + return true; + }); auto progress = [&](uint64_t current, uint64_t total) { - if (!req.progress) { return true; } + if (!req.progress || redirect) { return true; } auto ret = req.progress(current, total); - if (!ret) { error_ = Error::Canceled; } + if (!ret) { error = Error::Canceled; } return ret; }; int dummy_status; if (!detail::read_content(strm, res, (std::numeric_limits::max)(), - dummy_status, progress, out, decompress_)) { - if (error_ != Error::Canceled) { error_ = Error::Read; } + dummy_status, std::move(progress), std::move(out), + decompress_)) { + if (error != Error::Canceled) { error = Error::Read; } return false; } } - if (res.get_header_value("Connection") == "close" || - (res.version == "HTTP/1.0" && res.reason != "Connection established")) { - stop_core(); - } - // Log if (logger_) { logger_(req, res); } return true; } +inline ContentProviderWithoutLength ClientImpl::get_multipart_content_provider( + const std::string &boundary, const MultipartFormDataItems &items, + const MultipartFormDataProviderItems &provider_items) const { + size_t cur_item = 0; + size_t cur_start = 0; + // cur_item and cur_start are copied to within the std::function and maintain + // state between successive calls + return [&, cur_item, cur_start](size_t offset, + DataSink &sink) mutable -> bool { + if (!offset && !items.empty()) { + sink.os << detail::serialize_multipart_formdata(items, boundary, false); + return true; + } else if (cur_item < provider_items.size()) { + if (!cur_start) { + const auto &begin = detail::serialize_multipart_formdata_item_begin( + provider_items[cur_item], boundary); + offset += begin.size(); + cur_start = offset; + sink.os << begin; + } + + DataSink cur_sink; + auto has_data = true; + cur_sink.write = sink.write; + cur_sink.done = [&]() { has_data = false; }; + + if (!provider_items[cur_item].provider(offset - cur_start, cur_sink)) { + return false; + } + + if (!has_data) { + sink.os << detail::serialize_multipart_formdata_item_end(); + cur_item++; + cur_start = 0; + } + return true; + } else { + sink.os << detail::serialize_multipart_formdata_finish(boundary); + sink.done(); + return true; + } + }; +} + inline bool -ClientImpl::process_socket(Socket &socket, +ClientImpl::process_socket(const Socket &socket, std::function callback) { - return detail::process_client_socket(socket.sock, read_timeout_sec_, - read_timeout_usec_, write_timeout_sec_, - write_timeout_usec_, callback); + return detail::process_client_socket( + socket.sock, read_timeout_sec_, read_timeout_usec_, write_timeout_sec_, + write_timeout_usec_, std::move(callback)); } inline bool ClientImpl::is_ssl() const { return false; } -inline Result ClientImpl::Get(const char *path) { +inline Result ClientImpl::Get(const std::string &path) { return Get(path, Headers(), Progress()); } -inline Result 
ClientImpl::Get(const char *path, Progress progress) { +inline Result ClientImpl::Get(const std::string &path, Progress progress) { return Get(path, Headers(), std::move(progress)); } -inline Result ClientImpl::Get(const char *path, const Headers &headers) { +inline Result ClientImpl::Get(const std::string &path, const Headers &headers) { return Get(path, headers, Progress()); } -inline Result ClientImpl::Get(const char *path, const Headers &headers, +inline Result ClientImpl::Get(const std::string &path, const Headers &headers, Progress progress) { Request req; req.method = "GET"; req.path = path; - req.headers = default_headers_; - req.headers.insert(headers.begin(), headers.end()); + req.headers = headers; req.progress = std::move(progress); - auto res = std::make_shared(); - auto ret = send(req, *res); - return Result{ret ? res : nullptr, get_last_error()}; + return send_(std::move(req)); } -inline Result ClientImpl::Get(const char *path, +inline Result ClientImpl::Get(const std::string &path, ContentReceiver content_receiver) { return Get(path, Headers(), nullptr, std::move(content_receiver), nullptr); } -inline Result ClientImpl::Get(const char *path, +inline Result ClientImpl::Get(const std::string &path, ContentReceiver content_receiver, Progress progress) { return Get(path, Headers(), nullptr, std::move(content_receiver), std::move(progress)); } -inline Result ClientImpl::Get(const char *path, const Headers &headers, +inline Result ClientImpl::Get(const std::string &path, const Headers &headers, ContentReceiver content_receiver) { return Get(path, headers, nullptr, std::move(content_receiver), nullptr); } -inline Result ClientImpl::Get(const char *path, const Headers &headers, +inline Result ClientImpl::Get(const std::string &path, const Headers &headers, ContentReceiver content_receiver, Progress progress) { return Get(path, headers, nullptr, std::move(content_receiver), std::move(progress)); } -inline Result ClientImpl::Get(const char *path, +inline Result ClientImpl::Get(const std::string &path, ResponseHandler response_handler, ContentReceiver content_receiver) { - return Get(path, Headers(), std::move(response_handler), content_receiver, - nullptr); + return Get(path, Headers(), std::move(response_handler), + std::move(content_receiver), nullptr); } -inline Result ClientImpl::Get(const char *path, const Headers &headers, +inline Result ClientImpl::Get(const std::string &path, const Headers &headers, ResponseHandler response_handler, ContentReceiver content_receiver) { - return Get(path, headers, std::move(response_handler), content_receiver, - nullptr); + return Get(path, headers, std::move(response_handler), + std::move(content_receiver), nullptr); } -inline Result ClientImpl::Get(const char *path, +inline Result ClientImpl::Get(const std::string &path, ResponseHandler response_handler, ContentReceiver content_receiver, Progress progress) { - return Get(path, Headers(), std::move(response_handler), content_receiver, - progress); + return Get(path, Headers(), std::move(response_handler), + std::move(content_receiver), std::move(progress)); } -inline Result ClientImpl::Get(const char *path, const Headers &headers, +inline Result ClientImpl::Get(const std::string &path, const Headers &headers, ResponseHandler response_handler, ContentReceiver content_receiver, Progress progress) { Request req; req.method = "GET"; req.path = path; - req.headers = default_headers_; - req.headers.insert(headers.begin(), headers.end()); + req.headers = headers; req.response_handler = 
std::move(response_handler); - req.content_receiver = std::move(content_receiver); + req.content_receiver = + [content_receiver](const char *data, size_t data_length, + uint64_t /*offset*/, uint64_t /*total_length*/) { + return content_receiver(data, data_length); + }; req.progress = std::move(progress); - auto res = std::make_shared(); - auto ret = send(req, *res); - return Result{ret ? res : nullptr, get_last_error()}; + return send_(std::move(req)); } -inline Result ClientImpl::Head(const char *path) { +inline Result ClientImpl::Get(const std::string &path, const Params ¶ms, + const Headers &headers, Progress progress) { + if (params.empty()) { return Get(path, headers); } + + std::string path_with_query = append_query_params(path, params); + return Get(path_with_query, headers, std::move(progress)); +} + +inline Result ClientImpl::Get(const std::string &path, const Params ¶ms, + const Headers &headers, + ContentReceiver content_receiver, + Progress progress) { + return Get(path, params, headers, nullptr, std::move(content_receiver), + std::move(progress)); +} + +inline Result ClientImpl::Get(const std::string &path, const Params ¶ms, + const Headers &headers, + ResponseHandler response_handler, + ContentReceiver content_receiver, + Progress progress) { + if (params.empty()) { + return Get(path, headers, std::move(response_handler), + std::move(content_receiver), std::move(progress)); + } + + std::string path_with_query = append_query_params(path, params); + return Get(path_with_query, headers, std::move(response_handler), + std::move(content_receiver), std::move(progress)); +} + +inline Result ClientImpl::Head(const std::string &path) { return Head(path, Headers()); } -inline Result ClientImpl::Head(const char *path, const Headers &headers) { +inline Result ClientImpl::Head(const std::string &path, + const Headers &headers) { Request req; req.method = "HEAD"; - req.headers = default_headers_; - req.headers.insert(headers.begin(), headers.end()); + req.headers = headers; req.path = path; - auto res = std::make_shared(); - auto ret = send(req, *res); - return Result{ret ? 
res : nullptr, get_last_error()}; + return send_(std::move(req)); } -inline Result ClientImpl::Post(const char *path) { - return Post(path, std::string(), nullptr); +inline Result ClientImpl::Post(const std::string &path) { + return Post(path, std::string(), std::string()); } -inline Result ClientImpl::Post(const char *path, const std::string &body, - const char *content_type) { +inline Result ClientImpl::Post(const std::string &path, + const Headers &headers) { + return Post(path, headers, nullptr, 0, std::string()); +} + +inline Result ClientImpl::Post(const std::string &path, const char *body, + size_t content_length, + const std::string &content_type) { + return Post(path, Headers(), body, content_length, content_type); +} + +inline Result ClientImpl::Post(const std::string &path, const Headers &headers, + const char *body, size_t content_length, + const std::string &content_type) { + return send_with_content_provider("POST", path, headers, body, content_length, + nullptr, nullptr, content_type); +} + +inline Result ClientImpl::Post(const std::string &path, const std::string &body, + const std::string &content_type) { return Post(path, Headers(), body, content_type); } -inline Result ClientImpl::Post(const char *path, const Headers &headers, +inline Result ClientImpl::Post(const std::string &path, const Headers &headers, const std::string &body, - const char *content_type) { - auto ret = send_with_content_provider("POST", path, headers, body, 0, nullptr, - content_type); - return Result{ret, get_last_error()}; + const std::string &content_type) { + return send_with_content_provider("POST", path, headers, body.data(), + body.size(), nullptr, nullptr, + content_type); } -inline Result ClientImpl::Post(const char *path, const Params ¶ms) { +inline Result ClientImpl::Post(const std::string &path, const Params ¶ms) { return Post(path, Headers(), params); } -inline Result ClientImpl::Post(const char *path, size_t content_length, +inline Result ClientImpl::Post(const std::string &path, size_t content_length, ContentProvider content_provider, - const char *content_type) { - return Post(path, Headers(), content_length, content_provider, content_type); + const std::string &content_type) { + return Post(path, Headers(), content_length, std::move(content_provider), + content_type); } -inline Result ClientImpl::Post(const char *path, const Headers &headers, +inline Result ClientImpl::Post(const std::string &path, + ContentProviderWithoutLength content_provider, + const std::string &content_type) { + return Post(path, Headers(), std::move(content_provider), content_type); +} + +inline Result ClientImpl::Post(const std::string &path, const Headers &headers, size_t content_length, ContentProvider content_provider, - const char *content_type) { - auto ret = send_with_content_provider("POST", path, headers, std::string(), - content_length, content_provider, - content_type); - return Result{ret, get_last_error()}; + const std::string &content_type) { + return send_with_content_provider("POST", path, headers, nullptr, + content_length, std::move(content_provider), + nullptr, content_type); } -inline Result ClientImpl::Post(const char *path, const Headers &headers, +inline Result ClientImpl::Post(const std::string &path, const Headers &headers, + ContentProviderWithoutLength content_provider, + const std::string &content_type) { + return send_with_content_provider("POST", path, headers, nullptr, 0, nullptr, + std::move(content_provider), content_type); +} + +inline Result ClientImpl::Post(const std::string 
&path, const Headers &headers, const Params ¶ms) { auto query = detail::params_to_query_str(params); return Post(path, headers, query, "application/x-www-form-urlencoded"); } -inline Result ClientImpl::Post(const char *path, +inline Result ClientImpl::Post(const std::string &path, const MultipartFormDataItems &items) { return Post(path, Headers(), items); } -inline Result ClientImpl::Post(const char *path, const Headers &headers, +inline Result ClientImpl::Post(const std::string &path, const Headers &headers, const MultipartFormDataItems &items) { - auto boundary = detail::make_multipart_data_boundary(); + const auto &boundary = detail::make_multipart_data_boundary(); + const auto &content_type = + detail::serialize_multipart_formdata_get_content_type(boundary); + const auto &body = detail::serialize_multipart_formdata(items, boundary); + return Post(path, headers, body, content_type); +} - std::string body; - - for (const auto &item : items) { - body += "--" + boundary + "\r\n"; - body += "Content-Disposition: form-data; name=\"" + item.name + "\""; - if (!item.filename.empty()) { - body += "; filename=\"" + item.filename + "\""; - } - body += "\r\n"; - if (!item.content_type.empty()) { - body += "Content-Type: " + item.content_type + "\r\n"; - } - body += "\r\n"; - body += item.content + "\r\n"; +inline Result ClientImpl::Post(const std::string &path, const Headers &headers, + const MultipartFormDataItems &items, + const std::string &boundary) { + if (!detail::is_multipart_boundary_chars_valid(boundary)) { + return Result{nullptr, Error::UnsupportedMultipartBoundaryChars}; } - body += "--" + boundary + "--\r\n"; - - std::string content_type = "multipart/form-data; boundary=" + boundary; - return Post(path, headers, body, content_type.c_str()); + const auto &content_type = + detail::serialize_multipart_formdata_get_content_type(boundary); + const auto &body = detail::serialize_multipart_formdata(items, boundary); + return Post(path, headers, body, content_type); } -inline Result ClientImpl::Put(const char *path) { - return Put(path, std::string(), nullptr); +inline Result +ClientImpl::Post(const std::string &path, const Headers &headers, + const MultipartFormDataItems &items, + const MultipartFormDataProviderItems &provider_items) { + const auto &boundary = detail::make_multipart_data_boundary(); + const auto &content_type = + detail::serialize_multipart_formdata_get_content_type(boundary); + return send_with_content_provider( + "POST", path, headers, nullptr, 0, nullptr, + get_multipart_content_provider(boundary, items, provider_items), + content_type); } -inline Result ClientImpl::Put(const char *path, const std::string &body, - const char *content_type) { +inline Result ClientImpl::Put(const std::string &path) { + return Put(path, std::string(), std::string()); +} + +inline Result ClientImpl::Put(const std::string &path, const char *body, + size_t content_length, + const std::string &content_type) { + return Put(path, Headers(), body, content_length, content_type); +} + +inline Result ClientImpl::Put(const std::string &path, const Headers &headers, + const char *body, size_t content_length, + const std::string &content_type) { + return send_with_content_provider("PUT", path, headers, body, content_length, + nullptr, nullptr, content_type); +} + +inline Result ClientImpl::Put(const std::string &path, const std::string &body, + const std::string &content_type) { return Put(path, Headers(), body, content_type); } -inline Result ClientImpl::Put(const char *path, const Headers &headers, 
+inline Result ClientImpl::Put(const std::string &path, const Headers &headers, const std::string &body, - const char *content_type) { - auto ret = send_with_content_provider("PUT", path, headers, body, 0, nullptr, - content_type); - return Result{ret, get_last_error()}; + const std::string &content_type) { + return send_with_content_provider("PUT", path, headers, body.data(), + body.size(), nullptr, nullptr, + content_type); } -inline Result ClientImpl::Put(const char *path, size_t content_length, +inline Result ClientImpl::Put(const std::string &path, size_t content_length, ContentProvider content_provider, - const char *content_type) { - return Put(path, Headers(), content_length, content_provider, content_type); + const std::string &content_type) { + return Put(path, Headers(), content_length, std::move(content_provider), + content_type); } -inline Result ClientImpl::Put(const char *path, const Headers &headers, +inline Result ClientImpl::Put(const std::string &path, + ContentProviderWithoutLength content_provider, + const std::string &content_type) { + return Put(path, Headers(), std::move(content_provider), content_type); +} + +inline Result ClientImpl::Put(const std::string &path, const Headers &headers, size_t content_length, ContentProvider content_provider, - const char *content_type) { - auto ret = send_with_content_provider("PUT", path, headers, std::string(), - content_length, content_provider, - content_type); - return Result{ret, get_last_error()}; + const std::string &content_type) { + return send_with_content_provider("PUT", path, headers, nullptr, + content_length, std::move(content_provider), + nullptr, content_type); } -inline Result ClientImpl::Put(const char *path, const Params ¶ms) { +inline Result ClientImpl::Put(const std::string &path, const Headers &headers, + ContentProviderWithoutLength content_provider, + const std::string &content_type) { + return send_with_content_provider("PUT", path, headers, nullptr, 0, nullptr, + std::move(content_provider), content_type); +} + +inline Result ClientImpl::Put(const std::string &path, const Params ¶ms) { return Put(path, Headers(), params); } -inline Result ClientImpl::Put(const char *path, const Headers &headers, +inline Result ClientImpl::Put(const std::string &path, const Headers &headers, const Params ¶ms) { auto query = detail::params_to_query_str(params); return Put(path, headers, query, "application/x-www-form-urlencoded"); } -inline Result ClientImpl::Patch(const char *path, const std::string &body, - const char *content_type) { +inline Result ClientImpl::Put(const std::string &path, + const MultipartFormDataItems &items) { + return Put(path, Headers(), items); +} + +inline Result ClientImpl::Put(const std::string &path, const Headers &headers, + const MultipartFormDataItems &items) { + const auto &boundary = detail::make_multipart_data_boundary(); + const auto &content_type = + detail::serialize_multipart_formdata_get_content_type(boundary); + const auto &body = detail::serialize_multipart_formdata(items, boundary); + return Put(path, headers, body, content_type); +} + +inline Result ClientImpl::Put(const std::string &path, const Headers &headers, + const MultipartFormDataItems &items, + const std::string &boundary) { + if (!detail::is_multipart_boundary_chars_valid(boundary)) { + return Result{nullptr, Error::UnsupportedMultipartBoundaryChars}; + } + + const auto &content_type = + detail::serialize_multipart_formdata_get_content_type(boundary); + const auto &body = detail::serialize_multipart_formdata(items, 
boundary); + return Put(path, headers, body, content_type); +} + +inline Result +ClientImpl::Put(const std::string &path, const Headers &headers, + const MultipartFormDataItems &items, + const MultipartFormDataProviderItems &provider_items) { + const auto &boundary = detail::make_multipart_data_boundary(); + const auto &content_type = + detail::serialize_multipart_formdata_get_content_type(boundary); + return send_with_content_provider( + "PUT", path, headers, nullptr, 0, nullptr, + get_multipart_content_provider(boundary, items, provider_items), + content_type); +} +inline Result ClientImpl::Patch(const std::string &path) { + return Patch(path, std::string(), std::string()); +} + +inline Result ClientImpl::Patch(const std::string &path, const char *body, + size_t content_length, + const std::string &content_type) { + return Patch(path, Headers(), body, content_length, content_type); +} + +inline Result ClientImpl::Patch(const std::string &path, const Headers &headers, + const char *body, size_t content_length, + const std::string &content_type) { + return send_with_content_provider("PATCH", path, headers, body, + content_length, nullptr, nullptr, + content_type); +} + +inline Result ClientImpl::Patch(const std::string &path, + const std::string &body, + const std::string &content_type) { return Patch(path, Headers(), body, content_type); } -inline Result ClientImpl::Patch(const char *path, const Headers &headers, +inline Result ClientImpl::Patch(const std::string &path, const Headers &headers, const std::string &body, - const char *content_type) { - auto ret = send_with_content_provider("PATCH", path, headers, body, 0, - nullptr, content_type); - return Result{ret, get_last_error()}; + const std::string &content_type) { + return send_with_content_provider("PATCH", path, headers, body.data(), + body.size(), nullptr, nullptr, + content_type); } -inline Result ClientImpl::Patch(const char *path, size_t content_length, +inline Result ClientImpl::Patch(const std::string &path, size_t content_length, ContentProvider content_provider, - const char *content_type) { - return Patch(path, Headers(), content_length, content_provider, content_type); + const std::string &content_type) { + return Patch(path, Headers(), content_length, std::move(content_provider), + content_type); } -inline Result ClientImpl::Patch(const char *path, const Headers &headers, +inline Result ClientImpl::Patch(const std::string &path, + ContentProviderWithoutLength content_provider, + const std::string &content_type) { + return Patch(path, Headers(), std::move(content_provider), content_type); +} + +inline Result ClientImpl::Patch(const std::string &path, const Headers &headers, size_t content_length, ContentProvider content_provider, - const char *content_type) { - auto ret = send_with_content_provider("PATCH", path, headers, std::string(), - content_length, content_provider, - content_type); - return Result{ret, get_last_error()}; + const std::string &content_type) { + return send_with_content_provider("PATCH", path, headers, nullptr, + content_length, std::move(content_provider), + nullptr, content_type); } -inline Result ClientImpl::Delete(const char *path) { - return Delete(path, Headers(), std::string(), nullptr); +inline Result ClientImpl::Patch(const std::string &path, const Headers &headers, + ContentProviderWithoutLength content_provider, + const std::string &content_type) { + return send_with_content_provider("PATCH", path, headers, nullptr, 0, nullptr, + std::move(content_provider), content_type); } -inline Result 
ClientImpl::Delete(const char *path, const std::string &body, - const char *content_type) { - return Delete(path, Headers(), body, content_type); +inline Result ClientImpl::Delete(const std::string &path) { + return Delete(path, Headers(), std::string(), std::string()); } -inline Result ClientImpl::Delete(const char *path, const Headers &headers) { - return Delete(path, headers, std::string(), nullptr); +inline Result ClientImpl::Delete(const std::string &path, + const Headers &headers) { + return Delete(path, headers, std::string(), std::string()); } -inline Result ClientImpl::Delete(const char *path, const Headers &headers, - const std::string &body, - const char *content_type) { +inline Result ClientImpl::Delete(const std::string &path, const char *body, + size_t content_length, + const std::string &content_type) { + return Delete(path, Headers(), body, content_length, content_type); +} + +inline Result ClientImpl::Delete(const std::string &path, + const Headers &headers, const char *body, + size_t content_length, + const std::string &content_type) { Request req; req.method = "DELETE"; - req.headers = default_headers_; - req.headers.insert(headers.begin(), headers.end()); + req.headers = headers; req.path = path; - if (content_type) { req.headers.emplace("Content-Type", content_type); } - req.body = body; + if (!content_type.empty()) { req.set_header("Content-Type", content_type); } + req.body.assign(body, content_length); - auto res = std::make_shared(); - auto ret = send(req, *res); - return Result{ret ? res : nullptr, get_last_error()}; + return send_(std::move(req)); } -inline Result ClientImpl::Options(const char *path) { +inline Result ClientImpl::Delete(const std::string &path, + const std::string &body, + const std::string &content_type) { + return Delete(path, Headers(), body.data(), body.size(), content_type); +} + +inline Result ClientImpl::Delete(const std::string &path, + const Headers &headers, + const std::string &body, + const std::string &content_type) { + return Delete(path, headers, body.data(), body.size(), content_type); +} + +inline Result ClientImpl::Options(const std::string &path) { return Options(path, Headers()); } -inline Result ClientImpl::Options(const char *path, const Headers &headers) { +inline Result ClientImpl::Options(const std::string &path, + const Headers &headers) { Request req; req.method = "OPTIONS"; - req.headers = default_headers_; - req.headers.insert(headers.begin(), headers.end()); + req.headers = headers; req.path = path; - auto res = std::make_shared(); - auto ret = send(req, *res); - return Result{ret ? res : nullptr, get_last_error()}; + return send_(std::move(req)); } +inline void ClientImpl::stop() { + std::lock_guard guard(socket_mutex_); + + // If there is anything ongoing right now, the ONLY thread-safe thing we can + // do is to shutdown_socket, so that threads using this socket suddenly + // discover they can't read/write any more and error out. Everything else + // (closing the socket, shutting ssl down) is unsafe because these actions are + // not thread-safe. + if (socket_requests_in_flight_ > 0) { + shutdown_socket(socket_); + + // Aside from that, we set a flag for the socket to be closed when we're + // done. 
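The comment above describes the one thread-safe way to interrupt an in-flight request: shut the socket down and let the owning request close it later via this flag. A small sketch of how a caller might rely on that behaviour (host and path are hypothetical):

```cpp
// Sketch: aborting a long-running request from another thread.
#include "httplib.h"
#include <chrono>
#include <thread>

int main() {
  httplib::Client cli("example.com", 80);  // hypothetical host/port

  std::thread worker([&] {
    // The content receiver keeps accepting data until stop() shuts the socket down.
    cli.Get("/large-file", [](const char *, size_t) { return true; });
  });

  std::this_thread::sleep_for(std::chrono::milliseconds(100));
  cli.stop();  // shuts down the socket; closing is deferred to the request thread
  worker.join();
  return 0;
}
```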
+ socket_should_be_closed_when_request_is_done_ = true; + return; + } + + // Otherwise, still holding the mutex, we can shut everything down ourselves + shutdown_ssl(socket_, true); + shutdown_socket(socket_); + close_socket(socket_); +} + +inline std::string ClientImpl::host() const { return host_; } + +inline int ClientImpl::port() const { return port_; } + inline size_t ClientImpl::is_socket_open() const { std::lock_guard guard(socket_mutex_); return socket_.is_open(); } -inline void ClientImpl::stop() { - stop_core(); - error_ = Error::Canceled; -} - -inline void ClientImpl::stop_core() { - std::lock_guard guard(socket_mutex_); - if (socket_.is_open()) { - detail::shutdown_socket(socket_.sock); - std::this_thread::sleep_for(std::chrono::milliseconds(1)); - close_socket(socket_, true); - std::this_thread::sleep_for(std::chrono::milliseconds(1)); - } -} +inline socket_t ClientImpl::socket() const { return socket_.sock; } inline void ClientImpl::set_connection_timeout(time_t sec, time_t usec) { connection_timeout_sec_ = sec; @@ -5440,19 +8084,19 @@ inline void ClientImpl::set_write_timeout(time_t sec, time_t usec) { write_timeout_usec_ = usec; } -inline void ClientImpl::set_basic_auth(const char *username, - const char *password) { +inline void ClientImpl::set_basic_auth(const std::string &username, + const std::string &password) { basic_auth_username_ = username; basic_auth_password_ = password; } -inline void ClientImpl::set_bearer_token_auth(const char *token) { +inline void ClientImpl::set_bearer_token_auth(const std::string &token) { bearer_token_auth_token_ = token; } #ifdef CPPHTTPLIB_OPENSSL_SUPPORT -inline void ClientImpl::set_digest_auth(const char *username, - const char *password) { +inline void ClientImpl::set_digest_auth(const std::string &username, + const std::string &password) { digest_auth_username_ = username; digest_auth_password_ = password; } @@ -5462,46 +8106,101 @@ inline void ClientImpl::set_keep_alive(bool on) { keep_alive_ = on; } inline void ClientImpl::set_follow_location(bool on) { follow_location_ = on; } +inline void ClientImpl::set_url_encode(bool on) { url_encode_ = on; } + +inline void +ClientImpl::set_hostname_addr_map(std::map addr_map) { + addr_map_ = std::move(addr_map); +} + inline void ClientImpl::set_default_headers(Headers headers) { default_headers_ = std::move(headers); } +inline void ClientImpl::set_header_writer( + std::function const &writer) { + header_writer_ = writer; +} + +inline void ClientImpl::set_address_family(int family) { + address_family_ = family; +} + inline void ClientImpl::set_tcp_nodelay(bool on) { tcp_nodelay_ = on; } inline void ClientImpl::set_socket_options(SocketOptions socket_options) { - socket_options_ = socket_options; + socket_options_ = std::move(socket_options); } inline void ClientImpl::set_compress(bool on) { compress_ = on; } inline void ClientImpl::set_decompress(bool on) { decompress_ = on; } -inline void ClientImpl::set_interface(const char *intf) { interface_ = intf; } +inline void ClientImpl::set_interface(const std::string &intf) { + interface_ = intf; +} -inline void ClientImpl::set_proxy(const char *host, int port) { +inline void ClientImpl::set_proxy(const std::string &host, int port) { proxy_host_ = host; proxy_port_ = port; } -inline void ClientImpl::set_proxy_basic_auth(const char *username, - const char *password) { +inline void ClientImpl::set_proxy_basic_auth(const std::string &username, + const std::string &password) { proxy_basic_auth_username_ = username; proxy_basic_auth_password_ = 
password; } -inline void ClientImpl::set_proxy_bearer_token_auth(const char *token) { +inline void ClientImpl::set_proxy_bearer_token_auth(const std::string &token) { proxy_bearer_token_auth_token_ = token; } #ifdef CPPHTTPLIB_OPENSSL_SUPPORT -inline void ClientImpl::set_proxy_digest_auth(const char *username, - const char *password) { +inline void ClientImpl::set_proxy_digest_auth(const std::string &username, + const std::string &password) { proxy_digest_auth_username_ = username; proxy_digest_auth_password_ = password; } -#endif -#ifdef CPPHTTPLIB_OPENSSL_SUPPORT +inline void ClientImpl::set_ca_cert_path(const std::string &ca_cert_file_path, + const std::string &ca_cert_dir_path) { + ca_cert_file_path_ = ca_cert_file_path; + ca_cert_dir_path_ = ca_cert_dir_path; +} + +inline void ClientImpl::set_ca_cert_store(X509_STORE *ca_cert_store) { + if (ca_cert_store && ca_cert_store != ca_cert_store_) { + ca_cert_store_ = ca_cert_store; + } +} + +inline X509_STORE *ClientImpl::create_ca_cert_store(const char *ca_cert, + std::size_t size) const { + auto mem = BIO_new_mem_buf(ca_cert, static_cast(size)); + if (!mem) { return nullptr; } + + auto inf = PEM_X509_INFO_read_bio(mem, nullptr, nullptr, nullptr); + if (!inf) { + BIO_free_all(mem); + return nullptr; + } + + auto cts = X509_STORE_new(); + if (cts) { + for (auto i = 0; i < static_cast(sk_X509_INFO_num(inf)); i++) { + auto itmp = sk_X509_INFO_value(inf, i); + if (!itmp) { continue; } + + if (itmp->x509) { X509_STORE_add_cert(cts, itmp->x509); } + if (itmp->crl) { X509_STORE_add_crl(cts, itmp->crl); } + } + } + + sk_X509_INFO_pop_free(inf, X509_INFO_free); + BIO_free_all(mem); + return cts; +} + inline void ClientImpl::enable_server_certificate_verification(bool enabled) { server_certificate_verification_ = enabled; } @@ -5527,7 +8226,9 @@ inline SSL *ssl_new(socket_t sock, SSL_CTX *ctx, std::mutex &ctx_mutex, } if (ssl) { + set_nonblocking(sock, true); auto bio = BIO_new_socket(static_cast(sock), BIO_NOCLOSE); + BIO_set_nbio(bio, 1); SSL_set_bio(ssl, bio, bio); if (!setup(ssl) || SSL_connect_or_accept(ssl) != 1) { @@ -5536,32 +8237,58 @@ inline SSL *ssl_new(socket_t sock, SSL_CTX *ctx, std::mutex &ctx_mutex, std::lock_guard guard(ctx_mutex); SSL_free(ssl); } + set_nonblocking(sock, false); return nullptr; } + BIO_set_nbio(bio, 0); + set_nonblocking(sock, false); } return ssl; } inline void ssl_delete(std::mutex &ctx_mutex, SSL *ssl, - bool process_socket_ret) { - if (process_socket_ret) { - SSL_shutdown(ssl); // shutdown only if not already closed by remote - } + bool shutdown_gracefully) { + // sometimes we may want to skip this to try to avoid SIGPIPE if we know + // the remote has closed the network connection + // Note that it is not always possible to avoid SIGPIPE, this is merely a + // best-efforts. 
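Since the non-graceful shutdown described above is only a best-effort mitigation, applications embedding the client often also ignore SIGPIPE process-wide so that a write to a peer-closed socket surfaces as an error rather than a fatal signal. A complementary application-level sketch (POSIX-only, not part of this library):

```cpp
// Complementary mitigation in the embedding application; assumes a POSIX platform.
#include <csignal>

void ignore_sigpipe() {
#ifndef _WIN32
  // With SIGPIPE ignored, writes to a peer-closed socket fail with EPIPE
  // (and SSL_write reports an error) instead of terminating the process.
  std::signal(SIGPIPE, SIG_IGN);
#endif
}
```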
+ if (shutdown_gracefully) { SSL_shutdown(ssl); } std::lock_guard guard(ctx_mutex); SSL_free(ssl); } +template +bool ssl_connect_or_accept_nonblocking(socket_t sock, SSL *ssl, + U ssl_connect_or_accept, + time_t timeout_sec, + time_t timeout_usec) { + auto res = 0; + while ((res = ssl_connect_or_accept(ssl)) != 1) { + auto err = SSL_get_error(ssl, res); + switch (err) { + case SSL_ERROR_WANT_READ: + if (select_read(sock, timeout_sec, timeout_usec) > 0) { continue; } + break; + case SSL_ERROR_WANT_WRITE: + if (select_write(sock, timeout_sec, timeout_usec) > 0) { continue; } + break; + default: break; + } + return false; + } + return true; +} + template -inline bool -process_server_socket_ssl(SSL *ssl, socket_t sock, size_t keep_alive_max_count, - time_t keep_alive_timeout_sec, - time_t read_timeout_sec, time_t read_timeout_usec, - time_t write_timeout_sec, time_t write_timeout_usec, - T callback) { +inline bool process_server_socket_ssl( + const std::atomic &svr_sock, SSL *ssl, socket_t sock, + size_t keep_alive_max_count, time_t keep_alive_timeout_sec, + time_t read_timeout_sec, time_t read_timeout_usec, time_t write_timeout_sec, + time_t write_timeout_usec, T callback) { return process_server_socket_core( - sock, keep_alive_max_count, keep_alive_timeout_sec, + svr_sock, sock, keep_alive_max_count, keep_alive_timeout_sec, [&](bool close_connection, bool &connection_closed) { SSLSocketStream strm(sock, ssl, read_timeout_sec, read_timeout_usec, write_timeout_sec, write_timeout_usec); @@ -5579,55 +8306,12 @@ process_client_socket_ssl(SSL *ssl, socket_t sock, time_t read_timeout_sec, return callback(strm); } -#if OPENSSL_VERSION_NUMBER < 0x10100000L -static std::shared_ptr> openSSL_locks_; - -class SSLThreadLocks { -public: - SSLThreadLocks() { - openSSL_locks_ = - std::make_shared>(CRYPTO_num_locks()); - CRYPTO_set_locking_callback(locking_callback); - } - - ~SSLThreadLocks() { CRYPTO_set_locking_callback(nullptr); } - -private: - static void locking_callback(int mode, int type, const char * /*file*/, - int /*line*/) { - auto &lk = (*openSSL_locks_)[static_cast(type)]; - if (mode & CRYPTO_LOCK) { - lk.lock(); - } else { - lk.unlock(); - } - } -}; - -#endif - class SSLInit { public: SSLInit() { -#if OPENSSL_VERSION_NUMBER < 0x1010001fL - SSL_load_error_strings(); - SSL_library_init(); -#else OPENSSL_init_ssl( OPENSSL_INIT_LOAD_SSL_STRINGS | OPENSSL_INIT_LOAD_CRYPTO_STRINGS, NULL); -#endif } - - ~SSLInit() { -#if OPENSSL_VERSION_NUMBER < 0x1010001fL - ERR_free_strings(); -#endif - } - -private: -#if OPENSSL_VERSION_NUMBER < 0x10100000L - SSLThreadLocks thread_init_; -#endif }; // SSL socket stream implementation @@ -5640,44 +8324,80 @@ inline SSLSocketStream::SSLSocketStream(socket_t sock, SSL *ssl, read_timeout_usec_(read_timeout_usec), write_timeout_sec_(write_timeout_sec), write_timeout_usec_(write_timeout_usec) { - { - timeval tv; - tv.tv_sec = static_cast(read_timeout_sec); - tv.tv_usec = static_cast(read_timeout_usec); - - setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, reinterpret_cast(&tv), - sizeof(tv)); - } - { - timeval tv; - tv.tv_sec = static_cast(write_timeout_sec); - tv.tv_usec = static_cast(write_timeout_usec); - - setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, reinterpret_cast(&tv), - sizeof(tv)); - } + SSL_clear_mode(ssl, SSL_MODE_AUTO_RETRY); } -inline SSLSocketStream::~SSLSocketStream() {} +inline SSLSocketStream::~SSLSocketStream() = default; inline bool SSLSocketStream::is_readable() const { return detail::select_read(sock_, read_timeout_sec_, read_timeout_usec_) > 0; } inline bool 
SSLSocketStream::is_writable() const { - return detail::select_write(sock_, write_timeout_sec_, write_timeout_usec_) > - 0; + return select_write(sock_, write_timeout_sec_, write_timeout_usec_) > 0 && + is_socket_alive(sock_); } inline ssize_t SSLSocketStream::read(char *ptr, size_t size) { - if (SSL_pending(ssl_) > 0 || is_readable()) { + if (SSL_pending(ssl_) > 0) { return SSL_read(ssl_, ptr, static_cast(size)); + } else if (is_readable()) { + auto ret = SSL_read(ssl_, ptr, static_cast(size)); + if (ret < 0) { + auto err = SSL_get_error(ssl_, ret); + auto n = 1000; +#ifdef _WIN32 + while (--n >= 0 && (err == SSL_ERROR_WANT_READ || + (err == SSL_ERROR_SYSCALL && + WSAGetLastError() == WSAETIMEDOUT))) { +#else + while (--n >= 0 && err == SSL_ERROR_WANT_READ) { +#endif + if (SSL_pending(ssl_) > 0) { + return SSL_read(ssl_, ptr, static_cast(size)); + } else if (is_readable()) { + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + ret = SSL_read(ssl_, ptr, static_cast(size)); + if (ret >= 0) { return ret; } + err = SSL_get_error(ssl_, ret); + } else { + return -1; + } + } + } + return ret; } return -1; } inline ssize_t SSLSocketStream::write(const char *ptr, size_t size) { - if (is_writable()) { return SSL_write(ssl_, ptr, static_cast(size)); } + if (is_writable()) { + auto handle_size = static_cast( + std::min(size, (std::numeric_limits::max)())); + + auto ret = SSL_write(ssl_, ptr, static_cast(handle_size)); + if (ret < 0) { + auto err = SSL_get_error(ssl_, ret); + auto n = 1000; +#ifdef _WIN32 + while (--n >= 0 && (err == SSL_ERROR_WANT_WRITE || + (err == SSL_ERROR_SYSCALL && + WSAGetLastError() == WSAETIMEDOUT))) { +#else + while (--n >= 0 && err == SSL_ERROR_WANT_WRITE) { +#endif + if (is_writable()) { + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + ret = SSL_write(ssl_, ptr, static_cast(handle_size)); + if (ret >= 0) { return ret; } + err = SSL_get_error(ssl_, ret); + } else { + return -1; + } + } + } + return ret; + } return -1; } @@ -5686,6 +8406,13 @@ inline void SSLSocketStream::get_remote_ip_and_port(std::string &ip, detail::get_remote_ip_and_port(sock_, ip, port); } +inline void SSLSocketStream::get_local_ip_and_port(std::string &ip, + int &port) const { + detail::get_local_ip_and_port(sock_, ip, port); +} + +inline socket_t SSLSocketStream::socket() const { return sock_; } + static SSLInit sslinit_; } // namespace detail @@ -5693,18 +8420,23 @@ static SSLInit sslinit_; // SSL HTTP server implementation inline SSLServer::SSLServer(const char *cert_path, const char *private_key_path, const char *client_ca_cert_file_path, - const char *client_ca_cert_dir_path) { - ctx_ = SSL_CTX_new(SSLv23_server_method()); + const char *client_ca_cert_dir_path, + const char *private_key_password) { + ctx_ = SSL_CTX_new(TLS_server_method()); if (ctx_) { SSL_CTX_set_options(ctx_, - SSL_OP_ALL | SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3 | - SSL_OP_NO_COMPRESSION | + SSL_OP_NO_COMPRESSION | SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION); - // auto ecdh = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1); - // SSL_CTX_set_tmp_ecdh(ctx_, ecdh); - // EC_KEY_free(ecdh); + SSL_CTX_set_min_proto_version(ctx_, TLS1_1_VERSION); + + // add default password callback before opening encrypted private key + if (private_key_password != nullptr && (private_key_password[0] != '\0')) { + SSL_CTX_set_default_passwd_cb_userdata( + ctx_, + reinterpret_cast(const_cast(private_key_password))); + } if (SSL_CTX_use_certificate_chain_file(ctx_, cert_path) != 1 || SSL_CTX_use_PrivateKey_file(ctx_, 
private_key_path, SSL_FILETYPE_PEM) != @@ -5712,46 +8444,46 @@ inline SSLServer::SSLServer(const char *cert_path, const char *private_key_path, SSL_CTX_free(ctx_); ctx_ = nullptr; } else if (client_ca_cert_file_path || client_ca_cert_dir_path) { - // if (client_ca_cert_file_path) { - // auto list = SSL_load_client_CA_file(client_ca_cert_file_path); - // SSL_CTX_set_client_CA_list(ctx_, list); - // } - SSL_CTX_load_verify_locations(ctx_, client_ca_cert_file_path, client_ca_cert_dir_path); SSL_CTX_set_verify( - ctx_, - SSL_VERIFY_PEER | - SSL_VERIFY_FAIL_IF_NO_PEER_CERT, // SSL_VERIFY_CLIENT_ONCE, - nullptr); + ctx_, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, nullptr); } } } inline SSLServer::SSLServer(X509 *cert, EVP_PKEY *private_key, X509_STORE *client_ca_cert_store) { - ctx_ = SSL_CTX_new(SSLv23_server_method()); + ctx_ = SSL_CTX_new(TLS_server_method()); if (ctx_) { SSL_CTX_set_options(ctx_, - SSL_OP_ALL | SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3 | - SSL_OP_NO_COMPRESSION | + SSL_OP_NO_COMPRESSION | SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION); + SSL_CTX_set_min_proto_version(ctx_, TLS1_1_VERSION); + if (SSL_CTX_use_certificate(ctx_, cert) != 1 || SSL_CTX_use_PrivateKey(ctx_, private_key) != 1) { SSL_CTX_free(ctx_); ctx_ = nullptr; } else if (client_ca_cert_store) { - SSL_CTX_set_cert_store(ctx_, client_ca_cert_store); SSL_CTX_set_verify( - ctx_, - SSL_VERIFY_PEER | - SSL_VERIFY_FAIL_IF_NO_PEER_CERT, // SSL_VERIFY_CLIENT_ONCE, - nullptr); + ctx_, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, nullptr); + } + } +} + +inline SSLServer::SSLServer( + const std::function &setup_ssl_ctx_callback) { + ctx_ = SSL_CTX_new(TLS_method()); + if (ctx_) { + if (!setup_ssl_ctx_callback(*ctx_)) { + SSL_CTX_free(ctx_); + ctx_ = nullptr; } } } @@ -5762,13 +8494,21 @@ inline SSLServer::~SSLServer() { inline bool SSLServer::is_valid() const { return ctx_; } -inline bool SSLServer::process_and_close_socket(socket_t sock) { - auto ssl = detail::ssl_new(sock, ctx_, ctx_mutex_, SSL_accept, - [](SSL * /*ssl*/) { return true; }); +inline SSL_CTX *SSLServer::ssl_context() const { return ctx_; } +inline bool SSLServer::process_and_close_socket(socket_t sock) { + auto ssl = detail::ssl_new( + sock, ctx_, ctx_mutex_, + [&](SSL *ssl2) { + return detail::ssl_connect_or_accept_nonblocking( + sock, ssl2, SSL_accept, read_timeout_sec_, read_timeout_usec_); + }, + [](SSL * /*ssl2*/) { return true; }); + + auto ret = false; if (ssl) { - auto ret = detail::process_server_socket_ssl( - ssl, sock, keep_alive_max_count_, keep_alive_timeout_sec_, + ret = detail::process_server_socket_ssl( + svr_sock_, ssl, sock, keep_alive_max_count_, keep_alive_timeout_sec_, read_timeout_sec_, read_timeout_usec_, write_timeout_sec_, write_timeout_usec_, [this, ssl](Stream &strm, bool close_connection, @@ -5777,12 +8517,15 @@ inline bool SSLServer::process_and_close_socket(socket_t sock) { [&](Request &req) { req.ssl = ssl; }); }); - detail::ssl_delete(ctx_mutex_, ssl, ret); - return ret; + // Shutdown gracefully if the result seemed successful, non-gracefully if + // the connection appeared to be closed. 
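The new callback-taking constructor shown above hands the raw SSL_CTX to the caller, which is useful when certificate setup does not fit the path-based constructors. A minimal sketch, assuming CPPHTTPLIB_OPENSSL_SUPPORT is defined and using hypothetical file names:

```cpp
// Sketch: configuring the server's SSL_CTX through the callback constructor.
#include "httplib.h"

int main() {
  httplib::SSLServer svr([](SSL_CTX &ctx) {
    // Returning false makes is_valid() report failure.
    if (SSL_CTX_use_certificate_chain_file(&ctx, "server.pem") != 1) { return false; }
    if (SSL_CTX_use_PrivateKey_file(&ctx, "server.key", SSL_FILETYPE_PEM) != 1) { return false; }
    return true;
  });
  if (!svr.is_valid()) { return 1; }

  svr.Get("/hi", [](const httplib::Request &, httplib::Response &res) {
    res.set_content("hello", "text/plain");
  });
  return svr.listen("0.0.0.0", 8443) ? 0 : 1;
}
```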
+ const bool shutdown_gracefully = ret; + detail::ssl_delete(ctx_mutex_, ssl, shutdown_gracefully); } + detail::shutdown_socket(sock); detail::close_socket(sock); - return false; + return ret; } // SSL HTTP client implementation @@ -5796,12 +8539,13 @@ inline SSLClient::SSLClient(const std::string &host, int port, const std::string &client_cert_path, const std::string &client_key_path) : ClientImpl(host, port, client_cert_path, client_key_path) { - ctx_ = SSL_CTX_new(SSLv23_client_method()); + ctx_ = SSL_CTX_new(TLS_client_method()); detail::split(&host_[0], &host_[host_.size()], '.', [&](const char *b, const char *e) { - host_components_.emplace_back(std::string(b, e)); + host_components_.emplace_back(b, e); }); + if (!client_cert_path.empty() && !client_key_path.empty()) { if (SSL_CTX_use_certificate_file(ctx_, client_cert_path.c_str(), SSL_FILETYPE_PEM) != 1 || @@ -5816,12 +8560,13 @@ inline SSLClient::SSLClient(const std::string &host, int port, inline SSLClient::SSLClient(const std::string &host, int port, X509 *client_cert, EVP_PKEY *client_key) : ClientImpl(host, port) { - ctx_ = SSL_CTX_new(SSLv23_client_method()); + ctx_ = SSL_CTX_new(TLS_client_method()); detail::split(&host_[0], &host_[host_.size()], '.', [&](const char *b, const char *e) { - host_components_.emplace_back(std::string(b, e)); + host_components_.emplace_back(b, e); }); + if (client_cert != nullptr && client_key != nullptr) { if (SSL_CTX_use_certificate(ctx_, client_cert) != 1 || SSL_CTX_use_PrivateKey(ctx_, client_key) != 1) { @@ -5833,18 +8578,30 @@ inline SSLClient::SSLClient(const std::string &host, int port, inline SSLClient::~SSLClient() { if (ctx_) { SSL_CTX_free(ctx_); } + // Make sure to shut down SSL since shutdown_ssl will resolve to the + // base function rather than the derived function once we get to the + // base class destructor, and won't free the SSL (causing a leak). 
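A client-side counterpart to the constructors above, again only an illustrative sketch assuming CPPHTTPLIB_OPENSSL_SUPPORT and "httplib.h" as in the earlier example (host and file names are placeholders):

    httplib::SSLClient cli("example.com", 443, "./client.pem", "./client.key");
    cli.enable_server_certificate_verification(true);
    if (auto res = cli.Get("/status")) {
      // res->status and res->body hold the server's reply
    } else {
      // res.error() is e.g. Error::SSLConnection or Error::SSLServerVerification
    }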
+ shutdown_ssl_impl(socket_, true); } inline bool SSLClient::is_valid() const { return ctx_; } -inline void SSLClient::set_ca_cert_path(const char *ca_cert_file_path, - const char *ca_cert_dir_path) { - if (ca_cert_file_path) { ca_cert_file_path_ = ca_cert_file_path; } - if (ca_cert_dir_path) { ca_cert_dir_path_ = ca_cert_dir_path; } +inline void SSLClient::set_ca_cert_store(X509_STORE *ca_cert_store) { + if (ca_cert_store) { + if (ctx_) { + if (SSL_CTX_get_cert_store(ctx_) != ca_cert_store) { + // Free memory allocated for old cert and use new store `ca_cert_store` + SSL_CTX_set_cert_store(ctx_, ca_cert_store); + } + } else { + X509_STORE_free(ca_cert_store); + } + } } -inline void SSLClient::set_ca_cert_store(X509_STORE *ca_cert_store) { - if (ca_cert_store) { ca_cert_store_ = ca_cert_store; } +inline void SSLClient::load_ca_cert_store(const char *ca_cert, + std::size_t size) { + set_ca_cert_store(ClientImpl::create_ca_cert_store(ca_cert, size)); } inline long SSLClient::get_openssl_verify_result() const { @@ -5853,34 +8610,38 @@ inline long SSLClient::get_openssl_verify_result() const { inline SSL_CTX *SSLClient::ssl_context() const { return ctx_; } -inline bool SSLClient::create_and_connect_socket(Socket &socket) { - return is_valid() && ClientImpl::create_and_connect_socket(socket); +inline bool SSLClient::create_and_connect_socket(Socket &socket, Error &error) { + return is_valid() && ClientImpl::create_and_connect_socket(socket, error); } +// Assumes that socket_mutex_ is locked and that there are no requests in flight inline bool SSLClient::connect_with_proxy(Socket &socket, Response &res, - bool &success) { + bool &success, Error &error) { success = true; - Response res2; - + Response proxy_res; if (!detail::process_client_socket( socket.sock, read_timeout_sec_, read_timeout_usec_, write_timeout_sec_, write_timeout_usec_, [&](Stream &strm) { Request req2; req2.method = "CONNECT"; req2.path = host_and_port_; - return process_request(strm, req2, res2, false); + return process_request(strm, req2, proxy_res, false, error); })) { - close_socket(socket, true); + // Thread-safe to close everything because we are assuming there are no + // requests in flight + shutdown_ssl(socket, true); + shutdown_socket(socket); + close_socket(socket); success = false; return false; } - if (res2.status == 407) { + if (proxy_res.status == StatusCode::ProxyAuthenticationRequired_407) { if (!proxy_digest_auth_username_.empty() && !proxy_digest_auth_password_.empty()) { std::map auth; - if (detail::parse_www_authenticate(res2, auth, true)) { - Response res3; + if (detail::parse_www_authenticate(proxy_res, auth, true)) { + proxy_res = Response(); if (!detail::process_client_socket( socket.sock, read_timeout_sec_, read_timeout_usec_, write_timeout_sec_, write_timeout_usec_, [&](Stream &strm) { @@ -5891,24 +8652,39 @@ inline bool SSLClient::connect_with_proxy(Socket &socket, Response &res, req3, auth, 1, detail::random_string(10), proxy_digest_auth_username_, proxy_digest_auth_password_, true)); - return process_request(strm, req3, res3, false); + return process_request(strm, req3, proxy_res, false, error); })) { - close_socket(socket, true); + // Thread-safe to close everything because we are assuming there are + // no requests in flight + shutdown_ssl(socket, true); + shutdown_socket(socket); + close_socket(socket); success = false; return false; } } - } else { - res = res2; - return false; } } + // If status code is not 200, proxy request is failed. 
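The CONNECT/407 handling above is driven from the public proxy setters added elsewhere in this patch; a hedged sketch with placeholder proxy host, port, and credentials:

    httplib::Client cli("https://example.com");
    cli.set_proxy("proxy.internal", 3128);
    cli.set_proxy_digest_auth("proxy-user", "proxy-pass");  // retried once after a 407 challenge
    auto res = cli.Get("/");
    if (res.error() == httplib::Error::ProxyConnection) {
      // the CONNECT request did not return 200; per the code above the proxy's
      // reply is returned as the response of this request
    }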
+ // Set error to ProxyConnection and return proxy response + // as the response of the request + if (proxy_res.status != StatusCode::OK_200) { + error = Error::ProxyConnection; + res = std::move(proxy_res); + // Thread-safe to close everything because we are assuming there are + // no requests in flight + shutdown_ssl(socket, true); + shutdown_socket(socket); + close_socket(socket); + return false; + } + return true; } inline bool SSLClient::load_certs() { - bool ret = true; + auto ret = true; std::call_once(initialize_cert_, [&]() { std::lock_guard guard(ctx_mutex_); @@ -5922,57 +8698,60 @@ inline bool SSLClient::load_certs() { ca_cert_dir_path_.c_str())) { ret = false; } - } else if (ca_cert_store_ != nullptr) { - if (SSL_CTX_get_cert_store(ctx_) != ca_cert_store_) { - SSL_CTX_set_cert_store(ctx_, ca_cert_store_); - } } else { + auto loaded = false; #ifdef _WIN32 - detail::load_system_certs_on_windows(SSL_CTX_get_cert_store(ctx_)); -#else - SSL_CTX_set_default_verify_paths(ctx_); -#endif + loaded = + detail::load_system_certs_on_windows(SSL_CTX_get_cert_store(ctx_)); +#elif defined(CPPHTTPLIB_USE_CERTS_FROM_MACOSX_KEYCHAIN) && defined(__APPLE__) +#if TARGET_OS_OSX + loaded = detail::load_system_certs_on_macos(SSL_CTX_get_cert_store(ctx_)); +#endif // TARGET_OS_OSX +#endif // _WIN32 + if (!loaded) { SSL_CTX_set_default_verify_paths(ctx_); } } }); return ret; } -inline bool SSLClient::initialize_ssl(Socket &socket) { +inline bool SSLClient::initialize_ssl(Socket &socket, Error &error) { auto ssl = detail::ssl_new( socket.sock, ctx_, ctx_mutex_, - [&](SSL *ssl) { + [&](SSL *ssl2) { if (server_certificate_verification_) { if (!load_certs()) { - error_ = Error::SSLLoadingCerts; + error = Error::SSLLoadingCerts; return false; } - SSL_set_verify(ssl, SSL_VERIFY_NONE, nullptr); + SSL_set_verify(ssl2, SSL_VERIFY_NONE, nullptr); } - if (SSL_connect(ssl) != 1) { - error_ = Error::SSLConnection; + if (!detail::ssl_connect_or_accept_nonblocking( + socket.sock, ssl2, SSL_connect, connection_timeout_sec_, + connection_timeout_usec_)) { + error = Error::SSLConnection; return false; } if (server_certificate_verification_) { - verify_result_ = SSL_get_verify_result(ssl); + verify_result_ = SSL_get_verify_result(ssl2); if (verify_result_ != X509_V_OK) { - error_ = Error::SSLServerVerification; + error = Error::SSLServerVerification; return false; } - auto server_cert = SSL_get_peer_certificate(ssl); + auto server_cert = SSL_get1_peer_certificate(ssl2); if (server_cert == nullptr) { - error_ = Error::SSLServerVerification; + error = Error::SSLServerVerification; return false; } if (!verify_host(server_cert)) { X509_free(server_cert); - error_ = Error::SSLServerVerification; + error = Error::SSLServerVerification; return false; } X509_free(server_cert); @@ -5980,8 +8759,12 @@ inline bool SSLClient::initialize_ssl(Socket &socket) { return true; }, - [&](SSL *ssl) { - SSL_set_tlsext_host_name(ssl, host_.c_str()); + [&](SSL *ssl2) { + // NOTE: Direct call instead of using the OpenSSL macro to suppress + // -Wold-style-cast warning + // SSL_set_tlsext_host_name(ssl2, host_.c_str()); + SSL_ctrl(ssl2, SSL_CTRL_SET_TLSEXT_HOSTNAME, TLSEXT_NAMETYPE_host_name, + static_cast(const_cast(host_.c_str()))); return true; }); @@ -5990,26 +8773,35 @@ inline bool SSLClient::initialize_ssl(Socket &socket) { return true; } - close_socket(socket, false); + shutdown_socket(socket); + close_socket(socket); return false; } -inline void SSLClient::close_socket(Socket &socket, bool process_socket_ret) { - 
detail::close_socket(socket.sock); - socket_.sock = INVALID_SOCKET; - if (socket.ssl) { - detail::ssl_delete(ctx_mutex_, socket.ssl, process_socket_ret); - socket_.ssl = nullptr; +inline void SSLClient::shutdown_ssl(Socket &socket, bool shutdown_gracefully) { + shutdown_ssl_impl(socket, shutdown_gracefully); +} + +inline void SSLClient::shutdown_ssl_impl(Socket &socket, + bool shutdown_gracefully) { + if (socket.sock == INVALID_SOCKET) { + assert(socket.ssl == nullptr); + return; } + if (socket.ssl) { + detail::ssl_delete(ctx_mutex_, socket.ssl, shutdown_gracefully); + socket.ssl = nullptr; + } + assert(socket.ssl == nullptr); } inline bool -SSLClient::process_socket(Socket &socket, +SSLClient::process_socket(const Socket &socket, std::function callback) { assert(socket.ssl); return detail::process_client_socket_ssl( socket.ssl, socket.sock, read_timeout_sec_, read_timeout_usec_, - write_timeout_sec_, write_timeout_usec_, callback); + write_timeout_sec_, write_timeout_usec_, std::move(callback)); } inline bool SSLClient::is_ssl() const { return true; } @@ -6046,8 +8838,8 @@ SSLClient::verify_host_with_subject_alt_name(X509 *server_cert) const { auto type = GEN_DNS; - struct in6_addr addr6; - struct in_addr addr; + struct in6_addr addr6 {}; + struct in_addr addr {}; size_t addr_len = 0; #ifndef __MINGW32__ @@ -6065,35 +8857,35 @@ SSLClient::verify_host_with_subject_alt_name(X509 *server_cert) const { if (alt_names) { auto dsn_matched = false; - auto ip_mached = false; + auto ip_matched = false; auto count = sk_GENERAL_NAME_num(alt_names); for (decltype(count) i = 0; i < count && !dsn_matched; i++) { auto val = sk_GENERAL_NAME_value(alt_names, i); if (val->type == type) { - auto name = (const char *)ASN1_STRING_get0_data(val->d.ia5); - auto name_len = (size_t)ASN1_STRING_length(val->d.ia5); + auto name = + reinterpret_cast(ASN1_STRING_get0_data(val->d.ia5)); + auto name_len = static_cast(ASN1_STRING_length(val->d.ia5)); - if (strlen(name) == name_len) { - switch (type) { - case GEN_DNS: dsn_matched = check_host_name(name, name_len); break; + switch (type) { + case GEN_DNS: dsn_matched = check_host_name(name, name_len); break; - case GEN_IPADD: - if (!memcmp(&addr6, name, addr_len) || - !memcmp(&addr, name, addr_len)) { - ip_mached = true; - } - break; + case GEN_IPADD: + if (!memcmp(&addr6, name, addr_len) || + !memcmp(&addr, name, addr_len)) { + ip_matched = true; } + break; } } } - if (dsn_matched || ip_mached) { ret = true; } + if (dsn_matched || ip_matched) { ret = true; } } - GENERAL_NAMES_free((STACK_OF(GENERAL_NAME) *)alt_names); + GENERAL_NAMES_free(const_cast( + reinterpret_cast(alt_names))); return ret; } @@ -6122,7 +8914,7 @@ inline bool SSLClient::check_host_name(const char *pattern, std::vector pattern_components; detail::split(&pattern[0], &pattern[pattern_len], '.', [&](const char *b, const char *e) { - pattern_components.emplace_back(std::string(b, e)); + pattern_components.emplace_back(b, e); }); if (host_components_.size() != pattern_components.size()) { return false; } @@ -6143,15 +8935,16 @@ inline bool SSLClient::check_host_name(const char *pattern, #endif // Universal client implementation -inline Client::Client(const char *scheme_host_port) +inline Client::Client(const std::string &scheme_host_port) : Client(scheme_host_port, std::string(), std::string()) {} -inline Client::Client(const char *scheme_host_port, +inline Client::Client(const std::string &scheme_host_port, const std::string &client_cert_path, const std::string &client_key_path) { - const static std::regex 
re(R"(^(?:([a-z]+)://)?([^:/?#]+)(?::(\d+))?)"); + const static std::regex re( + R"((?:([a-z]+):\/\/)?(?:\[([\d:]+)\]|([^:/?#]+))(?::(\d+))?)"); - std::cmatch m; + std::smatch m; if (std::regex_match(scheme_host_port, m, re)) { auto scheme = m[1].str(); @@ -6159,6 +8952,10 @@ inline Client::Client(const char *scheme_host_port, if (!scheme.empty() && (scheme != "http" && scheme != "https")) { #else if (!scheme.empty() && scheme != "http") { +#endif +#ifndef CPPHTTPLIB_NO_EXCEPTIONS + std::string msg = "'" + scheme + "' scheme is not supported."; + throw std::invalid_argument(msg); #endif return; } @@ -6166,233 +8963,397 @@ inline Client::Client(const char *scheme_host_port, auto is_ssl = scheme == "https"; auto host = m[2].str(); + if (host.empty()) { host = m[3].str(); } - auto port_str = m[3].str(); + auto port_str = m[4].str(); auto port = !port_str.empty() ? std::stoi(port_str) : (is_ssl ? 443 : 80); if (is_ssl) { #ifdef CPPHTTPLIB_OPENSSL_SUPPORT - cli_ = std::make_shared(host.c_str(), port, client_cert_path, - client_key_path); + cli_ = detail::make_unique(host, port, client_cert_path, + client_key_path); is_ssl_ = is_ssl; #endif } else { - cli_ = std::make_shared(host.c_str(), port, client_cert_path, - client_key_path); + cli_ = detail::make_unique(host, port, client_cert_path, + client_key_path); } } else { - cli_ = std::make_shared(scheme_host_port, 80, client_cert_path, - client_key_path); + cli_ = detail::make_unique(scheme_host_port, 80, + client_cert_path, client_key_path); } } inline Client::Client(const std::string &host, int port) - : cli_(std::make_shared(host, port)) {} + : cli_(detail::make_unique(host, port)) {} inline Client::Client(const std::string &host, int port, const std::string &client_cert_path, const std::string &client_key_path) - : cli_(std::make_shared(host, port, client_cert_path, - client_key_path)) {} + : cli_(detail::make_unique(host, port, client_cert_path, + client_key_path)) {} -inline Client::~Client() {} +inline Client::~Client() = default; inline bool Client::is_valid() const { return cli_ != nullptr && cli_->is_valid(); } -inline Result Client::Get(const char *path) { return cli_->Get(path); } -inline Result Client::Get(const char *path, const Headers &headers) { +inline Result Client::Get(const std::string &path) { return cli_->Get(path); } +inline Result Client::Get(const std::string &path, const Headers &headers) { return cli_->Get(path, headers); } -inline Result Client::Get(const char *path, Progress progress) { - return cli_->Get(path, progress); +inline Result Client::Get(const std::string &path, Progress progress) { + return cli_->Get(path, std::move(progress)); } -inline Result Client::Get(const char *path, const Headers &headers, +inline Result Client::Get(const std::string &path, const Headers &headers, Progress progress) { - return cli_->Get(path, headers, progress); + return cli_->Get(path, headers, std::move(progress)); } -inline Result Client::Get(const char *path, ContentReceiver content_receiver) { +inline Result Client::Get(const std::string &path, + ContentReceiver content_receiver) { return cli_->Get(path, std::move(content_receiver)); } -inline Result Client::Get(const char *path, const Headers &headers, +inline Result Client::Get(const std::string &path, const Headers &headers, ContentReceiver content_receiver) { return cli_->Get(path, headers, std::move(content_receiver)); } -inline Result Client::Get(const char *path, ContentReceiver content_receiver, - Progress progress) { +inline Result Client::Get(const std::string 
&path, + ContentReceiver content_receiver, Progress progress) { return cli_->Get(path, std::move(content_receiver), std::move(progress)); } -inline Result Client::Get(const char *path, const Headers &headers, +inline Result Client::Get(const std::string &path, const Headers &headers, ContentReceiver content_receiver, Progress progress) { return cli_->Get(path, headers, std::move(content_receiver), std::move(progress)); } -inline Result Client::Get(const char *path, ResponseHandler response_handler, +inline Result Client::Get(const std::string &path, + ResponseHandler response_handler, ContentReceiver content_receiver) { return cli_->Get(path, std::move(response_handler), std::move(content_receiver)); } -inline Result Client::Get(const char *path, const Headers &headers, +inline Result Client::Get(const std::string &path, const Headers &headers, ResponseHandler response_handler, ContentReceiver content_receiver) { return cli_->Get(path, headers, std::move(response_handler), std::move(content_receiver)); } -inline Result Client::Get(const char *path, ResponseHandler response_handler, +inline Result Client::Get(const std::string &path, + ResponseHandler response_handler, ContentReceiver content_receiver, Progress progress) { return cli_->Get(path, std::move(response_handler), std::move(content_receiver), std::move(progress)); } -inline Result Client::Get(const char *path, const Headers &headers, +inline Result Client::Get(const std::string &path, const Headers &headers, ResponseHandler response_handler, ContentReceiver content_receiver, Progress progress) { - return cli_->Get(path, headers, response_handler, content_receiver, progress); + return cli_->Get(path, headers, std::move(response_handler), + std::move(content_receiver), std::move(progress)); +} +inline Result Client::Get(const std::string &path, const Params ¶ms, + const Headers &headers, Progress progress) { + return cli_->Get(path, params, headers, std::move(progress)); +} +inline Result Client::Get(const std::string &path, const Params ¶ms, + const Headers &headers, + ContentReceiver content_receiver, Progress progress) { + return cli_->Get(path, params, headers, std::move(content_receiver), + std::move(progress)); +} +inline Result Client::Get(const std::string &path, const Params ¶ms, + const Headers &headers, + ResponseHandler response_handler, + ContentReceiver content_receiver, Progress progress) { + return cli_->Get(path, params, headers, std::move(response_handler), + std::move(content_receiver), std::move(progress)); } -inline Result Client::Head(const char *path) { return cli_->Head(path); } -inline Result Client::Head(const char *path, const Headers &headers) { +inline Result Client::Head(const std::string &path) { return cli_->Head(path); } +inline Result Client::Head(const std::string &path, const Headers &headers) { return cli_->Head(path, headers); } -inline Result Client::Post(const char *path) { return cli_->Post(path); } -inline Result Client::Post(const char *path, const std::string &body, - const char *content_type) { +inline Result Client::Post(const std::string &path) { return cli_->Post(path); } +inline Result Client::Post(const std::string &path, const Headers &headers) { + return cli_->Post(path, headers); +} +inline Result Client::Post(const std::string &path, const char *body, + size_t content_length, + const std::string &content_type) { + return cli_->Post(path, body, content_length, content_type); +} +inline Result Client::Post(const std::string &path, const Headers &headers, + const char *body, size_t 
content_length, + const std::string &content_type) { + return cli_->Post(path, headers, body, content_length, content_type); +} +inline Result Client::Post(const std::string &path, const std::string &body, + const std::string &content_type) { return cli_->Post(path, body, content_type); } -inline Result Client::Post(const char *path, const Headers &headers, - const std::string &body, const char *content_type) { +inline Result Client::Post(const std::string &path, const Headers &headers, + const std::string &body, + const std::string &content_type) { return cli_->Post(path, headers, body, content_type); } -inline Result Client::Post(const char *path, size_t content_length, +inline Result Client::Post(const std::string &path, size_t content_length, ContentProvider content_provider, - const char *content_type) { - return cli_->Post(path, content_length, content_provider, content_type); -} -inline Result Client::Post(const char *path, const Headers &headers, - size_t content_length, - ContentProvider content_provider, - const char *content_type) { - return cli_->Post(path, headers, content_length, content_provider, + const std::string &content_type) { + return cli_->Post(path, content_length, std::move(content_provider), content_type); } -inline Result Client::Post(const char *path, const Params ¶ms) { +inline Result Client::Post(const std::string &path, + ContentProviderWithoutLength content_provider, + const std::string &content_type) { + return cli_->Post(path, std::move(content_provider), content_type); +} +inline Result Client::Post(const std::string &path, const Headers &headers, + size_t content_length, + ContentProvider content_provider, + const std::string &content_type) { + return cli_->Post(path, headers, content_length, std::move(content_provider), + content_type); +} +inline Result Client::Post(const std::string &path, const Headers &headers, + ContentProviderWithoutLength content_provider, + const std::string &content_type) { + return cli_->Post(path, headers, std::move(content_provider), content_type); +} +inline Result Client::Post(const std::string &path, const Params ¶ms) { return cli_->Post(path, params); } -inline Result Client::Post(const char *path, const Headers &headers, +inline Result Client::Post(const std::string &path, const Headers &headers, const Params ¶ms) { return cli_->Post(path, headers, params); } -inline Result Client::Post(const char *path, +inline Result Client::Post(const std::string &path, const MultipartFormDataItems &items) { return cli_->Post(path, items); } -inline Result Client::Post(const char *path, const Headers &headers, +inline Result Client::Post(const std::string &path, const Headers &headers, const MultipartFormDataItems &items) { return cli_->Post(path, headers, items); } -inline Result Client::Put(const char *path) { return cli_->Put(path); } -inline Result Client::Put(const char *path, const std::string &body, - const char *content_type) { +inline Result Client::Post(const std::string &path, const Headers &headers, + const MultipartFormDataItems &items, + const std::string &boundary) { + return cli_->Post(path, headers, items, boundary); +} +inline Result +Client::Post(const std::string &path, const Headers &headers, + const MultipartFormDataItems &items, + const MultipartFormDataProviderItems &provider_items) { + return cli_->Post(path, headers, items, provider_items); +} +inline Result Client::Put(const std::string &path) { return cli_->Put(path); } +inline Result Client::Put(const std::string &path, const char *body, + size_t 
content_length, + const std::string &content_type) { + return cli_->Put(path, body, content_length, content_type); +} +inline Result Client::Put(const std::string &path, const Headers &headers, + const char *body, size_t content_length, + const std::string &content_type) { + return cli_->Put(path, headers, body, content_length, content_type); +} +inline Result Client::Put(const std::string &path, const std::string &body, + const std::string &content_type) { return cli_->Put(path, body, content_type); } -inline Result Client::Put(const char *path, const Headers &headers, - const std::string &body, const char *content_type) { +inline Result Client::Put(const std::string &path, const Headers &headers, + const std::string &body, + const std::string &content_type) { return cli_->Put(path, headers, body, content_type); } -inline Result Client::Put(const char *path, size_t content_length, +inline Result Client::Put(const std::string &path, size_t content_length, ContentProvider content_provider, - const char *content_type) { - return cli_->Put(path, content_length, content_provider, content_type); -} -inline Result Client::Put(const char *path, const Headers &headers, - size_t content_length, - ContentProvider content_provider, - const char *content_type) { - return cli_->Put(path, headers, content_length, content_provider, + const std::string &content_type) { + return cli_->Put(path, content_length, std::move(content_provider), content_type); } -inline Result Client::Put(const char *path, const Params ¶ms) { +inline Result Client::Put(const std::string &path, + ContentProviderWithoutLength content_provider, + const std::string &content_type) { + return cli_->Put(path, std::move(content_provider), content_type); +} +inline Result Client::Put(const std::string &path, const Headers &headers, + size_t content_length, + ContentProvider content_provider, + const std::string &content_type) { + return cli_->Put(path, headers, content_length, std::move(content_provider), + content_type); +} +inline Result Client::Put(const std::string &path, const Headers &headers, + ContentProviderWithoutLength content_provider, + const std::string &content_type) { + return cli_->Put(path, headers, std::move(content_provider), content_type); +} +inline Result Client::Put(const std::string &path, const Params ¶ms) { return cli_->Put(path, params); } -inline Result Client::Put(const char *path, const Headers &headers, +inline Result Client::Put(const std::string &path, const Headers &headers, const Params ¶ms) { return cli_->Put(path, headers, params); } -inline Result Client::Patch(const char *path, const std::string &body, - const char *content_type) { +inline Result Client::Put(const std::string &path, + const MultipartFormDataItems &items) { + return cli_->Put(path, items); +} +inline Result Client::Put(const std::string &path, const Headers &headers, + const MultipartFormDataItems &items) { + return cli_->Put(path, headers, items); +} +inline Result Client::Put(const std::string &path, const Headers &headers, + const MultipartFormDataItems &items, + const std::string &boundary) { + return cli_->Put(path, headers, items, boundary); +} +inline Result +Client::Put(const std::string &path, const Headers &headers, + const MultipartFormDataItems &items, + const MultipartFormDataProviderItems &provider_items) { + return cli_->Put(path, headers, items, provider_items); +} +inline Result Client::Patch(const std::string &path) { + return cli_->Patch(path); +} +inline Result Client::Patch(const std::string &path, const char 
*body, + size_t content_length, + const std::string &content_type) { + return cli_->Patch(path, body, content_length, content_type); +} +inline Result Client::Patch(const std::string &path, const Headers &headers, + const char *body, size_t content_length, + const std::string &content_type) { + return cli_->Patch(path, headers, body, content_length, content_type); +} +inline Result Client::Patch(const std::string &path, const std::string &body, + const std::string &content_type) { return cli_->Patch(path, body, content_type); } -inline Result Client::Patch(const char *path, const Headers &headers, - const std::string &body, const char *content_type) { +inline Result Client::Patch(const std::string &path, const Headers &headers, + const std::string &body, + const std::string &content_type) { return cli_->Patch(path, headers, body, content_type); } -inline Result Client::Patch(const char *path, size_t content_length, +inline Result Client::Patch(const std::string &path, size_t content_length, ContentProvider content_provider, - const char *content_type) { - return cli_->Patch(path, content_length, content_provider, content_type); -} -inline Result Client::Patch(const char *path, const Headers &headers, - size_t content_length, - ContentProvider content_provider, - const char *content_type) { - return cli_->Patch(path, headers, content_length, content_provider, + const std::string &content_type) { + return cli_->Patch(path, content_length, std::move(content_provider), content_type); } -inline Result Client::Delete(const char *path) { return cli_->Delete(path); } -inline Result Client::Delete(const char *path, const std::string &body, - const char *content_type) { - return cli_->Delete(path, body, content_type); +inline Result Client::Patch(const std::string &path, + ContentProviderWithoutLength content_provider, + const std::string &content_type) { + return cli_->Patch(path, std::move(content_provider), content_type); } -inline Result Client::Delete(const char *path, const Headers &headers) { +inline Result Client::Patch(const std::string &path, const Headers &headers, + size_t content_length, + ContentProvider content_provider, + const std::string &content_type) { + return cli_->Patch(path, headers, content_length, std::move(content_provider), + content_type); +} +inline Result Client::Patch(const std::string &path, const Headers &headers, + ContentProviderWithoutLength content_provider, + const std::string &content_type) { + return cli_->Patch(path, headers, std::move(content_provider), content_type); +} +inline Result Client::Delete(const std::string &path) { + return cli_->Delete(path); +} +inline Result Client::Delete(const std::string &path, const Headers &headers) { return cli_->Delete(path, headers); } -inline Result Client::Delete(const char *path, const Headers &headers, +inline Result Client::Delete(const std::string &path, const char *body, + size_t content_length, + const std::string &content_type) { + return cli_->Delete(path, body, content_length, content_type); +} +inline Result Client::Delete(const std::string &path, const Headers &headers, + const char *body, size_t content_length, + const std::string &content_type) { + return cli_->Delete(path, headers, body, content_length, content_type); +} +inline Result Client::Delete(const std::string &path, const std::string &body, + const std::string &content_type) { + return cli_->Delete(path, body, content_type); +} +inline Result Client::Delete(const std::string &path, const Headers &headers, const std::string &body, - const char 
*content_type) { + const std::string &content_type) { return cli_->Delete(path, headers, body, content_type); } -inline Result Client::Options(const char *path) { return cli_->Options(path); } -inline Result Client::Options(const char *path, const Headers &headers) { +inline Result Client::Options(const std::string &path) { + return cli_->Options(path); +} +inline Result Client::Options(const std::string &path, const Headers &headers) { return cli_->Options(path, headers); } -inline bool Client::send(const Request &req, Response &res) { - return cli_->send(req, res); +inline bool Client::send(Request &req, Response &res, Error &error) { + return cli_->send(req, res, error); } +inline Result Client::send(const Request &req) { return cli_->send(req); } + +inline void Client::stop() { cli_->stop(); } + +inline std::string Client::host() const { return cli_->host(); } + +inline int Client::port() const { return cli_->port(); } + inline size_t Client::is_socket_open() const { return cli_->is_socket_open(); } -inline void Client::stop() { cli_->stop(); } +inline socket_t Client::socket() const { return cli_->socket(); } + +inline void +Client::set_hostname_addr_map(std::map addr_map) { + cli_->set_hostname_addr_map(std::move(addr_map)); +} inline void Client::set_default_headers(Headers headers) { cli_->set_default_headers(std::move(headers)); } +inline void Client::set_header_writer( + std::function const &writer) { + cli_->set_header_writer(writer); +} + +inline void Client::set_address_family(int family) { + cli_->set_address_family(family); +} + inline void Client::set_tcp_nodelay(bool on) { cli_->set_tcp_nodelay(on); } + inline void Client::set_socket_options(SocketOptions socket_options) { - cli_->set_socket_options(socket_options); + cli_->set_socket_options(std::move(socket_options)); } inline void Client::set_connection_timeout(time_t sec, time_t usec) { cli_->set_connection_timeout(sec, usec); } + inline void Client::set_read_timeout(time_t sec, time_t usec) { cli_->set_read_timeout(sec, usec); } + inline void Client::set_write_timeout(time_t sec, time_t usec) { cli_->set_write_timeout(sec, usec); } -inline void Client::set_basic_auth(const char *username, const char *password) { +inline void Client::set_basic_auth(const std::string &username, + const std::string &password) { cli_->set_basic_auth(username, password); } -inline void Client::set_bearer_token_auth(const char *token) { +inline void Client::set_bearer_token_auth(const std::string &token) { cli_->set_bearer_token_auth(token); } #ifdef CPPHTTPLIB_OPENSSL_SUPPORT -inline void Client::set_digest_auth(const char *username, - const char *password) { +inline void Client::set_digest_auth(const std::string &username, + const std::string &password) { cli_->set_digest_auth(username, password); } #endif @@ -6402,27 +9363,29 @@ inline void Client::set_follow_location(bool on) { cli_->set_follow_location(on); } +inline void Client::set_url_encode(bool on) { cli_->set_url_encode(on); } + inline void Client::set_compress(bool on) { cli_->set_compress(on); } inline void Client::set_decompress(bool on) { cli_->set_decompress(on); } -inline void Client::set_interface(const char *intf) { +inline void Client::set_interface(const std::string &intf) { cli_->set_interface(intf); } -inline void Client::set_proxy(const char *host, int port) { +inline void Client::set_proxy(const std::string &host, int port) { cli_->set_proxy(host, port); } -inline void Client::set_proxy_basic_auth(const char *username, - const char *password) { +inline void 
Client::set_proxy_basic_auth(const std::string &username, + const std::string &password) { cli_->set_proxy_basic_auth(username, password); } -inline void Client::set_proxy_bearer_token_auth(const char *token) { +inline void Client::set_proxy_bearer_token_auth(const std::string &token) { cli_->set_proxy_bearer_token_auth(token); } #ifdef CPPHTTPLIB_OPENSSL_SUPPORT -inline void Client::set_proxy_digest_auth(const char *username, - const char *password) { +inline void Client::set_proxy_digest_auth(const std::string &username, + const std::string &password) { cli_->set_proxy_digest_auth(username, password); } #endif @@ -6433,23 +9396,28 @@ inline void Client::enable_server_certificate_verification(bool enabled) { } #endif -inline void Client::set_logger(Logger logger) { cli_->set_logger(logger); } +inline void Client::set_logger(Logger logger) { + cli_->set_logger(std::move(logger)); +} #ifdef CPPHTTPLIB_OPENSSL_SUPPORT -inline void Client::set_ca_cert_path(const char *ca_cert_file_path, - const char *ca_cert_dir_path) { - if (is_ssl_) { - static_cast(*cli_).set_ca_cert_path(ca_cert_file_path, - ca_cert_dir_path); - } +inline void Client::set_ca_cert_path(const std::string &ca_cert_file_path, + const std::string &ca_cert_dir_path) { + cli_->set_ca_cert_path(ca_cert_file_path, ca_cert_dir_path); } inline void Client::set_ca_cert_store(X509_STORE *ca_cert_store) { if (is_ssl_) { static_cast(*cli_).set_ca_cert_store(ca_cert_store); + } else { + cli_->set_ca_cert_store(ca_cert_store); } } +inline void Client::load_ca_cert_store(const char *ca_cert, std::size_t size) { + set_ca_cert_store(cli_->create_ca_cert_store(ca_cert, size)); +} + inline long Client::get_openssl_verify_result() const { if (is_ssl_) { return static_cast(*cli_).get_openssl_verify_result(); @@ -6467,4 +9435,8 @@ inline SSL_CTX *Client::ssl_context() const { } // namespace httplib +#if defined(_WIN32) && defined(CPPHTTPLIB_USE_POLL) +#undef poll +#endif + #endif // CPPHTTPLIB_HTTPLIB_H diff --git a/ext/ed25519-amd64-asm/sign.c b/ext/ed25519-amd64-asm/sign.c index 882ae76f3..715feb5b7 100644 --- a/ext/ed25519-amd64-asm/sign.c +++ b/ext/ed25519-amd64-asm/sign.c @@ -61,7 +61,7 @@ int crypto_sign( #endif #if 0 -void C25519::sign(const C25519::Private &myPrivate,const C25519::Public &myPublic,const void *msg,unsigned int len,void *signature) +void ECC::sign(const ECC::Private &myPrivate,const ECC::Public &myPublic,const void *msg,unsigned int len,void *signature) { sc25519 sck, scs, scsk; ge25519 ger; diff --git a/ext/hiredis-1.0.2/.gitignore b/ext/hiredis-1.0.2/.gitignore new file mode 100644 index 000000000..056959ffe --- /dev/null +++ b/ext/hiredis-1.0.2/.gitignore @@ -0,0 +1,9 @@ +/hiredis-test +/examples/hiredis-example* +/*.o +/*.so +/*.dylib +/*.a +/*.pc +*.dSYM +tags diff --git a/ext/hiredis-1.0.2/.travis.yml b/ext/hiredis-1.0.2/.travis.yml new file mode 100644 index 000000000..f9a9460ff --- /dev/null +++ b/ext/hiredis-1.0.2/.travis.yml @@ -0,0 +1,131 @@ +language: c +compiler: + - gcc + - clang + +os: + - linux + - osx + +dist: bionic + +branches: + only: + - staging + - trying + - master + - /^release\/.*$/ + +install: + - if [ "$BITS" == "64" ]; then + wget https://github.com/redis/redis/archive/6.0.6.tar.gz; + tar -xzvf 6.0.6.tar.gz; + pushd redis-6.0.6 && BUILD_TLS=yes make && export PATH=$PWD/src:$PATH && popd; + fi + +before_script: + - if [ "$TRAVIS_OS_NAME" == "osx" ]; then + curl -O https://distfiles.macports.org/MacPorts/MacPorts-2.6.2-10.13-HighSierra.pkg; + sudo installer -pkg MacPorts-2.6.2-10.13-HighSierra.pkg 
-target /; + export PATH=$PATH:/opt/local/bin && sudo port -v selfupdate; + sudo port -N install openssl redis; + fi; + +addons: + apt: + sources: + - sourceline: 'ppa:chris-lea/redis-server' + packages: + - libc6-dbg + - libc6-dev + - libc6:i386 + - libc6-dev-i386 + - libc6-dbg:i386 + - gcc-multilib + - g++-multilib + - libssl-dev + - libssl-dev:i386 + - valgrind + - redis + +env: + - BITS="32" + - BITS="64" + +script: + - EXTRA_CMAKE_OPTS="-DENABLE_EXAMPLES:BOOL=ON -DENABLE_SSL:BOOL=ON"; + if [ "$BITS" == "64" ]; then + EXTRA_CMAKE_OPTS="$EXTRA_CMAKE_OPTS -DENABLE_SSL_TESTS:BOOL=ON"; + fi; + if [ "$TRAVIS_OS_NAME" == "osx" ]; then + if [ "$BITS" == "32" ]; then + CFLAGS="-m32 -Werror"; + CXXFLAGS="-m32 -Werror"; + LDFLAGS="-m32"; + EXTRA_CMAKE_OPTS=; + else + CFLAGS="-Werror"; + CXXFLAGS="-Werror"; + fi; + else + TEST_PREFIX="valgrind --track-origins=yes --leak-check=full"; + if [ "$BITS" == "32" ]; then + CFLAGS="-m32 -Werror"; + CXXFLAGS="-m32 -Werror"; + LDFLAGS="-m32"; + EXTRA_CMAKE_OPTS=; + else + CFLAGS="-Werror"; + CXXFLAGS="-Werror"; + fi; + fi; + export CFLAGS CXXFLAGS LDFLAGS TEST_PREFIX EXTRA_CMAKE_OPTS + - make && make clean; + if [ "$TRAVIS_OS_NAME" == "osx" ]; then + if [ "$BITS" == "64" ]; then + OPENSSL_PREFIX="$(ls -d /usr/local/Cellar/openssl@1.1/*)" USE_SSL=1 make; + fi; + else + USE_SSL=1 make; + fi; + - mkdir build/ && cd build/ + - cmake .. ${EXTRA_CMAKE_OPTS} + - make VERBOSE=1 + - if [ "$BITS" == "64" ]; then + TEST_SSL=1 SKIPS_AS_FAILS=1 ctest -V; + else + SKIPS_AS_FAILS=1 ctest -V; + fi; + +jobs: + include: + # Windows MinGW cross compile on Linux + - os: linux + dist: xenial + compiler: mingw + addons: + apt: + packages: + - ninja-build + - gcc-mingw-w64-x86-64 + - g++-mingw-w64-x86-64 + script: + - mkdir build && cd build + - CC=x86_64-w64-mingw32-gcc CXX=x86_64-w64-mingw32-g++ cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_BUILD_WITH_INSTALL_RPATH=on + - ninja -v + + # Windows MSVC 2017 + - os: windows + compiler: msvc + env: + - MATRIX_EVAL="CC=cl.exe && CXX=cl.exe" + before_install: + - eval "${MATRIX_EVAL}" + install: + - choco install ninja + - choco install -y memurai-developer + script: + - mkdir build && cd build + - cmd.exe //C 'C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC\Auxiliary\Build\vcvarsall.bat' amd64 '&&' + cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release -DENABLE_EXAMPLES=ON '&&' ninja -v + - ./hiredis-test.exe diff --git a/ext/hiredis-1.0.2/CHANGELOG.md b/ext/hiredis-1.0.2/CHANGELOG.md new file mode 100644 index 000000000..2a2bc314a --- /dev/null +++ b/ext/hiredis-1.0.2/CHANGELOG.md @@ -0,0 +1,364 @@ +## [1.0.2](https://github.com/redis/hiredis/tree/v1.0.2) - (2021-10-07) + +Announcing Hiredis v1.0.2, which fixes CVE-2021-32765 but returns the SONAME to the correct value of `1.0.0`. 
+ +- [Revert SONAME bump](https://github.com/redis/hiredis/commit/d4e6f109a064690cde64765c654e679fea1d3548) + ([Michael Grunder](https://github.com/michael-grunder)) + +## [1.0.1](https://github.com/redis/hiredis/tree/v1.0.1) - (2021-10-04) + +This release erroneously bumped the SONAME, please use [1.0.2](https://github.com/redis/hiredis/tree/v1.0.2) + +Announcing Hiredis v1.0.1, a security release fixing CVE-2021-32765 + +- Fix for [CVE-2021-32765](https://github.com/redis/hiredis/security/advisories/GHSA-hfm9-39pp-55p2) + [commit](https://github.com/redis/hiredis/commit/76a7b10005c70babee357a7d0f2becf28ec7ed1e) + ([Yossi Gottlieb](https://github.com/yossigo)) + +_Thanks to [Yossi Gottlieb](https://github.com/yossigo) for the security fix and to [Microsoft Security Vulnerability Research](https://www.microsoft.com/en-us/msrc/msvr) for finding the bug._ :sparkling_heart: + +## [1.0.0](https://github.com/redis/hiredis/tree/v1.0.0) - (2020-08-03) + +Announcing Hiredis v1.0.0, which adds support for RESP3, SSL connections, allocator injection, and better Windows support! :tada: + +_A big thanks to everyone who helped with this release. The following list includes everyone who contributed at least five lines, sorted by lines contributed._ :sparkling_heart: + +[Michael Grunder](https://github.com/michael-grunder), [Yossi Gottlieb](https://github.com/yossigo), +[Mark Nunberg](https://github.com/mnunberg), [Marcus Geelnard](https://github.com/mbitsnbites), +[Justin Brewer](https://github.com/justinbrewer), [Valentino Geron](https://github.com/valentinogeron), +[Minun Dragonation](https://github.com/dragonation), [Omri Steiner](https://github.com/OmriSteiner), +[Sangmoon Yi](https://github.com/jman-krafton), [Jinjiazh](https://github.com/jinjiazhang), +[Odin Hultgren Van Der Horst](https://github.com/Miniwoffer), [Muhammad Zahalqa](https://github.com/tryfinally), +[Nick Rivera](https://github.com/heronr), [Qi Yang](https://github.com/movebean), +[kevin1018](https://github.com/kevin1018) + +[Full Changelog](https://github.com/redis/hiredis/compare/v0.14.1...v1.0.0) + +**BREAKING CHANGES**: + +* `redisOptions` now has two timeout fields. One for connecting, and one for commands. If you're presently using `options->timeout` you will need to change it to use `options->connect_timeout`. (See [example](https://github.com/redis/hiredis/commit/38b5ae543f5c99eb4ccabbe277770fc6bc81226f#diff-86ba39d37aa829c8c82624cce4f049fbL36)) + +* Bulk and multi-bulk lengths less than -1 or greater than `LLONG_MAX` are now protocol errors. This is consistent + with the RESP specification. On 32-bit platforms, the upper bound is lowered to `SIZE_MAX`. + +* `redisReplyObjectFunctions.createArray` now takes `size_t` for its length parameter. 
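For the connect/command timeout split called out in the breaking changes above, a minimal migration sketch (server address and timeout values are placeholders):

    #include <hiredis/hiredis.h>

    redisContext *connect_with_timeouts(void) {
      redisOptions options = {0};
      REDIS_OPTIONS_SET_TCP(&options, "127.0.0.1", 6379);

      struct timeval connect_tv = {1, 500000};  /* previously assigned to options->timeout */
      options.connect_timeout = &connect_tv;    /* applies to connection establishment only */

      return redisConnectWithOptions(&options);
    }

A separate `options.command_timeout` can be set in the same way if per-command timeouts are also needed.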
+ +**New features:** +- Support for RESP3 + [\#697](https://github.com/redis/hiredis/pull/697), + [\#805](https://github.com/redis/hiredis/pull/805), + [\#819](https://github.com/redis/hiredis/pull/819), + [\#841](https://github.com/redis/hiredis/pull/841) + ([Yossi Gottlieb](https://github.com/yossigo), [Michael Grunder](https://github.com/michael-grunder)) +- Support for SSL connections + [\#645](https://github.com/redis/hiredis/pull/645), + [\#699](https://github.com/redis/hiredis/pull/699), + [\#702](https://github.com/redis/hiredis/pull/702), + [\#708](https://github.com/redis/hiredis/pull/708), + [\#711](https://github.com/redis/hiredis/pull/711), + [\#821](https://github.com/redis/hiredis/pull/821), + [more](https://github.com/redis/hiredis/pulls?q=is%3Apr+is%3Amerged+SSL) + ([Mark Nunberg](https://github.com/mnunberg), [Yossi Gottlieb](https://github.com/yossigo)) +- Run-time allocator injection + [\#800](https://github.com/redis/hiredis/pull/800) + ([Michael Grunder](https://github.com/michael-grunder)) +- Improved Windows support (including MinGW and Windows CI) + [\#652](https://github.com/redis/hiredis/pull/652), + [\#663](https://github.com/redis/hiredis/pull/663) + ([Marcus Geelnard](https://www.bitsnbites.eu/author/m/)) +- Adds support for distinct connect and command timeouts + [\#839](https://github.com/redis/hiredis/pull/839), + [\#829](https://github.com/redis/hiredis/pull/829) + ([Valentino Geron](https://github.com/valentinogeron)) +- Add generic pointer and destructor to `redisContext` that users can use for context. + [\#855](https://github.com/redis/hiredis/pull/855) + ([Michael Grunder](https://github.com/michael-grunder)) + +**Closed issues (that involved code changes):** + +- Makefile does not install TLS libraries [\#809](https://github.com/redis/hiredis/issues/809) +- redisConnectWithOptions should not set command timeout [\#722](https://github.com/redis/hiredis/issues/722), [\#829](https://github.com/redis/hiredis/pull/829) ([valentinogeron](https://github.com/valentinogeron)) +- Fix integer overflow in `sdsrange` [\#827](https://github.com/redis/hiredis/issues/827) +- INFO & CLUSTER commands failed when using RESP3 [\#802](https://github.com/redis/hiredis/issues/802) +- Windows compatibility patches [\#687](https://github.com/redis/hiredis/issues/687), [\#838](https://github.com/redis/hiredis/issues/838), [\#842](https://github.com/redis/hiredis/issues/842) +- RESP3 PUSH messages incorrectly use pending callback [\#825](https://github.com/redis/hiredis/issues/825) +- Asynchronous PSUBSCRIBE command fails when using RESP3 [\#815](https://github.com/redis/hiredis/issues/815) +- New SSL API [\#804](https://github.com/redis/hiredis/issues/804), [\#813](https://github.com/redis/hiredis/issues/813) +- Hard-coded limit of nested reply depth [\#794](https://github.com/redis/hiredis/issues/794) +- Fix TCP_NODELAY in Windows/OSX [\#679](https://github.com/redis/hiredis/issues/679), [\#690](https://github.com/redis/hiredis/issues/690), [\#779](https://github.com/redis/hiredis/issues/779), [\#785](https://github.com/redis/hiredis/issues/785), +- Added timers to libev adapter. 
[\#778](https://github.com/redis/hiredis/issues/778), [\#795](https://github.com/redis/hiredis/pull/795) +- Initialization discards const qualifier [\#777](https://github.com/redis/hiredis/issues/777) +- \[BUG\]\[MinGW64\] Error setting socket timeout [\#775](https://github.com/redis/hiredis/issues/775) +- undefined reference to hi_malloc [\#769](https://github.com/redis/hiredis/issues/769) +- hiredis pkg-config file incorrectly ignores multiarch libdir spec'n [\#767](https://github.com/redis/hiredis/issues/767) +- Don't use -G to build shared object on Solaris [\#757](https://github.com/redis/hiredis/issues/757) +- error when make USE\_SSL=1 [\#748](https://github.com/redis/hiredis/issues/748) +- Allow to change SSL Mode [\#646](https://github.com/redis/hiredis/issues/646) +- hiredis/adapters/libevent.h memleak [\#618](https://github.com/redis/hiredis/issues/618) +- redisLibuvPoll crash when server closes the connetion [\#545](https://github.com/redis/hiredis/issues/545) +- about redisAsyncDisconnect question [\#518](https://github.com/redis/hiredis/issues/518) +- hiredis adapters libuv error for help [\#508](https://github.com/redis/hiredis/issues/508) +- API/ABI changes analysis [\#506](https://github.com/redis/hiredis/issues/506) +- Memory leak patch in Redis [\#502](https://github.com/redis/hiredis/issues/502) +- Remove the depth limitation [\#421](https://github.com/redis/hiredis/issues/421) + +**Merged pull requests:** + +- Move SSL management to a distinct private pointer [\#855](https://github.com/redis/hiredis/pull/855) ([michael-grunder](https://github.com/michael-grunder)) +- Move include to sockcompat.h to maintain style [\#850](https://github.com/redis/hiredis/pull/850) ([michael-grunder](https://github.com/michael-grunder)) +- Remove erroneous tag and add license to push example [\#849](https://github.com/redis/hiredis/pull/849) ([michael-grunder](https://github.com/michael-grunder)) +- fix windows compiling with mingw [\#848](https://github.com/redis/hiredis/pull/848) ([rmalizia44](https://github.com/rmalizia44)) +- Some Windows quality of life improvements. [\#846](https://github.com/redis/hiredis/pull/846) ([michael-grunder](https://github.com/michael-grunder)) +- Use \_WIN32 define instead of WIN32 [\#845](https://github.com/redis/hiredis/pull/845) ([michael-grunder](https://github.com/michael-grunder)) +- Non Linux CI fixes [\#844](https://github.com/redis/hiredis/pull/844) ([michael-grunder](https://github.com/michael-grunder)) +- Resp3 oob push support [\#841](https://github.com/redis/hiredis/pull/841) ([michael-grunder](https://github.com/michael-grunder)) +- fix \#785: defer TCP\_NODELAY in async tcp connections [\#836](https://github.com/redis/hiredis/pull/836) ([OmriSteiner](https://github.com/OmriSteiner)) +- sdsrange overflow fix [\#830](https://github.com/redis/hiredis/pull/830) ([michael-grunder](https://github.com/michael-grunder)) +- Use explicit pointer casting for c++ compatibility [\#826](https://github.com/redis/hiredis/pull/826) ([aureus1](https://github.com/aureus1)) +- Document allocator injection and completeness fix in test.c [\#824](https://github.com/redis/hiredis/pull/824) ([michael-grunder](https://github.com/michael-grunder)) +- Use unique names for allocator struct members [\#823](https://github.com/redis/hiredis/pull/823) ([michael-grunder](https://github.com/michael-grunder)) +- New SSL API to replace redisSecureConnection\(\). 
[\#821](https://github.com/redis/hiredis/pull/821) ([yossigo](https://github.com/yossigo)) +- Add logic to handle RESP3 push messages [\#819](https://github.com/redis/hiredis/pull/819) ([michael-grunder](https://github.com/michael-grunder)) +- Use standrad isxdigit instead of custom helper function. [\#814](https://github.com/redis/hiredis/pull/814) ([tryfinally](https://github.com/tryfinally)) +- Fix missing SSL build/install options. [\#812](https://github.com/redis/hiredis/pull/812) ([yossigo](https://github.com/yossigo)) +- Add link to ABI tracker [\#808](https://github.com/redis/hiredis/pull/808) ([michael-grunder](https://github.com/michael-grunder)) +- Resp3 verbatim string support [\#805](https://github.com/redis/hiredis/pull/805) ([michael-grunder](https://github.com/michael-grunder)) +- Allow users to replace allocator and handle OOM everywhere. [\#800](https://github.com/redis/hiredis/pull/800) ([michael-grunder](https://github.com/michael-grunder)) +- Remove nested depth limitation. [\#797](https://github.com/redis/hiredis/pull/797) ([michael-grunder](https://github.com/michael-grunder)) +- Attempt to fix compilation on Solaris [\#796](https://github.com/redis/hiredis/pull/796) ([michael-grunder](https://github.com/michael-grunder)) +- Support timeouts in libev adapater [\#795](https://github.com/redis/hiredis/pull/795) ([michael-grunder](https://github.com/michael-grunder)) +- Fix pkgconfig when installing to a custom lib dir [\#793](https://github.com/redis/hiredis/pull/793) ([michael-grunder](https://github.com/michael-grunder)) +- Fix USE\_SSL=1 make/cmake on OSX and CMake tests [\#789](https://github.com/redis/hiredis/pull/789) ([michael-grunder](https://github.com/michael-grunder)) +- Use correct libuv call on Windows [\#784](https://github.com/redis/hiredis/pull/784) ([michael-grunder](https://github.com/michael-grunder)) +- Added CMake package config and fixed hiredis\_ssl on Windows [\#783](https://github.com/redis/hiredis/pull/783) ([michael-grunder](https://github.com/michael-grunder)) +- CMake: Set hiredis\_ssl shared object version. [\#780](https://github.com/redis/hiredis/pull/780) ([yossigo](https://github.com/yossigo)) +- Win32 tests and timeout fix [\#776](https://github.com/redis/hiredis/pull/776) ([michael-grunder](https://github.com/michael-grunder)) +- Provides an optional cleanup callback for async data. [\#768](https://github.com/redis/hiredis/pull/768) ([heronr](https://github.com/heronr)) +- Housekeeping fixes [\#764](https://github.com/redis/hiredis/pull/764) ([michael-grunder](https://github.com/michael-grunder)) +- install alloc.h [\#756](https://github.com/redis/hiredis/pull/756) ([ch1aki](https://github.com/ch1aki)) +- fix spelling mistakes [\#746](https://github.com/redis/hiredis/pull/746) ([ShooterIT](https://github.com/ShooterIT)) +- Free the reply in redisGetReply when passed NULL [\#741](https://github.com/redis/hiredis/pull/741) ([michael-grunder](https://github.com/michael-grunder)) +- Fix dead code in sslLogCallback relating to should\_log variable. [\#737](https://github.com/redis/hiredis/pull/737) ([natoscott](https://github.com/natoscott)) +- Fix typo in dict.c. [\#731](https://github.com/redis/hiredis/pull/731) ([Kevin-Xi](https://github.com/Kevin-Xi)) +- Adding an option to DISABLE\_TESTS [\#727](https://github.com/redis/hiredis/pull/727) ([pbotros](https://github.com/pbotros)) +- Update README with SSL support. 
[\#720](https://github.com/redis/hiredis/pull/720) ([yossigo](https://github.com/yossigo)) +- Fixes leaks in unit tests [\#715](https://github.com/redis/hiredis/pull/715) ([michael-grunder](https://github.com/michael-grunder)) +- SSL Tests [\#711](https://github.com/redis/hiredis/pull/711) ([yossigo](https://github.com/yossigo)) +- SSL Reorganization [\#708](https://github.com/redis/hiredis/pull/708) ([yossigo](https://github.com/yossigo)) +- Fix MSVC build. [\#706](https://github.com/redis/hiredis/pull/706) ([yossigo](https://github.com/yossigo)) +- SSL: Properly report SSL\_connect\(\) errors. [\#702](https://github.com/redis/hiredis/pull/702) ([yossigo](https://github.com/yossigo)) +- Silent SSL trace to stdout by default. [\#699](https://github.com/redis/hiredis/pull/699) ([yossigo](https://github.com/yossigo)) +- Port RESP3 support from Redis. [\#697](https://github.com/redis/hiredis/pull/697) ([yossigo](https://github.com/yossigo)) +- Removed whitespace before newline [\#691](https://github.com/redis/hiredis/pull/691) ([Miniwoffer](https://github.com/Miniwoffer)) +- Add install adapters header files [\#688](https://github.com/redis/hiredis/pull/688) ([kevin1018](https://github.com/kevin1018)) +- Remove unnecessary null check before free [\#684](https://github.com/redis/hiredis/pull/684) ([qlyoung](https://github.com/qlyoung)) +- redisReaderGetReply leak memory [\#671](https://github.com/redis/hiredis/pull/671) ([movebean](https://github.com/movebean)) +- fix timeout code in windows [\#670](https://github.com/redis/hiredis/pull/670) ([jman-krafton](https://github.com/jman-krafton)) +- test: fix errstr matching for musl libc [\#665](https://github.com/redis/hiredis/pull/665) ([ghost](https://github.com/ghost)) +- Windows: MinGW fixes and Windows Travis builders [\#663](https://github.com/redis/hiredis/pull/663) ([mbitsnbites](https://github.com/mbitsnbites)) +- The setsockopt and getsockopt API diffs from BSD socket and WSA one [\#662](https://github.com/redis/hiredis/pull/662) ([dragonation](https://github.com/dragonation)) +- Fix Compile Error On Windows \(Visual Studio\) [\#658](https://github.com/redis/hiredis/pull/658) ([jinjiazhang](https://github.com/jinjiazhang)) +- Fix NXDOMAIN test case [\#653](https://github.com/redis/hiredis/pull/653) ([michael-grunder](https://github.com/michael-grunder)) +- Add MinGW support [\#652](https://github.com/redis/hiredis/pull/652) ([mbitsnbites](https://github.com/mbitsnbites)) +- SSL Support [\#645](https://github.com/redis/hiredis/pull/645) ([mnunberg](https://github.com/mnunberg)) +- Fix Invalid argument after redisAsyncConnectUnix [\#644](https://github.com/redis/hiredis/pull/644) ([codehz](https://github.com/codehz)) +- Makefile: use predefined AR [\#632](https://github.com/redis/hiredis/pull/632) ([Mic92](https://github.com/Mic92)) +- FreeBSD build fix [\#628](https://github.com/redis/hiredis/pull/628) ([devnexen](https://github.com/devnexen)) +- Fix errors not propagating properly with libuv.h. 
[\#624](https://github.com/redis/hiredis/pull/624) ([yossigo](https://github.com/yossigo)) +- Update README.md [\#621](https://github.com/redis/hiredis/pull/621) ([Crunsher](https://github.com/Crunsher)) +- Fix redisBufferRead documentation [\#620](https://github.com/redis/hiredis/pull/620) ([hacst](https://github.com/hacst)) +- Add CPPFLAGS to REAL\_CFLAGS [\#614](https://github.com/redis/hiredis/pull/614) ([thomaslee](https://github.com/thomaslee)) +- Update createArray to take size\_t [\#597](https://github.com/redis/hiredis/pull/597) ([justinbrewer](https://github.com/justinbrewer)) +- fix common realloc mistake and add null check more [\#580](https://github.com/redis/hiredis/pull/580) ([charsyam](https://github.com/charsyam)) +- Proper error reporting for connect failures [\#578](https://github.com/redis/hiredis/pull/578) ([mnunberg](https://github.com/mnunberg)) + +\* *This Changelog was automatically generated by [github_changelog_generator](https://github.com/github-changelog-generator/github-changelog-generator)* + +## [1.0.0-rc1](https://github.com/redis/hiredis/tree/v1.0.0-rc1) - (2020-07-29) + +_Note: There were no changes to code between v1.0.0-rc1 and v1.0.0 so see v1.0.0 for changelog_ + +### 0.14.1 (2020-03-13) + +* Adds safe allocation wrappers (CVE-2020-7105, #747, #752) (Michael Grunder) + +### 0.14.0 (2018-09-25) +**BREAKING CHANGES**: + +* Change `redisReply.len` to `size_t`, as it denotes the the size of a string + + User code should compare this to `size_t` values as well. + If it was used to compare to other values, casting might be necessary or can be removed, if casting was applied before. + +* Make string2ll static to fix conflict with Redis (Tom Lee [c3188b]) +* Use -dynamiclib instead of -shared for OSX (Ryan Schmidt [a65537]) +* Use string2ll from Redis w/added tests (Michael Grunder [7bef04, 60f622]) +* Makefile - OSX compilation fixes (Ryan Schmidt [881fcb, 0e9af8]) +* Remove redundant NULL checks (Justin Brewer [54acc8, 58e6b8]) +* Fix bulk and multi-bulk length truncation (Justin Brewer [109197]) +* Fix SIGSEGV in OpenBSD by checking for NULL before calling freeaddrinfo (Justin Brewer [546d94]) +* Several POSIX compatibility fixes (Justin Brewer [bbeab8, 49bbaa, d1c1b6]) +* Makefile - Compatibility fixes (Dimitri Vorobiev [3238cf, 12a9d1]) +* Makefile - Fix make install on FreeBSD (Zach Shipko [a2ef2b]) +* Makefile - don't assume $(INSTALL) is cp (Igor Gnatenko [725a96]) +* Separate side-effect causing function from assert and small cleanup (amallia [b46413, 3c3234]) +* Don't send negative values to `__redisAsyncCommand` (Frederik Deweerdt [706129]) +* Fix leak if setsockopt fails (Frederik Deweerdt [e21c9c]) +* Fix libevent leak (zfz [515228]) +* Clean up GCC warning (Ichito Nagata [2ec774]) +* Keep track of errno in `__redisSetErrorFromErrno()` as snprintf may use it (Jin Qing [25cd88]) +* Solaris compilation fix (Donald Whyte [41b07d]) +* Reorder linker arguments when building examples (Tustfarm-heart [06eedd]) +* Keep track of subscriptions in case of rapid subscribe/unsubscribe (Hyungjin Kim [073dc8, be76c5, d46999]) +* libuv use after free fix (Paul Scott [cbb956]) +* Properly close socket fd on reconnect attempt (WSL [64d1ec]) +* Skip valgrind in OSX tests (Jan-Erik Rediger [9deb78]) +* Various updates for Travis testing OSX (Ted Nyman [fa3774, 16a459, bc0ea5]) +* Update libevent (Chris Xin [386802]) +* Change sds.h for building in C++ projects (Ali Volkan ATLI [f5b32e]) +* Use proper format specifier in redisFormatSdsCommandArgv (Paulino Huerta, 
Jan-Erik Rediger [360a06, 8655a6]) +* Better handling of NULL reply in example code (Jan-Erik Rediger [1b8ed3]) +* Prevent overflow when formatting an error (Jan-Erik Rediger [0335cb]) +* Compatibility fix for strerror_r (Tom Lee [bb1747]) +* Properly detect integer parse/overflow errors (Justin Brewer [93421f]) +* Adds CI for Windows and cygwin fixes (owent, [6c53d6, 6c3e40]) +* Catch a buffer overflow when formatting the error message +* Import latest upstream sds. This breaks applications that are linked against the old hiredis v0.13 +* Fix warnings, when compiled with -Wshadow +* Make hiredis compile in Cygwin on Windows, now CI-tested +* Bulk and multi-bulk lengths less than -1 or greater than `LLONG_MAX` are now + protocol errors. This is consistent with the RESP specification. On 32-bit + platforms, the upper bound is lowered to `SIZE_MAX`. + +* Remove backwards compatibility macro's + +This removes the following old function aliases, use the new name now: + +| Old | New | +| --------------------------- | ---------------------- | +| redisReplyReaderCreate | redisReaderCreate | +| redisReplyReaderCreate | redisReaderCreate | +| redisReplyReaderFree | redisReaderFree | +| redisReplyReaderFeed | redisReaderFeed | +| redisReplyReaderGetReply | redisReaderGetReply | +| redisReplyReaderSetPrivdata | redisReaderSetPrivdata | +| redisReplyReaderGetObject | redisReaderGetObject | +| redisReplyReaderGetError | redisReaderGetError | + +* The `DEBUG` variable in the Makefile was renamed to `DEBUG_FLAGS` + +Previously it broke some builds for people that had `DEBUG` set to some arbitrary value, +due to debugging other software. +By renaming we avoid unintentional name clashes. + +Simply rename `DEBUG` to `DEBUG_FLAGS` in your environment to make it working again. + +### 0.13.3 (2015-09-16) + +* Revert "Clear `REDIS_CONNECTED` flag when connection is closed". +* Make tests pass on FreeBSD (Thanks, Giacomo Olgeni) + + +If the `REDIS_CONNECTED` flag is cleared, +the async onDisconnect callback function will never be called. +This causes problems as the disconnect is never reported back to the user. + +### 0.13.2 (2015-08-25) + +* Prevent crash on pending replies in async code (Thanks, @switch-st) +* Clear `REDIS_CONNECTED` flag when connection is closed (Thanks, Jerry Jacobs) +* Add MacOS X addapter (Thanks, @dizzus) +* Add Qt adapter (Thanks, Pietro Cerutti) +* Add Ivykis adapter (Thanks, Gergely Nagy) + +All adapters are provided as is and are only tested where possible. + +### 0.13.1 (2015-05-03) + +This is a bug fix release. +The new `reconnect` method introduced new struct members, which clashed with pre-defined names in pre-C99 code. +Another commit forced C99 compilation just to make it work, but of course this is not desirable for outside projects. +Other non-C99 code can now use hiredis as usual again. +Sorry for the inconvenience. + +* Fix memory leak in async reply handling (Salvatore Sanfilippo) +* Rename struct member to avoid name clash with pre-c99 code (Alex Balashov, ncopa) + +### 0.13.0 (2015-04-16) + +This release adds a minimal Windows compatibility layer. 
+The parser, standalone since v0.12.0, can now be compiled on Windows +(and thus used in other client libraries as well) + +* Windows compatibility layer for parser code (tzickel) +* Properly escape data printed to PKGCONF file (Dan Skorupski) +* Fix tests when assert() undefined (Keith Bennett, Matt Stancliff) +* Implement a reconnect method for the client context, this changes the structure of `redisContext` (Aaron Bedra) + +### 0.12.1 (2015-01-26) + +* Fix `make install`: DESTDIR support, install all required files, install PKGCONF in proper location +* Fix `make test` as 32 bit build on 64 bit platform + +### 0.12.0 (2015-01-22) + +* Add optional KeepAlive support + +* Try again on EINTR errors + +* Add libuv adapter + +* Add IPv6 support + +* Remove possibility of multiple close on same fd + +* Add ability to bind source address on connect + +* Add redisConnectFd() and redisFreeKeepFd() + +* Fix getaddrinfo() memory leak + +* Free string if it is unused (fixes memory leak) + +* Improve redisAppendCommandArgv performance 2.5x + +* Add support for SO_REUSEADDR + +* Fix redisvFormatCommand format parsing + +* Add GLib 2.0 adapter + +* Refactor reading code into read.c + +* Fix errno error buffers to not clobber errors + +* Generate pkgconf during build + +* Silence _BSD_SOURCE warnings + +* Improve digit counting for multibulk creation + + +### 0.11.0 + +* Increase the maximum multi-bulk reply depth to 7. + +* Increase the read buffer size from 2k to 16k. + +* Use poll(2) instead of select(2) to support large fds (>= 1024). + +### 0.10.1 + +* Makefile overhaul. Important to check out if you override one or more + variables using environment variables or via arguments to the "make" tool. + +* Issue #45: Fix potential memory leak for a multi bulk reply with 0 elements + being created by the default reply object functions. + +* Issue #43: Don't crash in an asynchronous context when Redis returns an error + reply after the connection has been made (this happens when the maximum + number of connections is reached). + +### 0.10.0 + +* See commit log. 
diff --git a/ext/hiredis-1.0.2/CMakeLists.txt b/ext/hiredis-1.0.2/CMakeLists.txt new file mode 100644 index 000000000..f86c9b70b --- /dev/null +++ b/ext/hiredis-1.0.2/CMakeLists.txt @@ -0,0 +1,165 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.4.0) +INCLUDE(GNUInstallDirs) +PROJECT(hiredis) + +OPTION(ENABLE_SSL "Build hiredis_ssl for SSL support" OFF) +OPTION(DISABLE_TESTS "If tests should be compiled or not" OFF) +OPTION(ENABLE_SSL_TESTS, "Should we test SSL connections" OFF) + +MACRO(getVersionBit name) + SET(VERSION_REGEX "^#define ${name} (.+)$") + FILE(STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/hiredis.h" + VERSION_BIT REGEX ${VERSION_REGEX}) + STRING(REGEX REPLACE ${VERSION_REGEX} "\\1" ${name} "${VERSION_BIT}") +ENDMACRO(getVersionBit) + +getVersionBit(HIREDIS_MAJOR) +getVersionBit(HIREDIS_MINOR) +getVersionBit(HIREDIS_PATCH) +getVersionBit(HIREDIS_SONAME) +SET(VERSION "${HIREDIS_MAJOR}.${HIREDIS_MINOR}.${HIREDIS_PATCH}") +MESSAGE("Detected version: ${VERSION}") + +PROJECT(hiredis VERSION "${VERSION}") + +SET(ENABLE_EXAMPLES OFF CACHE BOOL "Enable building hiredis examples") + +SET(hiredis_sources + alloc.c + async.c + dict.c + hiredis.c + net.c + read.c + sds.c + sockcompat.c) + +SET(hiredis_sources ${hiredis_sources}) + +IF(WIN32) + ADD_COMPILE_DEFINITIONS(_CRT_SECURE_NO_WARNINGS WIN32_LEAN_AND_MEAN) +ENDIF() + +ADD_LIBRARY(hiredis SHARED ${hiredis_sources}) + +SET_TARGET_PROPERTIES(hiredis + PROPERTIES WINDOWS_EXPORT_ALL_SYMBOLS TRUE + VERSION "${HIREDIS_SONAME}") +IF(WIN32 OR MINGW) + TARGET_LINK_LIBRARIES(hiredis PRIVATE ws2_32) +ENDIF() + +TARGET_INCLUDE_DIRECTORIES(hiredis PUBLIC $ $) + +CONFIGURE_FILE(hiredis.pc.in hiredis.pc @ONLY) + +INSTALL(TARGETS hiredis + EXPORT hiredis-targets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) + +INSTALL(FILES hiredis.h read.h sds.h async.h alloc.h + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/hiredis) + +INSTALL(DIRECTORY adapters + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/hiredis) + +INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/hiredis.pc + DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig) + +export(EXPORT hiredis-targets + FILE "${CMAKE_CURRENT_BINARY_DIR}/hiredis-targets.cmake" + NAMESPACE hiredis::) + +SET(CMAKE_CONF_INSTALL_DIR share/hiredis) +SET(INCLUDE_INSTALL_DIR include) +include(CMakePackageConfigHelpers) +configure_package_config_file(hiredis-config.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/hiredis-config.cmake + INSTALL_DESTINATION ${CMAKE_CONF_INSTALL_DIR} + PATH_VARS INCLUDE_INSTALL_DIR) + +INSTALL(EXPORT hiredis-targets + FILE hiredis-targets.cmake + NAMESPACE hiredis:: + DESTINATION ${CMAKE_CONF_INSTALL_DIR}) + +INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/hiredis-config.cmake + DESTINATION ${CMAKE_CONF_INSTALL_DIR}) + + +IF(ENABLE_SSL) + IF (NOT OPENSSL_ROOT_DIR) + IF (APPLE) + SET(OPENSSL_ROOT_DIR "/usr/local/opt/openssl") + ENDIF() + ENDIF() + FIND_PACKAGE(OpenSSL REQUIRED) + SET(hiredis_ssl_sources + ssl.c) + ADD_LIBRARY(hiredis_ssl SHARED + ${hiredis_ssl_sources}) + + IF (APPLE) + SET_PROPERTY(TARGET hiredis_ssl PROPERTY LINK_FLAGS "-Wl,-undefined -Wl,dynamic_lookup") + ENDIF() + + SET_TARGET_PROPERTIES(hiredis_ssl + PROPERTIES + WINDOWS_EXPORT_ALL_SYMBOLS TRUE + VERSION "${HIREDIS_SONAME}") + + TARGET_INCLUDE_DIRECTORIES(hiredis_ssl PRIVATE "${OPENSSL_INCLUDE_DIR}") + TARGET_LINK_LIBRARIES(hiredis_ssl PRIVATE ${OPENSSL_LIBRARIES}) + IF (WIN32 OR MINGW) + TARGET_LINK_LIBRARIES(hiredis_ssl PRIVATE hiredis) + ENDIF() + CONFIGURE_FILE(hiredis_ssl.pc.in 
hiredis_ssl.pc @ONLY) + + INSTALL(TARGETS hiredis_ssl + EXPORT hiredis_ssl-targets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) + + INSTALL(FILES hiredis_ssl.h + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/hiredis) + + INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/hiredis_ssl.pc + DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig) + + export(EXPORT hiredis_ssl-targets + FILE "${CMAKE_CURRENT_BINARY_DIR}/hiredis_ssl-targets.cmake" + NAMESPACE hiredis::) + + SET(CMAKE_CONF_INSTALL_DIR share/hiredis_ssl) + configure_package_config_file(hiredis_ssl-config.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/hiredis_ssl-config.cmake + INSTALL_DESTINATION ${CMAKE_CONF_INSTALL_DIR} + PATH_VARS INCLUDE_INSTALL_DIR) + + INSTALL(EXPORT hiredis_ssl-targets + FILE hiredis_ssl-targets.cmake + NAMESPACE hiredis:: + DESTINATION ${CMAKE_CONF_INSTALL_DIR}) + + INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/hiredis_ssl-config.cmake + DESTINATION ${CMAKE_CONF_INSTALL_DIR}) +ENDIF() + +IF(NOT DISABLE_TESTS) + ENABLE_TESTING() + ADD_EXECUTABLE(hiredis-test test.c) + IF(ENABLE_SSL_TESTS) + ADD_DEFINITIONS(-DHIREDIS_TEST_SSL=1) + TARGET_LINK_LIBRARIES(hiredis-test hiredis hiredis_ssl) + ELSE() + TARGET_LINK_LIBRARIES(hiredis-test hiredis) + ENDIF() + ADD_TEST(NAME hiredis-test + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test.sh) +ENDIF() + +# Add examples +IF(ENABLE_EXAMPLES) + ADD_SUBDIRECTORY(examples) +ENDIF(ENABLE_EXAMPLES) diff --git a/ext/hiredis-1.0.2/COPYING b/ext/hiredis-1.0.2/COPYING new file mode 100644 index 000000000..a5fc97395 --- /dev/null +++ b/ext/hiredis-1.0.2/COPYING @@ -0,0 +1,29 @@ +Copyright (c) 2009-2011, Salvatore Sanfilippo +Copyright (c) 2010-2011, Pieter Noordhuis + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of Redis nor the names of its contributors may be used + to endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/ext/hiredis-1.0.2/Makefile b/ext/hiredis-1.0.2/Makefile new file mode 100644 index 000000000..a8d37a2eb --- /dev/null +++ b/ext/hiredis-1.0.2/Makefile @@ -0,0 +1,308 @@ +# Hiredis Makefile +# Copyright (C) 2010-2011 Salvatore Sanfilippo +# Copyright (C) 2010-2011 Pieter Noordhuis +# This file is released under the BSD license, see the COPYING file + +OBJ=alloc.o net.o hiredis.o sds.o async.o read.o sockcompat.o +SSL_OBJ=ssl.o +EXAMPLES=hiredis-example hiredis-example-libevent hiredis-example-libev hiredis-example-glib hiredis-example-push +ifeq ($(USE_SSL),1) +EXAMPLES+=hiredis-example-ssl hiredis-example-libevent-ssl +endif +TESTS=hiredis-test +LIBNAME=libhiredis +PKGCONFNAME=hiredis.pc +SSL_LIBNAME=libhiredis_ssl +SSL_PKGCONFNAME=hiredis_ssl.pc + +HIREDIS_MAJOR=$(shell grep HIREDIS_MAJOR hiredis.h | awk '{print $$3}') +HIREDIS_MINOR=$(shell grep HIREDIS_MINOR hiredis.h | awk '{print $$3}') +HIREDIS_PATCH=$(shell grep HIREDIS_PATCH hiredis.h | awk '{print $$3}') +HIREDIS_SONAME=$(shell grep HIREDIS_SONAME hiredis.h | awk '{print $$3}') + +# Installation related variables and target +PREFIX?=/usr/local +INCLUDE_PATH?=include/hiredis +LIBRARY_PATH?=lib +PKGCONF_PATH?=pkgconfig +INSTALL_INCLUDE_PATH= $(DESTDIR)$(PREFIX)/$(INCLUDE_PATH) +INSTALL_LIBRARY_PATH= $(DESTDIR)$(PREFIX)/$(LIBRARY_PATH) +INSTALL_PKGCONF_PATH= $(INSTALL_LIBRARY_PATH)/$(PKGCONF_PATH) + +# redis-server configuration used for testing +REDIS_PORT=56379 +REDIS_SERVER=redis-server +define REDIS_TEST_CONFIG + daemonize yes + pidfile /tmp/hiredis-test-redis.pid + port $(REDIS_PORT) + bind 127.0.0.1 + unixsocket /tmp/hiredis-test-redis.sock +endef +export REDIS_TEST_CONFIG + +# Fallback to gcc when $CC is not in $PATH. +CC:=$(shell sh -c 'type $${CC%% *} >/dev/null 2>/dev/null && echo $(CC) || echo gcc') +CXX:=$(shell sh -c 'type $${CXX%% *} >/dev/null 2>/dev/null && echo $(CXX) || echo g++') +OPTIMIZATION?=-O3 +WARNINGS=-Wall -W -Wstrict-prototypes -Wwrite-strings -Wno-missing-field-initializers +DEBUG_FLAGS?= -g -ggdb +REAL_CFLAGS=$(OPTIMIZATION) -fPIC $(CPPFLAGS) $(CFLAGS) $(WARNINGS) $(DEBUG_FLAGS) +REAL_LDFLAGS=$(LDFLAGS) + +DYLIBSUFFIX=so +STLIBSUFFIX=a +DYLIB_MINOR_NAME=$(LIBNAME).$(DYLIBSUFFIX).$(HIREDIS_SONAME) +DYLIB_MAJOR_NAME=$(LIBNAME).$(DYLIBSUFFIX).$(HIREDIS_MAJOR) +DYLIBNAME=$(LIBNAME).$(DYLIBSUFFIX) + +DYLIB_MAKE_CMD=$(CC) -shared -Wl,-soname,$(DYLIB_MINOR_NAME) +STLIBNAME=$(LIBNAME).$(STLIBSUFFIX) +STLIB_MAKE_CMD=$(AR) rcs + +SSL_DYLIB_MINOR_NAME=$(SSL_LIBNAME).$(DYLIBSUFFIX).$(HIREDIS_SONAME) +SSL_DYLIB_MAJOR_NAME=$(SSL_LIBNAME).$(DYLIBSUFFIX).$(HIREDIS_MAJOR) +SSL_DYLIBNAME=$(SSL_LIBNAME).$(DYLIBSUFFIX) +SSL_STLIBNAME=$(SSL_LIBNAME).$(STLIBSUFFIX) +SSL_DYLIB_MAKE_CMD=$(CC) -shared -Wl,-soname,$(SSL_DYLIB_MINOR_NAME) + +# Platform-specific overrides +uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not') + +USE_SSL?=0 + +# This is required for test.c only +ifeq ($(USE_SSL),1) + CFLAGS+=-DHIREDIS_TEST_SSL +endif + +ifeq ($(uname_S),Linux) + SSL_LDFLAGS=-lssl -lcrypto +else + OPENSSL_PREFIX?=/usr/local/opt/openssl + CFLAGS+=-I$(OPENSSL_PREFIX)/include + SSL_LDFLAGS+=-L$(OPENSSL_PREFIX)/lib -lssl -lcrypto +endif + +ifeq ($(uname_S),SunOS) + IS_SUN_CC=$(shell sh -c '$(CC) -V 2>&1 |egrep -i -c "sun|studio"') + ifeq ($(IS_SUN_CC),1) + SUN_SHARED_FLAG=-G + else + SUN_SHARED_FLAG=-shared + endif + REAL_LDFLAGS+= -ldl -lnsl -lsocket + DYLIB_MAKE_CMD=$(CC) $(SUN_SHARED_FLAG) -o $(DYLIBNAME) -h $(DYLIB_MINOR_NAME) $(LDFLAGS) + SSL_DYLIB_MAKE_CMD=$(CC) $(SUN_SHARED_FLAG) -o $(SSL_DYLIBNAME) -h 
$(SSL_DYLIB_MINOR_NAME) $(LDFLAGS) $(SSL_LDFLAGS) +endif +ifeq ($(uname_S),Darwin) + DYLIBSUFFIX=dylib + DYLIB_MINOR_NAME=$(LIBNAME).$(HIREDIS_SONAME).$(DYLIBSUFFIX) + DYLIB_MAKE_CMD=$(CC) -dynamiclib -Wl,-install_name,$(PREFIX)/$(LIBRARY_PATH)/$(DYLIB_MINOR_NAME) -o $(DYLIBNAME) $(LDFLAGS) + SSL_DYLIB_MAKE_CMD=$(CC) -dynamiclib -Wl,-install_name,$(PREFIX)/$(LIBRARY_PATH)/$(SSL_DYLIB_MINOR_NAME) -o $(SSL_DYLIBNAME) $(LDFLAGS) $(SSL_LDFLAGS) + DYLIB_PLUGIN=-Wl,-undefined -Wl,dynamic_lookup +endif + +all: $(DYLIBNAME) $(STLIBNAME) hiredis-test $(PKGCONFNAME) +ifeq ($(USE_SSL),1) +all: $(SSL_DYLIBNAME) $(SSL_STLIBNAME) $(SSL_PKGCONFNAME) +endif + +# Deps (use make dep to generate this) +alloc.o: alloc.c fmacros.h alloc.h +async.o: async.c fmacros.h alloc.h async.h hiredis.h read.h sds.h net.h dict.c dict.h win32.h async_private.h +dict.o: dict.c fmacros.h alloc.h dict.h +hiredis.o: hiredis.c fmacros.h hiredis.h read.h sds.h alloc.h net.h async.h win32.h +net.o: net.c fmacros.h net.h hiredis.h read.h sds.h alloc.h sockcompat.h win32.h +read.o: read.c fmacros.h alloc.h read.h sds.h win32.h +sds.o: sds.c sds.h sdsalloc.h alloc.h +sockcompat.o: sockcompat.c sockcompat.h +ssl.o: ssl.c hiredis.h read.h sds.h alloc.h async.h win32.h async_private.h +test.o: test.c fmacros.h hiredis.h read.h sds.h alloc.h net.h sockcompat.h win32.h + +$(DYLIBNAME): $(OBJ) + $(DYLIB_MAKE_CMD) -o $(DYLIBNAME) $(OBJ) $(REAL_LDFLAGS) + +$(STLIBNAME): $(OBJ) + $(STLIB_MAKE_CMD) $(STLIBNAME) $(OBJ) + +$(SSL_DYLIBNAME): $(SSL_OBJ) + $(SSL_DYLIB_MAKE_CMD) $(DYLIB_PLUGIN) -o $(SSL_DYLIBNAME) $(SSL_OBJ) $(REAL_LDFLAGS) $(LDFLAGS) $(SSL_LDFLAGS) + +$(SSL_STLIBNAME): $(SSL_OBJ) + $(STLIB_MAKE_CMD) $(SSL_STLIBNAME) $(SSL_OBJ) + +dynamic: $(DYLIBNAME) +static: $(STLIBNAME) +ifeq ($(USE_SSL),1) +dynamic: $(SSL_DYLIBNAME) +static: $(SSL_STLIBNAME) +endif + +# Binaries: +hiredis-example-libevent: examples/example-libevent.c adapters/libevent.h $(STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< -levent $(STLIBNAME) $(REAL_LDFLAGS) + +hiredis-example-libevent-ssl: examples/example-libevent-ssl.c adapters/libevent.h $(STLIBNAME) $(SSL_STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< -levent $(STLIBNAME) $(SSL_STLIBNAME) $(REAL_LDFLAGS) $(SSL_LDFLAGS) + +hiredis-example-libev: examples/example-libev.c adapters/libev.h $(STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< -lev $(STLIBNAME) $(REAL_LDFLAGS) + +hiredis-example-glib: examples/example-glib.c adapters/glib.h $(STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< $(shell pkg-config --cflags --libs glib-2.0) $(STLIBNAME) $(REAL_LDFLAGS) + +hiredis-example-ivykis: examples/example-ivykis.c adapters/ivykis.h $(STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< -livykis $(STLIBNAME) $(REAL_LDFLAGS) + +hiredis-example-macosx: examples/example-macosx.c adapters/macosx.h $(STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< -framework CoreFoundation $(STLIBNAME) $(REAL_LDFLAGS) + +hiredis-example-ssl: examples/example-ssl.c $(STLIBNAME) $(SSL_STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< $(STLIBNAME) $(SSL_STLIBNAME) $(REAL_LDFLAGS) $(SSL_LDFLAGS) + + +ifndef AE_DIR +hiredis-example-ae: + @echo "Please specify AE_DIR (e.g. /src)" + @false +else +hiredis-example-ae: examples/example-ae.c adapters/ae.h $(STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) $(REAL_LDFLAGS) -I. 
-I$(AE_DIR) $< $(AE_DIR)/ae.o $(AE_DIR)/zmalloc.o $(AE_DIR)/../deps/jemalloc/lib/libjemalloc.a -pthread $(STLIBNAME) +endif + +ifndef LIBUV_DIR +hiredis-example-libuv: + @echo "Please specify LIBUV_DIR (e.g. ../libuv/)" + @false +else +hiredis-example-libuv: examples/example-libuv.c adapters/libuv.h $(STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) -I. -I$(LIBUV_DIR)/include $< $(LIBUV_DIR)/.libs/libuv.a -lpthread -lrt $(STLIBNAME) $(REAL_LDFLAGS) +endif + +ifeq ($(and $(QT_MOC),$(QT_INCLUDE_DIR),$(QT_LIBRARY_DIR)),) +hiredis-example-qt: + @echo "Please specify QT_MOC, QT_INCLUDE_DIR AND QT_LIBRARY_DIR" + @false +else +hiredis-example-qt: examples/example-qt.cpp adapters/qt.h $(STLIBNAME) + $(QT_MOC) adapters/qt.h -I. -I$(QT_INCLUDE_DIR) -I$(QT_INCLUDE_DIR)/QtCore | \ + $(CXX) -x c++ -o qt-adapter-moc.o -c - $(REAL_CFLAGS) -I. -I$(QT_INCLUDE_DIR) -I$(QT_INCLUDE_DIR)/QtCore + $(QT_MOC) examples/example-qt.h -I. -I$(QT_INCLUDE_DIR) -I$(QT_INCLUDE_DIR)/QtCore | \ + $(CXX) -x c++ -o qt-example-moc.o -c - $(REAL_CFLAGS) -I. -I$(QT_INCLUDE_DIR) -I$(QT_INCLUDE_DIR)/QtCore + $(CXX) -o examples/$@ $(REAL_CFLAGS) $(REAL_LDFLAGS) -I. -I$(QT_INCLUDE_DIR) -I$(QT_INCLUDE_DIR)/QtCore -L$(QT_LIBRARY_DIR) qt-adapter-moc.o qt-example-moc.o $< -pthread $(STLIBNAME) -lQtCore +endif + +hiredis-example: examples/example.c $(STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< $(STLIBNAME) $(REAL_LDFLAGS) + +hiredis-example-push: examples/example-push.c $(STLIBNAME) + $(CC) -o examples/$@ $(REAL_CFLAGS) -I. $< $(STLIBNAME) $(REAL_LDFLAGS) + +examples: $(EXAMPLES) + +TEST_LIBS = $(STLIBNAME) +ifeq ($(USE_SSL),1) + TEST_LIBS += $(SSL_STLIBNAME) + TEST_LDFLAGS = $(SSL_LDFLAGS) -lssl -lcrypto -lpthread +endif + +hiredis-test: test.o $(TEST_LIBS) + $(CC) -o $@ $(REAL_CFLAGS) -I. $^ $(REAL_LDFLAGS) $(TEST_LDFLAGS) + +hiredis-%: %.o $(STLIBNAME) + $(CC) $(REAL_CFLAGS) -o $@ $< $(TEST_LIBS) $(REAL_LDFLAGS) + +test: hiredis-test + ./hiredis-test + +check: hiredis-test + TEST_SSL=$(USE_SSL) ./test.sh + +.c.o: + $(CC) -std=c99 -pedantic -c $(REAL_CFLAGS) $< + +clean: + rm -rf $(DYLIBNAME) $(STLIBNAME) $(SSL_DYLIBNAME) $(SSL_STLIBNAME) $(TESTS) $(PKGCONFNAME) examples/hiredis-example* *.o *.gcda *.gcno *.gcov + +dep: + $(CC) $(CPPFLAGS) $(CFLAGS) -MM *.c + +INSTALL?= cp -pPR + +$(PKGCONFNAME): hiredis.h + @echo "Generating $@ for pkgconfig..." + @echo prefix=$(PREFIX) > $@ + @echo exec_prefix=\$${prefix} >> $@ + @echo libdir=$(PREFIX)/$(LIBRARY_PATH) >> $@ + @echo includedir=$(PREFIX)/$(INCLUDE_PATH) >> $@ + @echo >> $@ + @echo Name: hiredis >> $@ + @echo Description: Minimalistic C client library for Redis. >> $@ + @echo Version: $(HIREDIS_MAJOR).$(HIREDIS_MINOR).$(HIREDIS_PATCH) >> $@ + @echo Libs: -L\$${libdir} -lhiredis >> $@ + @echo Cflags: -I\$${includedir} -D_FILE_OFFSET_BITS=64 >> $@ + +$(SSL_PKGCONFNAME): hiredis_ssl.h + @echo "Generating $@ for pkgconfig..." + @echo prefix=$(PREFIX) > $@ + @echo exec_prefix=\$${prefix} >> $@ + @echo libdir=$(PREFIX)/$(LIBRARY_PATH) >> $@ + @echo includedir=$(PREFIX)/$(INCLUDE_PATH) >> $@ + @echo >> $@ + @echo Name: hiredis_ssl >> $@ + @echo Description: SSL Support for hiredis. 
>> $@ + @echo Version: $(HIREDIS_MAJOR).$(HIREDIS_MINOR).$(HIREDIS_PATCH) >> $@ + @echo Requires: hiredis >> $@ + @echo Libs: -L\$${libdir} -lhiredis_ssl >> $@ + @echo Libs.private: -lssl -lcrypto >> $@ + +install: $(DYLIBNAME) $(STLIBNAME) $(PKGCONFNAME) + mkdir -p $(INSTALL_INCLUDE_PATH) $(INSTALL_INCLUDE_PATH)/adapters $(INSTALL_LIBRARY_PATH) + $(INSTALL) hiredis.h async.h read.h sds.h alloc.h $(INSTALL_INCLUDE_PATH) + $(INSTALL) adapters/*.h $(INSTALL_INCLUDE_PATH)/adapters + $(INSTALL) $(DYLIBNAME) $(INSTALL_LIBRARY_PATH)/$(DYLIB_MINOR_NAME) + cd $(INSTALL_LIBRARY_PATH) && ln -sf $(DYLIB_MINOR_NAME) $(DYLIBNAME) + $(INSTALL) $(STLIBNAME) $(INSTALL_LIBRARY_PATH) + mkdir -p $(INSTALL_PKGCONF_PATH) + $(INSTALL) $(PKGCONFNAME) $(INSTALL_PKGCONF_PATH) + +ifeq ($(USE_SSL),1) +install: install-ssl + +install-ssl: $(SSL_DYLIBNAME) $(SSL_STLIBNAME) $(SSL_PKGCONFNAME) + mkdir -p $(INSTALL_INCLUDE_PATH) $(INSTALL_LIBRARY_PATH) + $(INSTALL) hiredis_ssl.h $(INSTALL_INCLUDE_PATH) + $(INSTALL) $(SSL_DYLIBNAME) $(INSTALL_LIBRARY_PATH)/$(SSL_DYLIB_MINOR_NAME) + cd $(INSTALL_LIBRARY_PATH) && ln -sf $(SSL_DYLIB_MINOR_NAME) $(SSL_DYLIBNAME) + $(INSTALL) $(SSL_STLIBNAME) $(INSTALL_LIBRARY_PATH) + mkdir -p $(INSTALL_PKGCONF_PATH) + $(INSTALL) $(SSL_PKGCONFNAME) $(INSTALL_PKGCONF_PATH) +endif + +32bit: + @echo "" + @echo "WARNING: if this fails under Linux you probably need to install libc6-dev-i386" + @echo "" + $(MAKE) CFLAGS="-m32" LDFLAGS="-m32" + +32bit-vars: + $(eval CFLAGS=-m32) + $(eval LDFLAGS=-m32) + +gprof: + $(MAKE) CFLAGS="-pg" LDFLAGS="-pg" + +gcov: + $(MAKE) CFLAGS="-fprofile-arcs -ftest-coverage" LDFLAGS="-fprofile-arcs" + +coverage: gcov + make check + mkdir -p tmp/lcov + lcov -d . -c -o tmp/lcov/hiredis.info + genhtml --legend -o tmp/lcov/report tmp/lcov/hiredis.info + +noopt: + $(MAKE) OPTIMIZATION="" + +.PHONY: all test check clean dep install 32bit 32bit-vars gprof gcov noopt diff --git a/ext/hiredis-1.0.2/README.md b/ext/hiredis-1.0.2/README.md new file mode 100644 index 000000000..c544d5718 --- /dev/null +++ b/ext/hiredis-1.0.2/README.md @@ -0,0 +1,664 @@ +[![Build Status](https://travis-ci.org/redis/hiredis.png)](https://travis-ci.org/redis/hiredis) + +**This Readme reflects the latest changed in the master branch. See [v1.0.0](https://github.com/redis/hiredis/tree/v1.0.0) for the Readme and documentation for the latest release ([API/ABI history](https://abi-laboratory.pro/?view=timeline&l=hiredis)).** + +# HIREDIS + +Hiredis is a minimalistic C client library for the [Redis](http://redis.io/) database. + +It is minimalistic because it just adds minimal support for the protocol, but +at the same time it uses a high level printf-alike API in order to make it +much higher level than otherwise suggested by its minimal code base and the +lack of explicit bindings for every Redis command. + +Apart from supporting sending commands and receiving replies, it comes with +a reply parser that is decoupled from the I/O layer. It +is a stream parser designed for easy reusability, which can for instance be used +in higher level language bindings for efficient reply parsing. + +Hiredis only supports the binary-safe Redis protocol, so you can use it with any +Redis version >= 1.2.0. + +The library comes with multiple APIs. There is the +*synchronous API*, the *asynchronous API* and the *reply parsing API*. + +## Upgrading to `1.0.2` + +NOTE: v1.0.1 erroneously bumped SONAME, which is why it is skipped here. 
+ +Version 1.0.2 is simply 1.0.0 with a fix for [CVE-2021-32765](https://github.com/redis/hiredis/security/advisories/GHSA-hfm9-39pp-55p2). They are otherwise identical. + +## Upgrading to `1.0.0` + +Version 1.0.0 marks the first stable release of Hiredis. +It includes some minor breaking changes, mostly to make the exposed API more uniform and self-explanatory. +It also bundles the updated `sds` library, to sync up with upstream and Redis. +For code changes see the [Changelog](CHANGELOG.md). + +_Note: As described below, a few member names have been changed but most applications should be able to upgrade with minor code changes and recompiling._ + +## IMPORTANT: Breaking changes from `0.14.1` -> `1.0.0` + +* `redisContext` has two additional members (`free_privdata`, and `privctx`). +* `redisOptions.timeout` has been renamed to `redisOptions.connect_timeout`, and we've added `redisOptions.command_timeout`. +* `redisReplyObjectFunctions.createArray` now takes `size_t` instead of `int` for its length parameter. + +## IMPORTANT: Breaking changes when upgrading from 0.13.x -> 0.14.x + +Bulk and multi-bulk lengths less than -1 or greater than `LLONG_MAX` are now +protocol errors. This is consistent with the RESP specification. On 32-bit +platforms, the upper bound is lowered to `SIZE_MAX`. + +Change `redisReply.len` to `size_t`, as it denotes the the size of a string + +User code should compare this to `size_t` values as well. If it was used to +compare to other values, casting might be necessary or can be removed, if +casting was applied before. + +## Upgrading from `<0.9.0` + +Version 0.9.0 is a major overhaul of hiredis in every aspect. However, upgrading existing +code using hiredis should not be a big pain. The key thing to keep in mind when +upgrading is that hiredis >= 0.9.0 uses a `redisContext*` to keep state, in contrast to +the stateless 0.0.1 that only has a file descriptor to work with. + +## Synchronous API + +To consume the synchronous API, there are only a few function calls that need to be introduced: + +```c +redisContext *redisConnect(const char *ip, int port); +void *redisCommand(redisContext *c, const char *format, ...); +void freeReplyObject(void *reply); +``` + +### Connecting + +The function `redisConnect` is used to create a so-called `redisContext`. The +context is where Hiredis holds state for a connection. The `redisContext` +struct has an integer `err` field that is non-zero when the connection is in +an error state. The field `errstr` will contain a string with a description of +the error. More information on errors can be found in the **Errors** section. +After trying to connect to Redis using `redisConnect` you should +check the `err` field to see if establishing the connection was successful: +```c +redisContext *c = redisConnect("127.0.0.1", 6379); +if (c == NULL || c->err) { + if (c) { + printf("Error: %s\n", c->errstr); + // handle error + } else { + printf("Can't allocate redis context\n"); + } +} +``` + +*Note: A `redisContext` is not thread-safe.* + +### Sending commands + +There are several ways to issue commands to Redis. The first that will be introduced is +`redisCommand`. This function takes a format similar to printf. 
In the simplest form, +it is used like this: +```c +reply = redisCommand(context, "SET foo bar"); +``` + +The specifier `%s` interpolates a string in the command, and uses `strlen` to +determine the length of the string: +```c +reply = redisCommand(context, "SET foo %s", value); +``` +When you need to pass binary safe strings in a command, the `%b` specifier can be +used. Together with a pointer to the string, it requires a `size_t` length argument +of the string: +```c +reply = redisCommand(context, "SET foo %b", value, (size_t) valuelen); +``` +Internally, Hiredis splits the command in different arguments and will +convert it to the protocol used to communicate with Redis. +One or more spaces separate arguments, so you can use the specifiers +anywhere in an argument: +```c +reply = redisCommand(context, "SET key:%s %s", myid, value); +``` + +### Using replies + +The return value of `redisCommand` holds a reply when the command was +successfully executed. When an error occurs, the return value is `NULL` and +the `err` field in the context will be set (see section on **Errors**). +Once an error is returned the context cannot be reused and you should set up +a new connection. + +The standard replies that `redisCommand` returns are of the type `redisReply`. The +`type` field in the `redisReply` should be used to test what kind of reply +was received: + +### RESP2 + +* **`REDIS_REPLY_STATUS`**: + * The command replied with a status reply. The status string can be accessed using `reply->str`. + The length of this string can be accessed using `reply->len`. + +* **`REDIS_REPLY_ERROR`**: + * The command replied with an error. The error string can be accessed identically to `REDIS_REPLY_STATUS`. + +* **`REDIS_REPLY_INTEGER`**: + * The command replied with an integer. The integer value can be accessed using the + `reply->integer` field of type `long long`. + +* **`REDIS_REPLY_NIL`**: + * The command replied with a **nil** object. There is no data to access. + +* **`REDIS_REPLY_STRING`**: + * A bulk (string) reply. The value of the reply can be accessed using `reply->str`. + The length of this string can be accessed using `reply->len`. + +* **`REDIS_REPLY_ARRAY`**: + * A multi bulk reply. The number of elements in the multi bulk reply is stored in + `reply->elements`. Every element in the multi bulk reply is a `redisReply` object as well + and can be accessed via `reply->element[..index..]`. + Redis may reply with nested arrays but this is fully supported. + +### RESP3 + +Hiredis also supports the new `RESP3` data types, which are as follows. For more information about the protocol see the `RESP3` [specification](https://github.com/antirez/RESP3/blob/master/spec.md). + +* **`REDIS_REPLY_DOUBLE`**: + * The command replied with a double-precision floating point number. + The value is stored as a string in the `str` member, and can be converted with `strtod` or similar. + +* **`REDIS_REPLY_BOOL`**: + * A boolean true/false reply. + The value is stored in the `integer` member and will be either `0` or `1`. + +* **`REDIS_REPLY_MAP`**: + * An array with the added invariant that there will always be an even number of elements. + The MAP is functionally equivalent to `REDIS_REPLY_ARRAY` except for the previously mentioned invariant. + +* **`REDIS_REPLY_SET`**: + * An array response where each entry is unique. + Like the MAP type, the data is identical to an array response except there are no duplicate values. + +* **`REDIS_REPLY_PUSH`**: + * An array that can be generated spontaneously by Redis.
+ This array response will always contain at least two subelements. The first contains the type of `PUSH` message (e.g. `message`, or `invalidate`), and the second is a sub-array with the `PUSH` payload itself. + +* **`REDIS_REPLY_ATTR`**: + * An array structurally identical to a `MAP` but intended as meta-data about a reply. + _As of Redis 6.0.6 this reply type is not used in Redis_ + +* **`REDIS_REPLY_BIGNUM`**: + * A string representing an arbitrarily large signed or unsigned integer value. + The number will be encoded as a string in the `str` member of `redisReply`. + +* **`REDIS_REPLY_VERB`**: + * A verbatim string, intended to be presented to the user without modification. + The string payload is stored in the `str` member, and type data is stored in the `vtype` member (e.g. `txt` for raw text or `md` for markdown). + +Replies should be freed using the `freeReplyObject()` function. +Note that this function will take care of freeing sub-reply objects +contained in arrays and nested arrays, so there is no need for the user to +free the sub replies (it is actually harmful and will corrupt the memory). + +**Important:** the current version of hiredis (1.0.0) frees replies when the +asynchronous API is used. This means you should not call `freeReplyObject` when +you use this API. The reply is cleaned up by hiredis _after_ the callback +returns. We may introduce a flag to make this configurable in future versions of the library. + +### Cleaning up + +To disconnect and free the context the following function can be used: +```c +void redisFree(redisContext *c); +``` +This function immediately closes the socket and then frees the allocations done in +creating the context. + +### Sending commands (cont'd) + +Together with `redisCommand`, the function `redisCommandArgv` can be used to issue commands. +It has the following prototype: +```c +void *redisCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen); +``` +It takes the number of arguments `argc`, an array of strings `argv` and the lengths of the +arguments `argvlen`. For convenience, `argvlen` may be set to `NULL` and the function will +use `strlen(3)` on every argument to determine its length. Obviously, when any of the arguments +need to be binary safe, the entire array of lengths `argvlen` should be provided. + +The return value has the same semantics as `redisCommand`. + +### Pipelining + +To explain how Hiredis supports pipelining in a blocking connection, there needs to be +an understanding of the internal execution flow. + +When any of the functions in the `redisCommand` family is called, Hiredis first formats the +command according to the Redis protocol. The formatted command is then put in the output buffer +of the context. This output buffer is dynamic, so it can hold any number of commands. +After the command is put in the output buffer, `redisGetReply` is called. This function has the +following two execution paths: + +1. The input buffer is non-empty: + * Try to parse a single reply from the input buffer and return it + * If no reply could be parsed, continue at *2* +2. The input buffer is empty: + * Write the **entire** output buffer to the socket + * Read from the socket until a single reply could be parsed + +The function `redisGetReply` is exported as part of the Hiredis API and can be used when a reply +is expected on the socket. To pipeline commands, the only thing that needs to be done is +filling up the output buffer.
For this cause, two commands can be used that are identical +to the `redisCommand` family, apart from not returning a reply: +```c +void redisAppendCommand(redisContext *c, const char *format, ...); +void redisAppendCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen); +``` +After calling either function one or more times, `redisGetReply` can be used to receive the +subsequent replies. The return value for this function is either `REDIS_OK` or `REDIS_ERR`, where +the latter means an error occurred while reading a reply. Just as with the other commands, +the `err` field in the context can be used to find out what the cause of this error is. + +The following examples shows a simple pipeline (resulting in only a single call to `write(2)` and +a single call to `read(2)`): +```c +redisReply *reply; +redisAppendCommand(context,"SET foo bar"); +redisAppendCommand(context,"GET foo"); +redisGetReply(context,(void *)&reply); // reply for SET +freeReplyObject(reply); +redisGetReply(context,(void *)&reply); // reply for GET +freeReplyObject(reply); +``` +This API can also be used to implement a blocking subscriber: +```c +reply = redisCommand(context,"SUBSCRIBE foo"); +freeReplyObject(reply); +while(redisGetReply(context,(void *)&reply) == REDIS_OK) { + // consume message + freeReplyObject(reply); +} +``` +### Errors + +When a function call is not successful, depending on the function either `NULL` or `REDIS_ERR` is +returned. The `err` field inside the context will be non-zero and set to one of the +following constants: + +* **`REDIS_ERR_IO`**: + There was an I/O error while creating the connection, trying to write + to the socket or read from the socket. If you included `errno.h` in your + application, you can use the global `errno` variable to find out what is + wrong. + +* **`REDIS_ERR_EOF`**: + The server closed the connection which resulted in an empty read. + +* **`REDIS_ERR_PROTOCOL`**: + There was an error while parsing the protocol. + +* **`REDIS_ERR_OTHER`**: + Any other error. Currently, it is only used when a specified hostname to connect + to cannot be resolved. + +In every case, the `errstr` field in the context will be set to hold a string representation +of the error. + +## Asynchronous API + +Hiredis comes with an asynchronous API that works easily with any event library. +Examples are bundled that show using Hiredis with [libev](http://software.schmorp.de/pkg/libev.html) +and [libevent](http://monkey.org/~provos/libevent/). + +### Connecting + +The function `redisAsyncConnect` can be used to establish a non-blocking connection to +Redis. It returns a pointer to the newly created `redisAsyncContext` struct. The `err` field +should be checked after creation to see if there were errors creating the connection. +Because the connection that will be created is non-blocking, the kernel is not able to +instantly return if the specified host and port is able to accept a connection. + +*Note: A `redisAsyncContext` is not thread-safe.* + +```c +redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379); +if (c->err) { + printf("Error: %s\n", c->errstr); + // handle error +} +``` + +The asynchronous context can hold a disconnect callback function that is called when the +connection is disconnected (either because of an error or per user request). 
This function should +have the following prototype: +```c +void(const redisAsyncContext *c, int status); +``` +On a disconnect, the `status` argument is set to `REDIS_OK` when disconnection was initiated by the +user, or `REDIS_ERR` when the disconnection was caused by an error. When it is `REDIS_ERR`, the `err` +field in the context can be accessed to find out the cause of the error. + +The context object is always freed after the disconnect callback fired. When a reconnect is needed, +the disconnect callback is a good point to do so. + +Setting the disconnect callback can only be done once per context. For subsequent calls it will +return `REDIS_ERR`. The function to set the disconnect callback has the following prototype: +```c +int redisAsyncSetDisconnectCallback(redisAsyncContext *ac, redisDisconnectCallback *fn); +``` +`ac->data` may be used to pass user data to this callback, the same can be done for redisConnectCallback. +### Sending commands and their callbacks + +In an asynchronous context, commands are automatically pipelined due to the nature of an event loop. +Therefore, unlike the synchronous API, there is only a single way to send commands. +Because commands are sent to Redis asynchronously, issuing a command requires a callback function +that is called when the reply is received. Reply callbacks should have the following prototype: +```c +void(redisAsyncContext *c, void *reply, void *privdata); +``` +The `privdata` argument can be used to curry arbitrary data to the callback from the point where +the command is initially queued for execution. + +The functions that can be used to issue commands in an asynchronous context are: +```c +int redisAsyncCommand( + redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, + const char *format, ...); +int redisAsyncCommandArgv( + redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, + int argc, const char **argv, const size_t *argvlen); +``` +Both functions work like their blocking counterparts. The return value is `REDIS_OK` when the command +was successfully added to the output buffer and `REDIS_ERR` otherwise. Example: when the connection +is being disconnected per user-request, no new commands may be added to the output buffer and `REDIS_ERR` is +returned on calls to the `redisAsyncCommand` family. + +If the reply for a command with a `NULL` callback is read, it is immediately freed. When the callback +for a command is non-`NULL`, the memory is freed immediately following the callback: the reply is only +valid for the duration of the callback. + +All pending callbacks are called with a `NULL` reply when the context encountered an error. + +### Disconnecting + +An asynchronous connection can be terminated using: +```c +void redisAsyncDisconnect(redisAsyncContext *ac); +``` +When this function is called, the connection is **not** immediately terminated. Instead, new +commands are no longer accepted and the connection is only terminated when all pending commands +have been written to the socket, their respective replies have been read and their respective +callbacks have been executed. After this, the disconnection callback is executed with the +`REDIS_OK` status and the context object is freed. + +### Hooking it up to event library *X* + +There are a few hooks that need to be set on the context object after it is created. +See the `adapters/` directory for bindings to *libev* and *libevent*. + +## Reply parsing API + +Hiredis comes with a reply parsing API that makes it easy for writing higher +level language bindings. 
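
As a quick orientation, here is a minimal sketch of the parser used in isolation, built from the `redisReaderCreate`/`redisReaderFeed`/`redisReaderGetReply` functions listed just below. The hard-coded `"+OK\r\n"` buffer and the `parse_example` wrapper are illustrative only; in real code the bytes would come from a socket and errors would be handled more thoroughly.

```c
#include <stdio.h>
#include "hiredis.h"

/* Illustrative only: feed one hard-coded RESP status reply ("+OK\r\n")
 * into a reader and pull the parsed reply back out. */
int parse_example(void) {
    redisReader *reader = redisReaderCreate();
    if (reader == NULL) return -1;

    const char buf[] = "+OK\r\n";          /* bytes as they would arrive from a socket */
    redisReaderFeed(reader, buf, sizeof(buf) - 1);

    void *reply = NULL;
    if (redisReaderGetReply(reader, &reply) != REDIS_OK) {
        redisReaderFree(reader);            /* protocol or out-of-memory error */
        return -1;
    }
    if (reply != NULL) {
        /* With the default reply object functions this is a redisReply. */
        printf("reply type: %d\n", ((redisReply *)reply)->type);
        freeReplyObject(reply);
    }
    redisReaderFree(reader);
    return 0;
}
```
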
+ +The reply parsing API consists of the following functions: +```c +redisReader *redisReaderCreate(void); +void redisReaderFree(redisReader *reader); +int redisReaderFeed(redisReader *reader, const char *buf, size_t len); +int redisReaderGetReply(redisReader *reader, void **reply); +``` +The same set of functions is used internally by hiredis when creating a +normal Redis context; the above API just exposes them to the user for direct +usage. + +### Usage + +The function `redisReaderCreate` creates a `redisReader` structure that holds a +buffer with unparsed data and state for the protocol parser. + +Incoming data -- most likely from a socket -- can be placed in the internal +buffer of the `redisReader` using `redisReaderFeed`. This function will make a +copy of the buffer pointed to by `buf` for `len` bytes. This data is parsed +when `redisReaderGetReply` is called. This function returns an integer status +and a reply object (as described above) via `void **reply`. The returned status +can be either `REDIS_OK` or `REDIS_ERR`, where the latter means something went +wrong (either a protocol error, or an out of memory error). + +The parser limits the level of nesting for multi bulk payloads to 7. If the +multi bulk nesting level is higher than this, the parser returns an error. + +### Customizing replies + +The function `redisReaderGetReply` creates a `redisReply` and makes the function +argument `reply` point to the created `redisReply` variable. For instance, if +the response is of type `REDIS_REPLY_STATUS`, then the `str` field of `redisReply` +will hold the status as a vanilla C string. However, the functions that are +responsible for creating instances of the `redisReply` can be customized by +setting the `fn` field on the `redisReader` struct. This should be done +immediately after creating the `redisReader`. + +For example, [hiredis-rb](https://github.com/pietern/hiredis-rb/blob/master/ext/hiredis_ext/reader.c) +uses customized reply object functions to create Ruby objects. + +### Reader max buffer + +Both when using the Reader API directly or when using it indirectly via a +normal Redis context, the redisReader structure uses a buffer in order to +accumulate data from the server. +Usually this buffer is destroyed when it is empty and is larger than 16 +KiB in order to avoid wasting memory in unused buffers. + +However, when working with very big payloads destroying the buffer may slow +down performance considerably, so it is possible to modify the max size of +an idle buffer by changing the value of the `maxbuf` field of the reader structure +to the desired value. The special value of 0 means that there is no maximum +value for an idle buffer, so the buffer will never get freed. + +For instance if you have a normal Redis context you can set the maximum idle +buffer to zero (unlimited) just with: +```c +context->reader->maxbuf = 0; +``` +This should be done only in order to maximize performance when working with +large payloads. The context should be set back to `REDIS_READER_MAX_BUF` again +as soon as possible in order to prevent allocation of useless memory. + +### Reader max array elements + +By default the hiredis reply parser sets the maximum number of multi-bulk elements +to 2^32 - 1 or 4,294,967,295 entries.
If you need to process multi-bulk replies +with more than this many elements you can set the value higher or to zero, meaning +unlimited with: +```c +context->reader->maxelements = 0; +``` + +## SSL/TLS Support + +### Building + +SSL/TLS support is not built by default and requires an explicit flag: + + make USE_SSL=1 + +This requires the OpenSSL development package (e.g. header files) to be +available. + +When enabled, SSL/TLS support is built into extra `libhiredis_ssl.a` and +`libhiredis_ssl.so` static/dynamic libraries. This leaves the original libraries +unaffected so no additional dependencies are introduced. + +### Using it + +First, you'll need to make sure you include the SSL header file: + +```c +#include "hiredis.h" +#include "hiredis_ssl.h" +``` + +You will also need to link against `libhiredis_ssl`, **in addition** to +`libhiredis` and add `-lssl -lcrypto` to satisfy its dependencies. + +Hiredis implements SSL/TLS on top of its normal `redisContext` or +`redisAsyncContext`, so you will need to establish a connection first and then +initiate an SSL/TLS handshake. + +#### Hiredis OpenSSL Wrappers + +Before Hiredis can negotiate an SSL/TLS connection, it is necessary to +initialize OpenSSL and create a context. You can do that in two ways: + +1. Work directly with the OpenSSL API to initialize the library's global context + and create `SSL_CTX *` and `SSL *` contexts. With an `SSL *` object you can + call `redisInitiateSSL()`. +2. Work with a set of Hiredis-provided wrappers around OpenSSL, create a + `redisSSLContext` object to hold configuration and use + `redisInitiateSSLWithContext()` to initiate the SSL/TLS handshake. + +```c +/* An Hiredis SSL context. It holds SSL configuration and can be reused across + * many contexts. + */ +redisSSLContext *ssl; + +/* An error variable to indicate what went wrong, if the context fails to + * initialize. + */ +redisSSLContextError ssl_error; + +/* Initialize global OpenSSL state. + * + * You should call this only once when your app initializes, and only if + * you don't explicitly or implicitly initialize OpenSSL elsewhere. + */ +redisInitOpenSSL(); + +/* Create SSL context */ +ssl = redisCreateSSLContext( + "cacertbundle.crt", /* File name of trusted CA/ca bundle file, optional */ + "/path/to/certs", /* Path of trusted certificates, optional */ + "client_cert.pem", /* File name of client certificate file, optional */ + "client_key.pem", /* File name of client private key, optional */ + "redis.mydomain.com", /* Server name to request (SNI), optional */ + &ssl_error); + +if (ssl == NULL) { + printf("SSL error: %s\n", redisSSLContextGetError(ssl_error)); + /* Abort... */ +} + +/* Create Redis context and establish connection */ +redisContext *c = redisConnect("localhost", 6443); +if (c == NULL || c->err) { + /* Handle error and abort... */ +} + +/* Negotiate SSL/TLS */ +if (redisInitiateSSLWithContext(c, ssl) != REDIS_OK) { + /* Handle error, in c->err / c->errstr */ +} +``` + +## RESP3 PUSH replies +Redis 6.0 introduced PUSH replies with the reply-type `>`. These messages are generated spontaneously and can arrive at any time, so they must be handled using callbacks. + +### Default behavior +Hiredis installs handlers on `redisContext` and `redisAsyncContext` by default, which will intercept and free any PUSH replies detected. This means existing code will work as-is after upgrading to Redis 6 and switching to `RESP3`. + +### Custom PUSH handler prototypes +The callback prototypes differ between `redisContext` and `redisAsyncContext`.
+ +#### redisContext +```c +void my_push_handler(void *privdata, void *reply) { + /* Handle the reply */ + + /* Note: We need to free the reply in our custom handler for + blocking contexts. This lets us keep the reply if + we want. */ + freeReplyObject(reply); +} +``` + +#### redisAsyncContext +```c +void my_async_push_handler(redisAsyncContext *ac, void *reply) { + /* Handle the reply */ + + /* Note: Because async hiredis always frees replies, you should + not call freeReplyObject in an async push callback. */ +} +``` + +### Installing a custom handler +There are two ways to set your own PUSH handlers. + +1. Set `push_cb` or `async_push_cb` in the `redisOptions` struct and connect with `redisConnectWithOptions` or `redisAsyncConnectWithOptions`. + ```c + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, "127.0.0.1", 6379); + options.push_cb = my_push_handler; + redisContext *context = redisConnectWithOptions(&options); + ``` +2. Call `redisSetPushCallback` or `redisAsyncSetPushCallback` on a connected context. + ```c + redisContext *context = redisConnect("127.0.0.1", 6379); + redisSetPushCallback(context, my_push_handler); + ``` + + _Note: `redisSetPushCallback` and `redisAsyncSetPushCallback` both return any currently configured handler, making it easy to override and then return to the old value._ + +### Specifying no handler +If you have a unique use-case where you don't want hiredis to automatically intercept and free PUSH replies, you will want to configure no handler at all. This can be done in two ways. +1. Set the `REDIS_OPT_NO_PUSH_AUTOFREE` flag in `redisOptions` and leave the callback function pointer `NULL`. + ```c + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, "127.0.0.1", 6379); + options.options |= REDIS_OPT_NO_PUSH_AUTOFREE; + redisContext *context = redisConnectWithOptions(&options); + ``` +2. Call `redisSetPushCallback` with `NULL` once connected. + ```c + redisContext *context = redisConnect("127.0.0.1", 6379); + redisSetPushCallback(context, NULL); + ``` + + _Note: With no handler configured, calls to `redisCommand` may generate more than one reply, so this strategy is only applicable when there's some kind of blocking `redisGetReply()` loop (e.g. `MONITOR` or `SUBSCRIBE` workloads)._ + +## Allocator injection + +Hiredis uses a pass-thru structure of function pointers defined in [alloc.h](https://github.com/redis/hiredis/blob/f5d25850/alloc.h#L41) that contains the currently configured allocation and deallocation functions. By default they just point to libc (`malloc`, `calloc`, `realloc`, etc). + +### Overriding + +One can override the allocators like so: + +```c +hiredisAllocFuncs myfuncs = { + .mallocFn = my_malloc, + .callocFn = my_calloc, + .reallocFn = my_realloc, + .strdupFn = my_strdup, + .freeFn = my_free, +}; + +// Override allocators (function returns current allocators if needed) +hiredisAllocFuncs orig = hiredisSetAllocators(&myfuncs); +``` + +To reset the allocators to their default libc functions, simply call: + +```c +hiredisResetAllocators(); +``` + +## AUTHORS + +Salvatore Sanfilippo (antirez at gmail),\ +Pieter Noordhuis (pcnoordhuis at gmail)\ +Michael Grunder (michael dot grunder at gmail) + +_Hiredis is released under the BSD license._ diff --git a/ext/hiredis-1.0.2/adapters/ae.h b/ext/hiredis-1.0.2/adapters/ae.h new file mode 100644 index 000000000..660d82eb0 --- /dev/null +++ b/ext/hiredis-1.0.2/adapters/ae.h @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2010-2011, Pieter Noordhuis + * + * All rights reserved.
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __HIREDIS_AE_H__ +#define __HIREDIS_AE_H__ +#include +#include +#include "../hiredis.h" +#include "../async.h" + +typedef struct redisAeEvents { + redisAsyncContext *context; + aeEventLoop *loop; + int fd; + int reading, writing; +} redisAeEvents; + +static void redisAeReadEvent(aeEventLoop *el, int fd, void *privdata, int mask) { + ((void)el); ((void)fd); ((void)mask); + + redisAeEvents *e = (redisAeEvents*)privdata; + redisAsyncHandleRead(e->context); +} + +static void redisAeWriteEvent(aeEventLoop *el, int fd, void *privdata, int mask) { + ((void)el); ((void)fd); ((void)mask); + + redisAeEvents *e = (redisAeEvents*)privdata; + redisAsyncHandleWrite(e->context); +} + +static void redisAeAddRead(void *privdata) { + redisAeEvents *e = (redisAeEvents*)privdata; + aeEventLoop *loop = e->loop; + if (!e->reading) { + e->reading = 1; + aeCreateFileEvent(loop,e->fd,AE_READABLE,redisAeReadEvent,e); + } +} + +static void redisAeDelRead(void *privdata) { + redisAeEvents *e = (redisAeEvents*)privdata; + aeEventLoop *loop = e->loop; + if (e->reading) { + e->reading = 0; + aeDeleteFileEvent(loop,e->fd,AE_READABLE); + } +} + +static void redisAeAddWrite(void *privdata) { + redisAeEvents *e = (redisAeEvents*)privdata; + aeEventLoop *loop = e->loop; + if (!e->writing) { + e->writing = 1; + aeCreateFileEvent(loop,e->fd,AE_WRITABLE,redisAeWriteEvent,e); + } +} + +static void redisAeDelWrite(void *privdata) { + redisAeEvents *e = (redisAeEvents*)privdata; + aeEventLoop *loop = e->loop; + if (e->writing) { + e->writing = 0; + aeDeleteFileEvent(loop,e->fd,AE_WRITABLE); + } +} + +static void redisAeCleanup(void *privdata) { + redisAeEvents *e = (redisAeEvents*)privdata; + redisAeDelRead(privdata); + redisAeDelWrite(privdata); + hi_free(e); +} + +static int redisAeAttach(aeEventLoop *loop, redisAsyncContext *ac) { + redisContext *c = &(ac->c); + redisAeEvents *e; + + /* Nothing should be attached when something is already attached */ + if (ac->ev.data != NULL) + return REDIS_ERR; + + /* Create container for context and r/w 
events */ + e = (redisAeEvents*)hi_malloc(sizeof(*e)); + if (e == NULL) + return REDIS_ERR; + + e->context = ac; + e->loop = loop; + e->fd = c->fd; + e->reading = e->writing = 0; + + /* Register functions to start/stop listening for events */ + ac->ev.addRead = redisAeAddRead; + ac->ev.delRead = redisAeDelRead; + ac->ev.addWrite = redisAeAddWrite; + ac->ev.delWrite = redisAeDelWrite; + ac->ev.cleanup = redisAeCleanup; + ac->ev.data = e; + + return REDIS_OK; +} +#endif diff --git a/ext/hiredis-1.0.2/adapters/glib.h b/ext/hiredis-1.0.2/adapters/glib.h new file mode 100644 index 000000000..ad59dd142 --- /dev/null +++ b/ext/hiredis-1.0.2/adapters/glib.h @@ -0,0 +1,156 @@ +#ifndef __HIREDIS_GLIB_H__ +#define __HIREDIS_GLIB_H__ + +#include + +#include "../hiredis.h" +#include "../async.h" + +typedef struct +{ + GSource source; + redisAsyncContext *ac; + GPollFD poll_fd; +} RedisSource; + +static void +redis_source_add_read (gpointer data) +{ + RedisSource *source = (RedisSource *)data; + g_return_if_fail(source); + source->poll_fd.events |= G_IO_IN; + g_main_context_wakeup(g_source_get_context((GSource *)data)); +} + +static void +redis_source_del_read (gpointer data) +{ + RedisSource *source = (RedisSource *)data; + g_return_if_fail(source); + source->poll_fd.events &= ~G_IO_IN; + g_main_context_wakeup(g_source_get_context((GSource *)data)); +} + +static void +redis_source_add_write (gpointer data) +{ + RedisSource *source = (RedisSource *)data; + g_return_if_fail(source); + source->poll_fd.events |= G_IO_OUT; + g_main_context_wakeup(g_source_get_context((GSource *)data)); +} + +static void +redis_source_del_write (gpointer data) +{ + RedisSource *source = (RedisSource *)data; + g_return_if_fail(source); + source->poll_fd.events &= ~G_IO_OUT; + g_main_context_wakeup(g_source_get_context((GSource *)data)); +} + +static void +redis_source_cleanup (gpointer data) +{ + RedisSource *source = (RedisSource *)data; + + g_return_if_fail(source); + + redis_source_del_read(source); + redis_source_del_write(source); + /* + * It is not our responsibility to remove ourself from the + * current main loop. However, we will remove the GPollFD. 
+ */ + if (source->poll_fd.fd >= 0) { + g_source_remove_poll((GSource *)data, &source->poll_fd); + source->poll_fd.fd = -1; + } +} + +static gboolean +redis_source_prepare (GSource *source, + gint *timeout_) +{ + RedisSource *redis = (RedisSource *)source; + *timeout_ = -1; + return !!(redis->poll_fd.events & redis->poll_fd.revents); +} + +static gboolean +redis_source_check (GSource *source) +{ + RedisSource *redis = (RedisSource *)source; + return !!(redis->poll_fd.events & redis->poll_fd.revents); +} + +static gboolean +redis_source_dispatch (GSource *source, + GSourceFunc callback, + gpointer user_data) +{ + RedisSource *redis = (RedisSource *)source; + + if ((redis->poll_fd.revents & G_IO_OUT)) { + redisAsyncHandleWrite(redis->ac); + redis->poll_fd.revents &= ~G_IO_OUT; + } + + if ((redis->poll_fd.revents & G_IO_IN)) { + redisAsyncHandleRead(redis->ac); + redis->poll_fd.revents &= ~G_IO_IN; + } + + if (callback) { + return callback(user_data); + } + + return TRUE; +} + +static void +redis_source_finalize (GSource *source) +{ + RedisSource *redis = (RedisSource *)source; + + if (redis->poll_fd.fd >= 0) { + g_source_remove_poll(source, &redis->poll_fd); + redis->poll_fd.fd = -1; + } +} + +static GSource * +redis_source_new (redisAsyncContext *ac) +{ + static GSourceFuncs source_funcs = { + .prepare = redis_source_prepare, + .check = redis_source_check, + .dispatch = redis_source_dispatch, + .finalize = redis_source_finalize, + }; + redisContext *c = &ac->c; + RedisSource *source; + + g_return_val_if_fail(ac != NULL, NULL); + + source = (RedisSource *)g_source_new(&source_funcs, sizeof *source); + if (source == NULL) + return NULL; + + source->ac = ac; + source->poll_fd.fd = c->fd; + source->poll_fd.events = 0; + source->poll_fd.revents = 0; + g_source_add_poll((GSource *)source, &source->poll_fd); + + ac->ev.addRead = redis_source_add_read; + ac->ev.delRead = redis_source_del_read; + ac->ev.addWrite = redis_source_add_write; + ac->ev.delWrite = redis_source_del_write; + ac->ev.cleanup = redis_source_cleanup; + ac->ev.data = source; + + return (GSource *)source; +} + +#endif /* __HIREDIS_GLIB_H__ */ diff --git a/ext/hiredis-1.0.2/adapters/ivykis.h b/ext/hiredis-1.0.2/adapters/ivykis.h new file mode 100644 index 000000000..179f6ab52 --- /dev/null +++ b/ext/hiredis-1.0.2/adapters/ivykis.h @@ -0,0 +1,84 @@ +#ifndef __HIREDIS_IVYKIS_H__ +#define __HIREDIS_IVYKIS_H__ +#include +#include "../hiredis.h" +#include "../async.h" + +typedef struct redisIvykisEvents { + redisAsyncContext *context; + struct iv_fd fd; +} redisIvykisEvents; + +static void redisIvykisReadEvent(void *arg) { + redisAsyncContext *context = (redisAsyncContext *)arg; + redisAsyncHandleRead(context); +} + +static void redisIvykisWriteEvent(void *arg) { + redisAsyncContext *context = (redisAsyncContext *)arg; + redisAsyncHandleWrite(context); +} + +static void redisIvykisAddRead(void *privdata) { + redisIvykisEvents *e = (redisIvykisEvents*)privdata; + iv_fd_set_handler_in(&e->fd, redisIvykisReadEvent); +} + +static void redisIvykisDelRead(void *privdata) { + redisIvykisEvents *e = (redisIvykisEvents*)privdata; + iv_fd_set_handler_in(&e->fd, NULL); +} + +static void redisIvykisAddWrite(void *privdata) { + redisIvykisEvents *e = (redisIvykisEvents*)privdata; + iv_fd_set_handler_out(&e->fd, redisIvykisWriteEvent); +} + +static void redisIvykisDelWrite(void *privdata) { + redisIvykisEvents *e = (redisIvykisEvents*)privdata; + iv_fd_set_handler_out(&e->fd, NULL); +} + +static void redisIvykisCleanup(void *privdata) { + 
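+    /* Unregister the descriptor from ivykis and release the per-connection event container. */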
redisIvykisEvents *e = (redisIvykisEvents*)privdata; + + iv_fd_unregister(&e->fd); + hi_free(e); +} + +static int redisIvykisAttach(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + redisIvykisEvents *e; + + /* Nothing should be attached when something is already attached */ + if (ac->ev.data != NULL) + return REDIS_ERR; + + /* Create container for context and r/w events */ + e = (redisIvykisEvents*)hi_malloc(sizeof(*e)); + if (e == NULL) + return REDIS_ERR; + + e->context = ac; + + /* Register functions to start/stop listening for events */ + ac->ev.addRead = redisIvykisAddRead; + ac->ev.delRead = redisIvykisDelRead; + ac->ev.addWrite = redisIvykisAddWrite; + ac->ev.delWrite = redisIvykisDelWrite; + ac->ev.cleanup = redisIvykisCleanup; + ac->ev.data = e; + + /* Initialize and install read/write events */ + IV_FD_INIT(&e->fd); + e->fd.fd = c->fd; + e->fd.handler_in = redisIvykisReadEvent; + e->fd.handler_out = redisIvykisWriteEvent; + e->fd.handler_err = NULL; + e->fd.cookie = e->context; + + iv_fd_register(&e->fd); + + return REDIS_OK; +} +#endif diff --git a/ext/hiredis-1.0.2/adapters/libev.h b/ext/hiredis-1.0.2/adapters/libev.h new file mode 100644 index 000000000..e1e7bbd99 --- /dev/null +++ b/ext/hiredis-1.0.2/adapters/libev.h @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2010-2011, Pieter Noordhuis + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __HIREDIS_LIBEV_H__ +#define __HIREDIS_LIBEV_H__ +#include +#include +#include +#include "../hiredis.h" +#include "../async.h" + +typedef struct redisLibevEvents { + redisAsyncContext *context; + struct ev_loop *loop; + int reading, writing; + ev_io rev, wev; + ev_timer timer; +} redisLibevEvents; + +static void redisLibevReadEvent(EV_P_ ev_io *watcher, int revents) { +#if EV_MULTIPLICITY + ((void)loop); +#endif + ((void)revents); + + redisLibevEvents *e = (redisLibevEvents*)watcher->data; + redisAsyncHandleRead(e->context); +} + +static void redisLibevWriteEvent(EV_P_ ev_io *watcher, int revents) { +#if EV_MULTIPLICITY + ((void)loop); +#endif + ((void)revents); + + redisLibevEvents *e = (redisLibevEvents*)watcher->data; + redisAsyncHandleWrite(e->context); +} + +static void redisLibevAddRead(void *privdata) { + redisLibevEvents *e = (redisLibevEvents*)privdata; + struct ev_loop *loop = e->loop; + ((void)loop); + if (!e->reading) { + e->reading = 1; + ev_io_start(EV_A_ &e->rev); + } +} + +static void redisLibevDelRead(void *privdata) { + redisLibevEvents *e = (redisLibevEvents*)privdata; + struct ev_loop *loop = e->loop; + ((void)loop); + if (e->reading) { + e->reading = 0; + ev_io_stop(EV_A_ &e->rev); + } +} + +static void redisLibevAddWrite(void *privdata) { + redisLibevEvents *e = (redisLibevEvents*)privdata; + struct ev_loop *loop = e->loop; + ((void)loop); + if (!e->writing) { + e->writing = 1; + ev_io_start(EV_A_ &e->wev); + } +} + +static void redisLibevDelWrite(void *privdata) { + redisLibevEvents *e = (redisLibevEvents*)privdata; + struct ev_loop *loop = e->loop; + ((void)loop); + if (e->writing) { + e->writing = 0; + ev_io_stop(EV_A_ &e->wev); + } +} + +static void redisLibevStopTimer(void *privdata) { + redisLibevEvents *e = (redisLibevEvents*)privdata; + struct ev_loop *loop = e->loop; + ((void)loop); + ev_timer_stop(EV_A_ &e->timer); +} + +static void redisLibevCleanup(void *privdata) { + redisLibevEvents *e = (redisLibevEvents*)privdata; + redisLibevDelRead(privdata); + redisLibevDelWrite(privdata); + redisLibevStopTimer(privdata); + hi_free(e); +} + +static void redisLibevTimeout(EV_P_ ev_timer *timer, int revents) { + ((void)revents); + redisLibevEvents *e = (redisLibevEvents*)timer->data; + redisAsyncHandleTimeout(e->context); +} + +static void redisLibevSetTimeout(void *privdata, struct timeval tv) { + redisLibevEvents *e = (redisLibevEvents*)privdata; + struct ev_loop *loop = e->loop; + ((void)loop); + + if (!ev_is_active(&e->timer)) { + ev_init(&e->timer, redisLibevTimeout); + e->timer.data = e; + } + + e->timer.repeat = tv.tv_sec + tv.tv_usec / 1000000.00; + ev_timer_again(EV_A_ &e->timer); +} + +static int redisLibevAttach(EV_P_ redisAsyncContext *ac) { + redisContext *c = &(ac->c); + redisLibevEvents *e; + + /* Nothing should be attached when something is already attached */ + if (ac->ev.data != NULL) + return REDIS_ERR; + + /* Create container for context and r/w events */ + e = (redisLibevEvents*)hi_calloc(1, sizeof(*e)); + if (e == NULL) + return REDIS_ERR; + + e->context = ac; +#if EV_MULTIPLICITY + e->loop = loop; +#else + e->loop = NULL; +#endif + e->rev.data = e; + e->wev.data = e; + + /* Register functions to start/stop listening for events */ + ac->ev.addRead = redisLibevAddRead; + ac->ev.delRead = redisLibevDelRead; + ac->ev.addWrite = redisLibevAddWrite; + ac->ev.delWrite = redisLibevDelWrite; + ac->ev.cleanup = redisLibevCleanup; + ac->ev.scheduleTimer = redisLibevSetTimeout; + ac->ev.data = e; + + /* Initialize read/write events */ + 
ev_io_init(&e->rev,redisLibevReadEvent,c->fd,EV_READ); + ev_io_init(&e->wev,redisLibevWriteEvent,c->fd,EV_WRITE); + return REDIS_OK; +} + +#endif diff --git a/ext/hiredis-1.0.2/adapters/libevent.h b/ext/hiredis-1.0.2/adapters/libevent.h new file mode 100644 index 000000000..9150979bc --- /dev/null +++ b/ext/hiredis-1.0.2/adapters/libevent.h @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2010-2011, Pieter Noordhuis + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __HIREDIS_LIBEVENT_H__ +#define __HIREDIS_LIBEVENT_H__ +#include +#include "../hiredis.h" +#include "../async.h" + +#define REDIS_LIBEVENT_DELETED 0x01 +#define REDIS_LIBEVENT_ENTERED 0x02 + +typedef struct redisLibeventEvents { + redisAsyncContext *context; + struct event *ev; + struct event_base *base; + struct timeval tv; + short flags; + short state; +} redisLibeventEvents; + +static void redisLibeventDestroy(redisLibeventEvents *e) { + hi_free(e); +} + +static void redisLibeventHandler(int fd, short event, void *arg) { + ((void)fd); + redisLibeventEvents *e = (redisLibeventEvents*)arg; + e->state |= REDIS_LIBEVENT_ENTERED; + + #define CHECK_DELETED() if (e->state & REDIS_LIBEVENT_DELETED) {\ + redisLibeventDestroy(e);\ + return; \ + } + + if ((event & EV_TIMEOUT) && (e->state & REDIS_LIBEVENT_DELETED) == 0) { + redisAsyncHandleTimeout(e->context); + CHECK_DELETED(); + } + + if ((event & EV_READ) && e->context && (e->state & REDIS_LIBEVENT_DELETED) == 0) { + redisAsyncHandleRead(e->context); + CHECK_DELETED(); + } + + if ((event & EV_WRITE) && e->context && (e->state & REDIS_LIBEVENT_DELETED) == 0) { + redisAsyncHandleWrite(e->context); + CHECK_DELETED(); + } + + e->state &= ~REDIS_LIBEVENT_ENTERED; + #undef CHECK_DELETED +} + +static void redisLibeventUpdate(void *privdata, short flag, int isRemove) { + redisLibeventEvents *e = (redisLibeventEvents *)privdata; + const struct timeval *tv = e->tv.tv_sec || e->tv.tv_usec ? 
&e->tv : NULL; + + if (isRemove) { + if ((e->flags & flag) == 0) { + return; + } else { + e->flags &= ~flag; + } + } else { + if (e->flags & flag) { + return; + } else { + e->flags |= flag; + } + } + + event_del(e->ev); + event_assign(e->ev, e->base, e->context->c.fd, e->flags | EV_PERSIST, + redisLibeventHandler, privdata); + event_add(e->ev, tv); +} + +static void redisLibeventAddRead(void *privdata) { + redisLibeventUpdate(privdata, EV_READ, 0); +} + +static void redisLibeventDelRead(void *privdata) { + redisLibeventUpdate(privdata, EV_READ, 1); +} + +static void redisLibeventAddWrite(void *privdata) { + redisLibeventUpdate(privdata, EV_WRITE, 0); +} + +static void redisLibeventDelWrite(void *privdata) { + redisLibeventUpdate(privdata, EV_WRITE, 1); +} + +static void redisLibeventCleanup(void *privdata) { + redisLibeventEvents *e = (redisLibeventEvents*)privdata; + if (!e) { + return; + } + event_del(e->ev); + event_free(e->ev); + e->ev = NULL; + + if (e->state & REDIS_LIBEVENT_ENTERED) { + e->state |= REDIS_LIBEVENT_DELETED; + } else { + redisLibeventDestroy(e); + } +} + +static void redisLibeventSetTimeout(void *privdata, struct timeval tv) { + redisLibeventEvents *e = (redisLibeventEvents *)privdata; + short flags = e->flags; + e->flags = 0; + e->tv = tv; + redisLibeventUpdate(e, flags, 0); +} + +static int redisLibeventAttach(redisAsyncContext *ac, struct event_base *base) { + redisContext *c = &(ac->c); + redisLibeventEvents *e; + + /* Nothing should be attached when something is already attached */ + if (ac->ev.data != NULL) + return REDIS_ERR; + + /* Create container for context and r/w events */ + e = (redisLibeventEvents*)hi_calloc(1, sizeof(*e)); + if (e == NULL) + return REDIS_ERR; + + e->context = ac; + + /* Register functions to start/stop listening for events */ + ac->ev.addRead = redisLibeventAddRead; + ac->ev.delRead = redisLibeventDelRead; + ac->ev.addWrite = redisLibeventAddWrite; + ac->ev.delWrite = redisLibeventDelWrite; + ac->ev.cleanup = redisLibeventCleanup; + ac->ev.scheduleTimer = redisLibeventSetTimeout; + ac->ev.data = e; + + /* Initialize and install read/write events */ + e->ev = event_new(base, c->fd, EV_READ | EV_WRITE, redisLibeventHandler, e); + e->base = base; + return REDIS_OK; +} +#endif diff --git a/ext/hiredis-1.0.2/adapters/libuv.h b/ext/hiredis-1.0.2/adapters/libuv.h new file mode 100644 index 000000000..c120b1b39 --- /dev/null +++ b/ext/hiredis-1.0.2/adapters/libuv.h @@ -0,0 +1,117 @@ +#ifndef __HIREDIS_LIBUV_H__ +#define __HIREDIS_LIBUV_H__ +#include +#include +#include "../hiredis.h" +#include "../async.h" +#include + +typedef struct redisLibuvEvents { + redisAsyncContext* context; + uv_poll_t handle; + int events; +} redisLibuvEvents; + + +static void redisLibuvPoll(uv_poll_t* handle, int status, int events) { + redisLibuvEvents* p = (redisLibuvEvents*)handle->data; + int ev = (status ? 
p->events : events); + + if (p->context != NULL && (ev & UV_READABLE)) { + redisAsyncHandleRead(p->context); + } + if (p->context != NULL && (ev & UV_WRITABLE)) { + redisAsyncHandleWrite(p->context); + } +} + + +static void redisLibuvAddRead(void *privdata) { + redisLibuvEvents* p = (redisLibuvEvents*)privdata; + + p->events |= UV_READABLE; + + uv_poll_start(&p->handle, p->events, redisLibuvPoll); +} + + +static void redisLibuvDelRead(void *privdata) { + redisLibuvEvents* p = (redisLibuvEvents*)privdata; + + p->events &= ~UV_READABLE; + + if (p->events) { + uv_poll_start(&p->handle, p->events, redisLibuvPoll); + } else { + uv_poll_stop(&p->handle); + } +} + + +static void redisLibuvAddWrite(void *privdata) { + redisLibuvEvents* p = (redisLibuvEvents*)privdata; + + p->events |= UV_WRITABLE; + + uv_poll_start(&p->handle, p->events, redisLibuvPoll); +} + + +static void redisLibuvDelWrite(void *privdata) { + redisLibuvEvents* p = (redisLibuvEvents*)privdata; + + p->events &= ~UV_WRITABLE; + + if (p->events) { + uv_poll_start(&p->handle, p->events, redisLibuvPoll); + } else { + uv_poll_stop(&p->handle); + } +} + + +static void on_close(uv_handle_t* handle) { + redisLibuvEvents* p = (redisLibuvEvents*)handle->data; + + hi_free(p); +} + + +static void redisLibuvCleanup(void *privdata) { + redisLibuvEvents* p = (redisLibuvEvents*)privdata; + + p->context = NULL; // indicate that context might no longer exist + uv_close((uv_handle_t*)&p->handle, on_close); +} + + +static int redisLibuvAttach(redisAsyncContext* ac, uv_loop_t* loop) { + redisContext *c = &(ac->c); + + if (ac->ev.data != NULL) { + return REDIS_ERR; + } + + ac->ev.addRead = redisLibuvAddRead; + ac->ev.delRead = redisLibuvDelRead; + ac->ev.addWrite = redisLibuvAddWrite; + ac->ev.delWrite = redisLibuvDelWrite; + ac->ev.cleanup = redisLibuvCleanup; + + redisLibuvEvents* p = (redisLibuvEvents*)hi_malloc(sizeof(*p)); + if (p == NULL) + return REDIS_ERR; + + memset(p, 0, sizeof(*p)); + + if (uv_poll_init_socket(loop, &p->handle, c->fd) != 0) { + return REDIS_ERR; + } + + ac->ev.data = p; + p->handle.data = p; + p->context = ac; + + return REDIS_OK; +} +#endif diff --git a/ext/hiredis-1.0.2/adapters/macosx.h b/ext/hiredis-1.0.2/adapters/macosx.h new file mode 100644 index 000000000..3c87f1b2f --- /dev/null +++ b/ext/hiredis-1.0.2/adapters/macosx.h @@ -0,0 +1,115 @@ +// +// Created by Дмитрий Бахвалов on 13.07.15. +// Copyright (c) 2015 Dmitry Bakhvalov. All rights reserved. 
+// + +#ifndef __HIREDIS_MACOSX_H__ +#define __HIREDIS_MACOSX_H__ + +#include + +#include "../hiredis.h" +#include "../async.h" + +typedef struct { + redisAsyncContext *context; + CFSocketRef socketRef; + CFRunLoopSourceRef sourceRef; +} RedisRunLoop; + +static int freeRedisRunLoop(RedisRunLoop* redisRunLoop) { + if( redisRunLoop != NULL ) { + if( redisRunLoop->sourceRef != NULL ) { + CFRunLoopSourceInvalidate(redisRunLoop->sourceRef); + CFRelease(redisRunLoop->sourceRef); + } + if( redisRunLoop->socketRef != NULL ) { + CFSocketInvalidate(redisRunLoop->socketRef); + CFRelease(redisRunLoop->socketRef); + } + hi_free(redisRunLoop); + } + return REDIS_ERR; +} + +static void redisMacOSAddRead(void *privdata) { + RedisRunLoop *redisRunLoop = (RedisRunLoop*)privdata; + CFSocketEnableCallBacks(redisRunLoop->socketRef, kCFSocketReadCallBack); +} + +static void redisMacOSDelRead(void *privdata) { + RedisRunLoop *redisRunLoop = (RedisRunLoop*)privdata; + CFSocketDisableCallBacks(redisRunLoop->socketRef, kCFSocketReadCallBack); +} + +static void redisMacOSAddWrite(void *privdata) { + RedisRunLoop *redisRunLoop = (RedisRunLoop*)privdata; + CFSocketEnableCallBacks(redisRunLoop->socketRef, kCFSocketWriteCallBack); +} + +static void redisMacOSDelWrite(void *privdata) { + RedisRunLoop *redisRunLoop = (RedisRunLoop*)privdata; + CFSocketDisableCallBacks(redisRunLoop->socketRef, kCFSocketWriteCallBack); +} + +static void redisMacOSCleanup(void *privdata) { + RedisRunLoop *redisRunLoop = (RedisRunLoop*)privdata; + freeRedisRunLoop(redisRunLoop); +} + +static void redisMacOSAsyncCallback(CFSocketRef __unused s, CFSocketCallBackType callbackType, CFDataRef __unused address, const void __unused *data, void *info) { + redisAsyncContext* context = (redisAsyncContext*) info; + + switch (callbackType) { + case kCFSocketReadCallBack: + redisAsyncHandleRead(context); + break; + + case kCFSocketWriteCallBack: + redisAsyncHandleWrite(context); + break; + + default: + break; + } +} + +static int redisMacOSAttach(redisAsyncContext *redisAsyncCtx, CFRunLoopRef runLoop) { + redisContext *redisCtx = &(redisAsyncCtx->c); + + /* Nothing should be attached when something is already attached */ + if( redisAsyncCtx->ev.data != NULL ) return REDIS_ERR; + + RedisRunLoop* redisRunLoop = (RedisRunLoop*) hi_calloc(1, sizeof(RedisRunLoop)); + if (redisRunLoop == NULL) + return REDIS_ERR; + + /* Setup redis stuff */ + redisRunLoop->context = redisAsyncCtx; + + redisAsyncCtx->ev.addRead = redisMacOSAddRead; + redisAsyncCtx->ev.delRead = redisMacOSDelRead; + redisAsyncCtx->ev.addWrite = redisMacOSAddWrite; + redisAsyncCtx->ev.delWrite = redisMacOSDelWrite; + redisAsyncCtx->ev.cleanup = redisMacOSCleanup; + redisAsyncCtx->ev.data = redisRunLoop; + + /* Initialize and install read/write events */ + CFSocketContext socketCtx = { 0, redisAsyncCtx, NULL, NULL, NULL }; + + redisRunLoop->socketRef = CFSocketCreateWithNative(NULL, redisCtx->fd, + kCFSocketReadCallBack | kCFSocketWriteCallBack, + redisMacOSAsyncCallback, + &socketCtx); + if( !redisRunLoop->socketRef ) return freeRedisRunLoop(redisRunLoop); + + redisRunLoop->sourceRef = CFSocketCreateRunLoopSource(NULL, redisRunLoop->socketRef, 0); + if( !redisRunLoop->sourceRef ) return freeRedisRunLoop(redisRunLoop); + + CFRunLoopAddSource(runLoop, redisRunLoop->sourceRef, kCFRunLoopDefaultMode); + + return REDIS_OK; +} + +#endif + diff --git a/ext/hiredis-1.0.2/adapters/qt.h b/ext/hiredis-1.0.2/adapters/qt.h new file mode 100644 index 000000000..5cc02e6ce --- /dev/null +++ 
b/ext/hiredis-1.0.2/adapters/qt.h @@ -0,0 +1,135 @@ +/*- + * Copyright (C) 2014 Pietro Cerutti + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef __HIREDIS_QT_H__ +#define __HIREDIS_QT_H__ +#include +#include "../async.h" + +static void RedisQtAddRead(void *); +static void RedisQtDelRead(void *); +static void RedisQtAddWrite(void *); +static void RedisQtDelWrite(void *); +static void RedisQtCleanup(void *); + +class RedisQtAdapter : public QObject { + + Q_OBJECT + + friend + void RedisQtAddRead(void * adapter) { + RedisQtAdapter * a = static_cast(adapter); + a->addRead(); + } + + friend + void RedisQtDelRead(void * adapter) { + RedisQtAdapter * a = static_cast(adapter); + a->delRead(); + } + + friend + void RedisQtAddWrite(void * adapter) { + RedisQtAdapter * a = static_cast(adapter); + a->addWrite(); + } + + friend + void RedisQtDelWrite(void * adapter) { + RedisQtAdapter * a = static_cast(adapter); + a->delWrite(); + } + + friend + void RedisQtCleanup(void * adapter) { + RedisQtAdapter * a = static_cast(adapter); + a->cleanup(); + } + + public: + RedisQtAdapter(QObject * parent = 0) + : QObject(parent), m_ctx(0), m_read(0), m_write(0) { } + + ~RedisQtAdapter() { + if (m_ctx != 0) { + m_ctx->ev.data = NULL; + } + } + + int setContext(redisAsyncContext * ac) { + if (ac->ev.data != NULL) { + return REDIS_ERR; + } + m_ctx = ac; + m_ctx->ev.data = this; + m_ctx->ev.addRead = RedisQtAddRead; + m_ctx->ev.delRead = RedisQtDelRead; + m_ctx->ev.addWrite = RedisQtAddWrite; + m_ctx->ev.delWrite = RedisQtDelWrite; + m_ctx->ev.cleanup = RedisQtCleanup; + return REDIS_OK; + } + + private: + void addRead() { + if (m_read) return; + m_read = new QSocketNotifier(m_ctx->c.fd, QSocketNotifier::Read, 0); + connect(m_read, SIGNAL(activated(int)), this, SLOT(read())); + } + + void delRead() { + if (!m_read) return; + delete m_read; + m_read = 0; + } + + void addWrite() { + if (m_write) return; + m_write = new QSocketNotifier(m_ctx->c.fd, QSocketNotifier::Write, 0); + connect(m_write, SIGNAL(activated(int)), this, SLOT(write())); + } + + void delWrite() { + if (!m_write) return; + delete m_write; + m_write = 0; + } + + void cleanup() { + delRead(); + delWrite(); + } + + private slots: + void read() { redisAsyncHandleRead(m_ctx); } + void write() { redisAsyncHandleWrite(m_ctx); } + 
+ private: + redisAsyncContext * m_ctx; + QSocketNotifier * m_read; + QSocketNotifier * m_write; +}; + +#endif /* !__HIREDIS_QT_H__ */ diff --git a/ext/hiredis-1.0.2/alloc.c b/ext/hiredis-1.0.2/alloc.c new file mode 100644 index 000000000..7fb6b35e7 --- /dev/null +++ b/ext/hiredis-1.0.2/alloc.c @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2020, Michael Grunder + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "fmacros.h" +#include "alloc.h" +#include +#include + +hiredisAllocFuncs hiredisAllocFns = { + .mallocFn = malloc, + .callocFn = calloc, + .reallocFn = realloc, + .strdupFn = strdup, + .freeFn = free, +}; + +/* Override hiredis' allocators with ones supplied by the user */ +hiredisAllocFuncs hiredisSetAllocators(hiredisAllocFuncs *override) { + hiredisAllocFuncs orig = hiredisAllocFns; + + hiredisAllocFns = *override; + + return orig; +} + +/* Reset allocators to use libc defaults */ +void hiredisResetAllocators(void) { + hiredisAllocFns = (hiredisAllocFuncs) { + .mallocFn = malloc, + .callocFn = calloc, + .reallocFn = realloc, + .strdupFn = strdup, + .freeFn = free, + }; +} + +#ifdef _WIN32 + +void *hi_malloc(size_t size) { + return hiredisAllocFns.mallocFn(size); +} + +void *hi_calloc(size_t nmemb, size_t size) { + return hiredisAllocFns.callocFn(nmemb, size); +} + +void *hi_realloc(void *ptr, size_t size) { + return hiredisAllocFns.reallocFn(ptr, size); +} + +char *hi_strdup(const char *str) { + return hiredisAllocFns.strdupFn(str); +} + +void hi_free(void *ptr) { + hiredisAllocFns.freeFn(ptr); +} + +#endif diff --git a/ext/hiredis-1.0.2/alloc.h b/ext/hiredis-1.0.2/alloc.h new file mode 100644 index 000000000..34a05f49f --- /dev/null +++ b/ext/hiredis-1.0.2/alloc.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2020, Michael Grunder + * + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef HIREDIS_ALLOC_H +#define HIREDIS_ALLOC_H + +#include /* for size_t */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Structure pointing to our actually configured allocators */ +typedef struct hiredisAllocFuncs { + void *(*mallocFn)(size_t); + void *(*callocFn)(size_t,size_t); + void *(*reallocFn)(void*,size_t); + char *(*strdupFn)(const char*); + void (*freeFn)(void*); +} hiredisAllocFuncs; + +hiredisAllocFuncs hiredisSetAllocators(hiredisAllocFuncs *ha); +void hiredisResetAllocators(void); + +#ifndef _WIN32 + +/* Hiredis' configured allocator function pointer struct */ +extern hiredisAllocFuncs hiredisAllocFns; + +static inline void *hi_malloc(size_t size) { + return hiredisAllocFns.mallocFn(size); +} + +static inline void *hi_calloc(size_t nmemb, size_t size) { + return hiredisAllocFns.callocFn(nmemb, size); +} + +static inline void *hi_realloc(void *ptr, size_t size) { + return hiredisAllocFns.reallocFn(ptr, size); +} + +static inline char *hi_strdup(const char *str) { + return hiredisAllocFns.strdupFn(str); +} + +static inline void hi_free(void *ptr) { + hiredisAllocFns.freeFn(ptr); +} + +#else + +void *hi_malloc(size_t size); +void *hi_calloc(size_t nmemb, size_t size); +void *hi_realloc(void *ptr, size_t size); +char *hi_strdup(const char *str); +void hi_free(void *ptr); + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* HIREDIS_ALLOC_H */ diff --git a/ext/hiredis-1.0.2/appveyor.yml b/ext/hiredis-1.0.2/appveyor.yml new file mode 100644 index 000000000..5b43fdbeb --- /dev/null +++ b/ext/hiredis-1.0.2/appveyor.yml @@ -0,0 +1,24 @@ +# Appveyor configuration file for CI build of hiredis on Windows (under Cygwin) +environment: + matrix: + - CYG_BASH: C:\cygwin64\bin\bash + CC: gcc + - CYG_BASH: C:\cygwin\bin\bash + CC: gcc + CFLAGS: -m32 + CXXFLAGS: -m32 + LDFLAGS: -m32 + +clone_depth: 1 + +# Attempt to ensure we don't try to convert line endings to Win32 CRLF as this will cause build to fail +init: + - git config --global core.autocrlf input + +# Install needed build dependencies +install: + 
- '%CYG_BASH% -lc "cygcheck -dc cygwin"'
+
+build_script:
+ - 'echo building...'
+ - '%CYG_BASH% -lc "cd $APPVEYOR_BUILD_FOLDER; exec 0
+ * Copyright (c) 2010-2011, Pieter Noordhuis
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Redis nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "fmacros.h"
+#include "alloc.h"
+#include <stdlib.h>
+#include <string.h>
+#ifndef _MSC_VER
+#include <strings.h>
+#endif
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include "async.h"
+#include "net.h"
+#include "dict.c"
+#include "sds.h"
+#include "win32.h"
+
+#include "async_private.h"
+
+/* Forward declarations of hiredis.c functions */
+int __redisAppendCommand(redisContext *c, const char *cmd, size_t len);
+void __redisSetError(redisContext *c, int type, const char *str);
+
+/* Functions managing dictionary of callbacks for pub/sub.
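+ * Keys are sds channel/pattern names; values are heap-allocated copies of redisCallback.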
*/ +static unsigned int callbackHash(const void *key) { + return dictGenHashFunction((const unsigned char *)key, + sdslen((const sds)key)); +} + +static void *callbackValDup(void *privdata, const void *src) { + ((void) privdata); + redisCallback *dup; + + dup = hi_malloc(sizeof(*dup)); + if (dup == NULL) + return NULL; + + memcpy(dup,src,sizeof(*dup)); + return dup; +} + +static int callbackKeyCompare(void *privdata, const void *key1, const void *key2) { + int l1, l2; + ((void) privdata); + + l1 = sdslen((const sds)key1); + l2 = sdslen((const sds)key2); + if (l1 != l2) return 0; + return memcmp(key1,key2,l1) == 0; +} + +static void callbackKeyDestructor(void *privdata, void *key) { + ((void) privdata); + sdsfree((sds)key); +} + +static void callbackValDestructor(void *privdata, void *val) { + ((void) privdata); + hi_free(val); +} + +static dictType callbackDict = { + callbackHash, + NULL, + callbackValDup, + callbackKeyCompare, + callbackKeyDestructor, + callbackValDestructor +}; + +static redisAsyncContext *redisAsyncInitialize(redisContext *c) { + redisAsyncContext *ac; + dict *channels = NULL, *patterns = NULL; + + channels = dictCreate(&callbackDict,NULL); + if (channels == NULL) + goto oom; + + patterns = dictCreate(&callbackDict,NULL); + if (patterns == NULL) + goto oom; + + ac = hi_realloc(c,sizeof(redisAsyncContext)); + if (ac == NULL) + goto oom; + + c = &(ac->c); + + /* The regular connect functions will always set the flag REDIS_CONNECTED. + * For the async API, we want to wait until the first write event is + * received up before setting this flag, so reset it here. */ + c->flags &= ~REDIS_CONNECTED; + + ac->err = 0; + ac->errstr = NULL; + ac->data = NULL; + ac->dataCleanup = NULL; + + ac->ev.data = NULL; + ac->ev.addRead = NULL; + ac->ev.delRead = NULL; + ac->ev.addWrite = NULL; + ac->ev.delWrite = NULL; + ac->ev.cleanup = NULL; + ac->ev.scheduleTimer = NULL; + + ac->onConnect = NULL; + ac->onDisconnect = NULL; + + ac->replies.head = NULL; + ac->replies.tail = NULL; + ac->sub.invalid.head = NULL; + ac->sub.invalid.tail = NULL; + ac->sub.channels = channels; + ac->sub.patterns = patterns; + + return ac; +oom: + if (channels) dictRelease(channels); + if (patterns) dictRelease(patterns); + return NULL; +} + +/* We want the error field to be accessible directly instead of requiring + * an indirection to the redisContext struct. */ +static void __redisAsyncCopyError(redisAsyncContext *ac) { + if (!ac) + return; + + redisContext *c = &(ac->c); + ac->err = c->err; + ac->errstr = c->errstr; +} + +redisAsyncContext *redisAsyncConnectWithOptions(const redisOptions *options) { + redisOptions myOptions = *options; + redisContext *c; + redisAsyncContext *ac; + + /* Clear any erroneously set sync callback and flag that we don't want to + * use freeReplyObject by default. 
*/ + myOptions.push_cb = NULL; + myOptions.options |= REDIS_OPT_NO_PUSH_AUTOFREE; + + myOptions.options |= REDIS_OPT_NONBLOCK; + c = redisConnectWithOptions(&myOptions); + if (c == NULL) { + return NULL; + } + + ac = redisAsyncInitialize(c); + if (ac == NULL) { + redisFree(c); + return NULL; + } + + /* Set any configured async push handler */ + redisAsyncSetPushCallback(ac, myOptions.async_push_cb); + + __redisAsyncCopyError(ac); + return ac; +} + +redisAsyncContext *redisAsyncConnect(const char *ip, int port) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, ip, port); + return redisAsyncConnectWithOptions(&options); +} + +redisAsyncContext *redisAsyncConnectBind(const char *ip, int port, + const char *source_addr) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, ip, port); + options.endpoint.tcp.source_addr = source_addr; + return redisAsyncConnectWithOptions(&options); +} + +redisAsyncContext *redisAsyncConnectBindWithReuse(const char *ip, int port, + const char *source_addr) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, ip, port); + options.options |= REDIS_OPT_REUSEADDR; + options.endpoint.tcp.source_addr = source_addr; + return redisAsyncConnectWithOptions(&options); +} + +redisAsyncContext *redisAsyncConnectUnix(const char *path) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_UNIX(&options, path); + return redisAsyncConnectWithOptions(&options); +} + +int redisAsyncSetConnectCallback(redisAsyncContext *ac, redisConnectCallback *fn) { + if (ac->onConnect == NULL) { + ac->onConnect = fn; + + /* The common way to detect an established connection is to wait for + * the first write event to be fired. This assumes the related event + * library functions are already set. */ + _EL_ADD_WRITE(ac); + return REDIS_OK; + } + return REDIS_ERR; +} + +int redisAsyncSetDisconnectCallback(redisAsyncContext *ac, redisDisconnectCallback *fn) { + if (ac->onDisconnect == NULL) { + ac->onDisconnect = fn; + return REDIS_OK; + } + return REDIS_ERR; +} + +/* Helper functions to push/shift callbacks */ +static int __redisPushCallback(redisCallbackList *list, redisCallback *source) { + redisCallback *cb; + + /* Copy callback from stack to heap */ + cb = hi_malloc(sizeof(*cb)); + if (cb == NULL) + return REDIS_ERR_OOM; + + if (source != NULL) { + memcpy(cb,source,sizeof(*cb)); + cb->next = NULL; + } + + /* Store callback in list */ + if (list->head == NULL) + list->head = cb; + if (list->tail != NULL) + list->tail->next = cb; + list->tail = cb; + return REDIS_OK; +} + +static int __redisShiftCallback(redisCallbackList *list, redisCallback *target) { + redisCallback *cb = list->head; + if (cb != NULL) { + list->head = cb->next; + if (cb == list->tail) + list->tail = NULL; + + /* Copy callback from heap to stack */ + if (target != NULL) + memcpy(target,cb,sizeof(*cb)); + hi_free(cb); + return REDIS_OK; + } + return REDIS_ERR; +} + +static void __redisRunCallback(redisAsyncContext *ac, redisCallback *cb, redisReply *reply) { + redisContext *c = &(ac->c); + if (cb->fn != NULL) { + c->flags |= REDIS_IN_CALLBACK; + cb->fn(ac,reply,cb->privdata); + c->flags &= ~REDIS_IN_CALLBACK; + } +} + +static void __redisRunPushCallback(redisAsyncContext *ac, redisReply *reply) { + if (ac->push_cb != NULL) { + ac->c.flags |= REDIS_IN_CALLBACK; + ac->push_cb(ac, reply); + ac->c.flags &= ~REDIS_IN_CALLBACK; + } +} + +/* Helper function to free the context. 
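+ * Pending reply callbacks and subscription callbacks are run with a NULL reply before the underlying redisContext is freed.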
*/ +static void __redisAsyncFree(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + redisCallback cb; + dictIterator *it; + dictEntry *de; + + /* Execute pending callbacks with NULL reply. */ + while (__redisShiftCallback(&ac->replies,&cb) == REDIS_OK) + __redisRunCallback(ac,&cb,NULL); + + /* Execute callbacks for invalid commands */ + while (__redisShiftCallback(&ac->sub.invalid,&cb) == REDIS_OK) + __redisRunCallback(ac,&cb,NULL); + + /* Run subscription callbacks with NULL reply */ + if (ac->sub.channels) { + it = dictGetIterator(ac->sub.channels); + if (it != NULL) { + while ((de = dictNext(it)) != NULL) + __redisRunCallback(ac,dictGetEntryVal(de),NULL); + dictReleaseIterator(it); + } + + dictRelease(ac->sub.channels); + } + + if (ac->sub.patterns) { + it = dictGetIterator(ac->sub.patterns); + if (it != NULL) { + while ((de = dictNext(it)) != NULL) + __redisRunCallback(ac,dictGetEntryVal(de),NULL); + dictReleaseIterator(it); + } + + dictRelease(ac->sub.patterns); + } + + /* Signal event lib to clean up */ + _EL_CLEANUP(ac); + + /* Execute disconnect callback. When redisAsyncFree() initiated destroying + * this context, the status will always be REDIS_OK. */ + if (ac->onDisconnect && (c->flags & REDIS_CONNECTED)) { + if (c->flags & REDIS_FREEING) { + ac->onDisconnect(ac,REDIS_OK); + } else { + ac->onDisconnect(ac,(ac->err == 0) ? REDIS_OK : REDIS_ERR); + } + } + + if (ac->dataCleanup) { + ac->dataCleanup(ac->data); + } + + /* Cleanup self */ + redisFree(c); +} + +/* Free the async context. When this function is called from a callback, + * control needs to be returned to redisProcessCallbacks() before actual + * free'ing. To do so, a flag is set on the context which is picked up by + * redisProcessCallbacks(). Otherwise, the context is immediately free'd. */ +void redisAsyncFree(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + c->flags |= REDIS_FREEING; + if (!(c->flags & REDIS_IN_CALLBACK)) + __redisAsyncFree(ac); +} + +/* Helper function to make the disconnect happen and clean up. */ +void __redisAsyncDisconnect(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + + /* Make sure error is accessible if there is any */ + __redisAsyncCopyError(ac); + + if (ac->err == 0) { + /* For clean disconnects, there should be no pending callbacks. */ + int ret = __redisShiftCallback(&ac->replies,NULL); + assert(ret == REDIS_ERR); + } else { + /* Disconnection is caused by an error, make sure that pending + * callbacks cannot call new commands. */ + c->flags |= REDIS_DISCONNECTING; + } + + /* cleanup event library on disconnect. + * this is safe to call multiple times */ + _EL_CLEANUP(ac); + + /* For non-clean disconnects, __redisAsyncFree() will execute pending + * callbacks with a NULL-reply. */ + if (!(c->flags & REDIS_NO_AUTO_FREE)) { + __redisAsyncFree(ac); + } +} + +/* Tries to do a clean disconnect from Redis, meaning it stops new commands + * from being issued, but tries to flush the output buffer and execute + * callbacks for all remaining replies. When this function is called from a + * callback, there might be more replies and we can safely defer disconnecting + * to redisProcessCallbacks(). Otherwise, we can only disconnect immediately + * when there are no pending callbacks. 
*/ +void redisAsyncDisconnect(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + c->flags |= REDIS_DISCONNECTING; + + /** unset the auto-free flag here, because disconnect undoes this */ + c->flags &= ~REDIS_NO_AUTO_FREE; + if (!(c->flags & REDIS_IN_CALLBACK) && ac->replies.head == NULL) + __redisAsyncDisconnect(ac); +} + +static int __redisGetSubscribeCallback(redisAsyncContext *ac, redisReply *reply, redisCallback *dstcb) { + redisContext *c = &(ac->c); + dict *callbacks; + redisCallback *cb; + dictEntry *de; + int pvariant; + char *stype; + sds sname; + + /* Custom reply functions are not supported for pub/sub. This will fail + * very hard when they are used... */ + if (reply->type == REDIS_REPLY_ARRAY || reply->type == REDIS_REPLY_PUSH) { + assert(reply->elements >= 2); + assert(reply->element[0]->type == REDIS_REPLY_STRING); + stype = reply->element[0]->str; + pvariant = (tolower(stype[0]) == 'p') ? 1 : 0; + + if (pvariant) + callbacks = ac->sub.patterns; + else + callbacks = ac->sub.channels; + + /* Locate the right callback */ + assert(reply->element[1]->type == REDIS_REPLY_STRING); + sname = sdsnewlen(reply->element[1]->str,reply->element[1]->len); + if (sname == NULL) + goto oom; + + de = dictFind(callbacks,sname); + if (de != NULL) { + cb = dictGetEntryVal(de); + + /* If this is an subscribe reply decrease pending counter. */ + if (strcasecmp(stype+pvariant,"subscribe") == 0) { + cb->pending_subs -= 1; + } + + memcpy(dstcb,cb,sizeof(*dstcb)); + + /* If this is an unsubscribe message, remove it. */ + if (strcasecmp(stype+pvariant,"unsubscribe") == 0) { + if (cb->pending_subs == 0) + dictDelete(callbacks,sname); + + /* If this was the last unsubscribe message, revert to + * non-subscribe mode. */ + assert(reply->element[2]->type == REDIS_REPLY_INTEGER); + + /* Unset subscribed flag only when no pipelined pending subscribe. */ + if (reply->element[2]->integer == 0 + && dictSize(ac->sub.channels) == 0 + && dictSize(ac->sub.patterns) == 0) + c->flags &= ~REDIS_SUBSCRIBED; + } + } + sdsfree(sname); + } else { + /* Shift callback for invalid commands. */ + __redisShiftCallback(&ac->sub.invalid,dstcb); + } + return REDIS_OK; +oom: + __redisSetError(&(ac->c), REDIS_ERR_OOM, "Out of memory"); + return REDIS_ERR; +} + +#define redisIsSpontaneousPushReply(r) \ + (redisIsPushReply(r) && !redisIsSubscribeReply(r)) + +static int redisIsSubscribeReply(redisReply *reply) { + char *str; + size_t len, off; + + /* We will always have at least one string with the subscribe/message type */ + if (reply->elements < 1 || reply->element[0]->type != REDIS_REPLY_STRING || + reply->element[0]->len < sizeof("message") - 1) + { + return 0; + } + + /* Get the string/len moving past 'p' if needed */ + off = tolower(reply->element[0]->str[0]) == 'p'; + str = reply->element[0]->str + off; + len = reply->element[0]->len - off; + + return !strncasecmp(str, "subscribe", len) || + !strncasecmp(str, "message", len); + +} + +void redisProcessCallbacks(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + redisCallback cb = {NULL, NULL, 0, NULL}; + void *reply = NULL; + int status; + + while((status = redisGetReply(c,&reply)) == REDIS_OK) { + if (reply == NULL) { + /* When the connection is being disconnected and there are + * no more replies, this is the cue to really disconnect. 
*/ + if (c->flags & REDIS_DISCONNECTING && sdslen(c->obuf) == 0 + && ac->replies.head == NULL) { + __redisAsyncDisconnect(ac); + return; + } + + /* If monitor mode, repush callback */ + if(c->flags & REDIS_MONITORING) { + __redisPushCallback(&ac->replies,&cb); + } + + /* When the connection is not being disconnected, simply stop + * trying to get replies and wait for the next loop tick. */ + break; + } + + /* Send any non-subscribe related PUSH messages to our PUSH handler + * while allowing subscribe related PUSH messages to pass through. + * This allows existing code to be backward compatible and work in + * either RESP2 or RESP3 mode. */ + if (redisIsSpontaneousPushReply(reply)) { + __redisRunPushCallback(ac, reply); + c->reader->fn->freeObject(reply); + continue; + } + + /* Even if the context is subscribed, pending regular + * callbacks will get a reply before pub/sub messages arrive. */ + if (__redisShiftCallback(&ac->replies,&cb) != REDIS_OK) { + /* + * A spontaneous reply in a not-subscribed context can be the error + * reply that is sent when a new connection exceeds the maximum + * number of allowed connections on the server side. + * + * This is seen as an error instead of a regular reply because the + * server closes the connection after sending it. + * + * To prevent the error from being overwritten by an EOF error the + * connection is closed here. See issue #43. + * + * Another possibility is that the server is loading its dataset. + * In this case we also want to close the connection, and have the + * user wait until the server is ready to take our request. + */ + if (((redisReply*)reply)->type == REDIS_REPLY_ERROR) { + c->err = REDIS_ERR_OTHER; + snprintf(c->errstr,sizeof(c->errstr),"%s",((redisReply*)reply)->str); + c->reader->fn->freeObject(reply); + __redisAsyncDisconnect(ac); + return; + } + /* No more regular callbacks and no errors, the context *must* be subscribed or monitoring. */ + assert((c->flags & REDIS_SUBSCRIBED || c->flags & REDIS_MONITORING)); + if(c->flags & REDIS_SUBSCRIBED) + __redisGetSubscribeCallback(ac,reply,&cb); + } + + if (cb.fn != NULL) { + __redisRunCallback(ac,&cb,reply); + c->reader->fn->freeObject(reply); + + /* Proceed with free'ing when redisAsyncFree() was called. */ + if (c->flags & REDIS_FREEING) { + __redisAsyncFree(ac); + return; + } + } else { + /* No callback for this reply. This can either be a NULL callback, + * or there were no callbacks to begin with. Either way, don't + * abort with an error, but simply ignore it because the client + * doesn't know what the server will spit out over the wire. */ + c->reader->fn->freeObject(reply); + } + } + + /* Disconnect when there was an error reading the reply */ + if (status != REDIS_OK) + __redisAsyncDisconnect(ac); +} + +static void __redisAsyncHandleConnectFailure(redisAsyncContext *ac) { + if (ac->onConnect) ac->onConnect(ac, REDIS_ERR); + __redisAsyncDisconnect(ac); +} + +/* Internal helper function to detect socket status the first time a read or + * write event fires. When connecting was not successful, the connect callback + * is called with a REDIS_ERR status and the context is free'd. */ +static int __redisAsyncHandleConnect(redisAsyncContext *ac) { + int completed = 0; + redisContext *c = &(ac->c); + + if (redisCheckConnectDone(c, &completed) == REDIS_ERR) { + /* Error! */ + redisCheckSocketError(c); + __redisAsyncHandleConnectFailure(ac); + return REDIS_ERR; + } else if (completed == 1) { + /* connected! 
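+         * Disable Nagle (TCP_NODELAY) on TCP connections, run the connect callback and mark the context as connected.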
*/ + if (c->connection_type == REDIS_CONN_TCP && + redisSetTcpNoDelay(c) == REDIS_ERR) { + __redisAsyncHandleConnectFailure(ac); + return REDIS_ERR; + } + + if (ac->onConnect) ac->onConnect(ac, REDIS_OK); + c->flags |= REDIS_CONNECTED; + return REDIS_OK; + } else { + return REDIS_OK; + } +} + +void redisAsyncRead(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + + if (redisBufferRead(c) == REDIS_ERR) { + __redisAsyncDisconnect(ac); + } else { + /* Always re-schedule reads */ + _EL_ADD_READ(ac); + redisProcessCallbacks(ac); + } +} + +/* This function should be called when the socket is readable. + * It processes all replies that can be read and executes their callbacks. + */ +void redisAsyncHandleRead(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + + if (!(c->flags & REDIS_CONNECTED)) { + /* Abort connect was not successful. */ + if (__redisAsyncHandleConnect(ac) != REDIS_OK) + return; + /* Try again later when the context is still not connected. */ + if (!(c->flags & REDIS_CONNECTED)) + return; + } + + c->funcs->async_read(ac); +} + +void redisAsyncWrite(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + int done = 0; + + if (redisBufferWrite(c,&done) == REDIS_ERR) { + __redisAsyncDisconnect(ac); + } else { + /* Continue writing when not done, stop writing otherwise */ + if (!done) + _EL_ADD_WRITE(ac); + else + _EL_DEL_WRITE(ac); + + /* Always schedule reads after writes */ + _EL_ADD_READ(ac); + } +} + +void redisAsyncHandleWrite(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + + if (!(c->flags & REDIS_CONNECTED)) { + /* Abort connect was not successful. */ + if (__redisAsyncHandleConnect(ac) != REDIS_OK) + return; + /* Try again later when the context is still not connected. */ + if (!(c->flags & REDIS_CONNECTED)) + return; + } + + c->funcs->async_write(ac); +} + +void redisAsyncHandleTimeout(redisAsyncContext *ac) { + redisContext *c = &(ac->c); + redisCallback cb; + + if ((c->flags & REDIS_CONNECTED) && ac->replies.head == NULL) { + /* Nothing to do - just an idle timeout */ + return; + } + + if (!c->err) { + __redisSetError(c, REDIS_ERR_TIMEOUT, "Timeout"); + } + + if (!(c->flags & REDIS_CONNECTED) && ac->onConnect) { + ac->onConnect(ac, REDIS_ERR); + } + + while (__redisShiftCallback(&ac->replies, &cb) == REDIS_OK) { + __redisRunCallback(ac, &cb, NULL); + } + + /** + * TODO: Don't automatically sever the connection, + * rather, allow to ignore responses before the queue is clear + */ + __redisAsyncDisconnect(ac); +} + +/* Sets a pointer to the first argument and its length starting at p. Returns + * the number of bytes to skip to get to the following argument. */ +static const char *nextArgument(const char *start, const char **str, size_t *len) { + const char *p = start; + if (p[0] != '$') { + p = strchr(p,'$'); + if (p == NULL) return NULL; + } + + *len = (int)strtol(p+1,NULL,10); + p = strchr(p,'\r'); + assert(p); + *str = p+2; + return p+2+(*len)+2; +} + +/* Helper function for the redisAsyncCommand* family of functions. Writes a + * formatted command to the output buffer and registers the provided callback + * function with the context. 
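+ * Subscribe, unsubscribe and monitor commands get special handling before the formatted command is appended to the output buffer.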
*/ +static int __redisAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *cmd, size_t len) { + redisContext *c = &(ac->c); + redisCallback cb; + struct dict *cbdict; + dictEntry *de; + redisCallback *existcb; + int pvariant, hasnext; + const char *cstr, *astr; + size_t clen, alen; + const char *p; + sds sname; + int ret; + + /* Don't accept new commands when the connection is about to be closed. */ + if (c->flags & (REDIS_DISCONNECTING | REDIS_FREEING)) return REDIS_ERR; + + /* Setup callback */ + cb.fn = fn; + cb.privdata = privdata; + cb.pending_subs = 1; + + /* Find out which command will be appended. */ + p = nextArgument(cmd,&cstr,&clen); + assert(p != NULL); + hasnext = (p[0] == '$'); + pvariant = (tolower(cstr[0]) == 'p') ? 1 : 0; + cstr += pvariant; + clen -= pvariant; + + if (hasnext && strncasecmp(cstr,"subscribe\r\n",11) == 0) { + c->flags |= REDIS_SUBSCRIBED; + + /* Add every channel/pattern to the list of subscription callbacks. */ + while ((p = nextArgument(p,&astr,&alen)) != NULL) { + sname = sdsnewlen(astr,alen); + if (sname == NULL) + goto oom; + + if (pvariant) + cbdict = ac->sub.patterns; + else + cbdict = ac->sub.channels; + + de = dictFind(cbdict,sname); + + if (de != NULL) { + existcb = dictGetEntryVal(de); + cb.pending_subs = existcb->pending_subs + 1; + } + + ret = dictReplace(cbdict,sname,&cb); + + if (ret == 0) sdsfree(sname); + } + } else if (strncasecmp(cstr,"unsubscribe\r\n",13) == 0) { + /* It is only useful to call (P)UNSUBSCRIBE when the context is + * subscribed to one or more channels or patterns. */ + if (!(c->flags & REDIS_SUBSCRIBED)) return REDIS_ERR; + + /* (P)UNSUBSCRIBE does not have its own response: every channel or + * pattern that is unsubscribed will receive a message. This means we + * should not append a callback function for this command. */ + } else if(strncasecmp(cstr,"monitor\r\n",9) == 0) { + /* Set monitor flag and push callback */ + c->flags |= REDIS_MONITORING; + __redisPushCallback(&ac->replies,&cb); + } else { + if (c->flags & REDIS_SUBSCRIBED) + /* This will likely result in an error reply, but it needs to be + * received and passed to the callback. */ + __redisPushCallback(&ac->sub.invalid,&cb); + else + __redisPushCallback(&ac->replies,&cb); + } + + __redisAppendCommand(c,cmd,len); + + /* Always schedule a write when the write buffer is non-empty */ + _EL_ADD_WRITE(ac); + + return REDIS_OK; +oom: + __redisSetError(&(ac->c), REDIS_ERR_OOM, "Out of memory"); + return REDIS_ERR; +} + +int redisvAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *format, va_list ap) { + char *cmd; + int len; + int status; + len = redisvFormatCommand(&cmd,format,ap); + + /* We don't want to pass -1 or -2 to future functions as a length. */ + if (len < 0) + return REDIS_ERR; + + status = __redisAsyncCommand(ac,fn,privdata,cmd,len); + hi_free(cmd); + return status; +} + +int redisAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *format, ...) 
{ + va_list ap; + int status; + va_start(ap,format); + status = redisvAsyncCommand(ac,fn,privdata,format,ap); + va_end(ap); + return status; +} + +int redisAsyncCommandArgv(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, int argc, const char **argv, const size_t *argvlen) { + sds cmd; + int len; + int status; + len = redisFormatSdsCommandArgv(&cmd,argc,argv,argvlen); + if (len < 0) + return REDIS_ERR; + status = __redisAsyncCommand(ac,fn,privdata,cmd,len); + sdsfree(cmd); + return status; +} + +int redisAsyncFormattedCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *cmd, size_t len) { + int status = __redisAsyncCommand(ac,fn,privdata,cmd,len); + return status; +} + +redisAsyncPushFn *redisAsyncSetPushCallback(redisAsyncContext *ac, redisAsyncPushFn *fn) { + redisAsyncPushFn *old = ac->push_cb; + ac->push_cb = fn; + return old; +} + +int redisAsyncSetTimeout(redisAsyncContext *ac, struct timeval tv) { + if (!ac->c.command_timeout) { + ac->c.command_timeout = hi_calloc(1, sizeof(tv)); + if (ac->c.command_timeout == NULL) { + __redisSetError(&ac->c, REDIS_ERR_OOM, "Out of memory"); + __redisAsyncCopyError(ac); + return REDIS_ERR; + } + } + + if (tv.tv_sec != ac->c.command_timeout->tv_sec || + tv.tv_usec != ac->c.command_timeout->tv_usec) + { + *ac->c.command_timeout = tv; + } + + return REDIS_OK; +} diff --git a/ext/hiredis-1.0.2/async.h b/ext/hiredis-1.0.2/async.h new file mode 100644 index 000000000..b1d2cb263 --- /dev/null +++ b/ext/hiredis-1.0.2/async.h @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2011, Pieter Noordhuis + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __HIREDIS_ASYNC_H +#define __HIREDIS_ASYNC_H +#include "hiredis.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct redisAsyncContext; /* need forward declaration of redisAsyncContext */ +struct dict; /* dictionary header is included in async.c */ + +/* Reply callback prototype and container */ +typedef void (redisCallbackFn)(struct redisAsyncContext*, void*, void*); +typedef struct redisCallback { + struct redisCallback *next; /* simple singly linked list */ + redisCallbackFn *fn; + int pending_subs; + void *privdata; +} redisCallback; + +/* List of callbacks for either regular replies or pub/sub */ +typedef struct redisCallbackList { + redisCallback *head, *tail; +} redisCallbackList; + +/* Connection callback prototypes */ +typedef void (redisDisconnectCallback)(const struct redisAsyncContext*, int status); +typedef void (redisConnectCallback)(const struct redisAsyncContext*, int status); +typedef void(redisTimerCallback)(void *timer, void *privdata); + +/* Context for an async connection to Redis */ +typedef struct redisAsyncContext { + /* Hold the regular context, so it can be realloc'ed. */ + redisContext c; + + /* Setup error flags so they can be used directly. */ + int err; + char *errstr; + + /* Not used by hiredis */ + void *data; + void (*dataCleanup)(void *privdata); + + /* Event library data and hooks */ + struct { + void *data; + + /* Hooks that are called when the library expects to start + * reading/writing. These functions should be idempotent. */ + void (*addRead)(void *privdata); + void (*delRead)(void *privdata); + void (*addWrite)(void *privdata); + void (*delWrite)(void *privdata); + void (*cleanup)(void *privdata); + void (*scheduleTimer)(void *privdata, struct timeval tv); + } ev; + + /* Called when either the connection is terminated due to an error or per + * user request. The status is set accordingly (REDIS_OK, REDIS_ERR). */ + redisDisconnectCallback *onDisconnect; + + /* Called when the first write event was received. 
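+ *
+ * Illustrative sketch, not part of the upstream sources: the callback is
+ * installed with redisAsyncSetConnectCallback(), declared below, and is
+ * invoked with REDIS_OK or REDIS_ERR as its status. With a hypothetical
+ * handler name:
+ *
+ *   void myConnectCallback(const redisAsyncContext *ac, int status) {
+ *       if (status != REDIS_OK) fprintf(stderr, "connect: %s\n", ac->errstr);
+ *   }
+ *   redisAsyncSetConnectCallback(ac, myConnectCallback);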
*/ + redisConnectCallback *onConnect; + + /* Regular command callbacks */ + redisCallbackList replies; + + /* Address used for connect() */ + struct sockaddr *saddr; + size_t addrlen; + + /* Subscription callbacks */ + struct { + redisCallbackList invalid; + struct dict *channels; + struct dict *patterns; + } sub; + + /* Any configured RESP3 PUSH handler */ + redisAsyncPushFn *push_cb; +} redisAsyncContext; + +/* Functions that proxy to hiredis */ +redisAsyncContext *redisAsyncConnectWithOptions(const redisOptions *options); +redisAsyncContext *redisAsyncConnect(const char *ip, int port); +redisAsyncContext *redisAsyncConnectBind(const char *ip, int port, const char *source_addr); +redisAsyncContext *redisAsyncConnectBindWithReuse(const char *ip, int port, + const char *source_addr); +redisAsyncContext *redisAsyncConnectUnix(const char *path); +int redisAsyncSetConnectCallback(redisAsyncContext *ac, redisConnectCallback *fn); +int redisAsyncSetDisconnectCallback(redisAsyncContext *ac, redisDisconnectCallback *fn); + +redisAsyncPushFn *redisAsyncSetPushCallback(redisAsyncContext *ac, redisAsyncPushFn *fn); +int redisAsyncSetTimeout(redisAsyncContext *ac, struct timeval tv); +void redisAsyncDisconnect(redisAsyncContext *ac); +void redisAsyncFree(redisAsyncContext *ac); + +/* Handle read/write events */ +void redisAsyncHandleRead(redisAsyncContext *ac); +void redisAsyncHandleWrite(redisAsyncContext *ac); +void redisAsyncHandleTimeout(redisAsyncContext *ac); +void redisAsyncRead(redisAsyncContext *ac); +void redisAsyncWrite(redisAsyncContext *ac); + +/* Command functions for an async context. Write the command to the + * output buffer and register the provided callback. */ +int redisvAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *format, va_list ap); +int redisAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *format, ...); +int redisAsyncCommandArgv(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, int argc, const char **argv, const size_t *argvlen); +int redisAsyncFormattedCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *cmd, size_t len); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ext/hiredis-1.0.2/async_private.h b/ext/hiredis-1.0.2/async_private.h new file mode 100644 index 000000000..b9d23fffd --- /dev/null +++ b/ext/hiredis-1.0.2/async_private.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2011, Pieter Noordhuis + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __HIREDIS_ASYNC_PRIVATE_H +#define __HIREDIS_ASYNC_PRIVATE_H + +#define _EL_ADD_READ(ctx) \ + do { \ + refreshTimeout(ctx); \ + if ((ctx)->ev.addRead) (ctx)->ev.addRead((ctx)->ev.data); \ + } while (0) +#define _EL_DEL_READ(ctx) do { \ + if ((ctx)->ev.delRead) (ctx)->ev.delRead((ctx)->ev.data); \ + } while(0) +#define _EL_ADD_WRITE(ctx) \ + do { \ + refreshTimeout(ctx); \ + if ((ctx)->ev.addWrite) (ctx)->ev.addWrite((ctx)->ev.data); \ + } while (0) +#define _EL_DEL_WRITE(ctx) do { \ + if ((ctx)->ev.delWrite) (ctx)->ev.delWrite((ctx)->ev.data); \ + } while(0) +#define _EL_CLEANUP(ctx) do { \ + if ((ctx)->ev.cleanup) (ctx)->ev.cleanup((ctx)->ev.data); \ + ctx->ev.cleanup = NULL; \ + } while(0); + +static inline void refreshTimeout(redisAsyncContext *ctx) { + #define REDIS_TIMER_ISSET(tvp) \ + (tvp && ((tvp)->tv_sec || (tvp)->tv_usec)) + + #define REDIS_EL_TIMER(ac, tvp) \ + if ((ac)->ev.scheduleTimer && REDIS_TIMER_ISSET(tvp)) { \ + (ac)->ev.scheduleTimer((ac)->ev.data, *(tvp)); \ + } + + if (ctx->c.flags & REDIS_CONNECTED) { + REDIS_EL_TIMER(ctx, ctx->c.command_timeout); + } else { + REDIS_EL_TIMER(ctx, ctx->c.connect_timeout); + } +} + +void __redisAsyncDisconnect(redisAsyncContext *ac); +void redisProcessCallbacks(redisAsyncContext *ac); + +#endif /* __HIREDIS_ASYNC_PRIVATE_H */ diff --git a/ext/hiredis-1.0.2/dict.c b/ext/hiredis-1.0.2/dict.c new file mode 100644 index 000000000..34a33ead9 --- /dev/null +++ b/ext/hiredis-1.0.2/dict.c @@ -0,0 +1,352 @@ +/* Hash table implementation. + * + * This file implements in memory hash tables with insert/del/replace/find/ + * get-random-element operations. Hash tables will auto resize if needed + * tables of power of two in size are used, collisions are handled by + * chaining. See the source code for more information... :) + * + * Copyright (c) 2006-2010, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "fmacros.h" +#include "alloc.h" +#include +#include +#include +#include "dict.h" + +/* -------------------------- private prototypes ---------------------------- */ + +static int _dictExpandIfNeeded(dict *ht); +static unsigned long _dictNextPower(unsigned long size); +static int _dictKeyIndex(dict *ht, const void *key); +static int _dictInit(dict *ht, dictType *type, void *privDataPtr); + +/* -------------------------- hash functions -------------------------------- */ + +/* Generic hash function (a popular one from Bernstein). + * I tested a few and this was the best. */ +static unsigned int dictGenHashFunction(const unsigned char *buf, int len) { + unsigned int hash = 5381; + + while (len--) + hash = ((hash << 5) + hash) + (*buf++); /* hash * 33 + c */ + return hash; +} + +/* ----------------------------- API implementation ------------------------- */ + +/* Reset an hashtable already initialized with ht_init(). + * NOTE: This function should only called by ht_destroy(). */ +static void _dictReset(dict *ht) { + ht->table = NULL; + ht->size = 0; + ht->sizemask = 0; + ht->used = 0; +} + +/* Create a new hash table */ +static dict *dictCreate(dictType *type, void *privDataPtr) { + dict *ht = hi_malloc(sizeof(*ht)); + if (ht == NULL) + return NULL; + + _dictInit(ht,type,privDataPtr); + return ht; +} + +/* Initialize the hash table */ +static int _dictInit(dict *ht, dictType *type, void *privDataPtr) { + _dictReset(ht); + ht->type = type; + ht->privdata = privDataPtr; + return DICT_OK; +} + +/* Expand or create the hashtable */ +static int dictExpand(dict *ht, unsigned long size) { + dict n; /* the new hashtable */ + unsigned long realsize = _dictNextPower(size), i; + + /* the size is invalid if it is smaller than the number of + * elements already inside the hashtable */ + if (ht->used > size) + return DICT_ERR; + + _dictInit(&n, ht->type, ht->privdata); + n.size = realsize; + n.sizemask = realsize-1; + n.table = hi_calloc(realsize,sizeof(dictEntry*)); + if (n.table == NULL) + return DICT_ERR; + + /* Copy all the elements from the old to the new table: + * note that if the old hash table is empty ht->size is zero, + * so dictExpand just creates an hash table. */ + n.used = ht->used; + for (i = 0; i < ht->size && ht->used > 0; i++) { + dictEntry *he, *nextHe; + + if (ht->table[i] == NULL) continue; + + /* For each hash entry on this slot... */ + he = ht->table[i]; + while(he) { + unsigned int h; + + nextHe = he->next; + /* Get the new element index */ + h = dictHashKey(ht, he->key) & n.sizemask; + he->next = n.table[h]; + n.table[h] = he; + ht->used--; + /* Pass to the next element */ + he = nextHe; + } + } + assert(ht->used == 0); + hi_free(ht->table); + + /* Remap the new hashtable in the old */ + *ht = n; + return DICT_OK; +} + +/* Add an element to the target hash table */ +static int dictAdd(dict *ht, void *key, void *val) { + int index; + dictEntry *entry; + + /* Get the index of the new element, or -1 if + * the element already exists. 
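+     * Illustrative note, not part of the upstream sources: _dictKeyIndex()
+     * also runs _dictExpandIfNeeded(), so a failed table expansion surfaces
+     * here as -1 as well. Callers that want upsert semantics build on this
+     * return value, as dictReplace() below does:
+     *
+     *   if (dictAdd(ht, key, val) == DICT_OK) return 1;   (fresh key)
+     *   entry = dictFind(ht, key);                         (key already present)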
*/ + if ((index = _dictKeyIndex(ht, key)) == -1) + return DICT_ERR; + + /* Allocates the memory and stores key */ + entry = hi_malloc(sizeof(*entry)); + if (entry == NULL) + return DICT_ERR; + + entry->next = ht->table[index]; + ht->table[index] = entry; + + /* Set the hash entry fields. */ + dictSetHashKey(ht, entry, key); + dictSetHashVal(ht, entry, val); + ht->used++; + return DICT_OK; +} + +/* Add an element, discarding the old if the key already exists. + * Return 1 if the key was added from scratch, 0 if there was already an + * element with such key and dictReplace() just performed a value update + * operation. */ +static int dictReplace(dict *ht, void *key, void *val) { + dictEntry *entry, auxentry; + + /* Try to add the element. If the key + * does not exists dictAdd will succeed. */ + if (dictAdd(ht, key, val) == DICT_OK) + return 1; + /* It already exists, get the entry */ + entry = dictFind(ht, key); + if (entry == NULL) + return 0; + + /* Free the old value and set the new one */ + /* Set the new value and free the old one. Note that it is important + * to do that in this order, as the value may just be exactly the same + * as the previous one. In this context, think to reference counting, + * you want to increment (set), and then decrement (free), and not the + * reverse. */ + auxentry = *entry; + dictSetHashVal(ht, entry, val); + dictFreeEntryVal(ht, &auxentry); + return 0; +} + +/* Search and remove an element */ +static int dictDelete(dict *ht, const void *key) { + unsigned int h; + dictEntry *de, *prevde; + + if (ht->size == 0) + return DICT_ERR; + h = dictHashKey(ht, key) & ht->sizemask; + de = ht->table[h]; + + prevde = NULL; + while(de) { + if (dictCompareHashKeys(ht,key,de->key)) { + /* Unlink the element from the list */ + if (prevde) + prevde->next = de->next; + else + ht->table[h] = de->next; + + dictFreeEntryKey(ht,de); + dictFreeEntryVal(ht,de); + hi_free(de); + ht->used--; + return DICT_OK; + } + prevde = de; + de = de->next; + } + return DICT_ERR; /* not found */ +} + +/* Destroy an entire hash table */ +static int _dictClear(dict *ht) { + unsigned long i; + + /* Free all the elements */ + for (i = 0; i < ht->size && ht->used > 0; i++) { + dictEntry *he, *nextHe; + + if ((he = ht->table[i]) == NULL) continue; + while(he) { + nextHe = he->next; + dictFreeEntryKey(ht, he); + dictFreeEntryVal(ht, he); + hi_free(he); + ht->used--; + he = nextHe; + } + } + /* Free the table and the allocated cache structure */ + hi_free(ht->table); + /* Re-initialize the table */ + _dictReset(ht); + return DICT_OK; /* never fails */ +} + +/* Clear & Release the hash table */ +static void dictRelease(dict *ht) { + _dictClear(ht); + hi_free(ht); +} + +static dictEntry *dictFind(dict *ht, const void *key) { + dictEntry *he; + unsigned int h; + + if (ht->size == 0) return NULL; + h = dictHashKey(ht, key) & ht->sizemask; + he = ht->table[h]; + while(he) { + if (dictCompareHashKeys(ht, key, he->key)) + return he; + he = he->next; + } + return NULL; +} + +static dictIterator *dictGetIterator(dict *ht) { + dictIterator *iter = hi_malloc(sizeof(*iter)); + if (iter == NULL) + return NULL; + + iter->ht = ht; + iter->index = -1; + iter->entry = NULL; + iter->nextEntry = NULL; + return iter; +} + +static dictEntry *dictNext(dictIterator *iter) { + while (1) { + if (iter->entry == NULL) { + iter->index++; + if (iter->index >= + (signed)iter->ht->size) break; + iter->entry = iter->ht->table[iter->index]; + } else { + iter->entry = iter->nextEntry; + } + if (iter->entry) { + /* We need to save the 
'next' here, the iterator user + * may delete the entry we are returning. */ + iter->nextEntry = iter->entry->next; + return iter->entry; + } + } + return NULL; +} + +static void dictReleaseIterator(dictIterator *iter) { + hi_free(iter); +} + +/* ------------------------- private functions ------------------------------ */ + +/* Expand the hash table if needed */ +static int _dictExpandIfNeeded(dict *ht) { + /* If the hash table is empty expand it to the initial size, + * if the table is "full" double its size. */ + if (ht->size == 0) + return dictExpand(ht, DICT_HT_INITIAL_SIZE); + if (ht->used == ht->size) + return dictExpand(ht, ht->size*2); + return DICT_OK; +} + +/* Our hash table capability is a power of two */ +static unsigned long _dictNextPower(unsigned long size) { + unsigned long i = DICT_HT_INITIAL_SIZE; + + if (size >= LONG_MAX) return LONG_MAX; + while(1) { + if (i >= size) + return i; + i *= 2; + } +} + +/* Returns the index of a free slot that can be populated with + * an hash entry for the given 'key'. + * If the key already exists, -1 is returned. */ +static int _dictKeyIndex(dict *ht, const void *key) { + unsigned int h; + dictEntry *he; + + /* Expand the hashtable if needed */ + if (_dictExpandIfNeeded(ht) == DICT_ERR) + return -1; + /* Compute the key hash value */ + h = dictHashKey(ht, key) & ht->sizemask; + /* Search if this slot does not already contain the given key */ + he = ht->table[h]; + while(he) { + if (dictCompareHashKeys(ht, key, he->key)) + return -1; + he = he->next; + } + return h; +} + diff --git a/ext/hiredis-1.0.2/dict.h b/ext/hiredis-1.0.2/dict.h new file mode 100644 index 000000000..95fcd280e --- /dev/null +++ b/ext/hiredis-1.0.2/dict.h @@ -0,0 +1,126 @@ +/* Hash table implementation. + * + * This file implements in memory hash tables with insert/del/replace/find/ + * get-random-element operations. Hash tables will auto resize if needed + * tables of power of two in size are used, collisions are handled by + * chaining. See the source code for more information... :) + * + * Copyright (c) 2006-2010, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __DICT_H +#define __DICT_H + +#define DICT_OK 0 +#define DICT_ERR 1 + +/* Unused arguments generate annoying warnings... */ +#define DICT_NOTUSED(V) ((void) V) + +typedef struct dictEntry { + void *key; + void *val; + struct dictEntry *next; +} dictEntry; + +typedef struct dictType { + unsigned int (*hashFunction)(const void *key); + void *(*keyDup)(void *privdata, const void *key); + void *(*valDup)(void *privdata, const void *obj); + int (*keyCompare)(void *privdata, const void *key1, const void *key2); + void (*keyDestructor)(void *privdata, void *key); + void (*valDestructor)(void *privdata, void *obj); +} dictType; + +typedef struct dict { + dictEntry **table; + dictType *type; + unsigned long size; + unsigned long sizemask; + unsigned long used; + void *privdata; +} dict; + +typedef struct dictIterator { + dict *ht; + int index; + dictEntry *entry, *nextEntry; +} dictIterator; + +/* This is the initial size of every hash table */ +#define DICT_HT_INITIAL_SIZE 4 + +/* ------------------------------- Macros ------------------------------------*/ +#define dictFreeEntryVal(ht, entry) \ + if ((ht)->type->valDestructor) \ + (ht)->type->valDestructor((ht)->privdata, (entry)->val) + +#define dictSetHashVal(ht, entry, _val_) do { \ + if ((ht)->type->valDup) \ + entry->val = (ht)->type->valDup((ht)->privdata, _val_); \ + else \ + entry->val = (_val_); \ +} while(0) + +#define dictFreeEntryKey(ht, entry) \ + if ((ht)->type->keyDestructor) \ + (ht)->type->keyDestructor((ht)->privdata, (entry)->key) + +#define dictSetHashKey(ht, entry, _key_) do { \ + if ((ht)->type->keyDup) \ + entry->key = (ht)->type->keyDup((ht)->privdata, _key_); \ + else \ + entry->key = (_key_); \ +} while(0) + +#define dictCompareHashKeys(ht, key1, key2) \ + (((ht)->type->keyCompare) ? 
\ + (ht)->type->keyCompare((ht)->privdata, key1, key2) : \ + (key1) == (key2)) + +#define dictHashKey(ht, key) (ht)->type->hashFunction(key) + +#define dictGetEntryKey(he) ((he)->key) +#define dictGetEntryVal(he) ((he)->val) +#define dictSlots(ht) ((ht)->size) +#define dictSize(ht) ((ht)->used) + +/* API */ +static unsigned int dictGenHashFunction(const unsigned char *buf, int len); +static dict *dictCreate(dictType *type, void *privDataPtr); +static int dictExpand(dict *ht, unsigned long size); +static int dictAdd(dict *ht, void *key, void *val); +static int dictReplace(dict *ht, void *key, void *val); +static int dictDelete(dict *ht, const void *key); +static void dictRelease(dict *ht); +static dictEntry * dictFind(dict *ht, const void *key); +static dictIterator *dictGetIterator(dict *ht); +static dictEntry *dictNext(dictIterator *iter); +static void dictReleaseIterator(dictIterator *iter); + +#endif /* __DICT_H */ diff --git a/ext/hiredis-1.0.2/examples/CMakeLists.txt b/ext/hiredis-1.0.2/examples/CMakeLists.txt new file mode 100644 index 000000000..1d5bc56e0 --- /dev/null +++ b/ext/hiredis-1.0.2/examples/CMakeLists.txt @@ -0,0 +1,49 @@ +INCLUDE(FindPkgConfig) +# Check for GLib + +PKG_CHECK_MODULES(GLIB2 glib-2.0) +if (GLIB2_FOUND) + INCLUDE_DIRECTORIES(${GLIB2_INCLUDE_DIRS}) + LINK_DIRECTORIES(${GLIB2_LIBRARY_DIRS}) + ADD_EXECUTABLE(example-glib example-glib.c) + TARGET_LINK_LIBRARIES(example-glib hiredis ${GLIB2_LIBRARIES}) +ENDIF(GLIB2_FOUND) + +FIND_PATH(LIBEV ev.h + HINTS /usr/local /usr/opt/local + ENV LIBEV_INCLUDE_DIR) + +if (LIBEV) + # Just compile and link with libev + ADD_EXECUTABLE(example-libev example-libev.c) + TARGET_LINK_LIBRARIES(example-libev hiredis ev) +ENDIF() + +FIND_PATH(LIBEVENT event.h) +if (LIBEVENT) + ADD_EXECUTABLE(example-libevent example-libevent) + TARGET_LINK_LIBRARIES(example-libevent hiredis event) +ENDIF() + +FIND_PATH(LIBUV uv.h) +IF (LIBUV) + ADD_EXECUTABLE(example-libuv example-libuv.c) + TARGET_LINK_LIBRARIES(example-libuv hiredis uv) +ENDIF() + +IF (APPLE) + FIND_LIBRARY(CF CoreFoundation) + ADD_EXECUTABLE(example-macosx example-macosx.c) + TARGET_LINK_LIBRARIES(example-macosx hiredis ${CF}) +ENDIF() + +IF (ENABLE_SSL) + ADD_EXECUTABLE(example-ssl example-ssl.c) + TARGET_LINK_LIBRARIES(example-ssl hiredis hiredis_ssl) +ENDIF() + +ADD_EXECUTABLE(example example.c) +TARGET_LINK_LIBRARIES(example hiredis) + +ADD_EXECUTABLE(example-push example-push.c) +TARGET_LINK_LIBRARIES(example-push hiredis) diff --git a/ext/hiredis-1.0.2/examples/example-ae.c b/ext/hiredis-1.0.2/examples/example-ae.c new file mode 100644 index 000000000..8efa7306a --- /dev/null +++ b/ext/hiredis-1.0.2/examples/example-ae.c @@ -0,0 +1,62 @@ +#include +#include +#include +#include + +#include +#include +#include + +/* Put event loop in the global scope, so it can be explicitly stopped */ +static aeEventLoop *loop; + +void getCallback(redisAsyncContext *c, void *r, void *privdata) { + redisReply *reply = r; + if (reply == NULL) return; + printf("argv[%s]: %s\n", (char*)privdata, reply->str); + + /* Disconnect after receiving the reply to GET */ + redisAsyncDisconnect(c); +} + +void connectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + aeStop(loop); + return; + } + + printf("Connected...\n"); +} + +void disconnectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + aeStop(loop); + return; + } + + printf("Disconnected...\n"); + aeStop(loop); +} + +int 
main (int argc, char **argv) { + signal(SIGPIPE, SIG_IGN); + + redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379); + if (c->err) { + /* Let *c leak for now... */ + printf("Error: %s\n", c->errstr); + return 1; + } + + loop = aeCreateEventLoop(64); + redisAeAttach(loop, c); + redisAsyncSetConnectCallback(c,connectCallback); + redisAsyncSetDisconnectCallback(c,disconnectCallback); + redisAsyncCommand(c, NULL, NULL, "SET key %b", argv[argc-1], strlen(argv[argc-1])); + redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key"); + aeMain(loop); + return 0; +} + diff --git a/ext/hiredis-1.0.2/examples/example-glib.c b/ext/hiredis-1.0.2/examples/example-glib.c new file mode 100644 index 000000000..d6e10f8e8 --- /dev/null +++ b/ext/hiredis-1.0.2/examples/example-glib.c @@ -0,0 +1,73 @@ +#include + +#include +#include +#include + +static GMainLoop *mainloop; + +static void +connect_cb (const redisAsyncContext *ac G_GNUC_UNUSED, + int status) +{ + if (status != REDIS_OK) { + g_printerr("Failed to connect: %s\n", ac->errstr); + g_main_loop_quit(mainloop); + } else { + g_printerr("Connected...\n"); + } +} + +static void +disconnect_cb (const redisAsyncContext *ac G_GNUC_UNUSED, + int status) +{ + if (status != REDIS_OK) { + g_error("Failed to disconnect: %s", ac->errstr); + } else { + g_printerr("Disconnected...\n"); + g_main_loop_quit(mainloop); + } +} + +static void +command_cb(redisAsyncContext *ac, + gpointer r, + gpointer user_data G_GNUC_UNUSED) +{ + redisReply *reply = r; + + if (reply) { + g_print("REPLY: %s\n", reply->str); + } + + redisAsyncDisconnect(ac); +} + +gint +main (gint argc G_GNUC_UNUSED, + gchar *argv[] G_GNUC_UNUSED) +{ + redisAsyncContext *ac; + GMainContext *context = NULL; + GSource *source; + + ac = redisAsyncConnect("127.0.0.1", 6379); + if (ac->err) { + g_printerr("%s\n", ac->errstr); + exit(EXIT_FAILURE); + } + + source = redis_source_new(ac); + mainloop = g_main_loop_new(context, FALSE); + g_source_attach(source, context); + + redisAsyncSetConnectCallback(ac, connect_cb); + redisAsyncSetDisconnectCallback(ac, disconnect_cb); + redisAsyncCommand(ac, command_cb, NULL, "SET key 1234"); + redisAsyncCommand(ac, command_cb, NULL, "GET key"); + + g_main_loop_run(mainloop); + + return EXIT_SUCCESS; +} diff --git a/ext/hiredis-1.0.2/examples/example-ivykis.c b/ext/hiredis-1.0.2/examples/example-ivykis.c new file mode 100644 index 000000000..f57dc3887 --- /dev/null +++ b/ext/hiredis-1.0.2/examples/example-ivykis.c @@ -0,0 +1,60 @@ +#include +#include +#include +#include + +#include +#include +#include + +void getCallback(redisAsyncContext *c, void *r, void *privdata) { + redisReply *reply = r; + if (reply == NULL) return; + printf("argv[%s]: %s\n", (char*)privdata, reply->str); + + /* Disconnect after receiving the reply to GET */ + redisAsyncDisconnect(c); +} + +void connectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + printf("Connected...\n"); +} + +void disconnectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + printf("Disconnected...\n"); +} + +int main (int argc, char **argv) { +#ifndef _WIN32 + signal(SIGPIPE, SIG_IGN); +#endif + + iv_init(); + + redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379); + if (c->err) { + /* Let *c leak for now... 
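+         * Illustrative note, not part of the upstream example: a stricter
+         * variant would release the context before bailing out, e.g.
+         *
+         *     redisAsyncFree(c);
+         *     return 1;
+         *
+         * redisAsyncFree() is declared in async.h.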
*/ + printf("Error: %s\n", c->errstr); + return 1; + } + + redisIvykisAttach(c); + redisAsyncSetConnectCallback(c,connectCallback); + redisAsyncSetDisconnectCallback(c,disconnectCallback); + redisAsyncCommand(c, NULL, NULL, "SET key %b", argv[argc-1], strlen(argv[argc-1])); + redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key"); + + iv_main(); + + iv_deinit(); + + return 0; +} diff --git a/ext/hiredis-1.0.2/examples/example-libev.c b/ext/hiredis-1.0.2/examples/example-libev.c new file mode 100644 index 000000000..ec474306b --- /dev/null +++ b/ext/hiredis-1.0.2/examples/example-libev.c @@ -0,0 +1,54 @@ +#include +#include +#include +#include + +#include +#include +#include + +void getCallback(redisAsyncContext *c, void *r, void *privdata) { + redisReply *reply = r; + if (reply == NULL) return; + printf("argv[%s]: %s\n", (char*)privdata, reply->str); + + /* Disconnect after receiving the reply to GET */ + redisAsyncDisconnect(c); +} + +void connectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + printf("Connected...\n"); +} + +void disconnectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + printf("Disconnected...\n"); +} + +int main (int argc, char **argv) { +#ifndef _WIN32 + signal(SIGPIPE, SIG_IGN); +#endif + + redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379); + if (c->err) { + /* Let *c leak for now... */ + printf("Error: %s\n", c->errstr); + return 1; + } + + redisLibevAttach(EV_DEFAULT_ c); + redisAsyncSetConnectCallback(c,connectCallback); + redisAsyncSetDisconnectCallback(c,disconnectCallback); + redisAsyncCommand(c, NULL, NULL, "SET key %b", argv[argc-1], strlen(argv[argc-1])); + redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key"); + ev_loop(EV_DEFAULT_ 0); + return 0; +} diff --git a/ext/hiredis-1.0.2/examples/example-libevent-ssl.c b/ext/hiredis-1.0.2/examples/example-libevent-ssl.c new file mode 100644 index 000000000..7d99af1ba --- /dev/null +++ b/ext/hiredis-1.0.2/examples/example-libevent-ssl.c @@ -0,0 +1,90 @@ +#include +#include +#include +#include + +#include +#include +#include +#include + +void getCallback(redisAsyncContext *c, void *r, void *privdata) { + redisReply *reply = r; + if (reply == NULL) return; + printf("argv[%s]: %s\n", (char*)privdata, reply->str); + + /* Disconnect after receiving the reply to GET */ + redisAsyncDisconnect(c); +} + +void connectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + printf("Connected...\n"); +} + +void disconnectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + printf("Disconnected...\n"); +} + +int main (int argc, char **argv) { +#ifndef _WIN32 + signal(SIGPIPE, SIG_IGN); +#endif + + struct event_base *base = event_base_new(); + if (argc < 5) { + fprintf(stderr, + "Usage: %s [ca]\n", argv[0]); + exit(1); + } + + const char *value = argv[1]; + size_t nvalue = strlen(value); + + const char *hostname = argv[2]; + int port = atoi(argv[3]); + + const char *cert = argv[4]; + const char *certKey = argv[5]; + const char *caCert = argc > 5 ? 
argv[6] : NULL; + + redisSSLContext *ssl; + redisSSLContextError ssl_error; + + redisInitOpenSSL(); + + ssl = redisCreateSSLContext(caCert, NULL, + cert, certKey, NULL, &ssl_error); + if (!ssl) { + printf("Error: %s\n", redisSSLContextGetError(ssl_error)); + return 1; + } + + redisAsyncContext *c = redisAsyncConnect(hostname, port); + if (c->err) { + /* Let *c leak for now... */ + printf("Error: %s\n", c->errstr); + return 1; + } + if (redisInitiateSSLWithContext(&c->c, ssl) != REDIS_OK) { + printf("SSL Error!\n"); + exit(1); + } + + redisLibeventAttach(c,base); + redisAsyncSetConnectCallback(c,connectCallback); + redisAsyncSetDisconnectCallback(c,disconnectCallback); + redisAsyncCommand(c, NULL, NULL, "SET key %b", value, nvalue); + redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key"); + event_base_dispatch(base); + + redisFreeSSLContext(ssl); + return 0; +} diff --git a/ext/hiredis-1.0.2/examples/example-libevent.c b/ext/hiredis-1.0.2/examples/example-libevent.c new file mode 100644 index 000000000..49bddd0c2 --- /dev/null +++ b/ext/hiredis-1.0.2/examples/example-libevent.c @@ -0,0 +1,67 @@ +#include +#include +#include +#include + +#include +#include +#include + +void getCallback(redisAsyncContext *c, void *r, void *privdata) { + redisReply *reply = r; + if (reply == NULL) { + if (c->errstr) { + printf("errstr: %s\n", c->errstr); + } + return; + } + printf("argv[%s]: %s\n", (char*)privdata, reply->str); + + /* Disconnect after receiving the reply to GET */ + redisAsyncDisconnect(c); +} + +void connectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + printf("Connected...\n"); +} + +void disconnectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + printf("Disconnected...\n"); +} + +int main (int argc, char **argv) { +#ifndef _WIN32 + signal(SIGPIPE, SIG_IGN); +#endif + + struct event_base *base = event_base_new(); + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, "127.0.0.1", 6379); + struct timeval tv = {0}; + tv.tv_sec = 1; + options.connect_timeout = &tv; + + + redisAsyncContext *c = redisAsyncConnectWithOptions(&options); + if (c->err) { + /* Let *c leak for now... 
*/ + printf("Error: %s\n", c->errstr); + return 1; + } + + redisLibeventAttach(c,base); + redisAsyncSetConnectCallback(c,connectCallback); + redisAsyncSetDisconnectCallback(c,disconnectCallback); + redisAsyncCommand(c, NULL, NULL, "SET key %b", argv[argc-1], strlen(argv[argc-1])); + redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key"); + event_base_dispatch(base); + return 0; +} diff --git a/ext/hiredis-1.0.2/examples/example-libuv.c b/ext/hiredis-1.0.2/examples/example-libuv.c new file mode 100644 index 000000000..cbde452b9 --- /dev/null +++ b/ext/hiredis-1.0.2/examples/example-libuv.c @@ -0,0 +1,56 @@ +#include +#include +#include +#include + +#include +#include +#include + +void getCallback(redisAsyncContext *c, void *r, void *privdata) { + redisReply *reply = r; + if (reply == NULL) return; + printf("argv[%s]: %s\n", (char*)privdata, reply->str); + + /* Disconnect after receiving the reply to GET */ + redisAsyncDisconnect(c); +} + +void connectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + printf("Connected...\n"); +} + +void disconnectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + printf("Disconnected...\n"); +} + +int main (int argc, char **argv) { +#ifndef _WIN32 + signal(SIGPIPE, SIG_IGN); +#endif + + uv_loop_t* loop = uv_default_loop(); + + redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379); + if (c->err) { + /* Let *c leak for now... */ + printf("Error: %s\n", c->errstr); + return 1; + } + + redisLibuvAttach(c,loop); + redisAsyncSetConnectCallback(c,connectCallback); + redisAsyncSetDisconnectCallback(c,disconnectCallback); + redisAsyncCommand(c, NULL, NULL, "SET key %b", argv[argc-1], strlen(argv[argc-1])); + redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key"); + uv_run(loop, UV_RUN_DEFAULT); + return 0; +} diff --git a/ext/hiredis-1.0.2/examples/example-macosx.c b/ext/hiredis-1.0.2/examples/example-macosx.c new file mode 100644 index 000000000..bc84ed5ba --- /dev/null +++ b/ext/hiredis-1.0.2/examples/example-macosx.c @@ -0,0 +1,66 @@ +// +// Created by Дмитрий Бахвалов on 13.07.15. +// Copyright (c) 2015 Dmitry Bakhvalov. All rights reserved. +// + +#include + +#include +#include +#include + +void getCallback(redisAsyncContext *c, void *r, void *privdata) { + redisReply *reply = r; + if (reply == NULL) return; + printf("argv[%s]: %s\n", (char*)privdata, reply->str); + + /* Disconnect after receiving the reply to GET */ + redisAsyncDisconnect(c); +} + +void connectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + printf("Connected...\n"); +} + +void disconnectCallback(const redisAsyncContext *c, int status) { + if (status != REDIS_OK) { + printf("Error: %s\n", c->errstr); + return; + } + CFRunLoopStop(CFRunLoopGetCurrent()); + printf("Disconnected...\n"); +} + +int main (int argc, char **argv) { + signal(SIGPIPE, SIG_IGN); + + CFRunLoopRef loop = CFRunLoopGetCurrent(); + if( !loop ) { + printf("Error: Cannot get current run loop\n"); + return 1; + } + + redisAsyncContext *c = redisAsyncConnect("127.0.0.1", 6379); + if (c->err) { + /* Let *c leak for now... 
*/ + printf("Error: %s\n", c->errstr); + return 1; + } + + redisMacOSAttach(c, loop); + + redisAsyncSetConnectCallback(c,connectCallback); + redisAsyncSetDisconnectCallback(c,disconnectCallback); + + redisAsyncCommand(c, NULL, NULL, "SET key %b", argv[argc-1], strlen(argv[argc-1])); + redisAsyncCommand(c, getCallback, (char*)"end-1", "GET key"); + + CFRunLoopRun(); + + return 0; +} + diff --git a/ext/hiredis-1.0.2/examples/example-push.c b/ext/hiredis-1.0.2/examples/example-push.c new file mode 100644 index 000000000..2d4ab4dc0 --- /dev/null +++ b/ext/hiredis-1.0.2/examples/example-push.c @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2020, Michael Grunder + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +#include +#include +#include +#include +#include + +#define KEY_COUNT 5 + +#define panicAbort(fmt, ...) \ + do { \ + fprintf(stderr, "%s:%d:%s(): " fmt, __FILE__, __LINE__, __func__, __VA_ARGS__); \ + exit(-1); \ + } while (0) + +static void assertReplyAndFree(redisContext *context, redisReply *reply, int type) { + if (reply == NULL) + panicAbort("NULL reply from server (error: %s)", context->errstr); + + if (reply->type != type) { + if (reply->type == REDIS_REPLY_ERROR) + fprintf(stderr, "Redis Error: %s\n", reply->str); + + panicAbort("Expected reply type %d but got type %d", type, reply->type); + } + + freeReplyObject(reply); +} + +/* Switch to the RESP3 protocol and enable client tracking */ +static void enableClientTracking(redisContext *c) { + redisReply *reply = redisCommand(c, "HELLO 3"); + if (reply == NULL || c->err) { + panicAbort("NULL reply or server error (error: %s)", c->errstr); + } + + if (reply->type != REDIS_REPLY_MAP) { + fprintf(stderr, "Error: Can't send HELLO 3 command. Are you sure you're "); + fprintf(stderr, "connected to redis-server >= 6.0.0?\nRedis error: %s\n", + reply->type == REDIS_REPLY_ERROR ? 
reply->str : "(unknown)"); + exit(-1); + } + + freeReplyObject(reply); + + /* Enable client tracking */ + reply = redisCommand(c, "CLIENT TRACKING ON"); + assertReplyAndFree(c, reply, REDIS_REPLY_STATUS); +} + +void pushReplyHandler(void *privdata, void *r) { + redisReply *reply = r; + int *invalidations = privdata; + + /* Sanity check on the invalidation reply */ + if (reply->type != REDIS_REPLY_PUSH || reply->elements != 2 || + reply->element[1]->type != REDIS_REPLY_ARRAY || + reply->element[1]->element[0]->type != REDIS_REPLY_STRING) + { + panicAbort("%s", "Can't parse PUSH message!"); + } + + /* Increment our invalidation count */ + *invalidations += 1; + + printf("pushReplyHandler(): INVALIDATE '%s' (invalidation count: %d)\n", + reply->element[1]->element[0]->str, *invalidations); + + freeReplyObject(reply); +} + +/* We aren't actually freeing anything here, but it is included to show that we can + * have hiredis call our data destructor when freeing the context */ +void privdata_dtor(void *privdata) { + unsigned int *icount = privdata; + printf("privdata_dtor(): In context privdata dtor (invalidations: %u)\n", *icount); +} + +int main(int argc, char **argv) { + unsigned int j, invalidations = 0; + redisContext *c; + redisReply *reply; + + const char *hostname = (argc > 1) ? argv[1] : "127.0.0.1"; + int port = (argc > 2) ? atoi(argv[2]) : 6379; + + redisOptions o = {0}; + REDIS_OPTIONS_SET_TCP(&o, hostname, port); + + /* Set our context privdata to the address of our invalidation counter. Each + * time our PUSH handler is called, hiredis will pass the privdata for context. + * + * This could also be done after we create the context like so: + * + * c->privdata = &invalidations; + * c->free_privdata = privdata_dtor; + */ + REDIS_OPTIONS_SET_PRIVDATA(&o, &invalidations, privdata_dtor); + + /* Set our custom PUSH message handler */ + o.push_cb = pushReplyHandler; + + c = redisConnectWithOptions(&o); + if (c == NULL || c->err) + panicAbort("Connection error: %s", c ? c->errstr : "OOM"); + + /* Enable RESP3 and turn on client tracking */ + enableClientTracking(c); + + /* Set some keys and then read them back. 
Once we do that, Redis will deliver + * invalidation push messages whenever the key is modified */ + for (j = 0; j < KEY_COUNT; j++) { + reply = redisCommand(c, "SET key:%d initial:%d", j, j); + assertReplyAndFree(c, reply, REDIS_REPLY_STATUS); + + reply = redisCommand(c, "GET key:%d", j); + assertReplyAndFree(c, reply, REDIS_REPLY_STRING); + } + + /* Trigger invalidation messages by updating keys we just read */ + for (j = 0; j < KEY_COUNT; j++) { + printf(" main(): SET key:%d update:%d\n", j, j); + reply = redisCommand(c, "SET key:%d update:%d", j, j); + assertReplyAndFree(c, reply, REDIS_REPLY_STATUS); + printf(" main(): SET REPLY OK\n"); + } + + printf("\nTotal detected invalidations: %d, expected: %d\n", invalidations, KEY_COUNT); + + /* PING server */ + redisFree(c); +} diff --git a/ext/hiredis-1.0.2/examples/example-qt.cpp b/ext/hiredis-1.0.2/examples/example-qt.cpp new file mode 100644 index 000000000..f524c3f3d --- /dev/null +++ b/ext/hiredis-1.0.2/examples/example-qt.cpp @@ -0,0 +1,46 @@ +#include +using namespace std; + +#include +#include + +#include "example-qt.h" + +void getCallback(redisAsyncContext *, void * r, void * privdata) { + + redisReply * reply = static_cast(r); + ExampleQt * ex = static_cast(privdata); + if (reply == nullptr || ex == nullptr) return; + + cout << "key: " << reply->str << endl; + + ex->finish(); +} + +void ExampleQt::run() { + + m_ctx = redisAsyncConnect("localhost", 6379); + + if (m_ctx->err) { + cerr << "Error: " << m_ctx->errstr << endl; + redisAsyncFree(m_ctx); + emit finished(); + } + + m_adapter.setContext(m_ctx); + + redisAsyncCommand(m_ctx, NULL, NULL, "SET key %s", m_value); + redisAsyncCommand(m_ctx, getCallback, this, "GET key"); +} + +int main (int argc, char **argv) { + + QCoreApplication app(argc, argv); + + ExampleQt example(argv[argc-1]); + + QObject::connect(&example, SIGNAL(finished()), &app, SLOT(quit())); + QTimer::singleShot(0, &example, SLOT(run())); + + return app.exec(); +} diff --git a/ext/hiredis-1.0.2/examples/example-qt.h b/ext/hiredis-1.0.2/examples/example-qt.h new file mode 100644 index 000000000..374f47666 --- /dev/null +++ b/ext/hiredis-1.0.2/examples/example-qt.h @@ -0,0 +1,32 @@ +#ifndef __HIREDIS_EXAMPLE_QT_H +#define __HIREDIS_EXAMPLE_QT_H + +#include + +class ExampleQt : public QObject { + + Q_OBJECT + + public: + ExampleQt(const char * value, QObject * parent = 0) + : QObject(parent), m_value(value) {} + + signals: + void finished(); + + public slots: + void run(); + + private: + void finish() { emit finished(); } + + private: + const char * m_value; + redisAsyncContext * m_ctx; + RedisQtAdapter m_adapter; + + friend + void getCallback(redisAsyncContext *, void *, void *); +}; + +#endif /* !__HIREDIS_EXAMPLE_QT_H */ diff --git a/ext/hiredis-1.0.2/examples/example-ssl.c b/ext/hiredis-1.0.2/examples/example-ssl.c new file mode 100644 index 000000000..c754177cf --- /dev/null +++ b/ext/hiredis-1.0.2/examples/example-ssl.c @@ -0,0 +1,110 @@ +#include +#include +#include + +#include +#include +#include + +int main(int argc, char **argv) { + unsigned int j; + redisSSLContext *ssl; + redisSSLContextError ssl_error; + redisContext *c; + redisReply *reply; + if (argc < 4) { + printf("Usage: %s [ca]\n", argv[0]); + exit(1); + } + const char *hostname = (argc > 1) ? argv[1] : "127.0.0.1"; + int port = atoi(argv[2]); + const char *cert = argv[3]; + const char *key = argv[4]; + const char *ca = argc > 4 ? 
argv[5] : NULL; + + redisInitOpenSSL(); + ssl = redisCreateSSLContext(ca, NULL, cert, key, NULL, &ssl_error); + if (!ssl) { + printf("SSL Context error: %s\n", + redisSSLContextGetError(ssl_error)); + exit(1); + } + + struct timeval tv = { 1, 500000 }; // 1.5 seconds + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, hostname, port); + options.connect_timeout = &tv; + c = redisConnectWithOptions(&options); + + if (c == NULL || c->err) { + if (c) { + printf("Connection error: %s\n", c->errstr); + redisFree(c); + } else { + printf("Connection error: can't allocate redis context\n"); + } + exit(1); + } + + if (redisInitiateSSLWithContext(c, ssl) != REDIS_OK) { + printf("Couldn't initialize SSL!\n"); + printf("Error: %s\n", c->errstr); + redisFree(c); + exit(1); + } + + /* PING server */ + reply = redisCommand(c,"PING"); + printf("PING: %s\n", reply->str); + freeReplyObject(reply); + + /* Set a key */ + reply = redisCommand(c,"SET %s %s", "foo", "hello world"); + printf("SET: %s\n", reply->str); + freeReplyObject(reply); + + /* Set a key using binary safe API */ + reply = redisCommand(c,"SET %b %b", "bar", (size_t) 3, "hello", (size_t) 5); + printf("SET (binary API): %s\n", reply->str); + freeReplyObject(reply); + + /* Try a GET and two INCR */ + reply = redisCommand(c,"GET foo"); + printf("GET foo: %s\n", reply->str); + freeReplyObject(reply); + + reply = redisCommand(c,"INCR counter"); + printf("INCR counter: %lld\n", reply->integer); + freeReplyObject(reply); + /* again ... */ + reply = redisCommand(c,"INCR counter"); + printf("INCR counter: %lld\n", reply->integer); + freeReplyObject(reply); + + /* Create a list of numbers, from 0 to 9 */ + reply = redisCommand(c,"DEL mylist"); + freeReplyObject(reply); + for (j = 0; j < 10; j++) { + char buf[64]; + + snprintf(buf,64,"%u",j); + reply = redisCommand(c,"LPUSH mylist element-%s", buf); + freeReplyObject(reply); + } + + /* Let's check what we have inside the list */ + reply = redisCommand(c,"LRANGE mylist 0 -1"); + if (reply->type == REDIS_REPLY_ARRAY) { + for (j = 0; j < reply->elements; j++) { + printf("%u) %s\n", j, reply->element[j]->str); + } + } + freeReplyObject(reply); + + /* Disconnects and frees the context */ + redisFree(c); + + redisFreeSSLContext(ssl); + + return 0; +} diff --git a/ext/hiredis-1.0.2/examples/example.c b/ext/hiredis-1.0.2/examples/example.c new file mode 100644 index 000000000..15dacbd18 --- /dev/null +++ b/ext/hiredis-1.0.2/examples/example.c @@ -0,0 +1,91 @@ +#include +#include +#include +#include +#include + +int main(int argc, char **argv) { + unsigned int j, isunix = 0; + redisContext *c; + redisReply *reply; + const char *hostname = (argc > 1) ? argv[1] : "127.0.0.1"; + + if (argc > 2) { + if (*argv[2] == 'u' || *argv[2] == 'U') { + isunix = 1; + /* in this case, host is the path to the unix socket */ + printf("Will connect to unix socket @%s\n", hostname); + } + } + + int port = (argc > 2) ? 
atoi(argv[2]) : 6379; + + struct timeval timeout = { 1, 500000 }; // 1.5 seconds + if (isunix) { + c = redisConnectUnixWithTimeout(hostname, timeout); + } else { + c = redisConnectWithTimeout(hostname, port, timeout); + } + if (c == NULL || c->err) { + if (c) { + printf("Connection error: %s\n", c->errstr); + redisFree(c); + } else { + printf("Connection error: can't allocate redis context\n"); + } + exit(1); + } + + /* PING server */ + reply = redisCommand(c,"PING"); + printf("PING: %s\n", reply->str); + freeReplyObject(reply); + + /* Set a key */ + reply = redisCommand(c,"SET %s %s", "foo", "hello world"); + printf("SET: %s\n", reply->str); + freeReplyObject(reply); + + /* Set a key using binary safe API */ + reply = redisCommand(c,"SET %b %b", "bar", (size_t) 3, "hello", (size_t) 5); + printf("SET (binary API): %s\n", reply->str); + freeReplyObject(reply); + + /* Try a GET and two INCR */ + reply = redisCommand(c,"GET foo"); + printf("GET foo: %s\n", reply->str); + freeReplyObject(reply); + + reply = redisCommand(c,"INCR counter"); + printf("INCR counter: %lld\n", reply->integer); + freeReplyObject(reply); + /* again ... */ + reply = redisCommand(c,"INCR counter"); + printf("INCR counter: %lld\n", reply->integer); + freeReplyObject(reply); + + /* Create a list of numbers, from 0 to 9 */ + reply = redisCommand(c,"DEL mylist"); + freeReplyObject(reply); + for (j = 0; j < 10; j++) { + char buf[64]; + + snprintf(buf,64,"%u",j); + reply = redisCommand(c,"LPUSH mylist element-%s", buf); + freeReplyObject(reply); + } + + /* Let's check what we have inside the list */ + reply = redisCommand(c,"LRANGE mylist 0 -1"); + if (reply->type == REDIS_REPLY_ARRAY) { + for (j = 0; j < reply->elements; j++) { + printf("%u) %s\n", j, reply->element[j]->str); + } + } + freeReplyObject(reply); + + /* Disconnects and frees the context */ + redisFree(c); + + return 0; +} diff --git a/ext/hiredis-1.0.2/fmacros.h b/ext/hiredis-1.0.2/fmacros.h new file mode 100644 index 000000000..3227faafd --- /dev/null +++ b/ext/hiredis-1.0.2/fmacros.h @@ -0,0 +1,12 @@ +#ifndef __HIREDIS_FMACRO_H +#define __HIREDIS_FMACRO_H + +#define _XOPEN_SOURCE 600 +#define _POSIX_C_SOURCE 200112L + +#if defined(__APPLE__) && defined(__MACH__) +/* Enable TCP_KEEPALIVE */ +#define _DARWIN_C_SOURCE +#endif + +#endif diff --git a/ext/hiredis-1.0.2/hiredis-config.cmake.in b/ext/hiredis-1.0.2/hiredis-config.cmake.in new file mode 100644 index 000000000..98851dcee --- /dev/null +++ b/ext/hiredis-1.0.2/hiredis-config.cmake.in @@ -0,0 +1,13 @@ +@PACKAGE_INIT@ + +set_and_check(hiredis_INCLUDEDIR "@PACKAGE_INCLUDE_INSTALL_DIR@") + +IF (NOT TARGET hiredis::hiredis) + INCLUDE(${CMAKE_CURRENT_LIST_DIR}/hiredis-targets.cmake) +ENDIF() + +SET(hiredis_LIBRARIES hiredis::hiredis) +SET(hiredis_INCLUDE_DIRS ${hiredis_INCLUDEDIR}) + +check_required_components(hiredis) + diff --git a/ext/hiredis-1.0.2/hiredis.c b/ext/hiredis-1.0.2/hiredis.c new file mode 100644 index 000000000..ab0e39822 --- /dev/null +++ b/ext/hiredis-1.0.2/hiredis.c @@ -0,0 +1,1174 @@ +/* + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2014, Pieter Noordhuis + * Copyright (c) 2015, Matt Stancliff , + * Jan-Erik Rediger + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "fmacros.h" +#include +#include +#include +#include +#include + +#include "hiredis.h" +#include "net.h" +#include "sds.h" +#include "async.h" +#include "win32.h" + +extern int redisContextUpdateConnectTimeout(redisContext *c, const struct timeval *timeout); +extern int redisContextUpdateCommandTimeout(redisContext *c, const struct timeval *timeout); + +static redisContextFuncs redisContextDefaultFuncs = { + .free_privctx = NULL, + .async_read = redisAsyncRead, + .async_write = redisAsyncWrite, + .read = redisNetRead, + .write = redisNetWrite +}; + +static redisReply *createReplyObject(int type); +static void *createStringObject(const redisReadTask *task, char *str, size_t len); +static void *createArrayObject(const redisReadTask *task, size_t elements); +static void *createIntegerObject(const redisReadTask *task, long long value); +static void *createDoubleObject(const redisReadTask *task, double value, char *str, size_t len); +static void *createNilObject(const redisReadTask *task); +static void *createBoolObject(const redisReadTask *task, int bval); + +/* Default set of functions to build the reply. Keep in mind that such a + * function returning NULL is interpreted as OOM. 
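+ *
+ * Illustrative note, not part of the upstream sources: the reply reader
+ * treats a NULL return from any of these constructors as an allocation
+ * failure and reports REDIS_ERR_OOM to the caller. A custom table uses the
+ * same member order as the initializer below; with hypothetical builders
+ * it might look like:
+ *
+ *   redisReplyObjectFunctions myFn = {
+ *       myCreateString, myCreateArray, myCreateInteger,
+ *       myCreateDouble, myCreateNil, myCreateBool, myFree
+ *   };
+ *   redisReader *reader = redisReaderCreateWithFunctions(&myFn);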
*/ +static redisReplyObjectFunctions defaultFunctions = { + createStringObject, + createArrayObject, + createIntegerObject, + createDoubleObject, + createNilObject, + createBoolObject, + freeReplyObject +}; + +/* Create a reply object */ +static redisReply *createReplyObject(int type) { + redisReply *r = hi_calloc(1,sizeof(*r)); + + if (r == NULL) + return NULL; + + r->type = type; + return r; +} + +/* Free a reply object */ +void freeReplyObject(void *reply) { + redisReply *r = reply; + size_t j; + + if (r == NULL) + return; + + switch(r->type) { + case REDIS_REPLY_INTEGER: + break; /* Nothing to free */ + case REDIS_REPLY_ARRAY: + case REDIS_REPLY_MAP: + case REDIS_REPLY_SET: + case REDIS_REPLY_PUSH: + if (r->element != NULL) { + for (j = 0; j < r->elements; j++) + freeReplyObject(r->element[j]); + hi_free(r->element); + } + break; + case REDIS_REPLY_ERROR: + case REDIS_REPLY_STATUS: + case REDIS_REPLY_STRING: + case REDIS_REPLY_DOUBLE: + case REDIS_REPLY_VERB: + hi_free(r->str); + break; + } + hi_free(r); +} + +static void *createStringObject(const redisReadTask *task, char *str, size_t len) { + redisReply *r, *parent; + char *buf; + + r = createReplyObject(task->type); + if (r == NULL) + return NULL; + + assert(task->type == REDIS_REPLY_ERROR || + task->type == REDIS_REPLY_STATUS || + task->type == REDIS_REPLY_STRING || + task->type == REDIS_REPLY_VERB); + + /* Copy string value */ + if (task->type == REDIS_REPLY_VERB) { + buf = hi_malloc(len-4+1); /* Skip 4 bytes of verbatim type header. */ + if (buf == NULL) goto oom; + + memcpy(r->vtype,str,3); + r->vtype[3] = '\0'; + memcpy(buf,str+4,len-4); + buf[len-4] = '\0'; + r->len = len - 4; + } else { + buf = hi_malloc(len+1); + if (buf == NULL) goto oom; + + memcpy(buf,str,len); + buf[len] = '\0'; + r->len = len; + } + r->str = buf; + + if (task->parent) { + parent = task->parent->obj; + assert(parent->type == REDIS_REPLY_ARRAY || + parent->type == REDIS_REPLY_MAP || + parent->type == REDIS_REPLY_SET || + parent->type == REDIS_REPLY_PUSH); + parent->element[task->idx] = r; + } + return r; + +oom: + freeReplyObject(r); + return NULL; +} + +static void *createArrayObject(const redisReadTask *task, size_t elements) { + redisReply *r, *parent; + + r = createReplyObject(task->type); + if (r == NULL) + return NULL; + + if (elements > 0) { + if (SIZE_MAX / sizeof(redisReply*) < elements) return NULL; /* Don't overflow */ + r->element = hi_calloc(elements,sizeof(redisReply*)); + if (r->element == NULL) { + freeReplyObject(r); + return NULL; + } + } + + r->elements = elements; + + if (task->parent) { + parent = task->parent->obj; + assert(parent->type == REDIS_REPLY_ARRAY || + parent->type == REDIS_REPLY_MAP || + parent->type == REDIS_REPLY_SET || + parent->type == REDIS_REPLY_PUSH); + parent->element[task->idx] = r; + } + return r; +} + +static void *createIntegerObject(const redisReadTask *task, long long value) { + redisReply *r, *parent; + + r = createReplyObject(REDIS_REPLY_INTEGER); + if (r == NULL) + return NULL; + + r->integer = value; + + if (task->parent) { + parent = task->parent->obj; + assert(parent->type == REDIS_REPLY_ARRAY || + parent->type == REDIS_REPLY_MAP || + parent->type == REDIS_REPLY_SET || + parent->type == REDIS_REPLY_PUSH); + parent->element[task->idx] = r; + } + return r; +} + +static void *createDoubleObject(const redisReadTask *task, double value, char *str, size_t len) { + redisReply *r, *parent; + + r = createReplyObject(REDIS_REPLY_DOUBLE); + if (r == NULL) + return NULL; + + r->dval = value; + r->str = 
hi_malloc(len+1); + if (r->str == NULL) { + freeReplyObject(r); + return NULL; + } + + /* The double reply also has the original protocol string representing a + * double as a null terminated string. This way the caller does not need + * to format back for string conversion, especially since Redis does efforts + * to make the string more human readable avoiding the calssical double + * decimal string conversion artifacts. */ + memcpy(r->str, str, len); + r->str[len] = '\0'; + + if (task->parent) { + parent = task->parent->obj; + assert(parent->type == REDIS_REPLY_ARRAY || + parent->type == REDIS_REPLY_MAP || + parent->type == REDIS_REPLY_SET); + parent->element[task->idx] = r; + } + return r; +} + +static void *createNilObject(const redisReadTask *task) { + redisReply *r, *parent; + + r = createReplyObject(REDIS_REPLY_NIL); + if (r == NULL) + return NULL; + + if (task->parent) { + parent = task->parent->obj; + assert(parent->type == REDIS_REPLY_ARRAY || + parent->type == REDIS_REPLY_MAP || + parent->type == REDIS_REPLY_SET); + parent->element[task->idx] = r; + } + return r; +} + +static void *createBoolObject(const redisReadTask *task, int bval) { + redisReply *r, *parent; + + r = createReplyObject(REDIS_REPLY_BOOL); + if (r == NULL) + return NULL; + + r->integer = bval != 0; + + if (task->parent) { + parent = task->parent->obj; + assert(parent->type == REDIS_REPLY_ARRAY || + parent->type == REDIS_REPLY_MAP || + parent->type == REDIS_REPLY_SET); + parent->element[task->idx] = r; + } + return r; +} + +/* Return the number of digits of 'v' when converted to string in radix 10. + * Implementation borrowed from link in redis/src/util.c:string2ll(). */ +static uint32_t countDigits(uint64_t v) { + uint32_t result = 1; + for (;;) { + if (v < 10) return result; + if (v < 100) return result + 1; + if (v < 1000) return result + 2; + if (v < 10000) return result + 3; + v /= 10000U; + result += 4; + } +} + +/* Helper that calculates the bulk length given a certain string length. */ +static size_t bulklen(size_t len) { + return 1+countDigits(len)+2+len+2; +} + +int redisvFormatCommand(char **target, const char *format, va_list ap) { + const char *c = format; + char *cmd = NULL; /* final command */ + int pos; /* position in final command */ + sds curarg, newarg; /* current argument */ + int touched = 0; /* was the current argument touched? */ + char **curargv = NULL, **newargv = NULL; + int argc = 0; + int totlen = 0; + int error_type = 0; /* 0 = no error; -1 = memory error; -2 = format error */ + int j; + + /* Abort if there is not target to set */ + if (target == NULL) + return -1; + + /* Build the command string accordingly to protocol */ + curarg = sdsempty(); + if (curarg == NULL) + return -1; + + while(*c != '\0') { + if (*c != '%' || c[1] == '\0') { + if (*c == ' ') { + if (touched) { + newargv = hi_realloc(curargv,sizeof(char*)*(argc+1)); + if (newargv == NULL) goto memory_err; + curargv = newargv; + curargv[argc++] = curarg; + totlen += bulklen(sdslen(curarg)); + + /* curarg is put in argv so it can be overwritten. */ + curarg = sdsempty(); + if (curarg == NULL) goto memory_err; + touched = 0; + } + } else { + newarg = sdscatlen(curarg,c,1); + if (newarg == NULL) goto memory_err; + curarg = newarg; + touched = 1; + } + } else { + char *arg; + size_t size; + + /* Set newarg so it can be checked even if it is not touched. 
*/ + newarg = curarg; + + switch(c[1]) { + case 's': + arg = va_arg(ap,char*); + size = strlen(arg); + if (size > 0) + newarg = sdscatlen(curarg,arg,size); + break; + case 'b': + arg = va_arg(ap,char*); + size = va_arg(ap,size_t); + if (size > 0) + newarg = sdscatlen(curarg,arg,size); + break; + case '%': + newarg = sdscat(curarg,"%"); + break; + default: + /* Try to detect printf format */ + { + static const char intfmts[] = "diouxX"; + static const char flags[] = "#0-+ "; + char _format[16]; + const char *_p = c+1; + size_t _l = 0; + va_list _cpy; + + /* Flags */ + while (*_p != '\0' && strchr(flags,*_p) != NULL) _p++; + + /* Field width */ + while (*_p != '\0' && isdigit(*_p)) _p++; + + /* Precision */ + if (*_p == '.') { + _p++; + while (*_p != '\0' && isdigit(*_p)) _p++; + } + + /* Copy va_list before consuming with va_arg */ + va_copy(_cpy,ap); + + /* Integer conversion (without modifiers) */ + if (strchr(intfmts,*_p) != NULL) { + va_arg(ap,int); + goto fmt_valid; + } + + /* Double conversion (without modifiers) */ + if (strchr("eEfFgGaA",*_p) != NULL) { + va_arg(ap,double); + goto fmt_valid; + } + + /* Size: char */ + if (_p[0] == 'h' && _p[1] == 'h') { + _p += 2; + if (*_p != '\0' && strchr(intfmts,*_p) != NULL) { + va_arg(ap,int); /* char gets promoted to int */ + goto fmt_valid; + } + goto fmt_invalid; + } + + /* Size: short */ + if (_p[0] == 'h') { + _p += 1; + if (*_p != '\0' && strchr(intfmts,*_p) != NULL) { + va_arg(ap,int); /* short gets promoted to int */ + goto fmt_valid; + } + goto fmt_invalid; + } + + /* Size: long long */ + if (_p[0] == 'l' && _p[1] == 'l') { + _p += 2; + if (*_p != '\0' && strchr(intfmts,*_p) != NULL) { + va_arg(ap,long long); + goto fmt_valid; + } + goto fmt_invalid; + } + + /* Size: long */ + if (_p[0] == 'l') { + _p += 1; + if (*_p != '\0' && strchr(intfmts,*_p) != NULL) { + va_arg(ap,long); + goto fmt_valid; + } + goto fmt_invalid; + } + + fmt_invalid: + va_end(_cpy); + goto format_err; + + fmt_valid: + _l = (_p+1)-c; + if (_l < sizeof(_format)-2) { + memcpy(_format,c,_l); + _format[_l] = '\0'; + newarg = sdscatvprintf(curarg,_format,_cpy); + + /* Update current position (note: outer blocks + * increment c twice so compensate here) */ + c = _p-1; + } + + va_end(_cpy); + break; + } + } + + if (newarg == NULL) goto memory_err; + curarg = newarg; + + touched = 1; + c++; + } + c++; + } + + /* Add the last argument if needed */ + if (touched) { + newargv = hi_realloc(curargv,sizeof(char*)*(argc+1)); + if (newargv == NULL) goto memory_err; + curargv = newargv; + curargv[argc++] = curarg; + totlen += bulklen(sdslen(curarg)); + } else { + sdsfree(curarg); + } + + /* Clear curarg because it was put in curargv or was free'd. 
*/ + curarg = NULL; + + /* Add bytes needed to hold multi bulk count */ + totlen += 1+countDigits(argc)+2; + + /* Build the command at protocol level */ + cmd = hi_malloc(totlen+1); + if (cmd == NULL) goto memory_err; + + pos = sprintf(cmd,"*%d\r\n",argc); + for (j = 0; j < argc; j++) { + pos += sprintf(cmd+pos,"$%zu\r\n",sdslen(curargv[j])); + memcpy(cmd+pos,curargv[j],sdslen(curargv[j])); + pos += sdslen(curargv[j]); + sdsfree(curargv[j]); + cmd[pos++] = '\r'; + cmd[pos++] = '\n'; + } + assert(pos == totlen); + cmd[pos] = '\0'; + + hi_free(curargv); + *target = cmd; + return totlen; + +format_err: + error_type = -2; + goto cleanup; + +memory_err: + error_type = -1; + goto cleanup; + +cleanup: + if (curargv) { + while(argc--) + sdsfree(curargv[argc]); + hi_free(curargv); + } + + sdsfree(curarg); + hi_free(cmd); + + return error_type; +} + +/* Format a command according to the Redis protocol. This function + * takes a format similar to printf: + * + * %s represents a C null terminated string you want to interpolate + * %b represents a binary safe string + * + * When using %b you need to provide both the pointer to the string + * and the length in bytes as a size_t. Examples: + * + * len = redisFormatCommand(target, "GET %s", mykey); + * len = redisFormatCommand(target, "SET %s %b", mykey, myval, myvallen); + */ +int redisFormatCommand(char **target, const char *format, ...) { + va_list ap; + int len; + va_start(ap,format); + len = redisvFormatCommand(target,format,ap); + va_end(ap); + + /* The API says "-1" means bad result, but we now also return "-2" in some + * cases. Force the return value to always be -1. */ + if (len < 0) + len = -1; + + return len; +} + +/* Format a command according to the Redis protocol using an sds string and + * sdscatfmt for the processing of arguments. This function takes the + * number of arguments, an array with arguments and an array with their + * lengths. If the latter is set to NULL, strlen will be used to compute the + * argument lengths. + */ +int redisFormatSdsCommandArgv(sds *target, int argc, const char **argv, + const size_t *argvlen) +{ + sds cmd, aux; + unsigned long long totlen; + int j; + size_t len; + + /* Abort on a NULL target */ + if (target == NULL) + return -1; + + /* Calculate our total size */ + totlen = 1+countDigits(argc)+2; + for (j = 0; j < argc; j++) { + len = argvlen ? argvlen[j] : strlen(argv[j]); + totlen += bulklen(len); + } + + /* Use an SDS string for command construction */ + cmd = sdsempty(); + if (cmd == NULL) + return -1; + + /* We already know how much storage we need */ + aux = sdsMakeRoomFor(cmd, totlen); + if (aux == NULL) { + sdsfree(cmd); + return -1; + } + + cmd = aux; + + /* Construct command */ + cmd = sdscatfmt(cmd, "*%i\r\n", argc); + for (j=0; j < argc; j++) { + len = argvlen ? argvlen[j] : strlen(argv[j]); + cmd = sdscatfmt(cmd, "$%u\r\n", len); + cmd = sdscatlen(cmd, argv[j], len); + cmd = sdscatlen(cmd, "\r\n", sizeof("\r\n")-1); + } + + assert(sdslen(cmd)==totlen); + + *target = cmd; + return totlen; +} + +void redisFreeSdsCommand(sds cmd) { + sdsfree(cmd); +} + +/* Format a command according to the Redis protocol. This function takes the + * number of arguments, an array with arguments and an array with their + * lengths. If the latter is set to NULL, strlen will be used to compute the + * argument lengths. 
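+ *
+ * A minimal illustrative call (variable names are hypothetical, not part of
+ * the original sources):
+ *
+ *   const char *argv[3] = {"SET", "mykey", "myvalue"};
+ *   char *cmd;
+ *   int len = redisFormatCommandArgv(&cmd, 3, argv, NULL);
+ *   if (len >= 0) {
+ *       ... send cmd (len bytes) to the server ...
+ *       redisFreeCommand(cmd);
+ *   }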
+ */ +int redisFormatCommandArgv(char **target, int argc, const char **argv, const size_t *argvlen) { + char *cmd = NULL; /* final command */ + int pos; /* position in final command */ + size_t len; + int totlen, j; + + /* Abort on a NULL target */ + if (target == NULL) + return -1; + + /* Calculate number of bytes needed for the command */ + totlen = 1+countDigits(argc)+2; + for (j = 0; j < argc; j++) { + len = argvlen ? argvlen[j] : strlen(argv[j]); + totlen += bulklen(len); + } + + /* Build the command at protocol level */ + cmd = hi_malloc(totlen+1); + if (cmd == NULL) + return -1; + + pos = sprintf(cmd,"*%d\r\n",argc); + for (j = 0; j < argc; j++) { + len = argvlen ? argvlen[j] : strlen(argv[j]); + pos += sprintf(cmd+pos,"$%zu\r\n",len); + memcpy(cmd+pos,argv[j],len); + pos += len; + cmd[pos++] = '\r'; + cmd[pos++] = '\n'; + } + assert(pos == totlen); + cmd[pos] = '\0'; + + *target = cmd; + return totlen; +} + +void redisFreeCommand(char *cmd) { + hi_free(cmd); +} + +void __redisSetError(redisContext *c, int type, const char *str) { + size_t len; + + c->err = type; + if (str != NULL) { + len = strlen(str); + len = len < (sizeof(c->errstr)-1) ? len : (sizeof(c->errstr)-1); + memcpy(c->errstr,str,len); + c->errstr[len] = '\0'; + } else { + /* Only REDIS_ERR_IO may lack a description! */ + assert(type == REDIS_ERR_IO); + strerror_r(errno, c->errstr, sizeof(c->errstr)); + } +} + +redisReader *redisReaderCreate(void) { + return redisReaderCreateWithFunctions(&defaultFunctions); +} + +static void redisPushAutoFree(void *privdata, void *reply) { + (void)privdata; + freeReplyObject(reply); +} + +static redisContext *redisContextInit(void) { + redisContext *c; + + c = hi_calloc(1, sizeof(*c)); + if (c == NULL) + return NULL; + + c->funcs = &redisContextDefaultFuncs; + + c->obuf = sdsempty(); + c->reader = redisReaderCreate(); + c->fd = REDIS_INVALID_FD; + + if (c->obuf == NULL || c->reader == NULL) { + redisFree(c); + return NULL; + } + + return c; +} + +void redisFree(redisContext *c) { + if (c == NULL) + return; + redisNetClose(c); + + sdsfree(c->obuf); + redisReaderFree(c->reader); + hi_free(c->tcp.host); + hi_free(c->tcp.source_addr); + hi_free(c->unix_sock.path); + hi_free(c->connect_timeout); + hi_free(c->command_timeout); + hi_free(c->saddr); + + if (c->privdata && c->free_privdata) + c->free_privdata(c->privdata); + + if (c->funcs->free_privctx) + c->funcs->free_privctx(c->privctx); + + memset(c, 0xff, sizeof(*c)); + hi_free(c); +} + +redisFD redisFreeKeepFd(redisContext *c) { + redisFD fd = c->fd; + c->fd = REDIS_INVALID_FD; + redisFree(c); + return fd; +} + +int redisReconnect(redisContext *c) { + c->err = 0; + memset(c->errstr, '\0', strlen(c->errstr)); + + if (c->privctx && c->funcs->free_privctx) { + c->funcs->free_privctx(c->privctx); + c->privctx = NULL; + } + + redisNetClose(c); + + sdsfree(c->obuf); + redisReaderFree(c->reader); + + c->obuf = sdsempty(); + c->reader = redisReaderCreate(); + + if (c->obuf == NULL || c->reader == NULL) { + __redisSetError(c, REDIS_ERR_OOM, "Out of memory"); + return REDIS_ERR; + } + + int ret = REDIS_ERR; + if (c->connection_type == REDIS_CONN_TCP) { + ret = redisContextConnectBindTcp(c, c->tcp.host, c->tcp.port, + c->connect_timeout, c->tcp.source_addr); + } else if (c->connection_type == REDIS_CONN_UNIX) { + ret = redisContextConnectUnix(c, c->unix_sock.path, c->connect_timeout); + } else { + /* Something bad happened here and shouldn't have. There isn't + enough information in the context to reconnect. 
*/ + __redisSetError(c,REDIS_ERR_OTHER,"Not enough information to reconnect"); + ret = REDIS_ERR; + } + + if (c->command_timeout != NULL && (c->flags & REDIS_BLOCK) && c->fd != REDIS_INVALID_FD) { + redisContextSetTimeout(c, *c->command_timeout); + } + + return ret; +} + +redisContext *redisConnectWithOptions(const redisOptions *options) { + redisContext *c = redisContextInit(); + if (c == NULL) { + return NULL; + } + if (!(options->options & REDIS_OPT_NONBLOCK)) { + c->flags |= REDIS_BLOCK; + } + if (options->options & REDIS_OPT_REUSEADDR) { + c->flags |= REDIS_REUSEADDR; + } + if (options->options & REDIS_OPT_NOAUTOFREE) { + c->flags |= REDIS_NO_AUTO_FREE; + } + + /* Set any user supplied RESP3 PUSH handler or use freeReplyObject + * as a default unless specifically flagged that we don't want one. */ + if (options->push_cb != NULL) + redisSetPushCallback(c, options->push_cb); + else if (!(options->options & REDIS_OPT_NO_PUSH_AUTOFREE)) + redisSetPushCallback(c, redisPushAutoFree); + + c->privdata = options->privdata; + c->free_privdata = options->free_privdata; + + if (redisContextUpdateConnectTimeout(c, options->connect_timeout) != REDIS_OK || + redisContextUpdateCommandTimeout(c, options->command_timeout) != REDIS_OK) { + __redisSetError(c, REDIS_ERR_OOM, "Out of memory"); + return c; + } + + if (options->type == REDIS_CONN_TCP) { + redisContextConnectBindTcp(c, options->endpoint.tcp.ip, + options->endpoint.tcp.port, options->connect_timeout, + options->endpoint.tcp.source_addr); + } else if (options->type == REDIS_CONN_UNIX) { + redisContextConnectUnix(c, options->endpoint.unix_socket, + options->connect_timeout); + } else if (options->type == REDIS_CONN_USERFD) { + c->fd = options->endpoint.fd; + c->flags |= REDIS_CONNECTED; + } else { + // Unknown type - FIXME - FREE + return NULL; + } + + if (options->command_timeout != NULL && (c->flags & REDIS_BLOCK) && c->fd != REDIS_INVALID_FD) { + redisContextSetTimeout(c, *options->command_timeout); + } + + return c; +} + +/* Connect to a Redis instance. On error the field error in the returned + * context will be set to the return value of the error function. + * When no set of reply functions is given, the default set will be used. 
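+ *
+ * Typical usage (an illustrative sketch mirroring the bundled example.c; host
+ * and port are hypothetical): check both for a NULL return and for c->err
+ * before using the context:
+ *
+ *   redisContext *c = redisConnect("127.0.0.1", 6379);
+ *   if (c == NULL || c->err) {
+ *       if (c) { printf("Connection error: %s\n", c->errstr); redisFree(c); }
+ *       else   { printf("Connection error: can't allocate redis context\n"); }
+ *   }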
*/ +redisContext *redisConnect(const char *ip, int port) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, ip, port); + return redisConnectWithOptions(&options); +} + +redisContext *redisConnectWithTimeout(const char *ip, int port, const struct timeval tv) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, ip, port); + options.connect_timeout = &tv; + return redisConnectWithOptions(&options); +} + +redisContext *redisConnectNonBlock(const char *ip, int port) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, ip, port); + options.options |= REDIS_OPT_NONBLOCK; + return redisConnectWithOptions(&options); +} + +redisContext *redisConnectBindNonBlock(const char *ip, int port, + const char *source_addr) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, ip, port); + options.endpoint.tcp.source_addr = source_addr; + options.options |= REDIS_OPT_NONBLOCK; + return redisConnectWithOptions(&options); +} + +redisContext *redisConnectBindNonBlockWithReuse(const char *ip, int port, + const char *source_addr) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, ip, port); + options.endpoint.tcp.source_addr = source_addr; + options.options |= REDIS_OPT_NONBLOCK|REDIS_OPT_REUSEADDR; + return redisConnectWithOptions(&options); +} + +redisContext *redisConnectUnix(const char *path) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_UNIX(&options, path); + return redisConnectWithOptions(&options); +} + +redisContext *redisConnectUnixWithTimeout(const char *path, const struct timeval tv) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_UNIX(&options, path); + options.connect_timeout = &tv; + return redisConnectWithOptions(&options); +} + +redisContext *redisConnectUnixNonBlock(const char *path) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_UNIX(&options, path); + options.options |= REDIS_OPT_NONBLOCK; + return redisConnectWithOptions(&options); +} + +redisContext *redisConnectFd(redisFD fd) { + redisOptions options = {0}; + options.type = REDIS_CONN_USERFD; + options.endpoint.fd = fd; + return redisConnectWithOptions(&options); +} + +/* Set read/write timeout on a blocking socket. */ +int redisSetTimeout(redisContext *c, const struct timeval tv) { + if (c->flags & REDIS_BLOCK) + return redisContextSetTimeout(c,tv); + return REDIS_ERR; +} + +/* Enable connection KeepAlive. */ +int redisEnableKeepAlive(redisContext *c) { + if (redisKeepAlive(c, REDIS_KEEPALIVE_INTERVAL) != REDIS_OK) + return REDIS_ERR; + return REDIS_OK; +} + +/* Set a user provided RESP3 PUSH handler and return any old one set. */ +redisPushFn *redisSetPushCallback(redisContext *c, redisPushFn *fn) { + redisPushFn *old = c->push_cb; + c->push_cb = fn; + return old; +} + +/* Use this function to handle a read event on the descriptor. It will try + * and read some bytes from the socket and feed them to the reply parser. + * + * After this function is called, you may use redisGetReplyFromReader to + * see if there is a reply available. */ +int redisBufferRead(redisContext *c) { + char buf[1024*16]; + int nread; + + /* Return early when the context has seen an error. */ + if (c->err) + return REDIS_ERR; + + nread = c->funcs->read(c, buf, sizeof(buf)); + if (nread > 0) { + if (redisReaderFeed(c->reader, buf, nread) != REDIS_OK) { + __redisSetError(c, c->reader->err, c->reader->errstr); + return REDIS_ERR; + } else { + } + } else if (nread < 0) { + return REDIS_ERR; + } + return REDIS_OK; +} + +/* Write the output buffer to the socket. 
+ * + * Returns REDIS_OK when the buffer is empty, or (a part of) the buffer was + * successfully written to the socket. When the buffer is empty after the + * write operation, "done" is set to 1 (if given). + * + * Returns REDIS_ERR if an error occurred trying to write and sets + * c->errstr to hold the appropriate error string. + */ +int redisBufferWrite(redisContext *c, int *done) { + + /* Return early when the context has seen an error. */ + if (c->err) + return REDIS_ERR; + + if (sdslen(c->obuf) > 0) { + ssize_t nwritten = c->funcs->write(c); + if (nwritten < 0) { + return REDIS_ERR; + } else if (nwritten > 0) { + if (nwritten == (ssize_t)sdslen(c->obuf)) { + sdsfree(c->obuf); + c->obuf = sdsempty(); + if (c->obuf == NULL) + goto oom; + } else { + if (sdsrange(c->obuf,nwritten,-1) < 0) goto oom; + } + } + } + if (done != NULL) *done = (sdslen(c->obuf) == 0); + return REDIS_OK; + +oom: + __redisSetError(c, REDIS_ERR_OOM, "Out of memory"); + return REDIS_ERR; +} + +/* Internal helper function to try and get a reply from the reader, + * or set an error in the context otherwise. */ +int redisGetReplyFromReader(redisContext *c, void **reply) { + if (redisReaderGetReply(c->reader,reply) == REDIS_ERR) { + __redisSetError(c,c->reader->err,c->reader->errstr); + return REDIS_ERR; + } + + return REDIS_OK; +} + +/* Internal helper that returns 1 if the reply was a RESP3 PUSH + * message and we handled it with a user-provided callback. */ +static int redisHandledPushReply(redisContext *c, void *reply) { + if (reply && c->push_cb && redisIsPushReply(reply)) { + c->push_cb(c->privdata, reply); + return 1; + } + + return 0; +} + +int redisGetReply(redisContext *c, void **reply) { + int wdone = 0; + void *aux = NULL; + + /* Try to read pending replies */ + if (redisGetReplyFromReader(c,&aux) == REDIS_ERR) + return REDIS_ERR; + + /* For the blocking context, flush output buffer and read reply */ + if (aux == NULL && c->flags & REDIS_BLOCK) { + /* Write until done */ + do { + if (redisBufferWrite(c,&wdone) == REDIS_ERR) + return REDIS_ERR; + } while (!wdone); + + /* Read until there is a reply */ + do { + if (redisBufferRead(c) == REDIS_ERR) + return REDIS_ERR; + + /* We loop here in case the user has specified a RESP3 + * PUSH handler (e.g. for client tracking). */ + do { + if (redisGetReplyFromReader(c,&aux) == REDIS_ERR) + return REDIS_ERR; + } while (redisHandledPushReply(c, aux)); + } while (aux == NULL); + } + + /* Set reply or free it if we were passed NULL */ + if (reply != NULL) { + *reply = aux; + } else { + freeReplyObject(aux); + } + + return REDIS_OK; +} + + +/* Helper function for the redisAppendCommand* family of functions. + * + * Write a formatted command to the output buffer. When this family + * is used, you need to call redisGetReply yourself to retrieve + * the reply (or replies in pub/sub). 
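+ *
+ * Illustrative pipelining sketch (not part of the original sources; assumes
+ * an already-connected blocking context "c"):
+ *
+ *   redisReply *reply;
+ *   redisAppendCommand(c, "SET %s %s", "key1", "val1");
+ *   redisAppendCommand(c, "GET %s", "key1");
+ *   redisGetReply(c, (void **)&reply);    (first reply: SET)
+ *   freeReplyObject(reply);
+ *   redisGetReply(c, (void **)&reply);    (second reply: GET)
+ *   freeReplyObject(reply);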
+ */ +int __redisAppendCommand(redisContext *c, const char *cmd, size_t len) { + sds newbuf; + + newbuf = sdscatlen(c->obuf,cmd,len); + if (newbuf == NULL) { + __redisSetError(c,REDIS_ERR_OOM,"Out of memory"); + return REDIS_ERR; + } + + c->obuf = newbuf; + return REDIS_OK; +} + +int redisAppendFormattedCommand(redisContext *c, const char *cmd, size_t len) { + + if (__redisAppendCommand(c, cmd, len) != REDIS_OK) { + return REDIS_ERR; + } + + return REDIS_OK; +} + +int redisvAppendCommand(redisContext *c, const char *format, va_list ap) { + char *cmd; + int len; + + len = redisvFormatCommand(&cmd,format,ap); + if (len == -1) { + __redisSetError(c,REDIS_ERR_OOM,"Out of memory"); + return REDIS_ERR; + } else if (len == -2) { + __redisSetError(c,REDIS_ERR_OTHER,"Invalid format string"); + return REDIS_ERR; + } + + if (__redisAppendCommand(c,cmd,len) != REDIS_OK) { + hi_free(cmd); + return REDIS_ERR; + } + + hi_free(cmd); + return REDIS_OK; +} + +int redisAppendCommand(redisContext *c, const char *format, ...) { + va_list ap; + int ret; + + va_start(ap,format); + ret = redisvAppendCommand(c,format,ap); + va_end(ap); + return ret; +} + +int redisAppendCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen) { + sds cmd; + int len; + + len = redisFormatSdsCommandArgv(&cmd,argc,argv,argvlen); + if (len == -1) { + __redisSetError(c,REDIS_ERR_OOM,"Out of memory"); + return REDIS_ERR; + } + + if (__redisAppendCommand(c,cmd,len) != REDIS_OK) { + sdsfree(cmd); + return REDIS_ERR; + } + + sdsfree(cmd); + return REDIS_OK; +} + +/* Helper function for the redisCommand* family of functions. + * + * Write a formatted command to the output buffer. If the given context is + * blocking, immediately read the reply into the "reply" pointer. When the + * context is non-blocking, the "reply" pointer will not be used and the + * command is simply appended to the write buffer. + * + * Returns the reply when a reply was successfully retrieved. Returns NULL + * otherwise. When NULL is returned in a blocking context, the error field + * in the context will be set. + */ +static void *__redisBlockForReply(redisContext *c) { + void *reply; + + if (c->flags & REDIS_BLOCK) { + if (redisGetReply(c,&reply) != REDIS_OK) + return NULL; + return reply; + } + return NULL; +} + +void *redisvCommand(redisContext *c, const char *format, va_list ap) { + if (redisvAppendCommand(c,format,ap) != REDIS_OK) + return NULL; + return __redisBlockForReply(c); +} + +void *redisCommand(redisContext *c, const char *format, ...) { + va_list ap; + va_start(ap,format); + void *reply = redisvCommand(c,format,ap); + va_end(ap); + return reply; +} + +void *redisCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen) { + if (redisAppendCommandArgv(c,argc,argv,argvlen) != REDIS_OK) + return NULL; + return __redisBlockForReply(c); +} diff --git a/ext/hiredis-1.0.2/hiredis.h b/ext/hiredis-1.0.2/hiredis.h new file mode 100644 index 000000000..3bc46d992 --- /dev/null +++ b/ext/hiredis-1.0.2/hiredis.h @@ -0,0 +1,336 @@ +/* + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2014, Pieter Noordhuis + * Copyright (c) 2015, Matt Stancliff , + * Jan-Erik Rediger + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __HIREDIS_H +#define __HIREDIS_H +#include "read.h" +#include /* for va_list */ +#ifndef _MSC_VER +#include /* for struct timeval */ +#else +struct timeval; /* forward declaration */ +typedef long long ssize_t; +#endif +#include /* uintXX_t, etc */ +#include "sds.h" /* for sds */ +#include "alloc.h" /* for allocation wrappers */ + +#define HIREDIS_MAJOR 1 +#define HIREDIS_MINOR 0 +#define HIREDIS_PATCH 2 +#define HIREDIS_SONAME 1.0.0 + +/* Connection type can be blocking or non-blocking and is set in the + * least significant bit of the flags field in redisContext. */ +#define REDIS_BLOCK 0x1 + +/* Connection may be disconnected before being free'd. The second bit + * in the flags field is set when the context is connected. */ +#define REDIS_CONNECTED 0x2 + +/* The async API might try to disconnect cleanly and flush the output + * buffer and read all subsequent replies before disconnecting. + * This flag means no new commands can come in and the connection + * should be terminated once all replies have been read. */ +#define REDIS_DISCONNECTING 0x4 + +/* Flag specific to the async API which means that the context should be clean + * up as soon as possible. */ +#define REDIS_FREEING 0x8 + +/* Flag that is set when an async callback is executed. */ +#define REDIS_IN_CALLBACK 0x10 + +/* Flag that is set when the async context has one or more subscriptions. */ +#define REDIS_SUBSCRIBED 0x20 + +/* Flag that is set when monitor mode is active */ +#define REDIS_MONITORING 0x40 + +/* Flag that is set when we should set SO_REUSEADDR before calling bind() */ +#define REDIS_REUSEADDR 0x80 + +/** + * Flag that indicates the user does not want the context to + * be automatically freed upon error + */ +#define REDIS_NO_AUTO_FREE 0x200 + +#define REDIS_KEEPALIVE_INTERVAL 15 /* seconds */ + +/* number of times we retry to connect in the case of EADDRNOTAVAIL and + * SO_REUSEADDR is being used. 
*/ +#define REDIS_CONNECT_RETRIES 10 + +/* Forward declarations for structs defined elsewhere */ +struct redisAsyncContext; +struct redisContext; + +/* RESP3 push helpers and callback prototypes */ +#define redisIsPushReply(r) (((redisReply*)(r))->type == REDIS_REPLY_PUSH) +typedef void (redisPushFn)(void *, void *); +typedef void (redisAsyncPushFn)(struct redisAsyncContext *, void *); + +#ifdef __cplusplus +extern "C" { +#endif + +/* This is the reply object returned by redisCommand() */ +typedef struct redisReply { + int type; /* REDIS_REPLY_* */ + long long integer; /* The integer when type is REDIS_REPLY_INTEGER */ + double dval; /* The double when type is REDIS_REPLY_DOUBLE */ + size_t len; /* Length of string */ + char *str; /* Used for REDIS_REPLY_ERROR, REDIS_REPLY_STRING + REDIS_REPLY_VERB, and REDIS_REPLY_DOUBLE (in additional to dval). */ + char vtype[4]; /* Used for REDIS_REPLY_VERB, contains the null + terminated 3 character content type, such as "txt". */ + size_t elements; /* number of elements, for REDIS_REPLY_ARRAY */ + struct redisReply **element; /* elements vector for REDIS_REPLY_ARRAY */ +} redisReply; + +redisReader *redisReaderCreate(void); + +/* Function to free the reply objects hiredis returns by default. */ +void freeReplyObject(void *reply); + +/* Functions to format a command according to the protocol. */ +int redisvFormatCommand(char **target, const char *format, va_list ap); +int redisFormatCommand(char **target, const char *format, ...); +int redisFormatCommandArgv(char **target, int argc, const char **argv, const size_t *argvlen); +int redisFormatSdsCommandArgv(sds *target, int argc, const char ** argv, const size_t *argvlen); +void redisFreeCommand(char *cmd); +void redisFreeSdsCommand(sds cmd); + +enum redisConnectionType { + REDIS_CONN_TCP, + REDIS_CONN_UNIX, + REDIS_CONN_USERFD +}; + +struct redisSsl; + +#define REDIS_OPT_NONBLOCK 0x01 +#define REDIS_OPT_REUSEADDR 0x02 + +/** + * Don't automatically free the async object on a connection failure, + * or other implicit conditions. Only free on an explicit call to disconnect() or free() + */ +#define REDIS_OPT_NOAUTOFREE 0x04 + +/* Don't automatically intercept and free RESP3 PUSH replies. */ +#define REDIS_OPT_NO_PUSH_AUTOFREE 0x08 + +/* In Unix systems a file descriptor is a regular signed int, with -1 + * representing an invalid descriptor. In Windows it is a SOCKET + * (32- or 64-bit unsigned integer depending on the architecture), where + * all bits set (~0) is INVALID_SOCKET. */ +#ifndef _WIN32 +typedef int redisFD; +#define REDIS_INVALID_FD -1 +#else +#ifdef _WIN64 +typedef unsigned long long redisFD; /* SOCKET = 64-bit UINT_PTR */ +#else +typedef unsigned long redisFD; /* SOCKET = 32-bit UINT_PTR */ +#endif +#define REDIS_INVALID_FD ((redisFD)(~0)) /* INVALID_SOCKET */ +#endif + +typedef struct { + /* + * the type of connection to use. This also indicates which + * `endpoint` member field to use + */ + int type; + /* bit field of REDIS_OPT_xxx */ + int options; + /* timeout value for connect operation. If NULL, no timeout is used */ + const struct timeval *connect_timeout; + /* timeout value for commands. If NULL, no timeout is used. This can be + * updated at runtime with redisSetTimeout/redisAsyncSetTimeout. 
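+ *
+ * For illustration (values are hypothetical, not part of the original
+ * sources), both timeouts can be supplied before connecting:
+ *
+ *   redisOptions options = {0};
+ *   struct timeval connect_tv = { 1, 500000 };
+ *   struct timeval command_tv = { 0, 200000 };
+ *   REDIS_OPTIONS_SET_TCP(&options, "127.0.0.1", 6379);
+ *   options.connect_timeout = &connect_tv;
+ *   options.command_timeout = &command_tv;
+ *   redisContext *c = redisConnectWithOptions(&options);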
*/ + const struct timeval *command_timeout; + union { + /** use this field for tcp/ip connections */ + struct { + const char *source_addr; + const char *ip; + int port; + } tcp; + /** use this field for unix domain sockets */ + const char *unix_socket; + /** + * use this field to have hiredis operate an already-open + * file descriptor */ + redisFD fd; + } endpoint; + + /* Optional user defined data/destructor */ + void *privdata; + void (*free_privdata)(void *); + + /* A user defined PUSH message callback */ + redisPushFn *push_cb; + redisAsyncPushFn *async_push_cb; +} redisOptions; + +/** + * Helper macros to initialize options to their specified fields. + */ +#define REDIS_OPTIONS_SET_TCP(opts, ip_, port_) \ + (opts)->type = REDIS_CONN_TCP; \ + (opts)->endpoint.tcp.ip = ip_; \ + (opts)->endpoint.tcp.port = port_; + +#define REDIS_OPTIONS_SET_UNIX(opts, path) \ + (opts)->type = REDIS_CONN_UNIX; \ + (opts)->endpoint.unix_socket = path; + +#define REDIS_OPTIONS_SET_PRIVDATA(opts, data, dtor) \ + (opts)->privdata = data; \ + (opts)->free_privdata = dtor; \ + +typedef struct redisContextFuncs { + void (*free_privctx)(void *); + void (*async_read)(struct redisAsyncContext *); + void (*async_write)(struct redisAsyncContext *); + ssize_t (*read)(struct redisContext *, char *, size_t); + ssize_t (*write)(struct redisContext *); +} redisContextFuncs; + +/* Context for a connection to Redis */ +typedef struct redisContext { + const redisContextFuncs *funcs; /* Function table */ + + int err; /* Error flags, 0 when there is no error */ + char errstr[128]; /* String representation of error when applicable */ + redisFD fd; + int flags; + char *obuf; /* Write buffer */ + redisReader *reader; /* Protocol reader */ + + enum redisConnectionType connection_type; + struct timeval *connect_timeout; + struct timeval *command_timeout; + + struct { + char *host; + char *source_addr; + int port; + } tcp; + + struct { + char *path; + } unix_sock; + + /* For non-blocking connect */ + struct sockadr *saddr; + size_t addrlen; + + /* Optional data and corresponding destructor users can use to provide + * context to a given redisContext. Not used by hiredis. */ + void *privdata; + void (*free_privdata)(void *); + + /* Internal context pointer presently used by hiredis to manage + * SSL connections. */ + void *privctx; + + /* An optional RESP3 PUSH handler */ + redisPushFn *push_cb; +} redisContext; + +redisContext *redisConnectWithOptions(const redisOptions *options); +redisContext *redisConnect(const char *ip, int port); +redisContext *redisConnectWithTimeout(const char *ip, int port, const struct timeval tv); +redisContext *redisConnectNonBlock(const char *ip, int port); +redisContext *redisConnectBindNonBlock(const char *ip, int port, + const char *source_addr); +redisContext *redisConnectBindNonBlockWithReuse(const char *ip, int port, + const char *source_addr); +redisContext *redisConnectUnix(const char *path); +redisContext *redisConnectUnixWithTimeout(const char *path, const struct timeval tv); +redisContext *redisConnectUnixNonBlock(const char *path); +redisContext *redisConnectFd(redisFD fd); + +/** + * Reconnect the given context using the saved information. + * + * This re-uses the exact same connect options as in the initial connection. + * host, ip (or path), timeout and bind address are reused, + * flags are used unmodified from the existing context. + * + * Returns REDIS_OK on successful connect or REDIS_ERR otherwise. 
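+ *
+ * Illustrative recovery sketch (not part of the original sources): after an
+ * error on a blocking context, the same context can be re-established in
+ * place rather than freed and recreated:
+ *
+ *   if (redisReconnect(c) != REDIS_OK) {
+ *       printf("Reconnect failed: %s\n", c->errstr);
+ *   }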
+ */ +int redisReconnect(redisContext *c); + +redisPushFn *redisSetPushCallback(redisContext *c, redisPushFn *fn); +int redisSetTimeout(redisContext *c, const struct timeval tv); +int redisEnableKeepAlive(redisContext *c); +void redisFree(redisContext *c); +redisFD redisFreeKeepFd(redisContext *c); +int redisBufferRead(redisContext *c); +int redisBufferWrite(redisContext *c, int *done); + +/* In a blocking context, this function first checks if there are unconsumed + * replies to return and returns one if so. Otherwise, it flushes the output + * buffer to the socket and reads until it has a reply. In a non-blocking + * context, it will return unconsumed replies until there are no more. */ +int redisGetReply(redisContext *c, void **reply); +int redisGetReplyFromReader(redisContext *c, void **reply); + +/* Write a formatted command to the output buffer. Use these functions in blocking mode + * to get a pipeline of commands. */ +int redisAppendFormattedCommand(redisContext *c, const char *cmd, size_t len); + +/* Write a command to the output buffer. Use these functions in blocking mode + * to get a pipeline of commands. */ +int redisvAppendCommand(redisContext *c, const char *format, va_list ap); +int redisAppendCommand(redisContext *c, const char *format, ...); +int redisAppendCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen); + +/* Issue a command to Redis. In a blocking context, it is identical to calling + * redisAppendCommand, followed by redisGetReply. The function will return + * NULL if there was an error in performing the request, otherwise it will + * return the reply. In a non-blocking context, it is identical to calling + * only redisAppendCommand and will always return NULL. */ +void *redisvCommand(redisContext *c, const char *format, va_list ap); +void *redisCommand(redisContext *c, const char *format, ...); +void *redisCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ext/hiredis-1.0.2/hiredis.pc.in b/ext/hiredis-1.0.2/hiredis.pc.in new file mode 100644 index 000000000..91b773183 --- /dev/null +++ b/ext/hiredis-1.0.2/hiredis.pc.in @@ -0,0 +1,12 @@ +prefix=@CMAKE_INSTALL_PREFIX@ +install_libdir=@CMAKE_INSTALL_LIBDIR@ +exec_prefix=${prefix} +libdir=${exec_prefix}/${install_libdir} +includedir=${prefix}/include +pkgincludedir=${includedir}/hiredis + +Name: hiredis +Description: Minimalistic C client library for Redis. +Version: @PROJECT_VERSION@ +Libs: -L${libdir} -lhiredis +Cflags: -I${pkgincludedir} -D_FILE_OFFSET_BITS=64 diff --git a/ext/hiredis-1.0.2/hiredis_ssl-config.cmake.in b/ext/hiredis-1.0.2/hiredis_ssl-config.cmake.in new file mode 100644 index 000000000..9a283dfc2 --- /dev/null +++ b/ext/hiredis-1.0.2/hiredis_ssl-config.cmake.in @@ -0,0 +1,13 @@ +@PACKAGE_INIT@ + +set_and_check(hiredis_ssl_INCLUDEDIR "@PACKAGE_INCLUDE_INSTALL_DIR@") + +IF (NOT TARGET hiredis::hiredis_ssl) + INCLUDE(${CMAKE_CURRENT_LIST_DIR}/hiredis_ssl-targets.cmake) +ENDIF() + +SET(hiredis_ssl_LIBRARIES hiredis::hiredis_ssl) +SET(hiredis_ssl_INCLUDE_DIRS ${hiredis_ssl_INCLUDEDIR}) + +check_required_components(hiredis_ssl) + diff --git a/ext/hiredis-1.0.2/hiredis_ssl.h b/ext/hiredis-1.0.2/hiredis_ssl.h new file mode 100644 index 000000000..604efe0c1 --- /dev/null +++ b/ext/hiredis-1.0.2/hiredis_ssl.h @@ -0,0 +1,127 @@ + +/* + * Copyright (c) 2019, Redis Labs + * + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __HIREDIS_SSL_H +#define __HIREDIS_SSL_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* This is the underlying struct for SSL in ssl.h, which is not included to + * keep build dependencies short here. + */ +struct ssl_st; + +/* A wrapper around OpenSSL SSL_CTX to allow easy SSL use without directly + * calling OpenSSL. + */ +typedef struct redisSSLContext redisSSLContext; + +/** + * Initialization errors that redisCreateSSLContext() may return. + */ + +typedef enum { + REDIS_SSL_CTX_NONE = 0, /* No Error */ + REDIS_SSL_CTX_CREATE_FAILED, /* Failed to create OpenSSL SSL_CTX */ + REDIS_SSL_CTX_CERT_KEY_REQUIRED, /* Client cert and key must both be specified or skipped */ + REDIS_SSL_CTX_CA_CERT_LOAD_FAILED, /* Failed to load CA Certificate or CA Path */ + REDIS_SSL_CTX_CLIENT_CERT_LOAD_FAILED, /* Failed to load client certificate */ + REDIS_SSL_CTX_PRIVATE_KEY_LOAD_FAILED /* Failed to load private key */ +} redisSSLContextError; + +/** + * Return the error message corresponding with the specified error code. + */ + +const char *redisSSLContextGetError(redisSSLContextError error); + +/** + * Helper function to initialize the OpenSSL library. + * + * OpenSSL requires one-time initialization before it can be used. Callers should + * call this function only once, and only if OpenSSL is not directly initialized + * elsewhere. + */ +int redisInitOpenSSL(void); + +/** + * Helper function to initialize an OpenSSL context that can be used + * to initiate SSL connections. + * + * cacert_filename is an optional name of a CA certificate/bundle file to load + * and use for validation. + * + * capath is an optional directory path where trusted CA certificate files are + * stored in an OpenSSL-compatible structure. + * + * cert_filename and private_key_filename are optional names of a client side + * certificate and private key files to use for authentication. They need to + * be both specified or omitted. + * + * server_name is an optional and will be used as a server name indication + * (SNI) TLS extension. 
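+ *
+ * Illustrative setup sketch (file names are hypothetical, not part of the
+ * original sources; "c" is assumed to be an already-connected redisContext):
+ *
+ *   redisSSLContextError ssl_error;
+ *   redisInitOpenSSL();
+ *   redisSSLContext *ssl = redisCreateSSLContext("ca.crt", NULL,
+ *       "client.crt", "client.key", NULL, &ssl_error);
+ *   if (ssl == NULL)
+ *       printf("SSL error: %s\n", redisSSLContextGetError(ssl_error));
+ *   else
+ *       redisInitiateSSLWithContext(c, ssl);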
+ * + * If error is non-null, it will be populated in case the context creation fails + * (returning a NULL). + */ + +redisSSLContext *redisCreateSSLContext(const char *cacert_filename, const char *capath, + const char *cert_filename, const char *private_key_filename, + const char *server_name, redisSSLContextError *error); + +/** + * Free a previously created OpenSSL context. + */ +void redisFreeSSLContext(redisSSLContext *redis_ssl_ctx); + +/** + * Initiate SSL on an existing redisContext. + * + * This is similar to redisInitiateSSL() but does not require the caller + * to directly interact with OpenSSL, and instead uses a redisSSLContext + * previously created using redisCreateSSLContext(). + */ + +int redisInitiateSSLWithContext(redisContext *c, redisSSLContext *redis_ssl_ctx); + +/** + * Initiate SSL/TLS negotiation on a provided OpenSSL SSL object. + */ + +int redisInitiateSSL(redisContext *c, struct ssl_st *ssl); + +#ifdef __cplusplus +} +#endif + +#endif /* __HIREDIS_SSL_H */ diff --git a/ext/hiredis-1.0.2/hiredis_ssl.pc.in b/ext/hiredis-1.0.2/hiredis_ssl.pc.in new file mode 100644 index 000000000..588a978a5 --- /dev/null +++ b/ext/hiredis-1.0.2/hiredis_ssl.pc.in @@ -0,0 +1,12 @@ +prefix=@CMAKE_INSTALL_PREFIX@ +exec_prefix=${prefix} +libdir=${exec_prefix}/lib +includedir=${prefix}/include +pkgincludedir=${includedir}/hiredis + +Name: hiredis_ssl +Description: SSL Support for hiredis. +Version: @PROJECT_VERSION@ +Requires: hiredis +Libs: -L${libdir} -lhiredis_ssl +Libs.private: -lssl -lcrypto diff --git a/ext/hiredis-1.0.2/include/hiredis/alloc.h b/ext/hiredis-1.0.2/include/hiredis/alloc.h new file mode 100644 index 000000000..34a05f49f --- /dev/null +++ b/ext/hiredis-1.0.2/include/hiredis/alloc.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2020, Michael Grunder + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef HIREDIS_ALLOC_H +#define HIREDIS_ALLOC_H + +#include /* for size_t */ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Structure pointing to our actually configured allocators */ +typedef struct hiredisAllocFuncs { + void *(*mallocFn)(size_t); + void *(*callocFn)(size_t,size_t); + void *(*reallocFn)(void*,size_t); + char *(*strdupFn)(const char*); + void (*freeFn)(void*); +} hiredisAllocFuncs; + +hiredisAllocFuncs hiredisSetAllocators(hiredisAllocFuncs *ha); +void hiredisResetAllocators(void); + +#ifndef _WIN32 + +/* Hiredis' configured allocator function pointer struct */ +extern hiredisAllocFuncs hiredisAllocFns; + +static inline void *hi_malloc(size_t size) { + return hiredisAllocFns.mallocFn(size); +} + +static inline void *hi_calloc(size_t nmemb, size_t size) { + return hiredisAllocFns.callocFn(nmemb, size); +} + +static inline void *hi_realloc(void *ptr, size_t size) { + return hiredisAllocFns.reallocFn(ptr, size); +} + +static inline char *hi_strdup(const char *str) { + return hiredisAllocFns.strdupFn(str); +} + +static inline void hi_free(void *ptr) { + hiredisAllocFns.freeFn(ptr); +} + +#else + +void *hi_malloc(size_t size); +void *hi_calloc(size_t nmemb, size_t size); +void *hi_realloc(void *ptr, size_t size); +char *hi_strdup(const char *str); +void hi_free(void *ptr); + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* HIREDIS_ALLOC_H */ diff --git a/ext/hiredis-1.0.2/include/hiredis/async.h b/ext/hiredis-1.0.2/include/hiredis/async.h new file mode 100644 index 000000000..b1d2cb263 --- /dev/null +++ b/ext/hiredis-1.0.2/include/hiredis/async.h @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2011, Pieter Noordhuis + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __HIREDIS_ASYNC_H +#define __HIREDIS_ASYNC_H +#include "hiredis.h" + +#ifdef __cplusplus +extern "C" { +#endif + +struct redisAsyncContext; /* need forward declaration of redisAsyncContext */ +struct dict; /* dictionary header is included in async.c */ + +/* Reply callback prototype and container */ +typedef void (redisCallbackFn)(struct redisAsyncContext*, void*, void*); +typedef struct redisCallback { + struct redisCallback *next; /* simple singly linked list */ + redisCallbackFn *fn; + int pending_subs; + void *privdata; +} redisCallback; + +/* List of callbacks for either regular replies or pub/sub */ +typedef struct redisCallbackList { + redisCallback *head, *tail; +} redisCallbackList; + +/* Connection callback prototypes */ +typedef void (redisDisconnectCallback)(const struct redisAsyncContext*, int status); +typedef void (redisConnectCallback)(const struct redisAsyncContext*, int status); +typedef void(redisTimerCallback)(void *timer, void *privdata); + +/* Context for an async connection to Redis */ +typedef struct redisAsyncContext { + /* Hold the regular context, so it can be realloc'ed. */ + redisContext c; + + /* Setup error flags so they can be used directly. */ + int err; + char *errstr; + + /* Not used by hiredis */ + void *data; + void (*dataCleanup)(void *privdata); + + /* Event library data and hooks */ + struct { + void *data; + + /* Hooks that are called when the library expects to start + * reading/writing. These functions should be idempotent. */ + void (*addRead)(void *privdata); + void (*delRead)(void *privdata); + void (*addWrite)(void *privdata); + void (*delWrite)(void *privdata); + void (*cleanup)(void *privdata); + void (*scheduleTimer)(void *privdata, struct timeval tv); + } ev; + + /* Called when either the connection is terminated due to an error or per + * user request. The status is set accordingly (REDIS_OK, REDIS_ERR). */ + redisDisconnectCallback *onDisconnect; + + /* Called when the first write event was received. 
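+ *
+ * Registered with redisAsyncSetConnectCallback(); an illustrative callback
+ * (hypothetical name, not part of the original sources) could look like:
+ *
+ *   void connectCallback(const redisAsyncContext *ac, int status) {
+ *       if (status != REDIS_OK)
+ *           printf("Connect failed: %s\n", ac->errstr);
+ *   }
+ *
+ *   redisAsyncSetConnectCallback(ac, connectCallback);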
*/ + redisConnectCallback *onConnect; + + /* Regular command callbacks */ + redisCallbackList replies; + + /* Address used for connect() */ + struct sockaddr *saddr; + size_t addrlen; + + /* Subscription callbacks */ + struct { + redisCallbackList invalid; + struct dict *channels; + struct dict *patterns; + } sub; + + /* Any configured RESP3 PUSH handler */ + redisAsyncPushFn *push_cb; +} redisAsyncContext; + +/* Functions that proxy to hiredis */ +redisAsyncContext *redisAsyncConnectWithOptions(const redisOptions *options); +redisAsyncContext *redisAsyncConnect(const char *ip, int port); +redisAsyncContext *redisAsyncConnectBind(const char *ip, int port, const char *source_addr); +redisAsyncContext *redisAsyncConnectBindWithReuse(const char *ip, int port, + const char *source_addr); +redisAsyncContext *redisAsyncConnectUnix(const char *path); +int redisAsyncSetConnectCallback(redisAsyncContext *ac, redisConnectCallback *fn); +int redisAsyncSetDisconnectCallback(redisAsyncContext *ac, redisDisconnectCallback *fn); + +redisAsyncPushFn *redisAsyncSetPushCallback(redisAsyncContext *ac, redisAsyncPushFn *fn); +int redisAsyncSetTimeout(redisAsyncContext *ac, struct timeval tv); +void redisAsyncDisconnect(redisAsyncContext *ac); +void redisAsyncFree(redisAsyncContext *ac); + +/* Handle read/write events */ +void redisAsyncHandleRead(redisAsyncContext *ac); +void redisAsyncHandleWrite(redisAsyncContext *ac); +void redisAsyncHandleTimeout(redisAsyncContext *ac); +void redisAsyncRead(redisAsyncContext *ac); +void redisAsyncWrite(redisAsyncContext *ac); + +/* Command functions for an async context. Write the command to the + * output buffer and register the provided callback. */ +int redisvAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *format, va_list ap); +int redisAsyncCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *format, ...); +int redisAsyncCommandArgv(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, int argc, const char **argv, const size_t *argvlen); +int redisAsyncFormattedCommand(redisAsyncContext *ac, redisCallbackFn *fn, void *privdata, const char *cmd, size_t len); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ext/hiredis-1.0.2/include/hiredis/async_private.h b/ext/hiredis-1.0.2/include/hiredis/async_private.h new file mode 100644 index 000000000..b9d23fffd --- /dev/null +++ b/ext/hiredis-1.0.2/include/hiredis/async_private.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2011, Pieter Noordhuis + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __HIREDIS_ASYNC_PRIVATE_H +#define __HIREDIS_ASYNC_PRIVATE_H + +#define _EL_ADD_READ(ctx) \ + do { \ + refreshTimeout(ctx); \ + if ((ctx)->ev.addRead) (ctx)->ev.addRead((ctx)->ev.data); \ + } while (0) +#define _EL_DEL_READ(ctx) do { \ + if ((ctx)->ev.delRead) (ctx)->ev.delRead((ctx)->ev.data); \ + } while(0) +#define _EL_ADD_WRITE(ctx) \ + do { \ + refreshTimeout(ctx); \ + if ((ctx)->ev.addWrite) (ctx)->ev.addWrite((ctx)->ev.data); \ + } while (0) +#define _EL_DEL_WRITE(ctx) do { \ + if ((ctx)->ev.delWrite) (ctx)->ev.delWrite((ctx)->ev.data); \ + } while(0) +#define _EL_CLEANUP(ctx) do { \ + if ((ctx)->ev.cleanup) (ctx)->ev.cleanup((ctx)->ev.data); \ + ctx->ev.cleanup = NULL; \ + } while(0); + +static inline void refreshTimeout(redisAsyncContext *ctx) { + #define REDIS_TIMER_ISSET(tvp) \ + (tvp && ((tvp)->tv_sec || (tvp)->tv_usec)) + + #define REDIS_EL_TIMER(ac, tvp) \ + if ((ac)->ev.scheduleTimer && REDIS_TIMER_ISSET(tvp)) { \ + (ac)->ev.scheduleTimer((ac)->ev.data, *(tvp)); \ + } + + if (ctx->c.flags & REDIS_CONNECTED) { + REDIS_EL_TIMER(ctx, ctx->c.command_timeout); + } else { + REDIS_EL_TIMER(ctx, ctx->c.connect_timeout); + } +} + +void __redisAsyncDisconnect(redisAsyncContext *ac); +void redisProcessCallbacks(redisAsyncContext *ac); + +#endif /* __HIREDIS_ASYNC_PRIVATE_H */ diff --git a/ext/hiredis-1.0.2/include/hiredis/dict.h b/ext/hiredis-1.0.2/include/hiredis/dict.h new file mode 100644 index 000000000..95fcd280e --- /dev/null +++ b/ext/hiredis-1.0.2/include/hiredis/dict.h @@ -0,0 +1,126 @@ +/* Hash table implementation. + * + * This file implements in memory hash tables with insert/del/replace/find/ + * get-random-element operations. Hash tables will auto resize if needed + * tables of power of two in size are used, collisions are handled by + * chaining. See the source code for more information... :) + * + * Copyright (c) 2006-2010, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __DICT_H +#define __DICT_H + +#define DICT_OK 0 +#define DICT_ERR 1 + +/* Unused arguments generate annoying warnings... */ +#define DICT_NOTUSED(V) ((void) V) + +typedef struct dictEntry { + void *key; + void *val; + struct dictEntry *next; +} dictEntry; + +typedef struct dictType { + unsigned int (*hashFunction)(const void *key); + void *(*keyDup)(void *privdata, const void *key); + void *(*valDup)(void *privdata, const void *obj); + int (*keyCompare)(void *privdata, const void *key1, const void *key2); + void (*keyDestructor)(void *privdata, void *key); + void (*valDestructor)(void *privdata, void *obj); +} dictType; + +typedef struct dict { + dictEntry **table; + dictType *type; + unsigned long size; + unsigned long sizemask; + unsigned long used; + void *privdata; +} dict; + +typedef struct dictIterator { + dict *ht; + int index; + dictEntry *entry, *nextEntry; +} dictIterator; + +/* This is the initial size of every hash table */ +#define DICT_HT_INITIAL_SIZE 4 + +/* ------------------------------- Macros ------------------------------------*/ +#define dictFreeEntryVal(ht, entry) \ + if ((ht)->type->valDestructor) \ + (ht)->type->valDestructor((ht)->privdata, (entry)->val) + +#define dictSetHashVal(ht, entry, _val_) do { \ + if ((ht)->type->valDup) \ + entry->val = (ht)->type->valDup((ht)->privdata, _val_); \ + else \ + entry->val = (_val_); \ +} while(0) + +#define dictFreeEntryKey(ht, entry) \ + if ((ht)->type->keyDestructor) \ + (ht)->type->keyDestructor((ht)->privdata, (entry)->key) + +#define dictSetHashKey(ht, entry, _key_) do { \ + if ((ht)->type->keyDup) \ + entry->key = (ht)->type->keyDup((ht)->privdata, _key_); \ + else \ + entry->key = (_key_); \ +} while(0) + +#define dictCompareHashKeys(ht, key1, key2) \ + (((ht)->type->keyCompare) ? 
\ + (ht)->type->keyCompare((ht)->privdata, key1, key2) : \ + (key1) == (key2)) + +#define dictHashKey(ht, key) (ht)->type->hashFunction(key) + +#define dictGetEntryKey(he) ((he)->key) +#define dictGetEntryVal(he) ((he)->val) +#define dictSlots(ht) ((ht)->size) +#define dictSize(ht) ((ht)->used) + +/* API */ +static unsigned int dictGenHashFunction(const unsigned char *buf, int len); +static dict *dictCreate(dictType *type, void *privDataPtr); +static int dictExpand(dict *ht, unsigned long size); +static int dictAdd(dict *ht, void *key, void *val); +static int dictReplace(dict *ht, void *key, void *val); +static int dictDelete(dict *ht, const void *key); +static void dictRelease(dict *ht); +static dictEntry * dictFind(dict *ht, const void *key); +static dictIterator *dictGetIterator(dict *ht); +static dictEntry *dictNext(dictIterator *iter); +static void dictReleaseIterator(dictIterator *iter); + +#endif /* __DICT_H */ diff --git a/ext/hiredis-1.0.2/include/hiredis/fmacros.h b/ext/hiredis-1.0.2/include/hiredis/fmacros.h new file mode 100644 index 000000000..3227faafd --- /dev/null +++ b/ext/hiredis-1.0.2/include/hiredis/fmacros.h @@ -0,0 +1,12 @@ +#ifndef __HIREDIS_FMACRO_H +#define __HIREDIS_FMACRO_H + +#define _XOPEN_SOURCE 600 +#define _POSIX_C_SOURCE 200112L + +#if defined(__APPLE__) && defined(__MACH__) +/* Enable TCP_KEEPALIVE */ +#define _DARWIN_C_SOURCE +#endif + +#endif diff --git a/ext/hiredis-1.0.2/include/hiredis/hiredis.h b/ext/hiredis-1.0.2/include/hiredis/hiredis.h new file mode 100644 index 000000000..3bc46d992 --- /dev/null +++ b/ext/hiredis-1.0.2/include/hiredis/hiredis.h @@ -0,0 +1,336 @@ +/* + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2014, Pieter Noordhuis + * Copyright (c) 2015, Matt Stancliff , + * Jan-Erik Rediger + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __HIREDIS_H +#define __HIREDIS_H +#include "read.h" +#include /* for va_list */ +#ifndef _MSC_VER +#include /* for struct timeval */ +#else +struct timeval; /* forward declaration */ +typedef long long ssize_t; +#endif +#include /* uintXX_t, etc */ +#include "sds.h" /* for sds */ +#include "alloc.h" /* for allocation wrappers */ + +#define HIREDIS_MAJOR 1 +#define HIREDIS_MINOR 0 +#define HIREDIS_PATCH 2 +#define HIREDIS_SONAME 1.0.0 + +/* Connection type can be blocking or non-blocking and is set in the + * least significant bit of the flags field in redisContext. */ +#define REDIS_BLOCK 0x1 + +/* Connection may be disconnected before being free'd. The second bit + * in the flags field is set when the context is connected. */ +#define REDIS_CONNECTED 0x2 + +/* The async API might try to disconnect cleanly and flush the output + * buffer and read all subsequent replies before disconnecting. + * This flag means no new commands can come in and the connection + * should be terminated once all replies have been read. */ +#define REDIS_DISCONNECTING 0x4 + +/* Flag specific to the async API which means that the context should be clean + * up as soon as possible. */ +#define REDIS_FREEING 0x8 + +/* Flag that is set when an async callback is executed. */ +#define REDIS_IN_CALLBACK 0x10 + +/* Flag that is set when the async context has one or more subscriptions. */ +#define REDIS_SUBSCRIBED 0x20 + +/* Flag that is set when monitor mode is active */ +#define REDIS_MONITORING 0x40 + +/* Flag that is set when we should set SO_REUSEADDR before calling bind() */ +#define REDIS_REUSEADDR 0x80 + +/** + * Flag that indicates the user does not want the context to + * be automatically freed upon error + */ +#define REDIS_NO_AUTO_FREE 0x200 + +#define REDIS_KEEPALIVE_INTERVAL 15 /* seconds */ + +/* number of times we retry to connect in the case of EADDRNOTAVAIL and + * SO_REUSEADDR is being used. */ +#define REDIS_CONNECT_RETRIES 10 + +/* Forward declarations for structs defined elsewhere */ +struct redisAsyncContext; +struct redisContext; + +/* RESP3 push helpers and callback prototypes */ +#define redisIsPushReply(r) (((redisReply*)(r))->type == REDIS_REPLY_PUSH) +typedef void (redisPushFn)(void *, void *); +typedef void (redisAsyncPushFn)(struct redisAsyncContext *, void *); + +#ifdef __cplusplus +extern "C" { +#endif + +/* This is the reply object returned by redisCommand() */ +typedef struct redisReply { + int type; /* REDIS_REPLY_* */ + long long integer; /* The integer when type is REDIS_REPLY_INTEGER */ + double dval; /* The double when type is REDIS_REPLY_DOUBLE */ + size_t len; /* Length of string */ + char *str; /* Used for REDIS_REPLY_ERROR, REDIS_REPLY_STRING + REDIS_REPLY_VERB, and REDIS_REPLY_DOUBLE (in additional to dval). */ + char vtype[4]; /* Used for REDIS_REPLY_VERB, contains the null + terminated 3 character content type, such as "txt". */ + size_t elements; /* number of elements, for REDIS_REPLY_ARRAY */ + struct redisReply **element; /* elements vector for REDIS_REPLY_ARRAY */ +} redisReply; + +redisReader *redisReaderCreate(void); + +/* Function to free the reply objects hiredis returns by default. */ +void freeReplyObject(void *reply); + +/* Functions to format a command according to the protocol. 
*/ +int redisvFormatCommand(char **target, const char *format, va_list ap); +int redisFormatCommand(char **target, const char *format, ...); +int redisFormatCommandArgv(char **target, int argc, const char **argv, const size_t *argvlen); +int redisFormatSdsCommandArgv(sds *target, int argc, const char ** argv, const size_t *argvlen); +void redisFreeCommand(char *cmd); +void redisFreeSdsCommand(sds cmd); + +enum redisConnectionType { + REDIS_CONN_TCP, + REDIS_CONN_UNIX, + REDIS_CONN_USERFD +}; + +struct redisSsl; + +#define REDIS_OPT_NONBLOCK 0x01 +#define REDIS_OPT_REUSEADDR 0x02 + +/** + * Don't automatically free the async object on a connection failure, + * or other implicit conditions. Only free on an explicit call to disconnect() or free() + */ +#define REDIS_OPT_NOAUTOFREE 0x04 + +/* Don't automatically intercept and free RESP3 PUSH replies. */ +#define REDIS_OPT_NO_PUSH_AUTOFREE 0x08 + +/* In Unix systems a file descriptor is a regular signed int, with -1 + * representing an invalid descriptor. In Windows it is a SOCKET + * (32- or 64-bit unsigned integer depending on the architecture), where + * all bits set (~0) is INVALID_SOCKET. */ +#ifndef _WIN32 +typedef int redisFD; +#define REDIS_INVALID_FD -1 +#else +#ifdef _WIN64 +typedef unsigned long long redisFD; /* SOCKET = 64-bit UINT_PTR */ +#else +typedef unsigned long redisFD; /* SOCKET = 32-bit UINT_PTR */ +#endif +#define REDIS_INVALID_FD ((redisFD)(~0)) /* INVALID_SOCKET */ +#endif + +typedef struct { + /* + * the type of connection to use. This also indicates which + * `endpoint` member field to use + */ + int type; + /* bit field of REDIS_OPT_xxx */ + int options; + /* timeout value for connect operation. If NULL, no timeout is used */ + const struct timeval *connect_timeout; + /* timeout value for commands. If NULL, no timeout is used. This can be + * updated at runtime with redisSetTimeout/redisAsyncSetTimeout. */ + const struct timeval *command_timeout; + union { + /** use this field for tcp/ip connections */ + struct { + const char *source_addr; + const char *ip; + int port; + } tcp; + /** use this field for unix domain sockets */ + const char *unix_socket; + /** + * use this field to have hiredis operate an already-open + * file descriptor */ + redisFD fd; + } endpoint; + + /* Optional user defined data/destructor */ + void *privdata; + void (*free_privdata)(void *); + + /* A user defined PUSH message callback */ + redisPushFn *push_cb; + redisAsyncPushFn *async_push_cb; +} redisOptions; + +/** + * Helper macros to initialize options to their specified fields. 
+ */ +#define REDIS_OPTIONS_SET_TCP(opts, ip_, port_) \ + (opts)->type = REDIS_CONN_TCP; \ + (opts)->endpoint.tcp.ip = ip_; \ + (opts)->endpoint.tcp.port = port_; + +#define REDIS_OPTIONS_SET_UNIX(opts, path) \ + (opts)->type = REDIS_CONN_UNIX; \ + (opts)->endpoint.unix_socket = path; + +#define REDIS_OPTIONS_SET_PRIVDATA(opts, data, dtor) \ + (opts)->privdata = data; \ + (opts)->free_privdata = dtor; \ + +typedef struct redisContextFuncs { + void (*free_privctx)(void *); + void (*async_read)(struct redisAsyncContext *); + void (*async_write)(struct redisAsyncContext *); + ssize_t (*read)(struct redisContext *, char *, size_t); + ssize_t (*write)(struct redisContext *); +} redisContextFuncs; + +/* Context for a connection to Redis */ +typedef struct redisContext { + const redisContextFuncs *funcs; /* Function table */ + + int err; /* Error flags, 0 when there is no error */ + char errstr[128]; /* String representation of error when applicable */ + redisFD fd; + int flags; + char *obuf; /* Write buffer */ + redisReader *reader; /* Protocol reader */ + + enum redisConnectionType connection_type; + struct timeval *connect_timeout; + struct timeval *command_timeout; + + struct { + char *host; + char *source_addr; + int port; + } tcp; + + struct { + char *path; + } unix_sock; + + /* For non-blocking connect */ + struct sockadr *saddr; + size_t addrlen; + + /* Optional data and corresponding destructor users can use to provide + * context to a given redisContext. Not used by hiredis. */ + void *privdata; + void (*free_privdata)(void *); + + /* Internal context pointer presently used by hiredis to manage + * SSL connections. */ + void *privctx; + + /* An optional RESP3 PUSH handler */ + redisPushFn *push_cb; +} redisContext; + +redisContext *redisConnectWithOptions(const redisOptions *options); +redisContext *redisConnect(const char *ip, int port); +redisContext *redisConnectWithTimeout(const char *ip, int port, const struct timeval tv); +redisContext *redisConnectNonBlock(const char *ip, int port); +redisContext *redisConnectBindNonBlock(const char *ip, int port, + const char *source_addr); +redisContext *redisConnectBindNonBlockWithReuse(const char *ip, int port, + const char *source_addr); +redisContext *redisConnectUnix(const char *path); +redisContext *redisConnectUnixWithTimeout(const char *path, const struct timeval tv); +redisContext *redisConnectUnixNonBlock(const char *path); +redisContext *redisConnectFd(redisFD fd); + +/** + * Reconnect the given context using the saved information. + * + * This re-uses the exact same connect options as in the initial connection. + * host, ip (or path), timeout and bind address are reused, + * flags are used unmodified from the existing context. + * + * Returns REDIS_OK on successful connect or REDIS_ERR otherwise. + */ +int redisReconnect(redisContext *c); + +redisPushFn *redisSetPushCallback(redisContext *c, redisPushFn *fn); +int redisSetTimeout(redisContext *c, const struct timeval tv); +int redisEnableKeepAlive(redisContext *c); +void redisFree(redisContext *c); +redisFD redisFreeKeepFd(redisContext *c); +int redisBufferRead(redisContext *c); +int redisBufferWrite(redisContext *c, int *done); + +/* In a blocking context, this function first checks if there are unconsumed + * replies to return and returns one if so. Otherwise, it flushes the output + * buffer to the socket and reads until it has a reply. In a non-blocking + * context, it will return unconsumed replies until there are no more. 
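A minimal synchronous usage sketch for the options/context API declared above, shown here for reference only; the host, port, timeout, and key names are placeholders, and the include path assumes the vendored ext/hiredis-1.0.2/include layout.

#include <stdio.h>
#include <sys/time.h>
#include <hiredis/hiredis.h>

int main(void) {
    struct timeval connect_timeout = { 1, 500000 };      /* 1.5 second connect timeout */
    redisOptions opts = {0};
    REDIS_OPTIONS_SET_TCP(&opts, "127.0.0.1", 6379);
    opts.connect_timeout = &connect_timeout;

    redisContext *c = redisConnectWithOptions(&opts);
    if (c == NULL || c->err) {
        fprintf(stderr, "connect failed: %s\n", c ? c->errstr : "out of memory");
        if (c) redisFree(c);
        return 1;
    }

    redisReply *reply = redisCommand(c, "SET %s %s", "greeting", "hello");
    if (reply) freeReplyObject(reply);

    reply = redisCommand(c, "GET %s", "greeting");
    if (reply && reply->type == REDIS_REPLY_STRING)
        printf("greeting = %s\n", reply->str);
    if (reply) freeReplyObject(reply);

    redisFree(c);
    return 0;
}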
*/ +int redisGetReply(redisContext *c, void **reply); +int redisGetReplyFromReader(redisContext *c, void **reply); + +/* Write a formatted command to the output buffer. Use these functions in blocking mode + * to get a pipeline of commands. */ +int redisAppendFormattedCommand(redisContext *c, const char *cmd, size_t len); + +/* Write a command to the output buffer. Use these functions in blocking mode + * to get a pipeline of commands. */ +int redisvAppendCommand(redisContext *c, const char *format, va_list ap); +int redisAppendCommand(redisContext *c, const char *format, ...); +int redisAppendCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen); + +/* Issue a command to Redis. In a blocking context, it is identical to calling + * redisAppendCommand, followed by redisGetReply. The function will return + * NULL if there was an error in performing the request, otherwise it will + * return the reply. In a non-blocking context, it is identical to calling + * only redisAppendCommand and will always return NULL. */ +void *redisvCommand(redisContext *c, const char *format, va_list ap); +void *redisCommand(redisContext *c, const char *format, ...); +void *redisCommandArgv(redisContext *c, int argc, const char **argv, const size_t *argvlen); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ext/hiredis-1.0.2/include/hiredis/hiredis_ssl.h b/ext/hiredis-1.0.2/include/hiredis/hiredis_ssl.h new file mode 100644 index 000000000..604efe0c1 --- /dev/null +++ b/ext/hiredis-1.0.2/include/hiredis/hiredis_ssl.h @@ -0,0 +1,127 @@ + +/* + * Copyright (c) 2019, Redis Labs + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __HIREDIS_SSL_H +#define __HIREDIS_SSL_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* This is the underlying struct for SSL in ssl.h, which is not included to + * keep build dependencies short here. + */ +struct ssl_st; + +/* A wrapper around OpenSSL SSL_CTX to allow easy SSL use without directly + * calling OpenSSL. 
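The blocking pipeline pattern that the comments above describe, as a short sketch; it assumes `c` is the connected blocking redisContext from the previous example, and the key name is again a placeholder.

redisReply *reply;
int i;

redisAppendCommand(c, "SET %s %d", "counter", 0);
for (i = 0; i < 3; i++)
    redisAppendCommand(c, "INCR %s", "counter");

for (i = 0; i < 4; i++) {                     /* one reply per queued command */
    if (redisGetReply(c, (void **)&reply) != REDIS_OK)
        break;                                /* c->err / c->errstr describe the failure */
    freeReplyObject(reply);
}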
+ */ +typedef struct redisSSLContext redisSSLContext; + +/** + * Initialization errors that redisCreateSSLContext() may return. + */ + +typedef enum { + REDIS_SSL_CTX_NONE = 0, /* No Error */ + REDIS_SSL_CTX_CREATE_FAILED, /* Failed to create OpenSSL SSL_CTX */ + REDIS_SSL_CTX_CERT_KEY_REQUIRED, /* Client cert and key must both be specified or skipped */ + REDIS_SSL_CTX_CA_CERT_LOAD_FAILED, /* Failed to load CA Certificate or CA Path */ + REDIS_SSL_CTX_CLIENT_CERT_LOAD_FAILED, /* Failed to load client certificate */ + REDIS_SSL_CTX_PRIVATE_KEY_LOAD_FAILED /* Failed to load private key */ +} redisSSLContextError; + +/** + * Return the error message corresponding with the specified error code. + */ + +const char *redisSSLContextGetError(redisSSLContextError error); + +/** + * Helper function to initialize the OpenSSL library. + * + * OpenSSL requires one-time initialization before it can be used. Callers should + * call this function only once, and only if OpenSSL is not directly initialized + * elsewhere. + */ +int redisInitOpenSSL(void); + +/** + * Helper function to initialize an OpenSSL context that can be used + * to initiate SSL connections. + * + * cacert_filename is an optional name of a CA certificate/bundle file to load + * and use for validation. + * + * capath is an optional directory path where trusted CA certificate files are + * stored in an OpenSSL-compatible structure. + * + * cert_filename and private_key_filename are optional names of a client side + * certificate and private key files to use for authentication. They need to + * be both specified or omitted. + * + * server_name is an optional and will be used as a server name indication + * (SNI) TLS extension. + * + * If error is non-null, it will be populated in case the context creation fails + * (returning a NULL). + */ + +redisSSLContext *redisCreateSSLContext(const char *cacert_filename, const char *capath, + const char *cert_filename, const char *private_key_filename, + const char *server_name, redisSSLContextError *error); + +/** + * Free a previously created OpenSSL context. + */ +void redisFreeSSLContext(redisSSLContext *redis_ssl_ctx); + +/** + * Initiate SSL on an existing redisContext. + * + * This is similar to redisInitiateSSL() but does not require the caller + * to directly interact with OpenSSL, and instead uses a redisSSLContext + * previously created using redisCreateSSLContext(). + */ + +int redisInitiateSSLWithContext(redisContext *c, redisSSLContext *redis_ssl_ctx); + +/** + * Initiate SSL/TLS negotiation on a provided OpenSSL SSL object. + */ + +int redisInitiateSSL(redisContext *c, struct ssl_st *ssl); + +#ifdef __cplusplus +} +#endif + +#endif /* __HIREDIS_SSL_H */ diff --git a/ext/hiredis-1.0.2/include/hiredis/net.h b/ext/hiredis-1.0.2/include/hiredis/net.h new file mode 100644 index 000000000..9f43283a5 --- /dev/null +++ b/ext/hiredis-1.0.2/include/hiredis/net.h @@ -0,0 +1,56 @@ +/* Extracted from anet.c to work properly with Hiredis error reporting. + * + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2014, Pieter Noordhuis + * Copyright (c) 2015, Matt Stancliff , + * Jan-Erik Rediger + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
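A hedged sketch of the TLS flow declared here, assuming hiredis was built with its OpenSSL support and that `c` is an already-connected redisContext; the certificate paths and SNI hostname are placeholders.

#include <stdio.h>
#include <hiredis/hiredis.h>
#include <hiredis/hiredis_ssl.h>

int upgrade_to_tls(redisContext *c) {
    redisSSLContextError ssl_err = REDIS_SSL_CTX_NONE;

    redisInitOpenSSL();                       /* once per process, if not initialized elsewhere */
    redisSSLContext *ssl_ctx = redisCreateSSLContext(
        "ca.pem", NULL,                       /* CA bundle; no CA directory */
        "client.pem", "client.key",           /* optional client cert + key, both or neither */
        "redis.example.com",                  /* SNI server name */
        &ssl_err);
    if (ssl_ctx == NULL) {
        fprintf(stderr, "TLS setup failed: %s\n", redisSSLContextGetError(ssl_err));
        return REDIS_ERR;
    }
    if (redisInitiateSSLWithContext(c, ssl_ctx) != REDIS_OK) {
        fprintf(stderr, "TLS handshake failed: %s\n", c->errstr);
        redisFreeSSLContext(ssl_ctx);
        return REDIS_ERR;
    }
    /* ssl_ctx must stay alive while c is in use; free it after redisFree(c). */
    return REDIS_OK;
}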
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __NET_H +#define __NET_H + +#include "hiredis.h" + +void redisNetClose(redisContext *c); +ssize_t redisNetRead(redisContext *c, char *buf, size_t bufcap); +ssize_t redisNetWrite(redisContext *c); + +int redisCheckSocketError(redisContext *c); +int redisContextSetTimeout(redisContext *c, const struct timeval tv); +int redisContextConnectTcp(redisContext *c, const char *addr, int port, const struct timeval *timeout); +int redisContextConnectBindTcp(redisContext *c, const char *addr, int port, + const struct timeval *timeout, + const char *source_addr); +int redisContextConnectUnix(redisContext *c, const char *path, const struct timeval *timeout); +int redisKeepAlive(redisContext *c, int interval); +int redisCheckConnectDone(redisContext *c, int *completed); + +int redisSetTcpNoDelay(redisContext *c); + +#endif diff --git a/ext/hiredis-1.0.2/include/hiredis/read.h b/ext/hiredis-1.0.2/include/hiredis/read.h new file mode 100644 index 000000000..2d74d77a5 --- /dev/null +++ b/ext/hiredis-1.0.2/include/hiredis/read.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2011, Pieter Noordhuis + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#ifndef __HIREDIS_READ_H +#define __HIREDIS_READ_H +#include /* for size_t */ + +#define REDIS_ERR -1 +#define REDIS_OK 0 + +/* When an error occurs, the err flag in a context is set to hold the type of + * error that occurred. REDIS_ERR_IO means there was an I/O error and you + * should use the "errno" variable to find out what is wrong. + * For other values, the "errstr" field will hold a description. */ +#define REDIS_ERR_IO 1 /* Error in read or write */ +#define REDIS_ERR_EOF 3 /* End of file */ +#define REDIS_ERR_PROTOCOL 4 /* Protocol error */ +#define REDIS_ERR_OOM 5 /* Out of memory */ +#define REDIS_ERR_TIMEOUT 6 /* Timed out */ +#define REDIS_ERR_OTHER 2 /* Everything else... */ + +#define REDIS_REPLY_STRING 1 +#define REDIS_REPLY_ARRAY 2 +#define REDIS_REPLY_INTEGER 3 +#define REDIS_REPLY_NIL 4 +#define REDIS_REPLY_STATUS 5 +#define REDIS_REPLY_ERROR 6 +#define REDIS_REPLY_DOUBLE 7 +#define REDIS_REPLY_BOOL 8 +#define REDIS_REPLY_MAP 9 +#define REDIS_REPLY_SET 10 +#define REDIS_REPLY_ATTR 11 +#define REDIS_REPLY_PUSH 12 +#define REDIS_REPLY_BIGNUM 13 +#define REDIS_REPLY_VERB 14 + +/* Default max unused reader buffer. */ +#define REDIS_READER_MAX_BUF (1024*16) + +/* Default multi-bulk element limit */ +#define REDIS_READER_MAX_ARRAY_ELEMENTS ((1LL<<32) - 1) + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct redisReadTask { + int type; + long long elements; /* number of elements in multibulk container */ + int idx; /* index in parent (array) object */ + void *obj; /* holds user-generated value for a read task */ + struct redisReadTask *parent; /* parent task */ + void *privdata; /* user-settable arbitrary field */ +} redisReadTask; + +typedef struct redisReplyObjectFunctions { + void *(*createString)(const redisReadTask*, char*, size_t); + void *(*createArray)(const redisReadTask*, size_t); + void *(*createInteger)(const redisReadTask*, long long); + void *(*createDouble)(const redisReadTask*, double, char*, size_t); + void *(*createNil)(const redisReadTask*); + void *(*createBool)(const redisReadTask*, int); + void (*freeObject)(void*); +} redisReplyObjectFunctions; + +typedef struct redisReader { + int err; /* Error flags, 0 when there is no error */ + char errstr[128]; /* String representation of error when applicable */ + + char *buf; /* Read buffer */ + size_t pos; /* Buffer cursor */ + size_t len; /* Buffer length */ + size_t maxbuf; /* Max length of unused buffer */ + long long maxelements; /* Max multi-bulk elements */ + + redisReadTask **task; + int tasks; + + int ridx; /* Index of current read task */ + void *reply; /* Temporary reply pointer */ + + redisReplyObjectFunctions *fn; + void *privdata; +} redisReader; + +/* Public API for the protocol parser. 
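To illustrate the parser API declared just below: a standalone redisReader can be fed raw RESP bytes and asked for decoded replies, independent of any socket. A small sketch follows; the RESP buffer is hand-written for the example, and redisReaderCreate() itself is declared in hiredis.h.

#include <stdio.h>
#include <hiredis/hiredis.h>

void parse_example(void) {
    redisReader *reader = redisReaderCreate();
    const char resp[] = "*2\r\n$5\r\nhello\r\n$5\r\nworld\r\n";
    void *reply = NULL;

    if (redisReaderFeed(reader, resp, sizeof(resp) - 1) == REDIS_OK &&
        redisReaderGetReply(reader, &reply) == REDIS_OK && reply != NULL) {
        redisReply *r = reply;
        if (r->type == REDIS_REPLY_ARRAY)                /* two bulk strings: "hello", "world" */
            printf("array of %zu elements, first = %s\n", r->elements, r->element[0]->str);
        freeReplyObject(reply);
    }
    redisReaderFree(reader);
}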
*/ +redisReader *redisReaderCreateWithFunctions(redisReplyObjectFunctions *fn); +void redisReaderFree(redisReader *r); +int redisReaderFeed(redisReader *r, const char *buf, size_t len); +int redisReaderGetReply(redisReader *r, void **reply); + +#define redisReaderSetPrivdata(_r, _p) (int)(((redisReader*)(_r))->privdata = (_p)) +#define redisReaderGetObject(_r) (((redisReader*)(_r))->reply) +#define redisReaderGetError(_r) (((redisReader*)(_r))->errstr) + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ext/hiredis-1.0.2/include/hiredis/sds.h b/ext/hiredis-1.0.2/include/hiredis/sds.h new file mode 100644 index 000000000..eda8833b5 --- /dev/null +++ b/ext/hiredis-1.0.2/include/hiredis/sds.h @@ -0,0 +1,278 @@ +/* SDSLib 2.0 -- A C dynamic strings library + * + * Copyright (c) 2006-2015, Salvatore Sanfilippo + * Copyright (c) 2015, Oran Agra + * Copyright (c) 2015, Redis Labs, Inc + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __SDS_H +#define __SDS_H + +#define SDS_MAX_PREALLOC (1024*1024) +#ifdef _MSC_VER +#define __attribute__(x) +typedef long long ssize_t; +#define SSIZE_MAX (LLONG_MAX >> 1) +#endif + +#include +#include +#include + +typedef char *sds; + +/* Note: sdshdr5 is never used, we just access the flags byte directly. + * However is here to document the layout of type 5 SDS strings. 
*/ +struct __attribute__ ((__packed__)) sdshdr5 { + unsigned char flags; /* 3 lsb of type, and 5 msb of string length */ + char buf[]; +}; +struct __attribute__ ((__packed__)) sdshdr8 { + uint8_t len; /* used */ + uint8_t alloc; /* excluding the header and null terminator */ + unsigned char flags; /* 3 lsb of type, 5 unused bits */ + char buf[]; +}; +struct __attribute__ ((__packed__)) sdshdr16 { + uint16_t len; /* used */ + uint16_t alloc; /* excluding the header and null terminator */ + unsigned char flags; /* 3 lsb of type, 5 unused bits */ + char buf[]; +}; +struct __attribute__ ((__packed__)) sdshdr32 { + uint32_t len; /* used */ + uint32_t alloc; /* excluding the header and null terminator */ + unsigned char flags; /* 3 lsb of type, 5 unused bits */ + char buf[]; +}; +struct __attribute__ ((__packed__)) sdshdr64 { + uint64_t len; /* used */ + uint64_t alloc; /* excluding the header and null terminator */ + unsigned char flags; /* 3 lsb of type, 5 unused bits */ + char buf[]; +}; + +#define SDS_TYPE_5 0 +#define SDS_TYPE_8 1 +#define SDS_TYPE_16 2 +#define SDS_TYPE_32 3 +#define SDS_TYPE_64 4 +#define SDS_TYPE_MASK 7 +#define SDS_TYPE_BITS 3 +#define SDS_HDR_VAR(T,s) struct sdshdr##T *sh = (struct sdshdr##T *)((s)-(sizeof(struct sdshdr##T))); +#define SDS_HDR(T,s) ((struct sdshdr##T *)((s)-(sizeof(struct sdshdr##T)))) +#define SDS_TYPE_5_LEN(f) ((f)>>SDS_TYPE_BITS) + +static inline size_t sdslen(const sds s) { + unsigned char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: + return SDS_TYPE_5_LEN(flags); + case SDS_TYPE_8: + return SDS_HDR(8,s)->len; + case SDS_TYPE_16: + return SDS_HDR(16,s)->len; + case SDS_TYPE_32: + return SDS_HDR(32,s)->len; + case SDS_TYPE_64: + return SDS_HDR(64,s)->len; + } + return 0; +} + +static inline size_t sdsavail(const sds s) { + unsigned char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: { + return 0; + } + case SDS_TYPE_8: { + SDS_HDR_VAR(8,s); + return sh->alloc - sh->len; + } + case SDS_TYPE_16: { + SDS_HDR_VAR(16,s); + return sh->alloc - sh->len; + } + case SDS_TYPE_32: { + SDS_HDR_VAR(32,s); + return sh->alloc - sh->len; + } + case SDS_TYPE_64: { + SDS_HDR_VAR(64,s); + return sh->alloc - sh->len; + } + } + return 0; +} + +static inline void sdssetlen(sds s, size_t newlen) { + unsigned char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: + { + unsigned char *fp = ((unsigned char*)s)-1; + *fp = (unsigned char)(SDS_TYPE_5 | (newlen << SDS_TYPE_BITS)); + } + break; + case SDS_TYPE_8: + SDS_HDR(8,s)->len = (uint8_t)newlen; + break; + case SDS_TYPE_16: + SDS_HDR(16,s)->len = (uint16_t)newlen; + break; + case SDS_TYPE_32: + SDS_HDR(32,s)->len = (uint32_t)newlen; + break; + case SDS_TYPE_64: + SDS_HDR(64,s)->len = (uint64_t)newlen; + break; + } +} + +static inline void sdsinclen(sds s, size_t inc) { + unsigned char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: + { + unsigned char *fp = ((unsigned char*)s)-1; + unsigned char newlen = SDS_TYPE_5_LEN(flags)+(unsigned char)inc; + *fp = SDS_TYPE_5 | (newlen << SDS_TYPE_BITS); + } + break; + case SDS_TYPE_8: + SDS_HDR(8,s)->len += (uint8_t)inc; + break; + case SDS_TYPE_16: + SDS_HDR(16,s)->len += (uint16_t)inc; + break; + case SDS_TYPE_32: + SDS_HDR(32,s)->len += (uint32_t)inc; + break; + case SDS_TYPE_64: + SDS_HDR(64,s)->len += (uint64_t)inc; + break; + } +} + +/* sdsalloc() = sdsavail() + sdslen() */ +static inline size_t sdsalloc(const sds s) { + unsigned char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: + 
return SDS_TYPE_5_LEN(flags); + case SDS_TYPE_8: + return SDS_HDR(8,s)->alloc; + case SDS_TYPE_16: + return SDS_HDR(16,s)->alloc; + case SDS_TYPE_32: + return SDS_HDR(32,s)->alloc; + case SDS_TYPE_64: + return SDS_HDR(64,s)->alloc; + } + return 0; +} + +static inline void sdssetalloc(sds s, size_t newlen) { + unsigned char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: + /* Nothing to do, this type has no total allocation info. */ + break; + case SDS_TYPE_8: + SDS_HDR(8,s)->alloc = (uint8_t)newlen; + break; + case SDS_TYPE_16: + SDS_HDR(16,s)->alloc = (uint16_t)newlen; + break; + case SDS_TYPE_32: + SDS_HDR(32,s)->alloc = (uint32_t)newlen; + break; + case SDS_TYPE_64: + SDS_HDR(64,s)->alloc = (uint64_t)newlen; + break; + } +} + +sds sdsnewlen(const void *init, size_t initlen); +sds sdsnew(const char *init); +sds sdsempty(void); +sds sdsdup(const sds s); +void sdsfree(sds s); +sds sdsgrowzero(sds s, size_t len); +sds sdscatlen(sds s, const void *t, size_t len); +sds sdscat(sds s, const char *t); +sds sdscatsds(sds s, const sds t); +sds sdscpylen(sds s, const char *t, size_t len); +sds sdscpy(sds s, const char *t); + +sds sdscatvprintf(sds s, const char *fmt, va_list ap); +#ifdef __GNUC__ +sds sdscatprintf(sds s, const char *fmt, ...) + __attribute__((format(printf, 2, 3))); +#else +sds sdscatprintf(sds s, const char *fmt, ...); +#endif + +sds sdscatfmt(sds s, char const *fmt, ...); +sds sdstrim(sds s, const char *cset); +int sdsrange(sds s, ssize_t start, ssize_t end); +void sdsupdatelen(sds s); +void sdsclear(sds s); +int sdscmp(const sds s1, const sds s2); +sds *sdssplitlen(const char *s, int len, const char *sep, int seplen, int *count); +void sdsfreesplitres(sds *tokens, int count); +void sdstolower(sds s); +void sdstoupper(sds s); +sds sdsfromlonglong(long long value); +sds sdscatrepr(sds s, const char *p, size_t len); +sds *sdssplitargs(const char *line, int *argc); +sds sdsmapchars(sds s, const char *from, const char *to, size_t setlen); +sds sdsjoin(char **argv, int argc, char *sep); +sds sdsjoinsds(sds *argv, int argc, const char *sep, size_t seplen); + +/* Low level functions exposed to the user API */ +sds sdsMakeRoomFor(sds s, size_t addlen); +void sdsIncrLen(sds s, int incr); +sds sdsRemoveFreeSpace(sds s); +size_t sdsAllocSize(sds s); +void *sdsAllocPtr(sds s); + +/* Export the allocator used by SDS to the program using SDS. + * Sometimes the program SDS is linked to, may use a different set of + * allocators, but may want to allocate or free things that SDS will + * respectively free or allocate. */ +void *sds_malloc(size_t size); +void *sds_realloc(void *ptr, size_t size); +void sds_free(void *ptr); + +#ifdef REDIS_TEST +int sdsTest(int argc, char *argv[]); +#endif + +#endif diff --git a/ext/hiredis-1.0.2/include/hiredis/sdsalloc.h b/ext/hiredis-1.0.2/include/hiredis/sdsalloc.h new file mode 100644 index 000000000..5538dd94c --- /dev/null +++ b/ext/hiredis-1.0.2/include/hiredis/sdsalloc.h @@ -0,0 +1,44 @@ +/* SDSLib 2.0 -- A C dynamic strings library + * + * Copyright (c) 2006-2015, Salvatore Sanfilippo + * Copyright (c) 2015, Oran Agra + * Copyright (c) 2015, Redis Labs, Inc + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
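A few lines showing typical use of the sds functions declared above (illustrative only; the string contents are arbitrary). Note that calls which may grow the string return a new pointer, which must replace the old one.

sds s = sdsnew("GET ");
s = sdscat(s, "greeting");                  /* growing calls return the (possibly moved) string */
s = sdscatprintf(s, " attempt=%d", 2);
printf("%s (len=%zu, avail=%zu)\n", s, sdslen(s), sdsavail(s));
sdsfree(s);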
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* SDS allocator selection. + * + * This file is used in order to change the SDS allocator at compile time. + * Just define the following defines to what you want to use. Also add + * the include of your alternate allocator if needed (not needed in order + * to use the default libc allocator). */ + +#include "alloc.h" + +#define s_malloc hi_malloc +#define s_realloc hi_realloc +#define s_free hi_free diff --git a/ext/hiredis-1.0.2/include/hiredis/sockcompat.h b/ext/hiredis-1.0.2/include/hiredis/sockcompat.h new file mode 100644 index 000000000..85810e848 --- /dev/null +++ b/ext/hiredis-1.0.2/include/hiredis/sockcompat.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2019, Marcus Geelnard + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __SOCKCOMPAT_H +#define __SOCKCOMPAT_H + +#ifndef _WIN32 +/* For POSIX systems we use the standard BSD socket API. */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#else +/* For Windows we use winsock. */ +#undef _WIN32_WINNT +#define _WIN32_WINNT 0x0600 /* To get WSAPoll etc. */ +#include +#include +#include +#include + +#ifdef _MSC_VER +typedef long long ssize_t; +#endif + +/* Emulate the parts of the BSD socket API that we need (override the winsock signatures). */ +int win32_getaddrinfo(const char *node, const char *service, const struct addrinfo *hints, struct addrinfo **res); +const char *win32_gai_strerror(int errcode); +void win32_freeaddrinfo(struct addrinfo *res); +SOCKET win32_socket(int domain, int type, int protocol); +int win32_ioctl(SOCKET fd, unsigned long request, unsigned long *argp); +int win32_bind(SOCKET sockfd, const struct sockaddr *addr, socklen_t addrlen); +int win32_connect(SOCKET sockfd, const struct sockaddr *addr, socklen_t addrlen); +int win32_getsockopt(SOCKET sockfd, int level, int optname, void *optval, socklen_t *optlen); +int win32_setsockopt(SOCKET sockfd, int level, int optname, const void *optval, socklen_t optlen); +int win32_close(SOCKET fd); +ssize_t win32_recv(SOCKET sockfd, void *buf, size_t len, int flags); +ssize_t win32_send(SOCKET sockfd, const void *buf, size_t len, int flags); +typedef ULONG nfds_t; +int win32_poll(struct pollfd *fds, nfds_t nfds, int timeout); + +#ifndef REDIS_SOCKCOMPAT_IMPLEMENTATION +#define getaddrinfo(node, service, hints, res) win32_getaddrinfo(node, service, hints, res) +#undef gai_strerror +#define gai_strerror(errcode) win32_gai_strerror(errcode) +#define freeaddrinfo(res) win32_freeaddrinfo(res) +#define socket(domain, type, protocol) win32_socket(domain, type, protocol) +#define ioctl(fd, request, argp) win32_ioctl(fd, request, argp) +#define bind(sockfd, addr, addrlen) win32_bind(sockfd, addr, addrlen) +#define connect(sockfd, addr, addrlen) win32_connect(sockfd, addr, addrlen) +#define getsockopt(sockfd, level, optname, optval, optlen) win32_getsockopt(sockfd, level, optname, optval, optlen) +#define setsockopt(sockfd, level, optname, optval, optlen) win32_setsockopt(sockfd, level, optname, optval, optlen) +#define close(fd) win32_close(fd) +#define recv(sockfd, buf, len, flags) win32_recv(sockfd, buf, len, flags) +#define send(sockfd, buf, len, flags) win32_send(sockfd, buf, len, flags) +#define poll(fds, nfds, timeout) win32_poll(fds, nfds, timeout) +#endif /* REDIS_SOCKCOMPAT_IMPLEMENTATION */ +#endif /* _WIN32 */ + +#endif /* __SOCKCOMPAT_H */ diff --git a/ext/hiredis-1.0.2/include/hiredis/win32.h b/ext/hiredis-1.0.2/include/hiredis/win32.h new file mode 100644 index 000000000..04289c696 --- /dev/null +++ b/ext/hiredis-1.0.2/include/hiredis/win32.h @@ -0,0 +1,56 @@ +#ifndef _WIN32_HELPER_INCLUDE +#define _WIN32_HELPER_INCLUDE +#ifdef _MSC_VER + +#include /* for struct timeval */ + +#ifndef inline +#define inline __inline +#endif + +#ifndef strcasecmp +#define strcasecmp stricmp +#endif + +#ifndef strncasecmp +#define strncasecmp strnicmp +#endif + +#ifndef va_copy +#define va_copy(d,s) ((d) = (s)) +#endif + +#ifndef snprintf +#define snprintf c99_snprintf + +__inline int c99_vsnprintf(char* str, size_t size, const char* format, va_list ap) +{ + int count = -1; + + if (size != 0) + count = _vsnprintf_s(str, size, _TRUNCATE, format, ap); + if (count == -1) + count = _vscprintf(format, ap); + + return count; +} + +__inline int c99_snprintf(char* str, 
size_t size, const char* format, ...) +{ + int count; + va_list ap; + + va_start(ap, format); + count = c99_vsnprintf(str, size, format, ap); + va_end(ap); + + return count; +} +#endif +#endif /* _MSC_VER */ + +#ifdef _WIN32 +#define strerror_r(errno,buf,len) strerror_s(buf,len,errno) +#endif /* _WIN32 */ + +#endif /* _WIN32_HELPER_INCLUDE */ diff --git a/ext/hiredis-1.0.2/lib/ubuntu22.04/amd64/libhiredis.a b/ext/hiredis-1.0.2/lib/ubuntu22.04/amd64/libhiredis.a new file mode 100644 index 000000000..af1314f01 Binary files /dev/null and b/ext/hiredis-1.0.2/lib/ubuntu22.04/amd64/libhiredis.a differ diff --git a/ext/hiredis-1.0.2/lib/ubuntu22.04/arm64/libhiredis.a b/ext/hiredis-1.0.2/lib/ubuntu22.04/arm64/libhiredis.a new file mode 100644 index 000000000..081c92705 Binary files /dev/null and b/ext/hiredis-1.0.2/lib/ubuntu22.04/arm64/libhiredis.a differ diff --git a/ext/hiredis-1.0.2/net.c b/ext/hiredis-1.0.2/net.c new file mode 100644 index 000000000..c6b0e5d8e --- /dev/null +++ b/ext/hiredis-1.0.2/net.c @@ -0,0 +1,612 @@ +/* Extracted from anet.c to work properly with Hiredis error reporting. + * + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2014, Pieter Noordhuis + * Copyright (c) 2015, Matt Stancliff , + * Jan-Erik Rediger + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "fmacros.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "net.h" +#include "sds.h" +#include "sockcompat.h" +#include "win32.h" + +/* Defined in hiredis.c */ +void __redisSetError(redisContext *c, int type, const char *str); + +void redisNetClose(redisContext *c) { + if (c && c->fd != REDIS_INVALID_FD) { + close(c->fd); + c->fd = REDIS_INVALID_FD; + } +} + +ssize_t redisNetRead(redisContext *c, char *buf, size_t bufcap) { + ssize_t nread = recv(c->fd, buf, bufcap, 0); + if (nread == -1) { + if ((errno == EWOULDBLOCK && !(c->flags & REDIS_BLOCK)) || (errno == EINTR)) { + /* Try again later */ + return 0; + } else if(errno == ETIMEDOUT && (c->flags & REDIS_BLOCK)) { + /* especially in windows */ + __redisSetError(c, REDIS_ERR_TIMEOUT, "recv timeout"); + return -1; + } else { + __redisSetError(c, REDIS_ERR_IO, NULL); + return -1; + } + } else if (nread == 0) { + __redisSetError(c, REDIS_ERR_EOF, "Server closed the connection"); + return -1; + } else { + return nread; + } +} + +ssize_t redisNetWrite(redisContext *c) { + ssize_t nwritten = send(c->fd, c->obuf, sdslen(c->obuf), 0); + if (nwritten < 0) { + if ((errno == EWOULDBLOCK && !(c->flags & REDIS_BLOCK)) || (errno == EINTR)) { + /* Try again later */ + } else { + __redisSetError(c, REDIS_ERR_IO, NULL); + return -1; + } + } + return nwritten; +} + +static void __redisSetErrorFromErrno(redisContext *c, int type, const char *prefix) { + int errorno = errno; /* snprintf() may change errno */ + char buf[128] = { 0 }; + size_t len = 0; + + if (prefix != NULL) + len = snprintf(buf,sizeof(buf),"%s: ",prefix); + strerror_r(errorno, (char *)(buf + len), sizeof(buf) - len); + __redisSetError(c,type,buf); +} + +static int redisSetReuseAddr(redisContext *c) { + int on = 1; + if (setsockopt(c->fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) == -1) { + __redisSetErrorFromErrno(c,REDIS_ERR_IO,NULL); + redisNetClose(c); + return REDIS_ERR; + } + return REDIS_OK; +} + +static int redisCreateSocket(redisContext *c, int type) { + redisFD s; + if ((s = socket(type, SOCK_STREAM, 0)) == REDIS_INVALID_FD) { + __redisSetErrorFromErrno(c,REDIS_ERR_IO,NULL); + return REDIS_ERR; + } + c->fd = s; + if (type == AF_INET) { + if (redisSetReuseAddr(c) == REDIS_ERR) { + return REDIS_ERR; + } + } + return REDIS_OK; +} + +static int redisSetBlocking(redisContext *c, int blocking) { +#ifndef _WIN32 + int flags; + + /* Set the socket nonblocking. + * Note that fcntl(2) for F_GETFL and F_SETFL can't be + * interrupted by a signal. */ + if ((flags = fcntl(c->fd, F_GETFL)) == -1) { + __redisSetErrorFromErrno(c,REDIS_ERR_IO,"fcntl(F_GETFL)"); + redisNetClose(c); + return REDIS_ERR; + } + + if (blocking) + flags &= ~O_NONBLOCK; + else + flags |= O_NONBLOCK; + + if (fcntl(c->fd, F_SETFL, flags) == -1) { + __redisSetErrorFromErrno(c,REDIS_ERR_IO,"fcntl(F_SETFL)"); + redisNetClose(c); + return REDIS_ERR; + } +#else + u_long mode = blocking ? 
0 : 1; + if (ioctl(c->fd, FIONBIO, &mode) == -1) { + __redisSetErrorFromErrno(c, REDIS_ERR_IO, "ioctl(FIONBIO)"); + redisNetClose(c); + return REDIS_ERR; + } +#endif /* _WIN32 */ + return REDIS_OK; +} + +int redisKeepAlive(redisContext *c, int interval) { + int val = 1; + redisFD fd = c->fd; + + if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &val, sizeof(val)) == -1){ + __redisSetError(c,REDIS_ERR_OTHER,strerror(errno)); + return REDIS_ERR; + } + + val = interval; + +#if defined(__APPLE__) && defined(__MACH__) + if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &val, sizeof(val)) < 0) { + __redisSetError(c,REDIS_ERR_OTHER,strerror(errno)); + return REDIS_ERR; + } +#else +#if defined(__GLIBC__) && !defined(__FreeBSD_kernel__) + if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &val, sizeof(val)) < 0) { + __redisSetError(c,REDIS_ERR_OTHER,strerror(errno)); + return REDIS_ERR; + } + + val = interval/3; + if (val == 0) val = 1; + if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &val, sizeof(val)) < 0) { + __redisSetError(c,REDIS_ERR_OTHER,strerror(errno)); + return REDIS_ERR; + } + + val = 3; + if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &val, sizeof(val)) < 0) { + __redisSetError(c,REDIS_ERR_OTHER,strerror(errno)); + return REDIS_ERR; + } +#endif +#endif + + return REDIS_OK; +} + +int redisSetTcpNoDelay(redisContext *c) { + int yes = 1; + if (setsockopt(c->fd, IPPROTO_TCP, TCP_NODELAY, &yes, sizeof(yes)) == -1) { + __redisSetErrorFromErrno(c,REDIS_ERR_IO,"setsockopt(TCP_NODELAY)"); + redisNetClose(c); + return REDIS_ERR; + } + return REDIS_OK; +} + +#define __MAX_MSEC (((LONG_MAX) - 999) / 1000) + +static int redisContextTimeoutMsec(redisContext *c, long *result) +{ + const struct timeval *timeout = c->connect_timeout; + long msec = -1; + + /* Only use timeout when not NULL. 
*/ + if (timeout != NULL) { + if (timeout->tv_usec > 1000000 || timeout->tv_sec > __MAX_MSEC) { + *result = msec; + return REDIS_ERR; + } + + msec = (timeout->tv_sec * 1000) + ((timeout->tv_usec + 999) / 1000); + + if (msec < 0 || msec > INT_MAX) { + msec = INT_MAX; + } + } + + *result = msec; + return REDIS_OK; +} + +static int redisContextWaitReady(redisContext *c, long msec) { + struct pollfd wfd[1]; + + wfd[0].fd = c->fd; + wfd[0].events = POLLOUT; + + if (errno == EINPROGRESS) { + int res; + + if ((res = poll(wfd, 1, msec)) == -1) { + __redisSetErrorFromErrno(c, REDIS_ERR_IO, "poll(2)"); + redisNetClose(c); + return REDIS_ERR; + } else if (res == 0) { + errno = ETIMEDOUT; + __redisSetErrorFromErrno(c,REDIS_ERR_IO,NULL); + redisNetClose(c); + return REDIS_ERR; + } + + if (redisCheckConnectDone(c, &res) != REDIS_OK || res == 0) { + redisCheckSocketError(c); + return REDIS_ERR; + } + + return REDIS_OK; + } + + __redisSetErrorFromErrno(c,REDIS_ERR_IO,NULL); + redisNetClose(c); + return REDIS_ERR; +} + +int redisCheckConnectDone(redisContext *c, int *completed) { + int rc = connect(c->fd, (const struct sockaddr *)c->saddr, c->addrlen); + if (rc == 0) { + *completed = 1; + return REDIS_OK; + } + switch (errno) { + case EISCONN: + *completed = 1; + return REDIS_OK; + case EALREADY: + case EINPROGRESS: + case EWOULDBLOCK: + *completed = 0; + return REDIS_OK; + default: + return REDIS_ERR; + } +} + +int redisCheckSocketError(redisContext *c) { + int err = 0, errno_saved = errno; + socklen_t errlen = sizeof(err); + + if (getsockopt(c->fd, SOL_SOCKET, SO_ERROR, &err, &errlen) == -1) { + __redisSetErrorFromErrno(c,REDIS_ERR_IO,"getsockopt(SO_ERROR)"); + return REDIS_ERR; + } + + if (err == 0) { + err = errno_saved; + } + + if (err) { + errno = err; + __redisSetErrorFromErrno(c,REDIS_ERR_IO,NULL); + return REDIS_ERR; + } + + return REDIS_OK; +} + +int redisContextSetTimeout(redisContext *c, const struct timeval tv) { + const void *to_ptr = &tv; + size_t to_sz = sizeof(tv); + + if (setsockopt(c->fd,SOL_SOCKET,SO_RCVTIMEO,to_ptr,to_sz) == -1) { + __redisSetErrorFromErrno(c,REDIS_ERR_IO,"setsockopt(SO_RCVTIMEO)"); + return REDIS_ERR; + } + if (setsockopt(c->fd,SOL_SOCKET,SO_SNDTIMEO,to_ptr,to_sz) == -1) { + __redisSetErrorFromErrno(c,REDIS_ERR_IO,"setsockopt(SO_SNDTIMEO)"); + return REDIS_ERR; + } + return REDIS_OK; +} + +int redisContextUpdateConnectTimeout(redisContext *c, const struct timeval *timeout) { + /* Same timeval struct, short circuit */ + if (c->connect_timeout == timeout) + return REDIS_OK; + + /* Allocate context timeval if we need to */ + if (c->connect_timeout == NULL) { + c->connect_timeout = hi_malloc(sizeof(*c->connect_timeout)); + if (c->connect_timeout == NULL) + return REDIS_ERR; + } + + memcpy(c->connect_timeout, timeout, sizeof(*c->connect_timeout)); + return REDIS_OK; +} + +int redisContextUpdateCommandTimeout(redisContext *c, const struct timeval *timeout) { + /* Same timeval struct, short circuit */ + if (c->command_timeout == timeout) + return REDIS_OK; + + /* Allocate context timeval if we need to */ + if (c->command_timeout == NULL) { + c->command_timeout = hi_malloc(sizeof(*c->command_timeout)); + if (c->command_timeout == NULL) + return REDIS_ERR; + } + + memcpy(c->command_timeout, timeout, sizeof(*c->command_timeout)); + return REDIS_OK; +} + +static int _redisContextConnectTcp(redisContext *c, const char *addr, int port, + const struct timeval *timeout, + const char *source_addr) { + redisFD s; + int rv, n; + char _port[6]; /* strlen("65535"); */ + struct addrinfo 
hints, *servinfo, *bservinfo, *p, *b; + int blocking = (c->flags & REDIS_BLOCK); + int reuseaddr = (c->flags & REDIS_REUSEADDR); + int reuses = 0; + long timeout_msec = -1; + + servinfo = NULL; + c->connection_type = REDIS_CONN_TCP; + c->tcp.port = port; + + /* We need to take possession of the passed parameters + * to make them reusable for a reconnect. + * We also carefully check we don't free data we already own, + * as in the case of the reconnect method. + * + * This is a bit ugly, but atleast it works and doesn't leak memory. + **/ + if (c->tcp.host != addr) { + hi_free(c->tcp.host); + + c->tcp.host = hi_strdup(addr); + if (c->tcp.host == NULL) + goto oom; + } + + if (timeout) { + if (redisContextUpdateConnectTimeout(c, timeout) == REDIS_ERR) + goto oom; + } else { + hi_free(c->connect_timeout); + c->connect_timeout = NULL; + } + + if (redisContextTimeoutMsec(c, &timeout_msec) != REDIS_OK) { + __redisSetError(c, REDIS_ERR_IO, "Invalid timeout specified"); + goto error; + } + + if (source_addr == NULL) { + hi_free(c->tcp.source_addr); + c->tcp.source_addr = NULL; + } else if (c->tcp.source_addr != source_addr) { + hi_free(c->tcp.source_addr); + c->tcp.source_addr = hi_strdup(source_addr); + } + + snprintf(_port, 6, "%d", port); + memset(&hints,0,sizeof(hints)); + hints.ai_family = AF_INET; + hints.ai_socktype = SOCK_STREAM; + + /* Try with IPv6 if no IPv4 address was found. We do it in this order since + * in a Redis client you can't afford to test if you have IPv6 connectivity + * as this would add latency to every connect. Otherwise a more sensible + * route could be: Use IPv6 if both addresses are available and there is IPv6 + * connectivity. */ + if ((rv = getaddrinfo(c->tcp.host,_port,&hints,&servinfo)) != 0) { + hints.ai_family = AF_INET6; + if ((rv = getaddrinfo(addr,_port,&hints,&servinfo)) != 0) { + __redisSetError(c,REDIS_ERR_OTHER,gai_strerror(rv)); + return REDIS_ERR; + } + } + for (p = servinfo; p != NULL; p = p->ai_next) { +addrretry: + if ((s = socket(p->ai_family,p->ai_socktype,p->ai_protocol)) == REDIS_INVALID_FD) + continue; + + c->fd = s; + if (redisSetBlocking(c,0) != REDIS_OK) + goto error; + if (c->tcp.source_addr) { + int bound = 0; + /* Using getaddrinfo saves us from self-determining IPv4 vs IPv6 */ + if ((rv = getaddrinfo(c->tcp.source_addr, NULL, &hints, &bservinfo)) != 0) { + char buf[128]; + snprintf(buf,sizeof(buf),"Can't get addr: %s",gai_strerror(rv)); + __redisSetError(c,REDIS_ERR_OTHER,buf); + goto error; + } + + if (reuseaddr) { + n = 1; + if (setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char*) &n, + sizeof(n)) < 0) { + freeaddrinfo(bservinfo); + goto error; + } + } + + for (b = bservinfo; b != NULL; b = b->ai_next) { + if (bind(s,b->ai_addr,b->ai_addrlen) != -1) { + bound = 1; + break; + } + } + freeaddrinfo(bservinfo); + if (!bound) { + char buf[128]; + snprintf(buf,sizeof(buf),"Can't bind socket: %s",strerror(errno)); + __redisSetError(c,REDIS_ERR_OTHER,buf); + goto error; + } + } + + /* For repeat connection */ + hi_free(c->saddr); + c->saddr = hi_malloc(p->ai_addrlen); + if (c->saddr == NULL) + goto oom; + + memcpy(c->saddr, p->ai_addr, p->ai_addrlen); + c->addrlen = p->ai_addrlen; + + if (connect(s,p->ai_addr,p->ai_addrlen) == -1) { + if (errno == EHOSTUNREACH) { + redisNetClose(c); + continue; + } else if (errno == EINPROGRESS) { + if (blocking) { + goto wait_for_ready; + } + /* This is ok. 
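+ * EINPROGRESS just means the non-blocking connect is still in flight. For a
+ * blocking context the wait below boils down to this rough sequence, sketched
+ * here for orientation only, with error handling elided:
+ *
+ *     redisSetBlocking(c, 0);          set O_NONBLOCK before connect()
+ *     connect(s, ...);                 returns -1 with errno == EINPROGRESS
+ *     poll(POLLOUT, timeout_msec);     redisContextWaitReady()
+ *     connect()/SO_ERROR re-check;     redisCheckConnectDone(), redisCheckSocketError()
+ *     redisSetBlocking(c, 1);          restore blocking mode afterwards
+ *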
+ * Note that even when it's in blocking mode, we unset blocking + * for `connect()` + */ + } else if (errno == EADDRNOTAVAIL && reuseaddr) { + if (++reuses >= REDIS_CONNECT_RETRIES) { + goto error; + } else { + redisNetClose(c); + goto addrretry; + } + } else { + wait_for_ready: + if (redisContextWaitReady(c,timeout_msec) != REDIS_OK) + goto error; + if (redisSetTcpNoDelay(c) != REDIS_OK) + goto error; + } + } + if (blocking && redisSetBlocking(c,1) != REDIS_OK) + goto error; + + c->flags |= REDIS_CONNECTED; + rv = REDIS_OK; + goto end; + } + if (p == NULL) { + char buf[128]; + snprintf(buf,sizeof(buf),"Can't create socket: %s",strerror(errno)); + __redisSetError(c,REDIS_ERR_OTHER,buf); + goto error; + } + +oom: + __redisSetError(c, REDIS_ERR_OOM, "Out of memory"); +error: + rv = REDIS_ERR; +end: + if(servinfo) { + freeaddrinfo(servinfo); + } + + return rv; // Need to return REDIS_OK if alright +} + +int redisContextConnectTcp(redisContext *c, const char *addr, int port, + const struct timeval *timeout) { + return _redisContextConnectTcp(c, addr, port, timeout, NULL); +} + +int redisContextConnectBindTcp(redisContext *c, const char *addr, int port, + const struct timeval *timeout, + const char *source_addr) { + return _redisContextConnectTcp(c, addr, port, timeout, source_addr); +} + +int redisContextConnectUnix(redisContext *c, const char *path, const struct timeval *timeout) { +#ifndef _WIN32 + int blocking = (c->flags & REDIS_BLOCK); + struct sockaddr_un *sa; + long timeout_msec = -1; + + if (redisCreateSocket(c,AF_UNIX) < 0) + return REDIS_ERR; + if (redisSetBlocking(c,0) != REDIS_OK) + return REDIS_ERR; + + c->connection_type = REDIS_CONN_UNIX; + if (c->unix_sock.path != path) { + hi_free(c->unix_sock.path); + + c->unix_sock.path = hi_strdup(path); + if (c->unix_sock.path == NULL) + goto oom; + } + + if (timeout) { + if (redisContextUpdateConnectTimeout(c, timeout) == REDIS_ERR) + goto oom; + } else { + hi_free(c->connect_timeout); + c->connect_timeout = NULL; + } + + if (redisContextTimeoutMsec(c,&timeout_msec) != REDIS_OK) + return REDIS_ERR; + + /* Don't leak sockaddr if we're reconnecting */ + if (c->saddr) hi_free(c->saddr); + + sa = (struct sockaddr_un*)(c->saddr = hi_malloc(sizeof(struct sockaddr_un))); + if (sa == NULL) + goto oom; + + c->addrlen = sizeof(struct sockaddr_un); + sa->sun_family = AF_UNIX; + strncpy(sa->sun_path, path, sizeof(sa->sun_path) - 1); + if (connect(c->fd, (struct sockaddr*)sa, sizeof(*sa)) == -1) { + if (errno == EINPROGRESS && !blocking) { + /* This is ok. */ + } else { + if (redisContextWaitReady(c,timeout_msec) != REDIS_OK) + return REDIS_ERR; + } + } + + /* Reset socket to be blocking after connect(2). */ + if (blocking && redisSetBlocking(c,1) != REDIS_OK) + return REDIS_ERR; + + c->flags |= REDIS_CONNECTED; + return REDIS_OK; +#else + /* We currently do not support Unix sockets for Windows. */ + /* TODO(m): https://devblogs.microsoft.com/commandline/af_unix-comes-to-windows/ */ + errno = EPROTONOSUPPORT; + return REDIS_ERR; +#endif /* _WIN32 */ +oom: + __redisSetError(c, REDIS_ERR_OOM, "Out of memory"); + return REDIS_ERR; +} diff --git a/ext/hiredis-1.0.2/net.h b/ext/hiredis-1.0.2/net.h new file mode 100644 index 000000000..9f43283a5 --- /dev/null +++ b/ext/hiredis-1.0.2/net.h @@ -0,0 +1,56 @@ +/* Extracted from anet.c to work properly with Hiredis error reporting. 
+ * + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2014, Pieter Noordhuis + * Copyright (c) 2015, Matt Stancliff , + * Jan-Erik Rediger + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __NET_H +#define __NET_H + +#include "hiredis.h" + +void redisNetClose(redisContext *c); +ssize_t redisNetRead(redisContext *c, char *buf, size_t bufcap); +ssize_t redisNetWrite(redisContext *c); + +int redisCheckSocketError(redisContext *c); +int redisContextSetTimeout(redisContext *c, const struct timeval tv); +int redisContextConnectTcp(redisContext *c, const char *addr, int port, const struct timeval *timeout); +int redisContextConnectBindTcp(redisContext *c, const char *addr, int port, + const struct timeval *timeout, + const char *source_addr); +int redisContextConnectUnix(redisContext *c, const char *path, const struct timeval *timeout); +int redisKeepAlive(redisContext *c, int interval); +int redisCheckConnectDone(redisContext *c, int *completed); + +int redisSetTcpNoDelay(redisContext *c); + +#endif diff --git a/ext/hiredis-1.0.2/read.c b/ext/hiredis-1.0.2/read.c new file mode 100644 index 000000000..09524692b --- /dev/null +++ b/ext/hiredis-1.0.2/read.c @@ -0,0 +1,739 @@ +/* + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2011, Pieter Noordhuis + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "fmacros.h" +#include +#include +#ifndef _MSC_VER +#include +#include +#endif +#include +#include +#include +#include +#include + +#include "alloc.h" +#include "read.h" +#include "sds.h" +#include "win32.h" + +/* Initial size of our nested reply stack and how much we grow it when needd */ +#define REDIS_READER_STACK_SIZE 9 + +static void __redisReaderSetError(redisReader *r, int type, const char *str) { + size_t len; + + if (r->reply != NULL && r->fn && r->fn->freeObject) { + r->fn->freeObject(r->reply); + r->reply = NULL; + } + + /* Clear input buffer on errors. */ + sdsfree(r->buf); + r->buf = NULL; + r->pos = r->len = 0; + + /* Reset task stack. */ + r->ridx = -1; + + /* Set error. */ + r->err = type; + len = strlen(str); + len = len < (sizeof(r->errstr)-1) ? len : (sizeof(r->errstr)-1); + memcpy(r->errstr,str,len); + r->errstr[len] = '\0'; +} + +static size_t chrtos(char *buf, size_t size, char byte) { + size_t len = 0; + + switch(byte) { + case '\\': + case '"': + len = snprintf(buf,size,"\"\\%c\"",byte); + break; + case '\n': len = snprintf(buf,size,"\"\\n\""); break; + case '\r': len = snprintf(buf,size,"\"\\r\""); break; + case '\t': len = snprintf(buf,size,"\"\\t\""); break; + case '\a': len = snprintf(buf,size,"\"\\a\""); break; + case '\b': len = snprintf(buf,size,"\"\\b\""); break; + default: + if (isprint(byte)) + len = snprintf(buf,size,"\"%c\"",byte); + else + len = snprintf(buf,size,"\"\\x%02x\"",(unsigned char)byte); + break; + } + + return len; +} + +static void __redisReaderSetErrorProtocolByte(redisReader *r, char byte) { + char cbuf[8], sbuf[128]; + + chrtos(cbuf,sizeof(cbuf),byte); + snprintf(sbuf,sizeof(sbuf), + "Protocol error, got %s as reply type byte", cbuf); + __redisReaderSetError(r,REDIS_ERR_PROTOCOL,sbuf); +} + +static void __redisReaderSetErrorOOM(redisReader *r) { + __redisReaderSetError(r,REDIS_ERR_OOM,"Out of memory"); +} + +static char *readBytes(redisReader *r, unsigned int bytes) { + char *p; + if (r->len-r->pos >= bytes) { + p = r->buf+r->pos; + r->pos += bytes; + return p; + } + return NULL; +} + +/* Find pointer to \r\n. */ +static char *seekNewline(char *s, size_t len) { + int pos = 0; + int _len = len-1; + + /* Position should be < len-1 because the character at "pos" should be + * followed by a \n. Note that strchr cannot be used because it doesn't + * allow to search a limited length and the buffer that is being searched + * might not have a trailing NULL character. */ + while (pos < _len) { + while(pos < _len && s[pos] != '\r') pos++; + if (pos==_len) { + /* Not found. */ + return NULL; + } else { + if (s[pos+1] == '\n') { + /* Found. */ + return s+pos; + } else { + /* Continue searching. 
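+ * A stray '\r' that is not followed by '\n' (e.g. the first '\r' in
+ * "foo\rbar\r\n") is skipped here and the scan resumes one byte further on.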
*/ + pos++; + } + } + } + return NULL; +} + +/* Convert a string into a long long. Returns REDIS_OK if the string could be + * parsed into a (non-overflowing) long long, REDIS_ERR otherwise. The value + * will be set to the parsed value when appropriate. + * + * Note that this function demands that the string strictly represents + * a long long: no spaces or other characters before or after the string + * representing the number are accepted, nor zeroes at the start if not + * for the string "0" representing the zero number. + * + * Because of its strictness, it is safe to use this function to check if + * you can convert a string into a long long, and obtain back the string + * from the number without any loss in the string representation. */ +static int string2ll(const char *s, size_t slen, long long *value) { + const char *p = s; + size_t plen = 0; + int negative = 0; + unsigned long long v; + + if (plen == slen) + return REDIS_ERR; + + /* Special case: first and only digit is 0. */ + if (slen == 1 && p[0] == '0') { + if (value != NULL) *value = 0; + return REDIS_OK; + } + + if (p[0] == '-') { + negative = 1; + p++; plen++; + + /* Abort on only a negative sign. */ + if (plen == slen) + return REDIS_ERR; + } + + /* First digit should be 1-9, otherwise the string should just be 0. */ + if (p[0] >= '1' && p[0] <= '9') { + v = p[0]-'0'; + p++; plen++; + } else if (p[0] == '0' && slen == 1) { + *value = 0; + return REDIS_OK; + } else { + return REDIS_ERR; + } + + while (plen < slen && p[0] >= '0' && p[0] <= '9') { + if (v > (ULLONG_MAX / 10)) /* Overflow. */ + return REDIS_ERR; + v *= 10; + + if (v > (ULLONG_MAX - (p[0]-'0'))) /* Overflow. */ + return REDIS_ERR; + v += p[0]-'0'; + + p++; plen++; + } + + /* Return if not all bytes were used. */ + if (plen < slen) + return REDIS_ERR; + + if (negative) { + if (v > ((unsigned long long)(-(LLONG_MIN+1))+1)) /* Overflow. */ + return REDIS_ERR; + if (value != NULL) *value = -v; + } else { + if (v > LLONG_MAX) /* Overflow. */ + return REDIS_ERR; + if (value != NULL) *value = v; + } + return REDIS_OK; +} + +static char *readLine(redisReader *r, int *_len) { + char *p, *s; + int len; + + p = r->buf+r->pos; + s = seekNewline(p,(r->len-r->pos)); + if (s != NULL) { + len = s-(r->buf+r->pos); + r->pos += len+2; /* skip \r\n */ + if (_len) *_len = len; + return p; + } + return NULL; +} + +static void moveToNextTask(redisReader *r) { + redisReadTask *cur, *prv; + while (r->ridx >= 0) { + /* Return a.s.a.p. when the stack is now empty. 
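+ * As a rough illustration: while parsing the array reply "*2\r\n:1\r\n:2\r\n"
+ * the stack holds the array task at ridx 0 and its element task at ridx 1.
+ * Finishing element idx 0 only bumps idx to 1 and returns; finishing idx 1
+ * (== elements-1) pops back to the parent, and the outermost pop is the
+ * ridx == 0 case handled just below.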
*/ + if (r->ridx == 0) { + r->ridx--; + return; + } + + cur = r->task[r->ridx]; + prv = r->task[r->ridx-1]; + assert(prv->type == REDIS_REPLY_ARRAY || + prv->type == REDIS_REPLY_MAP || + prv->type == REDIS_REPLY_SET || + prv->type == REDIS_REPLY_PUSH); + if (cur->idx == prv->elements-1) { + r->ridx--; + } else { + /* Reset the type because the next item can be anything */ + assert(cur->idx < prv->elements); + cur->type = -1; + cur->elements = -1; + cur->idx++; + return; + } + } +} + +static int processLineItem(redisReader *r) { + redisReadTask *cur = r->task[r->ridx]; + void *obj; + char *p; + int len; + + if ((p = readLine(r,&len)) != NULL) { + if (cur->type == REDIS_REPLY_INTEGER) { + if (r->fn && r->fn->createInteger) { + long long v; + if (string2ll(p, len, &v) == REDIS_ERR) { + __redisReaderSetError(r,REDIS_ERR_PROTOCOL, + "Bad integer value"); + return REDIS_ERR; + } + obj = r->fn->createInteger(cur,v); + } else { + obj = (void*)REDIS_REPLY_INTEGER; + } + } else if (cur->type == REDIS_REPLY_DOUBLE) { + if (r->fn && r->fn->createDouble) { + char buf[326], *eptr; + double d; + + if ((size_t)len >= sizeof(buf)) { + __redisReaderSetError(r,REDIS_ERR_PROTOCOL, + "Double value is too large"); + return REDIS_ERR; + } + + memcpy(buf,p,len); + buf[len] = '\0'; + + if (strcasecmp(buf,",inf") == 0) { + d = INFINITY; /* Positive infinite. */ + } else if (strcasecmp(buf,",-inf") == 0) { + d = -INFINITY; /* Negative infinite. */ + } else { + d = strtod((char*)buf,&eptr); + if (buf[0] == '\0' || eptr[0] != '\0' || isnan(d)) { + __redisReaderSetError(r,REDIS_ERR_PROTOCOL, + "Bad double value"); + return REDIS_ERR; + } + } + obj = r->fn->createDouble(cur,d,buf,len); + } else { + obj = (void*)REDIS_REPLY_DOUBLE; + } + } else if (cur->type == REDIS_REPLY_NIL) { + if (r->fn && r->fn->createNil) + obj = r->fn->createNil(cur); + else + obj = (void*)REDIS_REPLY_NIL; + } else if (cur->type == REDIS_REPLY_BOOL) { + int bval = p[0] == 't' || p[0] == 'T'; + if (r->fn && r->fn->createBool) + obj = r->fn->createBool(cur,bval); + else + obj = (void*)REDIS_REPLY_BOOL; + } else { + /* Type will be error or status. */ + if (r->fn && r->fn->createString) + obj = r->fn->createString(cur,p,len); + else + obj = (void*)(size_t)(cur->type); + } + + if (obj == NULL) { + __redisReaderSetErrorOOM(r); + return REDIS_ERR; + } + + /* Set reply if this is the root object. */ + if (r->ridx == 0) r->reply = obj; + moveToNextTask(r); + return REDIS_OK; + } + + return REDIS_ERR; +} + +static int processBulkItem(redisReader *r) { + redisReadTask *cur = r->task[r->ridx]; + void *obj = NULL; + char *p, *s; + long long len; + unsigned long bytelen; + int success = 0; + + p = r->buf+r->pos; + s = seekNewline(p,r->len-r->pos); + if (s != NULL) { + p = r->buf+r->pos; + bytelen = s-(r->buf+r->pos)+2; /* include \r\n */ + + if (string2ll(p, bytelen - 2, &len) == REDIS_ERR) { + __redisReaderSetError(r,REDIS_ERR_PROTOCOL, + "Bad bulk string length"); + return REDIS_ERR; + } + + if (len < -1 || (LLONG_MAX > SIZE_MAX && len > (long long)SIZE_MAX)) { + __redisReaderSetError(r,REDIS_ERR_PROTOCOL, + "Bulk string length out of range"); + return REDIS_ERR; + } + + if (len == -1) { + /* The nil object can always be created. */ + if (r->fn && r->fn->createNil) + obj = r->fn->createNil(cur); + else + obj = (void*)REDIS_REPLY_NIL; + success = 1; + } else { + /* Only continue when the buffer contains the entire bulk item. 
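+ * As an illustration: for the bulk reply "$5\r\nhello\r\n" (the '$' type byte
+ * was already consumed by processItem()), len parses as 5 and bytelen grows
+ * to cover "5\r\n" plus the 7 bytes of "hello\r\n". If the buffer so far only
+ * holds "$5\r\nhel", the r->pos+bytelen <= r->len check fails, no error is
+ * set, and redisReaderGetReply() simply reports that no reply is ready yet.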
*/ + bytelen += len+2; /* include \r\n */ + if (r->pos+bytelen <= r->len) { + if ((cur->type == REDIS_REPLY_VERB && len < 4) || + (cur->type == REDIS_REPLY_VERB && s[5] != ':')) + { + __redisReaderSetError(r,REDIS_ERR_PROTOCOL, + "Verbatim string 4 bytes of content type are " + "missing or incorrectly encoded."); + return REDIS_ERR; + } + if (r->fn && r->fn->createString) + obj = r->fn->createString(cur,s+2,len); + else + obj = (void*)(long)cur->type; + success = 1; + } + } + + /* Proceed when obj was created. */ + if (success) { + if (obj == NULL) { + __redisReaderSetErrorOOM(r); + return REDIS_ERR; + } + + r->pos += bytelen; + + /* Set reply if this is the root object. */ + if (r->ridx == 0) r->reply = obj; + moveToNextTask(r); + return REDIS_OK; + } + } + + return REDIS_ERR; +} + +static int redisReaderGrow(redisReader *r) { + redisReadTask **aux; + int newlen; + + /* Grow our stack size */ + newlen = r->tasks + REDIS_READER_STACK_SIZE; + aux = hi_realloc(r->task, sizeof(*r->task) * newlen); + if (aux == NULL) + goto oom; + + r->task = aux; + + /* Allocate new tasks */ + for (; r->tasks < newlen; r->tasks++) { + r->task[r->tasks] = hi_calloc(1, sizeof(**r->task)); + if (r->task[r->tasks] == NULL) + goto oom; + } + + return REDIS_OK; +oom: + __redisReaderSetErrorOOM(r); + return REDIS_ERR; +} + +/* Process the array, map and set types. */ +static int processAggregateItem(redisReader *r) { + redisReadTask *cur = r->task[r->ridx]; + void *obj; + char *p; + long long elements; + int root = 0, len; + + /* Set error for nested multi bulks with depth > 7 */ + if (r->ridx == r->tasks - 1) { + if (redisReaderGrow(r) == REDIS_ERR) + return REDIS_ERR; + } + + if ((p = readLine(r,&len)) != NULL) { + if (string2ll(p, len, &elements) == REDIS_ERR) { + __redisReaderSetError(r,REDIS_ERR_PROTOCOL, + "Bad multi-bulk length"); + return REDIS_ERR; + } + + root = (r->ridx == 0); + + if (elements < -1 || (LLONG_MAX > SIZE_MAX && elements > SIZE_MAX) || + (r->maxelements > 0 && elements > r->maxelements)) + { + __redisReaderSetError(r,REDIS_ERR_PROTOCOL, + "Multi-bulk length out of range"); + return REDIS_ERR; + } + + if (elements == -1) { + if (r->fn && r->fn->createNil) + obj = r->fn->createNil(cur); + else + obj = (void*)REDIS_REPLY_NIL; + + if (obj == NULL) { + __redisReaderSetErrorOOM(r); + return REDIS_ERR; + } + + moveToNextTask(r); + } else { + if (cur->type == REDIS_REPLY_MAP) elements *= 2; + + if (r->fn && r->fn->createArray) + obj = r->fn->createArray(cur,elements); + else + obj = (void*)(long)cur->type; + + if (obj == NULL) { + __redisReaderSetErrorOOM(r); + return REDIS_ERR; + } + + /* Modify task stack when there are more than 0 elements. */ + if (elements > 0) { + cur->elements = elements; + cur->obj = obj; + r->ridx++; + r->task[r->ridx]->type = -1; + r->task[r->ridx]->elements = -1; + r->task[r->ridx]->idx = 0; + r->task[r->ridx]->obj = NULL; + r->task[r->ridx]->parent = cur; + r->task[r->ridx]->privdata = r->privdata; + } else { + moveToNextTask(r); + } + } + + /* Set reply if this is the root object. 
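+ * "root" was latched before any child task was pushed above, because r->ridx
+ * may have been incremented by now. A map reply such as "%2\r\n..." has, by
+ * this point, also had its element count doubled to 4, since RESP3 maps
+ * stream their key/value pairs as a flat alternating sequence.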
*/ + if (root) r->reply = obj; + return REDIS_OK; + } + + return REDIS_ERR; +} + +static int processItem(redisReader *r) { + redisReadTask *cur = r->task[r->ridx]; + char *p; + + /* check if we need to read type */ + if (cur->type < 0) { + if ((p = readBytes(r,1)) != NULL) { + switch (p[0]) { + case '-': + cur->type = REDIS_REPLY_ERROR; + break; + case '+': + cur->type = REDIS_REPLY_STATUS; + break; + case ':': + cur->type = REDIS_REPLY_INTEGER; + break; + case ',': + cur->type = REDIS_REPLY_DOUBLE; + break; + case '_': + cur->type = REDIS_REPLY_NIL; + break; + case '$': + cur->type = REDIS_REPLY_STRING; + break; + case '*': + cur->type = REDIS_REPLY_ARRAY; + break; + case '%': + cur->type = REDIS_REPLY_MAP; + break; + case '~': + cur->type = REDIS_REPLY_SET; + break; + case '#': + cur->type = REDIS_REPLY_BOOL; + break; + case '=': + cur->type = REDIS_REPLY_VERB; + break; + case '>': + cur->type = REDIS_REPLY_PUSH; + break; + default: + __redisReaderSetErrorProtocolByte(r,*p); + return REDIS_ERR; + } + } else { + /* could not consume 1 byte */ + return REDIS_ERR; + } + } + + /* process typed item */ + switch(cur->type) { + case REDIS_REPLY_ERROR: + case REDIS_REPLY_STATUS: + case REDIS_REPLY_INTEGER: + case REDIS_REPLY_DOUBLE: + case REDIS_REPLY_NIL: + case REDIS_REPLY_BOOL: + return processLineItem(r); + case REDIS_REPLY_STRING: + case REDIS_REPLY_VERB: + return processBulkItem(r); + case REDIS_REPLY_ARRAY: + case REDIS_REPLY_MAP: + case REDIS_REPLY_SET: + case REDIS_REPLY_PUSH: + return processAggregateItem(r); + default: + assert(NULL); + return REDIS_ERR; /* Avoid warning. */ + } +} + +redisReader *redisReaderCreateWithFunctions(redisReplyObjectFunctions *fn) { + redisReader *r; + + r = hi_calloc(1,sizeof(redisReader)); + if (r == NULL) + return NULL; + + r->buf = sdsempty(); + if (r->buf == NULL) + goto oom; + + r->task = hi_calloc(REDIS_READER_STACK_SIZE, sizeof(*r->task)); + if (r->task == NULL) + goto oom; + + for (; r->tasks < REDIS_READER_STACK_SIZE; r->tasks++) { + r->task[r->tasks] = hi_calloc(1, sizeof(**r->task)); + if (r->task[r->tasks] == NULL) + goto oom; + } + + r->fn = fn; + r->maxbuf = REDIS_READER_MAX_BUF; + r->maxelements = REDIS_READER_MAX_ARRAY_ELEMENTS; + r->ridx = -1; + + return r; +oom: + redisReaderFree(r); + return NULL; +} + +void redisReaderFree(redisReader *r) { + if (r == NULL) + return; + + if (r->reply != NULL && r->fn && r->fn->freeObject) + r->fn->freeObject(r->reply); + + if (r->task) { + /* We know r->task[i] is allocated if i < r->tasks */ + for (int i = 0; i < r->tasks; i++) { + hi_free(r->task[i]); + } + + hi_free(r->task); + } + + sdsfree(r->buf); + hi_free(r); +} + +int redisReaderFeed(redisReader *r, const char *buf, size_t len) { + sds newbuf; + + /* Return early when this reader is in an erroneous state. */ + if (r->err) + return REDIS_ERR; + + /* Copy the provided buffer. */ + if (buf != NULL && len >= 1) { + /* Destroy internal buffer when it is empty and is quite large. */ + if (r->len == 0 && r->maxbuf != 0 && sdsavail(r->buf) > r->maxbuf) { + sdsfree(r->buf); + r->buf = sdsempty(); + if (r->buf == 0) goto oom; + + r->pos = 0; + } + + newbuf = sdscatlen(r->buf,buf,len); + if (newbuf == NULL) goto oom; + + r->buf = newbuf; + r->len = sdslen(r->buf); + } + + return REDIS_OK; +oom: + __redisReaderSetErrorOOM(r); + return REDIS_ERR; +} + +int redisReaderGetReply(redisReader *r, void **reply) { + /* Default target pointer to NULL. */ + if (reply != NULL) + *reply = NULL; + + /* Return early when this reader is in an erroneous state. 
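+ * A minimal sketch of the feed/parse cycle this function completes ("fd",
+ * "buf" and the callback table "myFns" are assumed names, error handling
+ * elided):
+ *
+ *     redisReader *r = redisReaderCreateWithFunctions(&myFns);
+ *     ssize_t n = read(fd, buf, sizeof(buf));
+ *     void *reply = NULL;
+ *     if (n > 0 && redisReaderFeed(r, buf, (size_t)n) == REDIS_OK)
+ *         redisReaderGetReply(r, &reply);
+ *
+ * A non-NULL reply means one complete reply was parsed; a NULL reply with
+ * REDIS_OK simply means more bytes are needed, so read and feed again.
+ *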
*/ + if (r->err) + return REDIS_ERR; + + /* When the buffer is empty, there will never be a reply. */ + if (r->len == 0) + return REDIS_OK; + + /* Set first item to process when the stack is empty. */ + if (r->ridx == -1) { + r->task[0]->type = -1; + r->task[0]->elements = -1; + r->task[0]->idx = -1; + r->task[0]->obj = NULL; + r->task[0]->parent = NULL; + r->task[0]->privdata = r->privdata; + r->ridx = 0; + } + + /* Process items in reply. */ + while (r->ridx >= 0) + if (processItem(r) != REDIS_OK) + break; + + /* Return ASAP when an error occurred. */ + if (r->err) + return REDIS_ERR; + + /* Discard part of the buffer when we've consumed at least 1k, to avoid + * doing unnecessary calls to memmove() in sds.c. */ + if (r->pos >= 1024) { + if (sdsrange(r->buf,r->pos,-1) < 0) return REDIS_ERR; + r->pos = 0; + r->len = sdslen(r->buf); + } + + /* Emit a reply when there is one. */ + if (r->ridx == -1) { + if (reply != NULL) { + *reply = r->reply; + } else if (r->reply != NULL && r->fn && r->fn->freeObject) { + r->fn->freeObject(r->reply); + } + r->reply = NULL; + } + return REDIS_OK; +} diff --git a/ext/hiredis-1.0.2/read.h b/ext/hiredis-1.0.2/read.h new file mode 100644 index 000000000..2d74d77a5 --- /dev/null +++ b/ext/hiredis-1.0.2/read.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2011, Pieter Noordhuis + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#ifndef __HIREDIS_READ_H +#define __HIREDIS_READ_H +#include /* for size_t */ + +#define REDIS_ERR -1 +#define REDIS_OK 0 + +/* When an error occurs, the err flag in a context is set to hold the type of + * error that occurred. REDIS_ERR_IO means there was an I/O error and you + * should use the "errno" variable to find out what is wrong. + * For other values, the "errstr" field will hold a description. 
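+ * A typical caller-side check, sketched only for illustration:
+ *
+ *     if (redisReaderGetReply(reader, &reply) == REDIS_ERR) {
+ *         if (reader->err == REDIS_ERR_IO)
+ *             perror("redis I/O");
+ *         else
+ *             fprintf(stderr, "redis: %s\n", reader->errstr);
+ *     }
+ *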
*/ +#define REDIS_ERR_IO 1 /* Error in read or write */ +#define REDIS_ERR_EOF 3 /* End of file */ +#define REDIS_ERR_PROTOCOL 4 /* Protocol error */ +#define REDIS_ERR_OOM 5 /* Out of memory */ +#define REDIS_ERR_TIMEOUT 6 /* Timed out */ +#define REDIS_ERR_OTHER 2 /* Everything else... */ + +#define REDIS_REPLY_STRING 1 +#define REDIS_REPLY_ARRAY 2 +#define REDIS_REPLY_INTEGER 3 +#define REDIS_REPLY_NIL 4 +#define REDIS_REPLY_STATUS 5 +#define REDIS_REPLY_ERROR 6 +#define REDIS_REPLY_DOUBLE 7 +#define REDIS_REPLY_BOOL 8 +#define REDIS_REPLY_MAP 9 +#define REDIS_REPLY_SET 10 +#define REDIS_REPLY_ATTR 11 +#define REDIS_REPLY_PUSH 12 +#define REDIS_REPLY_BIGNUM 13 +#define REDIS_REPLY_VERB 14 + +/* Default max unused reader buffer. */ +#define REDIS_READER_MAX_BUF (1024*16) + +/* Default multi-bulk element limit */ +#define REDIS_READER_MAX_ARRAY_ELEMENTS ((1LL<<32) - 1) + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct redisReadTask { + int type; + long long elements; /* number of elements in multibulk container */ + int idx; /* index in parent (array) object */ + void *obj; /* holds user-generated value for a read task */ + struct redisReadTask *parent; /* parent task */ + void *privdata; /* user-settable arbitrary field */ +} redisReadTask; + +typedef struct redisReplyObjectFunctions { + void *(*createString)(const redisReadTask*, char*, size_t); + void *(*createArray)(const redisReadTask*, size_t); + void *(*createInteger)(const redisReadTask*, long long); + void *(*createDouble)(const redisReadTask*, double, char*, size_t); + void *(*createNil)(const redisReadTask*); + void *(*createBool)(const redisReadTask*, int); + void (*freeObject)(void*); +} redisReplyObjectFunctions; + +typedef struct redisReader { + int err; /* Error flags, 0 when there is no error */ + char errstr[128]; /* String representation of error when applicable */ + + char *buf; /* Read buffer */ + size_t pos; /* Buffer cursor */ + size_t len; /* Buffer length */ + size_t maxbuf; /* Max length of unused buffer */ + long long maxelements; /* Max multi-bulk elements */ + + redisReadTask **task; + int tasks; + + int ridx; /* Index of current read task */ + void *reply; /* Temporary reply pointer */ + + redisReplyObjectFunctions *fn; + void *privdata; +} redisReader; + +/* Public API for the protocol parser. */ +redisReader *redisReaderCreateWithFunctions(redisReplyObjectFunctions *fn); +void redisReaderFree(redisReader *r); +int redisReaderFeed(redisReader *r, const char *buf, size_t len); +int redisReaderGetReply(redisReader *r, void **reply); + +#define redisReaderSetPrivdata(_r, _p) (int)(((redisReader*)(_r))->privdata = (_p)) +#define redisReaderGetObject(_r) (((redisReader*)(_r))->reply) +#define redisReaderGetError(_r) (((redisReader*)(_r))->errstr) + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/ext/hiredis-1.0.2/sds.c b/ext/hiredis-1.0.2/sds.c new file mode 100644 index 000000000..49d2096b7 --- /dev/null +++ b/ext/hiredis-1.0.2/sds.c @@ -0,0 +1,1289 @@ +/* SDSLib 2.0 -- A C dynamic strings library + * + * Copyright (c) 2006-2015, Salvatore Sanfilippo + * Copyright (c) 2015, Oran Agra + * Copyright (c) 2015, Redis Labs, Inc + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "fmacros.h" +#include +#include +#include +#include +#include +#include +#include "sds.h" +#include "sdsalloc.h" + +static inline int sdsHdrSize(char type) { + switch(type&SDS_TYPE_MASK) { + case SDS_TYPE_5: + return sizeof(struct sdshdr5); + case SDS_TYPE_8: + return sizeof(struct sdshdr8); + case SDS_TYPE_16: + return sizeof(struct sdshdr16); + case SDS_TYPE_32: + return sizeof(struct sdshdr32); + case SDS_TYPE_64: + return sizeof(struct sdshdr64); + } + return 0; +} + +static inline char sdsReqType(size_t string_size) { + if (string_size < 32) + return SDS_TYPE_5; + if (string_size < 0xff) + return SDS_TYPE_8; + if (string_size < 0xffff) + return SDS_TYPE_16; + if (string_size < 0xffffffff) + return SDS_TYPE_32; + return SDS_TYPE_64; +} + +/* Create a new sds string with the content specified by the 'init' pointer + * and 'initlen'. + * If NULL is used for 'init' the string is initialized with zero bytes. + * + * The string is always null-termined (all the sds strings are, always) so + * even if you create an sds string with: + * + * mystring = sdsnewlen("abc",3); + * + * You can print the string with printf() as there is an implicit \0 at the + * end of the string. However the string is binary safe and can contain + * \0 characters in the middle, as the length is stored in the sds header. */ +sds sdsnewlen(const void *init, size_t initlen) { + void *sh; + sds s; + char type = sdsReqType(initlen); + /* Empty strings are usually created in order to append. Use type 8 + * since type 5 is not good at this. */ + if (type == SDS_TYPE_5 && initlen == 0) type = SDS_TYPE_8; + int hdrlen = sdsHdrSize(type); + unsigned char *fp; /* flags pointer. 
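+ * The single flags byte always sits immediately before the returned string
+ * pointer, which is why fp is computed as s - 1 just below and why the other
+ * sds functions can recover the header type from s[-1].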
*/ + + sh = s_malloc(hdrlen+initlen+1); + if (sh == NULL) return NULL; + if (!init) + memset(sh, 0, hdrlen+initlen+1); + s = (char*)sh+hdrlen; + fp = ((unsigned char*)s)-1; + switch(type) { + case SDS_TYPE_5: { + *fp = type | (initlen << SDS_TYPE_BITS); + break; + } + case SDS_TYPE_8: { + SDS_HDR_VAR(8,s); + sh->len = initlen; + sh->alloc = initlen; + *fp = type; + break; + } + case SDS_TYPE_16: { + SDS_HDR_VAR(16,s); + sh->len = initlen; + sh->alloc = initlen; + *fp = type; + break; + } + case SDS_TYPE_32: { + SDS_HDR_VAR(32,s); + sh->len = initlen; + sh->alloc = initlen; + *fp = type; + break; + } + case SDS_TYPE_64: { + SDS_HDR_VAR(64,s); + sh->len = initlen; + sh->alloc = initlen; + *fp = type; + break; + } + } + if (initlen && init) + memcpy(s, init, initlen); + s[initlen] = '\0'; + return s; +} + +/* Create an empty (zero length) sds string. Even in this case the string + * always has an implicit null term. */ +sds sdsempty(void) { + return sdsnewlen("",0); +} + +/* Create a new sds string starting from a null terminated C string. */ +sds sdsnew(const char *init) { + size_t initlen = (init == NULL) ? 0 : strlen(init); + return sdsnewlen(init, initlen); +} + +/* Duplicate an sds string. */ +sds sdsdup(const sds s) { + return sdsnewlen(s, sdslen(s)); +} + +/* Free an sds string. No operation is performed if 's' is NULL. */ +void sdsfree(sds s) { + if (s == NULL) return; + s_free((char*)s-sdsHdrSize(s[-1])); +} + +/* Set the sds string length to the length as obtained with strlen(), so + * considering as content only up to the first null term character. + * + * This function is useful when the sds string is hacked manually in some + * way, like in the following example: + * + * s = sdsnew("foobar"); + * s[2] = '\0'; + * sdsupdatelen(s); + * printf("%d\n", sdslen(s)); + * + * The output will be "2", but if we comment out the call to sdsupdatelen() + * the output will be "6" as the string was modified but the logical length + * remains 6 bytes. */ +void sdsupdatelen(sds s) { + int reallen = strlen(s); + sdssetlen(s, reallen); +} + +/* Modify an sds string in-place to make it empty (zero length). + * However all the existing buffer is not discarded but set as free space + * so that next append operations will not require allocations up to the + * number of bytes previously available. */ +void sdsclear(sds s) { + sdssetlen(s, 0); + s[0] = '\0'; +} + +/* Enlarge the free space at the end of the sds string so that the caller + * is sure that after calling this function can overwrite up to addlen + * bytes after the end of the string, plus one more byte for nul term. + * + * Note: this does not change the *length* of the sds string as returned + * by sdslen(), but only the free buffer space we have. */ +sds sdsMakeRoomFor(sds s, size_t addlen) { + void *sh, *newsh; + size_t avail = sdsavail(s); + size_t len, newlen; + char type, oldtype = s[-1] & SDS_TYPE_MASK; + int hdrlen; + + /* Return ASAP if there is enough space left. */ + if (avail >= addlen) return s; + + len = sdslen(s); + sh = (char*)s-sdsHdrSize(oldtype); + newlen = (len+addlen); + if (newlen < SDS_MAX_PREALLOC) + newlen *= 2; + else + newlen += SDS_MAX_PREALLOC; + + type = sdsReqType(newlen); + + /* Don't use type 5: the user is appending to the string and type 5 is + * not able to remember empty space, so sdsMakeRoomFor() must be called + * at every appending operation. 
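+ * For reference, the growth policy computed above: new lengths below
+ * SDS_MAX_PREALLOC are doubled, larger ones just get SDS_MAX_PREALLOC added,
+ * so appending 1 byte to a 100-byte string preallocates room for 202 bytes
+ * rather than 101.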
*/ + if (type == SDS_TYPE_5) type = SDS_TYPE_8; + + hdrlen = sdsHdrSize(type); + if (oldtype==type) { + newsh = s_realloc(sh, hdrlen+newlen+1); + if (newsh == NULL) return NULL; + s = (char*)newsh+hdrlen; + } else { + /* Since the header size changes, need to move the string forward, + * and can't use realloc */ + newsh = s_malloc(hdrlen+newlen+1); + if (newsh == NULL) return NULL; + memcpy((char*)newsh+hdrlen, s, len+1); + s_free(sh); + s = (char*)newsh+hdrlen; + s[-1] = type; + sdssetlen(s, len); + } + sdssetalloc(s, newlen); + return s; +} + +/* Reallocate the sds string so that it has no free space at the end. The + * contained string remains not altered, but next concatenation operations + * will require a reallocation. + * + * After the call, the passed sds string is no longer valid and all the + * references must be substituted with the new pointer returned by the call. */ +sds sdsRemoveFreeSpace(sds s) { + void *sh, *newsh; + char type, oldtype = s[-1] & SDS_TYPE_MASK; + int hdrlen; + size_t len = sdslen(s); + sh = (char*)s-sdsHdrSize(oldtype); + + type = sdsReqType(len); + hdrlen = sdsHdrSize(type); + if (oldtype==type) { + newsh = s_realloc(sh, hdrlen+len+1); + if (newsh == NULL) return NULL; + s = (char*)newsh+hdrlen; + } else { + newsh = s_malloc(hdrlen+len+1); + if (newsh == NULL) return NULL; + memcpy((char*)newsh+hdrlen, s, len+1); + s_free(sh); + s = (char*)newsh+hdrlen; + s[-1] = type; + sdssetlen(s, len); + } + sdssetalloc(s, len); + return s; +} + +/* Return the total size of the allocation of the specifed sds string, + * including: + * 1) The sds header before the pointer. + * 2) The string. + * 3) The free buffer at the end if any. + * 4) The implicit null term. + */ +size_t sdsAllocSize(sds s) { + size_t alloc = sdsalloc(s); + return sdsHdrSize(s[-1])+alloc+1; +} + +/* Return the pointer of the actual SDS allocation (normally SDS strings + * are referenced by the start of the string buffer). */ +void *sdsAllocPtr(sds s) { + return (void*) (s-sdsHdrSize(s[-1])); +} + +/* Increment the sds length and decrements the left free space at the + * end of the string according to 'incr'. Also set the null term + * in the new end of the string. + * + * This function is used in order to fix the string length after the + * user calls sdsMakeRoomFor(), writes something after the end of + * the current string, and finally needs to set the new length. + * + * Note: it is possible to use a negative increment in order to + * right-trim the string. + * + * Usage example: + * + * Using sdsIncrLen() and sdsMakeRoomFor() it is possible to mount the + * following schema, to cat bytes coming from the kernel to the end of an + * sds string without copying into an intermediate buffer: + * + * oldlen = sdslen(s); + * s = sdsMakeRoomFor(s, BUFFER_SIZE); + * nread = read(fd, s+oldlen, BUFFER_SIZE); + * ... check for nread <= 0 and handle it ... 
+ * sdsIncrLen(s, nread); + */ +void sdsIncrLen(sds s, int incr) { + unsigned char flags = s[-1]; + size_t len; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: { + unsigned char *fp = ((unsigned char*)s)-1; + unsigned char oldlen = SDS_TYPE_5_LEN(flags); + assert((incr > 0 && oldlen+incr < 32) || (incr < 0 && oldlen >= (unsigned int)(-incr))); + *fp = SDS_TYPE_5 | ((oldlen+incr) << SDS_TYPE_BITS); + len = oldlen+incr; + break; + } + case SDS_TYPE_8: { + SDS_HDR_VAR(8,s); + assert((incr >= 0 && sh->alloc-sh->len >= incr) || (incr < 0 && sh->len >= (unsigned int)(-incr))); + len = (sh->len += incr); + break; + } + case SDS_TYPE_16: { + SDS_HDR_VAR(16,s); + assert((incr >= 0 && sh->alloc-sh->len >= incr) || (incr < 0 && sh->len >= (unsigned int)(-incr))); + len = (sh->len += incr); + break; + } + case SDS_TYPE_32: { + SDS_HDR_VAR(32,s); + assert((incr >= 0 && sh->alloc-sh->len >= (unsigned int)incr) || (incr < 0 && sh->len >= (unsigned int)(-incr))); + len = (sh->len += incr); + break; + } + case SDS_TYPE_64: { + SDS_HDR_VAR(64,s); + assert((incr >= 0 && sh->alloc-sh->len >= (uint64_t)incr) || (incr < 0 && sh->len >= (uint64_t)(-incr))); + len = (sh->len += incr); + break; + } + default: len = 0; /* Just to avoid compilation warnings. */ + } + s[len] = '\0'; +} + +/* Grow the sds to have the specified length. Bytes that were not part of + * the original length of the sds will be set to zero. + * + * if the specified length is smaller than the current length, no operation + * is performed. */ +sds sdsgrowzero(sds s, size_t len) { + size_t curlen = sdslen(s); + + if (len <= curlen) return s; + s = sdsMakeRoomFor(s,len-curlen); + if (s == NULL) return NULL; + + /* Make sure added region doesn't contain garbage */ + memset(s+curlen,0,(len-curlen+1)); /* also set trailing \0 byte */ + sdssetlen(s, len); + return s; +} + +/* Append the specified binary-safe string pointed by 't' of 'len' bytes to the + * end of the specified sds string 's'. + * + * After the call, the passed sds string is no longer valid and all the + * references must be substituted with the new pointer returned by the call. */ +sds sdscatlen(sds s, const void *t, size_t len) { + size_t curlen = sdslen(s); + + s = sdsMakeRoomFor(s,len); + if (s == NULL) return NULL; + memcpy(s+curlen, t, len); + sdssetlen(s, curlen+len); + s[curlen+len] = '\0'; + return s; +} + +/* Append the specified null termianted C string to the sds string 's'. + * + * After the call, the passed sds string is no longer valid and all the + * references must be substituted with the new pointer returned by the call. */ +sds sdscat(sds s, const char *t) { + return sdscatlen(s, t, strlen(t)); +} + +/* Append the specified sds 't' to the existing sds 's'. + * + * After the call, the modified sds string is no longer valid and all the + * references must be substituted with the new pointer returned by the call. */ +sds sdscatsds(sds s, const sds t) { + return sdscatlen(s, t, sdslen(t)); +} + +/* Destructively modify the sds string 's' to hold the specified binary + * safe string pointed by 't' of length 'len' bytes. */ +sds sdscpylen(sds s, const char *t, size_t len) { + if (sdsalloc(s) < len) { + s = sdsMakeRoomFor(s,len-sdslen(s)); + if (s == NULL) return NULL; + } + memcpy(s, t, len); + s[len] = '\0'; + sdssetlen(s, len); + return s; +} + +/* Like sdscpylen() but 't' must be a null-termined string so that the length + * of the string is obtained with strlen(). 
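+ * A tiny illustrative example:
+ *
+ *     s = sdsnew("Hello World");
+ *     s = sdscpy(s, "short");        => sdslen(s) == 5 and s reads "short"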
*/ +sds sdscpy(sds s, const char *t) { + return sdscpylen(s, t, strlen(t)); +} + +/* Helper for sdscatlonglong() doing the actual number -> string + * conversion. 's' must point to a string with room for at least + * SDS_LLSTR_SIZE bytes. + * + * The function returns the length of the null-terminated string + * representation stored at 's'. */ +#define SDS_LLSTR_SIZE 21 +int sdsll2str(char *s, long long value) { + char *p, aux; + unsigned long long v; + size_t l; + + /* Generate the string representation, this method produces + * an reversed string. */ + v = (value < 0) ? -value : value; + p = s; + do { + *p++ = '0'+(v%10); + v /= 10; + } while(v); + if (value < 0) *p++ = '-'; + + /* Compute length and add null term. */ + l = p-s; + *p = '\0'; + + /* Reverse the string. */ + p--; + while(s < p) { + aux = *s; + *s = *p; + *p = aux; + s++; + p--; + } + return l; +} + +/* Identical sdsll2str(), but for unsigned long long type. */ +int sdsull2str(char *s, unsigned long long v) { + char *p, aux; + size_t l; + + /* Generate the string representation, this method produces + * an reversed string. */ + p = s; + do { + *p++ = '0'+(v%10); + v /= 10; + } while(v); + + /* Compute length and add null term. */ + l = p-s; + *p = '\0'; + + /* Reverse the string. */ + p--; + while(s < p) { + aux = *s; + *s = *p; + *p = aux; + s++; + p--; + } + return l; +} + +/* Create an sds string from a long long value. It is much faster than: + * + * sdscatprintf(sdsempty(),"%lld\n", value); + */ +sds sdsfromlonglong(long long value) { + char buf[SDS_LLSTR_SIZE]; + int len = sdsll2str(buf,value); + + return sdsnewlen(buf,len); +} + +/* Like sdscatprintf() but gets va_list instead of being variadic. */ +sds sdscatvprintf(sds s, const char *fmt, va_list ap) { + va_list cpy; + char staticbuf[1024], *buf = staticbuf, *t; + size_t buflen = strlen(fmt)*2; + + /* We try to start using a static buffer for speed. + * If not possible we revert to heap allocation. */ + if (buflen > sizeof(staticbuf)) { + buf = s_malloc(buflen); + if (buf == NULL) return NULL; + } else { + buflen = sizeof(staticbuf); + } + + /* Try with buffers two times bigger every time we fail to + * fit the string in the current buffer size. */ + while(1) { + buf[buflen-2] = '\0'; + va_copy(cpy,ap); + vsnprintf(buf, buflen, fmt, cpy); + va_end(cpy); + if (buf[buflen-2] != '\0') { + if (buf != staticbuf) s_free(buf); + buflen *= 2; + buf = s_malloc(buflen); + if (buf == NULL) return NULL; + continue; + } + break; + } + + /* Finally concat the obtained string to the SDS string and return it. */ + t = sdscat(s, buf); + if (buf != staticbuf) s_free(buf); + return t; +} + +/* Append to the sds string 's' a string obtained using printf-alike format + * specifier. + * + * After the call, the modified sds string is no longer valid and all the + * references must be substituted with the new pointer returned by the call. + * + * Example: + * + * s = sdsnew("Sum is: "); + * s = sdscatprintf(s,"%d+%d = %d",a,b,a+b). + * + * Often you need to create a string from scratch with the printf-alike + * format. When this is the need, just use sdsempty() as the target string: + * + * s = sdscatprintf(sdsempty(), "... your format ...", args); + */ +sds sdscatprintf(sds s, const char *fmt, ...) { + va_list ap; + char *t; + va_start(ap, fmt); + t = sdscatvprintf(s,fmt,ap); + va_end(ap); + return t; +} + +/* This function is similar to sdscatprintf, but much faster as it does + * not rely on sprintf() family functions implemented by the libc that + * are often very slow. 
Moreover directly handling the sds string as + * new data is concatenated provides a performance improvement. + * + * However this function only handles an incompatible subset of printf-alike + * format specifiers: + * + * %s - C String + * %S - SDS string + * %i - signed int + * %I - 64 bit signed integer (long long, int64_t) + * %u - unsigned int + * %U - 64 bit unsigned integer (unsigned long long, uint64_t) + * %% - Verbatim "%" character. + */ +sds sdscatfmt(sds s, char const *fmt, ...) { + const char *f = fmt; + int i; + va_list ap; + + va_start(ap,fmt); + i = sdslen(s); /* Position of the next byte to write to dest str. */ + while(*f) { + char next, *str; + size_t l; + long long num; + unsigned long long unum; + + /* Make sure there is always space for at least 1 char. */ + if (sdsavail(s)==0) { + s = sdsMakeRoomFor(s,1); + if (s == NULL) goto fmt_error; + } + + switch(*f) { + case '%': + next = *(f+1); + f++; + switch(next) { + case 's': + case 'S': + str = va_arg(ap,char*); + l = (next == 's') ? strlen(str) : sdslen(str); + if (sdsavail(s) < l) { + s = sdsMakeRoomFor(s,l); + if (s == NULL) goto fmt_error; + } + memcpy(s+i,str,l); + sdsinclen(s,l); + i += l; + break; + case 'i': + case 'I': + if (next == 'i') + num = va_arg(ap,int); + else + num = va_arg(ap,long long); + { + char buf[SDS_LLSTR_SIZE]; + l = sdsll2str(buf,num); + if (sdsavail(s) < l) { + s = sdsMakeRoomFor(s,l); + if (s == NULL) goto fmt_error; + } + memcpy(s+i,buf,l); + sdsinclen(s,l); + i += l; + } + break; + case 'u': + case 'U': + if (next == 'u') + unum = va_arg(ap,unsigned int); + else + unum = va_arg(ap,unsigned long long); + { + char buf[SDS_LLSTR_SIZE]; + l = sdsull2str(buf,unum); + if (sdsavail(s) < l) { + s = sdsMakeRoomFor(s,l); + if (s == NULL) goto fmt_error; + } + memcpy(s+i,buf,l); + sdsinclen(s,l); + i += l; + } + break; + default: /* Handle %% and generally %. */ + s[i++] = next; + sdsinclen(s,1); + break; + } + break; + default: + s[i++] = *f; + sdsinclen(s,1); + break; + } + f++; + } + va_end(ap); + + /* Add null-term */ + s[i] = '\0'; + return s; + +fmt_error: + va_end(ap); + return NULL; +} + +/* Remove the part of the string from left and from right composed just of + * contiguous characters found in 'cset', that is a null terminted C string. + * + * After the call, the modified sds string is no longer valid and all the + * references must be substituted with the new pointer returned by the call. + * + * Example: + * + * s = sdsnew("AA...AA.a.aa.aHelloWorld :::"); + * s = sdstrim(s,"Aa. :"); + * printf("%s\n", s); + * + * Output will be just "Hello World". + */ +sds sdstrim(sds s, const char *cset) { + char *start, *end, *sp, *ep; + size_t len; + + sp = start = s; + ep = end = s+sdslen(s)-1; + while(sp <= end && strchr(cset, *sp)) sp++; + while(ep > sp && strchr(cset, *ep)) ep--; + len = (sp > ep) ? 0 : ((ep-sp)+1); + if (s != sp) memmove(s, sp, len); + s[len] = '\0'; + sdssetlen(s,len); + return s; +} + +/* Turn the string into a smaller (or equal) string containing only the + * substring specified by the 'start' and 'end' indexes. + * + * start and end can be negative, where -1 means the last character of the + * string, -2 the penultimate character, and so forth. + * + * The interval is inclusive, so the start and end characters will be part + * of the resulting string. + * + * The string is modified in-place. + * + * Return value: + * -1 (error) if sdslen(s) is larger than maximum positive ssize_t value. + * 0 on success. 
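+ *    (the -1 case can only happen when the string length does not fit in a
+ *    positive ssize_t; see the SSIZE_MAX guard at the top of the function)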
+ * + * Example: + * + * s = sdsnew("Hello World"); + * sdsrange(s,1,-1); => "ello World" + */ +int sdsrange(sds s, ssize_t start, ssize_t end) { + size_t newlen, len = sdslen(s); + if (len > SSIZE_MAX) return -1; + + if (len == 0) return 0; + if (start < 0) { + start = len+start; + if (start < 0) start = 0; + } + if (end < 0) { + end = len+end; + if (end < 0) end = 0; + } + newlen = (start > end) ? 0 : (end-start)+1; + if (newlen != 0) { + if (start >= (ssize_t)len) { + newlen = 0; + } else if (end >= (ssize_t)len) { + end = len-1; + newlen = (start > end) ? 0 : (end-start)+1; + } + } else { + start = 0; + } + if (start && newlen) memmove(s, s+start, newlen); + s[newlen] = 0; + sdssetlen(s,newlen); + return 0; +} + +/* Apply tolower() to every character of the sds string 's'. */ +void sdstolower(sds s) { + int len = sdslen(s), j; + + for (j = 0; j < len; j++) s[j] = tolower(s[j]); +} + +/* Apply toupper() to every character of the sds string 's'. */ +void sdstoupper(sds s) { + int len = sdslen(s), j; + + for (j = 0; j < len; j++) s[j] = toupper(s[j]); +} + +/* Compare two sds strings s1 and s2 with memcmp(). + * + * Return value: + * + * positive if s1 > s2. + * negative if s1 < s2. + * 0 if s1 and s2 are exactly the same binary string. + * + * If two strings share exactly the same prefix, but one of the two has + * additional characters, the longer string is considered to be greater than + * the smaller one. */ +int sdscmp(const sds s1, const sds s2) { + size_t l1, l2, minlen; + int cmp; + + l1 = sdslen(s1); + l2 = sdslen(s2); + minlen = (l1 < l2) ? l1 : l2; + cmp = memcmp(s1,s2,minlen); + if (cmp == 0) return l1-l2; + return cmp; +} + +/* Split 's' with separator in 'sep'. An array + * of sds strings is returned. *count will be set + * by reference to the number of tokens returned. + * + * On out of memory, zero length string, zero length + * separator, NULL is returned. + * + * Note that 'sep' is able to split a string using + * a multi-character separator. For example + * sdssplit("foo_-_bar","_-_"); will return two + * elements "foo" and "bar". + * + * This version of the function is binary-safe but + * requires length arguments. sdssplit() is just the + * same function but for zero-terminated strings. + */ +sds *sdssplitlen(const char *s, int len, const char *sep, int seplen, int *count) { + int elements = 0, slots = 5, start = 0, j; + sds *tokens; + + if (seplen < 1 || len < 0) return NULL; + + tokens = s_malloc(sizeof(sds)*slots); + if (tokens == NULL) return NULL; + + if (len == 0) { + *count = 0; + return tokens; + } + for (j = 0; j < (len-(seplen-1)); j++) { + /* make sure there is room for the next element and the final one */ + if (slots < elements+2) { + sds *newtokens; + + slots *= 2; + newtokens = s_realloc(tokens,sizeof(sds)*slots); + if (newtokens == NULL) goto cleanup; + tokens = newtokens; + } + /* search the separator */ + if ((seplen == 1 && *(s+j) == sep[0]) || (memcmp(s+j,sep,seplen) == 0)) { + tokens[elements] = sdsnewlen(s+start,j-start); + if (tokens[elements] == NULL) goto cleanup; + elements++; + start = j+seplen; + j = j+seplen-1; /* skip the separator */ + } + } + /* Add the final element. We are sure there is room in the tokens array. 
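+ * (the loop above always kept slots >= elements+2, so this final token still
+ * has a free slot in 'tokens' without another realloc)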
*/ + tokens[elements] = sdsnewlen(s+start,len-start); + if (tokens[elements] == NULL) goto cleanup; + elements++; + *count = elements; + return tokens; + +cleanup: + { + int i; + for (i = 0; i < elements; i++) sdsfree(tokens[i]); + s_free(tokens); + *count = 0; + return NULL; + } +} + +/* Free the result returned by sdssplitlen(), or do nothing if 'tokens' is NULL. */ +void sdsfreesplitres(sds *tokens, int count) { + if (!tokens) return; + while(count--) + sdsfree(tokens[count]); + s_free(tokens); +} + +/* Append to the sds string "s" an escaped string representation where + * all the non-printable characters (tested with isprint()) are turned into + * escapes in the form "\n\r\a...." or "\x". + * + * After the call, the modified sds string is no longer valid and all the + * references must be substituted with the new pointer returned by the call. */ +sds sdscatrepr(sds s, const char *p, size_t len) { + s = sdscatlen(s,"\"",1); + while(len--) { + switch(*p) { + case '\\': + case '"': + s = sdscatprintf(s,"\\%c",*p); + break; + case '\n': s = sdscatlen(s,"\\n",2); break; + case '\r': s = sdscatlen(s,"\\r",2); break; + case '\t': s = sdscatlen(s,"\\t",2); break; + case '\a': s = sdscatlen(s,"\\a",2); break; + case '\b': s = sdscatlen(s,"\\b",2); break; + default: + if (isprint(*p)) + s = sdscatprintf(s,"%c",*p); + else + s = sdscatprintf(s,"\\x%02x",(unsigned char)*p); + break; + } + p++; + } + return sdscatlen(s,"\"",1); +} + +/* Helper function for sdssplitargs() that converts a hex digit into an + * integer from 0 to 15 */ +int hex_digit_to_int(char c) { + switch(c) { + case '0': return 0; + case '1': return 1; + case '2': return 2; + case '3': return 3; + case '4': return 4; + case '5': return 5; + case '6': return 6; + case '7': return 7; + case '8': return 8; + case '9': return 9; + case 'a': case 'A': return 10; + case 'b': case 'B': return 11; + case 'c': case 'C': return 12; + case 'd': case 'D': return 13; + case 'e': case 'E': return 14; + case 'f': case 'F': return 15; + default: return 0; + } +} + +/* Split a line into arguments, where every argument can be in the + * following programming-language REPL-alike form: + * + * foo bar "newline are supported\n" and "\xff\x00otherstuff" + * + * The number of arguments is stored into *argc, and an array + * of sds is returned. + * + * The caller should free the resulting array of sds strings with + * sdsfreesplitres(). + * + * Note that sdscatrepr() is able to convert back a string into + * a quoted string in the same format sdssplitargs() is able to parse. 
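+ * A small illustration (the input string is hypothetical):
+ *
+ *     int argc;
+ *     sds *argv = sdssplitargs("set key \"hello\\nworld\"", &argc);
+ *     sdsfreesplitres(argv, argc);
+ *
+ * would see argc == 3, with argv[2] holding a real newline between the words.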
+ * + * The function returns the allocated tokens on success, even when the + * input string is empty, or NULL if the input contains unbalanced + * quotes or closed quotes followed by non space characters + * as in: "foo"bar or "foo' + */ +sds *sdssplitargs(const char *line, int *argc) { + const char *p = line; + char *current = NULL; + char **vector = NULL; + + *argc = 0; + while(1) { + /* skip blanks */ + while(*p && isspace(*p)) p++; + if (*p) { + /* get a token */ + int inq=0; /* set to 1 if we are in "quotes" */ + int insq=0; /* set to 1 if we are in 'single quotes' */ + int done=0; + + if (current == NULL) current = sdsempty(); + while(!done) { + if (inq) { + if (*p == '\\' && *(p+1) == 'x' && + isxdigit(*(p+2)) && + isxdigit(*(p+3))) + { + unsigned char byte; + + byte = (hex_digit_to_int(*(p+2))*16)+ + hex_digit_to_int(*(p+3)); + current = sdscatlen(current,(char*)&byte,1); + p += 3; + } else if (*p == '\\' && *(p+1)) { + char c; + + p++; + switch(*p) { + case 'n': c = '\n'; break; + case 'r': c = '\r'; break; + case 't': c = '\t'; break; + case 'b': c = '\b'; break; + case 'a': c = '\a'; break; + default: c = *p; break; + } + current = sdscatlen(current,&c,1); + } else if (*p == '"') { + /* closing quote must be followed by a space or + * nothing at all. */ + if (*(p+1) && !isspace(*(p+1))) goto err; + done=1; + } else if (!*p) { + /* unterminated quotes */ + goto err; + } else { + current = sdscatlen(current,p,1); + } + } else if (insq) { + if (*p == '\\' && *(p+1) == '\'') { + p++; + current = sdscatlen(current,"'",1); + } else if (*p == '\'') { + /* closing quote must be followed by a space or + * nothing at all. */ + if (*(p+1) && !isspace(*(p+1))) goto err; + done=1; + } else if (!*p) { + /* unterminated quotes */ + goto err; + } else { + current = sdscatlen(current,p,1); + } + } else { + switch(*p) { + case ' ': + case '\n': + case '\r': + case '\t': + case '\0': + done=1; + break; + case '"': + inq=1; + break; + case '\'': + insq=1; + break; + default: + current = sdscatlen(current,p,1); + break; + } + } + if (*p) p++; + } + /* add the token to the vector */ + { + char **new_vector = s_realloc(vector,((*argc)+1)*sizeof(char*)); + if (new_vector == NULL) { + s_free(vector); + return NULL; + } + + vector = new_vector; + vector[*argc] = current; + (*argc)++; + current = NULL; + } + } else { + /* Even on empty input string return something not NULL. */ + if (vector == NULL) vector = s_malloc(sizeof(void*)); + return vector; + } + } + +err: + while((*argc)--) + sdsfree(vector[*argc]); + s_free(vector); + if (current) sdsfree(current); + *argc = 0; + return NULL; +} + +/* Modify the string substituting all the occurrences of the set of + * characters specified in the 'from' string to the corresponding character + * in the 'to' array. + * + * For instance: sdsmapchars(mystring, "ho", "01", 2) + * will have the effect of turning the string "hello" into "0ell1". + * + * The function returns the sds string pointer, that is always the same + * as the input pointer since no resize is needed. */ +sds sdsmapchars(sds s, const char *from, const char *to, size_t setlen) { + size_t j, i, l = sdslen(s); + + for (j = 0; j < l; j++) { + for (i = 0; i < setlen; i++) { + if (s[j] == from[i]) { + s[j] = to[i]; + break; + } + } + } + return s; +} + +/* Join an array of C strings using the specified separator (also a C string). + * Returns the result as an sds string. 
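+ *
+ * Example (illustrative):
+ *
+ *   char *parts[] = {"2024", "01", "31"};
+ *   sdsjoin(parts, 3, "-"); => "2024-01-31"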
*/ +sds sdsjoin(char **argv, int argc, char *sep) { + sds join = sdsempty(); + int j; + + for (j = 0; j < argc; j++) { + join = sdscat(join, argv[j]); + if (j != argc-1) join = sdscat(join,sep); + } + return join; +} + +/* Like sdsjoin, but joins an array of SDS strings. */ +sds sdsjoinsds(sds *argv, int argc, const char *sep, size_t seplen) { + sds join = sdsempty(); + int j; + + for (j = 0; j < argc; j++) { + join = sdscatsds(join, argv[j]); + if (j != argc-1) join = sdscatlen(join,sep,seplen); + } + return join; +} + +/* Wrappers to the allocators used by SDS. Note that SDS will actually + * just use the macros defined into sdsalloc.h in order to avoid to pay + * the overhead of function calls. Here we define these wrappers only for + * the programs SDS is linked to, if they want to touch the SDS internals + * even if they use a different allocator. */ +void *sds_malloc(size_t size) { return s_malloc(size); } +void *sds_realloc(void *ptr, size_t size) { return s_realloc(ptr,size); } +void sds_free(void *ptr) { s_free(ptr); } + +#if defined(SDS_TEST_MAIN) +#include +#include "testhelp.h" +#include "limits.h" + +#define UNUSED(x) (void)(x) +int sdsTest(void) { + { + sds x = sdsnew("foo"), y; + + test_cond("Create a string and obtain the length", + sdslen(x) == 3 && memcmp(x,"foo\0",4) == 0) + + sdsfree(x); + x = sdsnewlen("foo",2); + test_cond("Create a string with specified length", + sdslen(x) == 2 && memcmp(x,"fo\0",3) == 0) + + x = sdscat(x,"bar"); + test_cond("Strings concatenation", + sdslen(x) == 5 && memcmp(x,"fobar\0",6) == 0); + + x = sdscpy(x,"a"); + test_cond("sdscpy() against an originally longer string", + sdslen(x) == 1 && memcmp(x,"a\0",2) == 0) + + x = sdscpy(x,"xyzxxxxxxxxxxyyyyyyyyyykkkkkkkkkk"); + test_cond("sdscpy() against an originally shorter string", + sdslen(x) == 33 && + memcmp(x,"xyzxxxxxxxxxxyyyyyyyyyykkkkkkkkkk\0",33) == 0) + + sdsfree(x); + x = sdscatprintf(sdsempty(),"%d",123); + test_cond("sdscatprintf() seems working in the base case", + sdslen(x) == 3 && memcmp(x,"123\0",4) == 0) + + sdsfree(x); + x = sdsnew("--"); + x = sdscatfmt(x, "Hello %s World %I,%I--", "Hi!", LLONG_MIN,LLONG_MAX); + test_cond("sdscatfmt() seems working in the base case", + sdslen(x) == 60 && + memcmp(x,"--Hello Hi! 
World -9223372036854775808," + "9223372036854775807--",60) == 0) + printf("[%s]\n",x); + + sdsfree(x); + x = sdsnew("--"); + x = sdscatfmt(x, "%u,%U--", UINT_MAX, ULLONG_MAX); + test_cond("sdscatfmt() seems working with unsigned numbers", + sdslen(x) == 35 && + memcmp(x,"--4294967295,18446744073709551615--",35) == 0) + + sdsfree(x); + x = sdsnew(" x "); + sdstrim(x," x"); + test_cond("sdstrim() works when all chars match", + sdslen(x) == 0) + + sdsfree(x); + x = sdsnew(" x "); + sdstrim(x," "); + test_cond("sdstrim() works when a single char remains", + sdslen(x) == 1 && x[0] == 'x') + + sdsfree(x); + x = sdsnew("xxciaoyyy"); + sdstrim(x,"xy"); + test_cond("sdstrim() correctly trims characters", + sdslen(x) == 4 && memcmp(x,"ciao\0",5) == 0) + + y = sdsdup(x); + sdsrange(y,1,1); + test_cond("sdsrange(...,1,1)", + sdslen(y) == 1 && memcmp(y,"i\0",2) == 0) + + sdsfree(y); + y = sdsdup(x); + sdsrange(y,1,-1); + test_cond("sdsrange(...,1,-1)", + sdslen(y) == 3 && memcmp(y,"iao\0",4) == 0) + + sdsfree(y); + y = sdsdup(x); + sdsrange(y,-2,-1); + test_cond("sdsrange(...,-2,-1)", + sdslen(y) == 2 && memcmp(y,"ao\0",3) == 0) + + sdsfree(y); + y = sdsdup(x); + sdsrange(y,2,1); + test_cond("sdsrange(...,2,1)", + sdslen(y) == 0 && memcmp(y,"\0",1) == 0) + + sdsfree(y); + y = sdsdup(x); + sdsrange(y,1,100); + test_cond("sdsrange(...,1,100)", + sdslen(y) == 3 && memcmp(y,"iao\0",4) == 0) + + sdsfree(y); + y = sdsdup(x); + sdsrange(y,100,100); + test_cond("sdsrange(...,100,100)", + sdslen(y) == 0 && memcmp(y,"\0",1) == 0) + + sdsfree(y); + sdsfree(x); + x = sdsnew("foo"); + y = sdsnew("foa"); + test_cond("sdscmp(foo,foa)", sdscmp(x,y) > 0) + + sdsfree(y); + sdsfree(x); + x = sdsnew("bar"); + y = sdsnew("bar"); + test_cond("sdscmp(bar,bar)", sdscmp(x,y) == 0) + + sdsfree(y); + sdsfree(x); + x = sdsnew("aar"); + y = sdsnew("bar"); + test_cond("sdscmp(bar,bar)", sdscmp(x,y) < 0) + + sdsfree(y); + sdsfree(x); + x = sdsnewlen("\a\n\0foo\r",7); + y = sdscatrepr(sdsempty(),x,sdslen(x)); + test_cond("sdscatrepr(...data...)", + memcmp(y,"\"\\a\\n\\x00foo\\r\"",15) == 0) + + { + unsigned int oldfree; + char *p; + int step = 10, j, i; + + sdsfree(x); + sdsfree(y); + x = sdsnew("0"); + test_cond("sdsnew() free/len buffers", sdslen(x) == 1 && sdsavail(x) == 0); + + /* Run the test a few times in order to hit the first two + * SDS header types. */ + for (i = 0; i < 10; i++) { + int oldlen = sdslen(x); + x = sdsMakeRoomFor(x,step); + int type = x[-1]&SDS_TYPE_MASK; + + test_cond("sdsMakeRoomFor() len", sdslen(x) == oldlen); + if (type != SDS_TYPE_5) { + test_cond("sdsMakeRoomFor() free", sdsavail(x) >= step); + oldfree = sdsavail(x); + } + p = x+oldlen; + for (j = 0; j < step; j++) { + p[j] = 'A'+j; + } + sdsIncrLen(x,step); + } + test_cond("sdsMakeRoomFor() content", + memcmp("0ABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJABCDEFGHIJ",x,101) == 0); + test_cond("sdsMakeRoomFor() final length",sdslen(x)==101); + + sdsfree(x); + } + } + test_report() + return 0; +} +#endif + +#ifdef SDS_TEST_MAIN +int main(void) { + return sdsTest(); +} +#endif diff --git a/ext/hiredis-1.0.2/sds.h b/ext/hiredis-1.0.2/sds.h new file mode 100644 index 000000000..eda8833b5 --- /dev/null +++ b/ext/hiredis-1.0.2/sds.h @@ -0,0 +1,278 @@ +/* SDSLib 2.0 -- A C dynamic strings library + * + * Copyright (c) 2006-2015, Salvatore Sanfilippo + * Copyright (c) 2015, Oran Agra + * Copyright (c) 2015, Redis Labs, Inc + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __SDS_H +#define __SDS_H + +#define SDS_MAX_PREALLOC (1024*1024) +#ifdef _MSC_VER +#define __attribute__(x) +typedef long long ssize_t; +#define SSIZE_MAX (LLONG_MAX >> 1) +#endif + +#include +#include +#include + +typedef char *sds; + +/* Note: sdshdr5 is never used, we just access the flags byte directly. + * However is here to document the layout of type 5 SDS strings. 
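+ *
+ * For this type the five most significant bits of 'flags' store the string
+ * length directly (see SDS_TYPE_5_LEN() below): a length of 11, for example,
+ * is encoded as flags == (11 << SDS_TYPE_BITS) | SDS_TYPE_5, with no separate
+ * len/alloc fields.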
*/ +struct __attribute__ ((__packed__)) sdshdr5 { + unsigned char flags; /* 3 lsb of type, and 5 msb of string length */ + char buf[]; +}; +struct __attribute__ ((__packed__)) sdshdr8 { + uint8_t len; /* used */ + uint8_t alloc; /* excluding the header and null terminator */ + unsigned char flags; /* 3 lsb of type, 5 unused bits */ + char buf[]; +}; +struct __attribute__ ((__packed__)) sdshdr16 { + uint16_t len; /* used */ + uint16_t alloc; /* excluding the header and null terminator */ + unsigned char flags; /* 3 lsb of type, 5 unused bits */ + char buf[]; +}; +struct __attribute__ ((__packed__)) sdshdr32 { + uint32_t len; /* used */ + uint32_t alloc; /* excluding the header and null terminator */ + unsigned char flags; /* 3 lsb of type, 5 unused bits */ + char buf[]; +}; +struct __attribute__ ((__packed__)) sdshdr64 { + uint64_t len; /* used */ + uint64_t alloc; /* excluding the header and null terminator */ + unsigned char flags; /* 3 lsb of type, 5 unused bits */ + char buf[]; +}; + +#define SDS_TYPE_5 0 +#define SDS_TYPE_8 1 +#define SDS_TYPE_16 2 +#define SDS_TYPE_32 3 +#define SDS_TYPE_64 4 +#define SDS_TYPE_MASK 7 +#define SDS_TYPE_BITS 3 +#define SDS_HDR_VAR(T,s) struct sdshdr##T *sh = (struct sdshdr##T *)((s)-(sizeof(struct sdshdr##T))); +#define SDS_HDR(T,s) ((struct sdshdr##T *)((s)-(sizeof(struct sdshdr##T)))) +#define SDS_TYPE_5_LEN(f) ((f)>>SDS_TYPE_BITS) + +static inline size_t sdslen(const sds s) { + unsigned char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: + return SDS_TYPE_5_LEN(flags); + case SDS_TYPE_8: + return SDS_HDR(8,s)->len; + case SDS_TYPE_16: + return SDS_HDR(16,s)->len; + case SDS_TYPE_32: + return SDS_HDR(32,s)->len; + case SDS_TYPE_64: + return SDS_HDR(64,s)->len; + } + return 0; +} + +static inline size_t sdsavail(const sds s) { + unsigned char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: { + return 0; + } + case SDS_TYPE_8: { + SDS_HDR_VAR(8,s); + return sh->alloc - sh->len; + } + case SDS_TYPE_16: { + SDS_HDR_VAR(16,s); + return sh->alloc - sh->len; + } + case SDS_TYPE_32: { + SDS_HDR_VAR(32,s); + return sh->alloc - sh->len; + } + case SDS_TYPE_64: { + SDS_HDR_VAR(64,s); + return sh->alloc - sh->len; + } + } + return 0; +} + +static inline void sdssetlen(sds s, size_t newlen) { + unsigned char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: + { + unsigned char *fp = ((unsigned char*)s)-1; + *fp = (unsigned char)(SDS_TYPE_5 | (newlen << SDS_TYPE_BITS)); + } + break; + case SDS_TYPE_8: + SDS_HDR(8,s)->len = (uint8_t)newlen; + break; + case SDS_TYPE_16: + SDS_HDR(16,s)->len = (uint16_t)newlen; + break; + case SDS_TYPE_32: + SDS_HDR(32,s)->len = (uint32_t)newlen; + break; + case SDS_TYPE_64: + SDS_HDR(64,s)->len = (uint64_t)newlen; + break; + } +} + +static inline void sdsinclen(sds s, size_t inc) { + unsigned char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: + { + unsigned char *fp = ((unsigned char*)s)-1; + unsigned char newlen = SDS_TYPE_5_LEN(flags)+(unsigned char)inc; + *fp = SDS_TYPE_5 | (newlen << SDS_TYPE_BITS); + } + break; + case SDS_TYPE_8: + SDS_HDR(8,s)->len += (uint8_t)inc; + break; + case SDS_TYPE_16: + SDS_HDR(16,s)->len += (uint16_t)inc; + break; + case SDS_TYPE_32: + SDS_HDR(32,s)->len += (uint32_t)inc; + break; + case SDS_TYPE_64: + SDS_HDR(64,s)->len += (uint64_t)inc; + break; + } +} + +/* sdsalloc() = sdsavail() + sdslen() */ +static inline size_t sdsalloc(const sds s) { + unsigned char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: + 
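+            /* Type 5 has no 'alloc' field, so its reported allocation simply
+             * equals its length. */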
return SDS_TYPE_5_LEN(flags); + case SDS_TYPE_8: + return SDS_HDR(8,s)->alloc; + case SDS_TYPE_16: + return SDS_HDR(16,s)->alloc; + case SDS_TYPE_32: + return SDS_HDR(32,s)->alloc; + case SDS_TYPE_64: + return SDS_HDR(64,s)->alloc; + } + return 0; +} + +static inline void sdssetalloc(sds s, size_t newlen) { + unsigned char flags = s[-1]; + switch(flags&SDS_TYPE_MASK) { + case SDS_TYPE_5: + /* Nothing to do, this type has no total allocation info. */ + break; + case SDS_TYPE_8: + SDS_HDR(8,s)->alloc = (uint8_t)newlen; + break; + case SDS_TYPE_16: + SDS_HDR(16,s)->alloc = (uint16_t)newlen; + break; + case SDS_TYPE_32: + SDS_HDR(32,s)->alloc = (uint32_t)newlen; + break; + case SDS_TYPE_64: + SDS_HDR(64,s)->alloc = (uint64_t)newlen; + break; + } +} + +sds sdsnewlen(const void *init, size_t initlen); +sds sdsnew(const char *init); +sds sdsempty(void); +sds sdsdup(const sds s); +void sdsfree(sds s); +sds sdsgrowzero(sds s, size_t len); +sds sdscatlen(sds s, const void *t, size_t len); +sds sdscat(sds s, const char *t); +sds sdscatsds(sds s, const sds t); +sds sdscpylen(sds s, const char *t, size_t len); +sds sdscpy(sds s, const char *t); + +sds sdscatvprintf(sds s, const char *fmt, va_list ap); +#ifdef __GNUC__ +sds sdscatprintf(sds s, const char *fmt, ...) + __attribute__((format(printf, 2, 3))); +#else +sds sdscatprintf(sds s, const char *fmt, ...); +#endif + +sds sdscatfmt(sds s, char const *fmt, ...); +sds sdstrim(sds s, const char *cset); +int sdsrange(sds s, ssize_t start, ssize_t end); +void sdsupdatelen(sds s); +void sdsclear(sds s); +int sdscmp(const sds s1, const sds s2); +sds *sdssplitlen(const char *s, int len, const char *sep, int seplen, int *count); +void sdsfreesplitres(sds *tokens, int count); +void sdstolower(sds s); +void sdstoupper(sds s); +sds sdsfromlonglong(long long value); +sds sdscatrepr(sds s, const char *p, size_t len); +sds *sdssplitargs(const char *line, int *argc); +sds sdsmapchars(sds s, const char *from, const char *to, size_t setlen); +sds sdsjoin(char **argv, int argc, char *sep); +sds sdsjoinsds(sds *argv, int argc, const char *sep, size_t seplen); + +/* Low level functions exposed to the user API */ +sds sdsMakeRoomFor(sds s, size_t addlen); +void sdsIncrLen(sds s, int incr); +sds sdsRemoveFreeSpace(sds s); +size_t sdsAllocSize(sds s); +void *sdsAllocPtr(sds s); + +/* Export the allocator used by SDS to the program using SDS. + * Sometimes the program SDS is linked to, may use a different set of + * allocators, but may want to allocate or free things that SDS will + * respectively free or allocate. */ +void *sds_malloc(size_t size); +void *sds_realloc(void *ptr, size_t size); +void sds_free(void *ptr); + +#ifdef REDIS_TEST +int sdsTest(int argc, char *argv[]); +#endif + +#endif diff --git a/ext/hiredis-1.0.2/sdsalloc.h b/ext/hiredis-1.0.2/sdsalloc.h new file mode 100644 index 000000000..5538dd94c --- /dev/null +++ b/ext/hiredis-1.0.2/sdsalloc.h @@ -0,0 +1,44 @@ +/* SDSLib 2.0 -- A C dynamic strings library + * + * Copyright (c) 2006-2015, Salvatore Sanfilippo + * Copyright (c) 2015, Oran Agra + * Copyright (c) 2015, Redis Labs, Inc + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* SDS allocator selection. + * + * This file is used in order to change the SDS allocator at compile time. + * Just define the following defines to what you want to use. Also add + * the include of your alternate allocator if needed (not needed in order + * to use the default libc allocator). */ + +#include "alloc.h" + +#define s_malloc hi_malloc +#define s_realloc hi_realloc +#define s_free hi_free diff --git a/ext/hiredis-1.0.2/sockcompat.c b/ext/hiredis-1.0.2/sockcompat.c new file mode 100644 index 000000000..f99d14b05 --- /dev/null +++ b/ext/hiredis-1.0.2/sockcompat.c @@ -0,0 +1,248 @@ +/* + * Copyright (c) 2019, Marcus Geelnard + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#define REDIS_SOCKCOMPAT_IMPLEMENTATION +#include "sockcompat.h" + +#ifdef _WIN32 +static int _wsaErrorToErrno(int err) { + switch (err) { + case WSAEWOULDBLOCK: + return EWOULDBLOCK; + case WSAEINPROGRESS: + return EINPROGRESS; + case WSAEALREADY: + return EALREADY; + case WSAENOTSOCK: + return ENOTSOCK; + case WSAEDESTADDRREQ: + return EDESTADDRREQ; + case WSAEMSGSIZE: + return EMSGSIZE; + case WSAEPROTOTYPE: + return EPROTOTYPE; + case WSAENOPROTOOPT: + return ENOPROTOOPT; + case WSAEPROTONOSUPPORT: + return EPROTONOSUPPORT; + case WSAEOPNOTSUPP: + return EOPNOTSUPP; + case WSAEAFNOSUPPORT: + return EAFNOSUPPORT; + case WSAEADDRINUSE: + return EADDRINUSE; + case WSAEADDRNOTAVAIL: + return EADDRNOTAVAIL; + case WSAENETDOWN: + return ENETDOWN; + case WSAENETUNREACH: + return ENETUNREACH; + case WSAENETRESET: + return ENETRESET; + case WSAECONNABORTED: + return ECONNABORTED; + case WSAECONNRESET: + return ECONNRESET; + case WSAENOBUFS: + return ENOBUFS; + case WSAEISCONN: + return EISCONN; + case WSAENOTCONN: + return ENOTCONN; + case WSAETIMEDOUT: + return ETIMEDOUT; + case WSAECONNREFUSED: + return ECONNREFUSED; + case WSAELOOP: + return ELOOP; + case WSAENAMETOOLONG: + return ENAMETOOLONG; + case WSAEHOSTUNREACH: + return EHOSTUNREACH; + case WSAENOTEMPTY: + return ENOTEMPTY; + default: + /* We just return a generic I/O error if we could not find a relevant error. */ + return EIO; + } +} + +static void _updateErrno(int success) { + errno = success ? 0 : _wsaErrorToErrno(WSAGetLastError()); +} + +static int _initWinsock() { + static int s_initialized = 0; + if (!s_initialized) { + static WSADATA wsadata; + int err = WSAStartup(MAKEWORD(2,2), &wsadata); + if (err != 0) { + errno = _wsaErrorToErrno(err); + return 0; + } + s_initialized = 1; + } + return 1; +} + +int win32_getaddrinfo(const char *node, const char *service, const struct addrinfo *hints, struct addrinfo **res) { + /* Note: This function is likely to be called before other functions, so run init here. */ + if (!_initWinsock()) { + return EAI_FAIL; + } + + switch (getaddrinfo(node, service, hints, res)) { + case 0: return 0; + case WSATRY_AGAIN: return EAI_AGAIN; + case WSAEINVAL: return EAI_BADFLAGS; + case WSAEAFNOSUPPORT: return EAI_FAMILY; + case WSA_NOT_ENOUGH_MEMORY: return EAI_MEMORY; + case WSAHOST_NOT_FOUND: return EAI_NONAME; + case WSATYPE_NOT_FOUND: return EAI_SERVICE; + case WSAESOCKTNOSUPPORT: return EAI_SOCKTYPE; + default: return EAI_FAIL; /* Including WSANO_RECOVERY */ + } +} + +const char *win32_gai_strerror(int errcode) { + switch (errcode) { + case 0: errcode = 0; break; + case EAI_AGAIN: errcode = WSATRY_AGAIN; break; + case EAI_BADFLAGS: errcode = WSAEINVAL; break; + case EAI_FAMILY: errcode = WSAEAFNOSUPPORT; break; + case EAI_MEMORY: errcode = WSA_NOT_ENOUGH_MEMORY; break; + case EAI_NONAME: errcode = WSAHOST_NOT_FOUND; break; + case EAI_SERVICE: errcode = WSATYPE_NOT_FOUND; break; + case EAI_SOCKTYPE: errcode = WSAESOCKTNOSUPPORT; break; + default: errcode = WSANO_RECOVERY; break; /* Including EAI_FAIL */ + } + return gai_strerror(errcode); +} + +void win32_freeaddrinfo(struct addrinfo *res) { + freeaddrinfo(res); +} + +SOCKET win32_socket(int domain, int type, int protocol) { + SOCKET s; + + /* Note: This function is likely to be called before other functions, so run init here. 
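+ * WSAStartup() is only attempted once: _initWinsock() remembers whether it
+ * already succeeded, so repeated calls are cheap.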
*/ + if (!_initWinsock()) { + return INVALID_SOCKET; + } + + _updateErrno((s = socket(domain, type, protocol)) != INVALID_SOCKET); + return s; +} + +int win32_ioctl(SOCKET fd, unsigned long request, unsigned long *argp) { + int ret = ioctlsocket(fd, (long)request, argp); + _updateErrno(ret != SOCKET_ERROR); + return ret != SOCKET_ERROR ? ret : -1; +} + +int win32_bind(SOCKET sockfd, const struct sockaddr *addr, socklen_t addrlen) { + int ret = bind(sockfd, addr, addrlen); + _updateErrno(ret != SOCKET_ERROR); + return ret != SOCKET_ERROR ? ret : -1; +} + +int win32_connect(SOCKET sockfd, const struct sockaddr *addr, socklen_t addrlen) { + int ret = connect(sockfd, addr, addrlen); + _updateErrno(ret != SOCKET_ERROR); + + /* For Winsock connect(), the WSAEWOULDBLOCK error means the same thing as + * EINPROGRESS for POSIX connect(), so we do that translation to keep POSIX + * logic consistent. */ + if (errno == EWOULDBLOCK) { + errno = EINPROGRESS; + } + + return ret != SOCKET_ERROR ? ret : -1; +} + +int win32_getsockopt(SOCKET sockfd, int level, int optname, void *optval, socklen_t *optlen) { + int ret = 0; + if ((level == SOL_SOCKET) && ((optname == SO_RCVTIMEO) || (optname == SO_SNDTIMEO))) { + if (*optlen >= sizeof (struct timeval)) { + struct timeval *tv = optval; + DWORD timeout = 0; + socklen_t dwlen = 0; + ret = getsockopt(sockfd, level, optname, (char *)&timeout, &dwlen); + tv->tv_sec = timeout / 1000; + tv->tv_usec = (timeout * 1000) % 1000000; + } else { + ret = WSAEFAULT; + } + *optlen = sizeof (struct timeval); + } else { + ret = getsockopt(sockfd, level, optname, (char*)optval, optlen); + } + _updateErrno(ret != SOCKET_ERROR); + return ret != SOCKET_ERROR ? ret : -1; +} + +int win32_setsockopt(SOCKET sockfd, int level, int optname, const void *optval, socklen_t optlen) { + int ret = 0; + if ((level == SOL_SOCKET) && ((optname == SO_RCVTIMEO) || (optname == SO_SNDTIMEO))) { + const struct timeval *tv = optval; + DWORD timeout = tv->tv_sec * 1000 + tv->tv_usec / 1000; + ret = setsockopt(sockfd, level, optname, (const char*)&timeout, sizeof(DWORD)); + } else { + ret = setsockopt(sockfd, level, optname, (const char*)optval, optlen); + } + _updateErrno(ret != SOCKET_ERROR); + return ret != SOCKET_ERROR ? ret : -1; +} + +int win32_close(SOCKET fd) { + int ret = closesocket(fd); + _updateErrno(ret != SOCKET_ERROR); + return ret != SOCKET_ERROR ? ret : -1; +} + +ssize_t win32_recv(SOCKET sockfd, void *buf, size_t len, int flags) { + int ret = recv(sockfd, (char*)buf, (int)len, flags); + _updateErrno(ret != SOCKET_ERROR); + return ret != SOCKET_ERROR ? ret : -1; +} + +ssize_t win32_send(SOCKET sockfd, const void *buf, size_t len, int flags) { + int ret = send(sockfd, (const char*)buf, (int)len, flags); + _updateErrno(ret != SOCKET_ERROR); + return ret != SOCKET_ERROR ? ret : -1; +} + +int win32_poll(struct pollfd *fds, nfds_t nfds, int timeout) { + int ret = WSAPoll(fds, nfds, timeout); + _updateErrno(ret != SOCKET_ERROR); + return ret != SOCKET_ERROR ? ret : -1; +} +#endif /* _WIN32 */ diff --git a/ext/hiredis-1.0.2/sockcompat.h b/ext/hiredis-1.0.2/sockcompat.h new file mode 100644 index 000000000..85810e848 --- /dev/null +++ b/ext/hiredis-1.0.2/sockcompat.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2019, Marcus Geelnard + * + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __SOCKCOMPAT_H +#define __SOCKCOMPAT_H + +#ifndef _WIN32 +/* For POSIX systems we use the standard BSD socket API. */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#else +/* For Windows we use winsock. */ +#undef _WIN32_WINNT +#define _WIN32_WINNT 0x0600 /* To get WSAPoll etc. */ +#include +#include +#include +#include + +#ifdef _MSC_VER +typedef long long ssize_t; +#endif + +/* Emulate the parts of the BSD socket API that we need (override the winsock signatures). 
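+ * Outside of sockcompat.c itself (i.e. when REDIS_SOCKCOMPAT_IMPLEMENTATION
+ * is not defined) the macros further below remap the plain POSIX names such
+ * as socket(), connect(), recv() and poll() onto these win32_* wrappers.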
*/ +int win32_getaddrinfo(const char *node, const char *service, const struct addrinfo *hints, struct addrinfo **res); +const char *win32_gai_strerror(int errcode); +void win32_freeaddrinfo(struct addrinfo *res); +SOCKET win32_socket(int domain, int type, int protocol); +int win32_ioctl(SOCKET fd, unsigned long request, unsigned long *argp); +int win32_bind(SOCKET sockfd, const struct sockaddr *addr, socklen_t addrlen); +int win32_connect(SOCKET sockfd, const struct sockaddr *addr, socklen_t addrlen); +int win32_getsockopt(SOCKET sockfd, int level, int optname, void *optval, socklen_t *optlen); +int win32_setsockopt(SOCKET sockfd, int level, int optname, const void *optval, socklen_t optlen); +int win32_close(SOCKET fd); +ssize_t win32_recv(SOCKET sockfd, void *buf, size_t len, int flags); +ssize_t win32_send(SOCKET sockfd, const void *buf, size_t len, int flags); +typedef ULONG nfds_t; +int win32_poll(struct pollfd *fds, nfds_t nfds, int timeout); + +#ifndef REDIS_SOCKCOMPAT_IMPLEMENTATION +#define getaddrinfo(node, service, hints, res) win32_getaddrinfo(node, service, hints, res) +#undef gai_strerror +#define gai_strerror(errcode) win32_gai_strerror(errcode) +#define freeaddrinfo(res) win32_freeaddrinfo(res) +#define socket(domain, type, protocol) win32_socket(domain, type, protocol) +#define ioctl(fd, request, argp) win32_ioctl(fd, request, argp) +#define bind(sockfd, addr, addrlen) win32_bind(sockfd, addr, addrlen) +#define connect(sockfd, addr, addrlen) win32_connect(sockfd, addr, addrlen) +#define getsockopt(sockfd, level, optname, optval, optlen) win32_getsockopt(sockfd, level, optname, optval, optlen) +#define setsockopt(sockfd, level, optname, optval, optlen) win32_setsockopt(sockfd, level, optname, optval, optlen) +#define close(fd) win32_close(fd) +#define recv(sockfd, buf, len, flags) win32_recv(sockfd, buf, len, flags) +#define send(sockfd, buf, len, flags) win32_send(sockfd, buf, len, flags) +#define poll(fds, nfds, timeout) win32_poll(fds, nfds, timeout) +#endif /* REDIS_SOCKCOMPAT_IMPLEMENTATION */ +#endif /* _WIN32 */ + +#endif /* __SOCKCOMPAT_H */ diff --git a/ext/hiredis-1.0.2/ssl.c b/ext/hiredis-1.0.2/ssl.c new file mode 100644 index 000000000..7df58fbde --- /dev/null +++ b/ext/hiredis-1.0.2/ssl.c @@ -0,0 +1,526 @@ +/* + * Copyright (c) 2009-2011, Salvatore Sanfilippo + * Copyright (c) 2010-2011, Pieter Noordhuis + * Copyright (c) 2019, Redis Labs + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "hiredis.h" +#include "async.h" + +#include +#include +#include +#ifdef _WIN32 +#include +#else +#include +#endif + +#include +#include + +#include "win32.h" +#include "async_private.h" +#include "hiredis_ssl.h" + +void __redisSetError(redisContext *c, int type, const char *str); + +struct redisSSLContext { + /* Associated OpenSSL SSL_CTX as created by redisCreateSSLContext() */ + SSL_CTX *ssl_ctx; + + /* Requested SNI, or NULL */ + char *server_name; +}; + +/* The SSL connection context is attached to SSL/TLS connections as a privdata. */ +typedef struct redisSSL { + /** + * OpenSSL SSL object. + */ + SSL *ssl; + + /** + * SSL_write() requires to be called again with the same arguments it was + * previously called with in the event of an SSL_read/SSL_write situation + */ + size_t lastLen; + + /** Whether the SSL layer requires read (possibly before a write) */ + int wantRead; + + /** + * Whether a write was requested prior to a read. If set, the write() + * should resume whenever a read takes place, if possible + */ + int pendingWrite; +} redisSSL; + +/* Forward declaration */ +redisContextFuncs redisContextSSLFuncs; + +/** + * OpenSSL global initialization and locking handling callbacks. + * Note that this is only required for OpenSSL < 1.1.0. + */ + +#if OPENSSL_VERSION_NUMBER < 0x10100000L +#define HIREDIS_USE_CRYPTO_LOCKS +#endif + +#ifdef HIREDIS_USE_CRYPTO_LOCKS +#ifdef _WIN32 +typedef CRITICAL_SECTION sslLockType; +static void sslLockInit(sslLockType* l) { + InitializeCriticalSection(l); +} +static void sslLockAcquire(sslLockType* l) { + EnterCriticalSection(l); +} +static void sslLockRelease(sslLockType* l) { + LeaveCriticalSection(l); +} +#else +typedef pthread_mutex_t sslLockType; +static void sslLockInit(sslLockType *l) { + pthread_mutex_init(l, NULL); +} +static void sslLockAcquire(sslLockType *l) { + pthread_mutex_lock(l); +} +static void sslLockRelease(sslLockType *l) { + pthread_mutex_unlock(l); +} +#endif + +static sslLockType* ossl_locks; + +static void opensslDoLock(int mode, int lkid, const char *f, int line) { + sslLockType *l = ossl_locks + lkid; + + if (mode & CRYPTO_LOCK) { + sslLockAcquire(l); + } else { + sslLockRelease(l); + } + + (void)f; + (void)line; +} + +static int initOpensslLocks(void) { + unsigned ii, nlocks; + if (CRYPTO_get_locking_callback() != NULL) { + /* Someone already set the callback before us. Don't destroy it! */ + return REDIS_OK; + } + nlocks = CRYPTO_num_locks(); + ossl_locks = hi_malloc(sizeof(*ossl_locks) * nlocks); + if (ossl_locks == NULL) + return REDIS_ERR; + + for (ii = 0; ii < nlocks; ii++) { + sslLockInit(ossl_locks + ii); + } + CRYPTO_set_locking_callback(opensslDoLock); + return REDIS_OK; +} +#endif /* HIREDIS_USE_CRYPTO_LOCKS */ + +int redisInitOpenSSL(void) +{ + SSL_library_init(); +#ifdef HIREDIS_USE_CRYPTO_LOCKS + initOpensslLocks(); +#endif + + return REDIS_OK; +} + +/** + * redisSSLContext helper context destruction. 
+ */ + +const char *redisSSLContextGetError(redisSSLContextError error) +{ + switch (error) { + case REDIS_SSL_CTX_NONE: + return "No Error"; + case REDIS_SSL_CTX_CREATE_FAILED: + return "Failed to create OpenSSL SSL_CTX"; + case REDIS_SSL_CTX_CERT_KEY_REQUIRED: + return "Client cert and key must both be specified or skipped"; + case REDIS_SSL_CTX_CA_CERT_LOAD_FAILED: + return "Failed to load CA Certificate or CA Path"; + case REDIS_SSL_CTX_CLIENT_CERT_LOAD_FAILED: + return "Failed to load client certificate"; + case REDIS_SSL_CTX_PRIVATE_KEY_LOAD_FAILED: + return "Failed to load private key"; + default: + return "Unknown error code"; + } +} + +void redisFreeSSLContext(redisSSLContext *ctx) +{ + if (!ctx) + return; + + if (ctx->server_name) { + hi_free(ctx->server_name); + ctx->server_name = NULL; + } + + if (ctx->ssl_ctx) { + SSL_CTX_free(ctx->ssl_ctx); + ctx->ssl_ctx = NULL; + } + + hi_free(ctx); +} + + +/** + * redisSSLContext helper context initialization. + */ + +redisSSLContext *redisCreateSSLContext(const char *cacert_filename, const char *capath, + const char *cert_filename, const char *private_key_filename, + const char *server_name, redisSSLContextError *error) +{ + redisSSLContext *ctx = hi_calloc(1, sizeof(redisSSLContext)); + if (ctx == NULL) + goto error; + + ctx->ssl_ctx = SSL_CTX_new(SSLv23_client_method()); + if (!ctx->ssl_ctx) { + if (error) *error = REDIS_SSL_CTX_CREATE_FAILED; + goto error; + } + + SSL_CTX_set_options(ctx->ssl_ctx, SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3); + SSL_CTX_set_verify(ctx->ssl_ctx, SSL_VERIFY_PEER, NULL); + + if ((cert_filename != NULL && private_key_filename == NULL) || + (private_key_filename != NULL && cert_filename == NULL)) { + if (error) *error = REDIS_SSL_CTX_CERT_KEY_REQUIRED; + goto error; + } + + if (capath || cacert_filename) { + if (!SSL_CTX_load_verify_locations(ctx->ssl_ctx, cacert_filename, capath)) { + if (error) *error = REDIS_SSL_CTX_CA_CERT_LOAD_FAILED; + goto error; + } + } + + if (cert_filename) { + if (!SSL_CTX_use_certificate_chain_file(ctx->ssl_ctx, cert_filename)) { + if (error) *error = REDIS_SSL_CTX_CLIENT_CERT_LOAD_FAILED; + goto error; + } + if (!SSL_CTX_use_PrivateKey_file(ctx->ssl_ctx, private_key_filename, SSL_FILETYPE_PEM)) { + if (error) *error = REDIS_SSL_CTX_PRIVATE_KEY_LOAD_FAILED; + goto error; + } + } + + if (server_name) + ctx->server_name = hi_strdup(server_name); + + return ctx; + +error: + redisFreeSSLContext(ctx); + return NULL; +} + +/** + * SSL Connection initialization. 
+ */ + + +static int redisSSLConnect(redisContext *c, SSL *ssl) { + if (c->privctx) { + __redisSetError(c, REDIS_ERR_OTHER, "redisContext was already associated"); + return REDIS_ERR; + } + + redisSSL *rssl = hi_calloc(1, sizeof(redisSSL)); + if (rssl == NULL) { + __redisSetError(c, REDIS_ERR_OOM, "Out of memory"); + return REDIS_ERR; + } + + c->funcs = &redisContextSSLFuncs; + rssl->ssl = ssl; + + SSL_set_mode(rssl->ssl, SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER); + SSL_set_fd(rssl->ssl, c->fd); + SSL_set_connect_state(rssl->ssl); + + ERR_clear_error(); + int rv = SSL_connect(rssl->ssl); + if (rv == 1) { + c->privctx = rssl; + return REDIS_OK; + } + + rv = SSL_get_error(rssl->ssl, rv); + if (((c->flags & REDIS_BLOCK) == 0) && + (rv == SSL_ERROR_WANT_READ || rv == SSL_ERROR_WANT_WRITE)) { + c->privctx = rssl; + return REDIS_OK; + } + + if (c->err == 0) { + char err[512]; + if (rv == SSL_ERROR_SYSCALL) + snprintf(err,sizeof(err)-1,"SSL_connect failed: %s",strerror(errno)); + else { + unsigned long e = ERR_peek_last_error(); + snprintf(err,sizeof(err)-1,"SSL_connect failed: %s", + ERR_reason_error_string(e)); + } + __redisSetError(c, REDIS_ERR_IO, err); + } + + hi_free(rssl); + return REDIS_ERR; +} + +/** + * A wrapper around redisSSLConnect() for users who manage their own context and + * create their own SSL object. + */ + +int redisInitiateSSL(redisContext *c, SSL *ssl) { + return redisSSLConnect(c, ssl); +} + +/** + * A wrapper around redisSSLConnect() for users who use redisSSLContext and don't + * manage their own SSL objects. + */ + +int redisInitiateSSLWithContext(redisContext *c, redisSSLContext *redis_ssl_ctx) +{ + if (!c || !redis_ssl_ctx) + return REDIS_ERR; + + /* We want to verify that redisSSLConnect() won't fail on this, as it will + * not own the SSL object in that case and we'll end up leaking. + */ + if (c->privctx) + return REDIS_ERR; + + SSL *ssl = SSL_new(redis_ssl_ctx->ssl_ctx); + if (!ssl) { + __redisSetError(c, REDIS_ERR_OTHER, "Couldn't create new SSL instance"); + goto error; + } + + if (redis_ssl_ctx->server_name) { + if (!SSL_set_tlsext_host_name(ssl, redis_ssl_ctx->server_name)) { + __redisSetError(c, REDIS_ERR_OTHER, "Failed to set server_name/SNI"); + goto error; + } + } + + return redisSSLConnect(c, ssl); + +error: + if (ssl) + SSL_free(ssl); + return REDIS_ERR; +} + +static int maybeCheckWant(redisSSL *rssl, int rv) { + /** + * If the error is WANT_READ or WANT_WRITE, the appropriate flags are set + * and true is returned. False is returned otherwise + */ + if (rv == SSL_ERROR_WANT_READ) { + rssl->wantRead = 1; + return 1; + } else if (rv == SSL_ERROR_WANT_WRITE) { + rssl->pendingWrite = 1; + return 1; + } else { + return 0; + } +} + +/** + * Implementation of redisContextFuncs for SSL connections. 
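+ * redisSSLConnect() above installs this table on the context
+ * (c->funcs = &redisContextSSLFuncs), replacing the plain socket read/write
+ * callbacks with the SSL_read()/SSL_write() based ones defined below.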
+ */ + +static void redisSSLFree(void *privctx){ + redisSSL *rsc = privctx; + + if (!rsc) return; + if (rsc->ssl) { + SSL_free(rsc->ssl); + rsc->ssl = NULL; + } + hi_free(rsc); +} + +static ssize_t redisSSLRead(redisContext *c, char *buf, size_t bufcap) { + redisSSL *rssl = c->privctx; + + int nread = SSL_read(rssl->ssl, buf, bufcap); + if (nread > 0) { + return nread; + } else if (nread == 0) { + __redisSetError(c, REDIS_ERR_EOF, "Server closed the connection"); + return -1; + } else { + int err = SSL_get_error(rssl->ssl, nread); + if (c->flags & REDIS_BLOCK) { + /** + * In blocking mode, we should never end up in a situation where + * we get an error without it being an actual error, except + * in the case of EINTR, which can be spuriously received from + * debuggers or whatever. + */ + if (errno == EINTR) { + return 0; + } else { + const char *msg = NULL; + if (errno == EAGAIN) { + msg = "Resource temporarily unavailable"; + } + __redisSetError(c, REDIS_ERR_IO, msg); + return -1; + } + } + + /** + * We can very well get an EWOULDBLOCK/EAGAIN, however + */ + if (maybeCheckWant(rssl, err)) { + return 0; + } else { + __redisSetError(c, REDIS_ERR_IO, NULL); + return -1; + } + } +} + +static ssize_t redisSSLWrite(redisContext *c) { + redisSSL *rssl = c->privctx; + + size_t len = rssl->lastLen ? rssl->lastLen : sdslen(c->obuf); + int rv = SSL_write(rssl->ssl, c->obuf, len); + + if (rv > 0) { + rssl->lastLen = 0; + } else if (rv < 0) { + rssl->lastLen = len; + + int err = SSL_get_error(rssl->ssl, rv); + if ((c->flags & REDIS_BLOCK) == 0 && maybeCheckWant(rssl, err)) { + return 0; + } else { + __redisSetError(c, REDIS_ERR_IO, NULL); + return -1; + } + } + return rv; +} + +static void redisSSLAsyncRead(redisAsyncContext *ac) { + int rv; + redisSSL *rssl = ac->c.privctx; + redisContext *c = &ac->c; + + rssl->wantRead = 0; + + if (rssl->pendingWrite) { + int done; + + /* This is probably just a write event */ + rssl->pendingWrite = 0; + rv = redisBufferWrite(c, &done); + if (rv == REDIS_ERR) { + __redisAsyncDisconnect(ac); + return; + } else if (!done) { + _EL_ADD_WRITE(ac); + } + } + + rv = redisBufferRead(c); + if (rv == REDIS_ERR) { + __redisAsyncDisconnect(ac); + } else { + _EL_ADD_READ(ac); + redisProcessCallbacks(ac); + } +} + +static void redisSSLAsyncWrite(redisAsyncContext *ac) { + int rv, done = 0; + redisSSL *rssl = ac->c.privctx; + redisContext *c = &ac->c; + + rssl->pendingWrite = 0; + rv = redisBufferWrite(c, &done); + if (rv == REDIS_ERR) { + __redisAsyncDisconnect(ac); + return; + } + + if (!done) { + if (rssl->wantRead) { + /* Need to read-before-write */ + rssl->pendingWrite = 1; + _EL_DEL_WRITE(ac); + } else { + /* No extra reads needed, just need to write more */ + _EL_ADD_WRITE(ac); + } + } else { + /* Already done! 
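+ * (redisBufferWrite() reported the output buffer fully flushed, so stop
+ * listening for write events.)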
*/ + _EL_DEL_WRITE(ac); + } + + /* Always reschedule a read */ + _EL_ADD_READ(ac); +} + +redisContextFuncs redisContextSSLFuncs = { + .free_privctx = redisSSLFree, + .async_read = redisSSLAsyncRead, + .async_write = redisSSLAsyncWrite, + .read = redisSSLRead, + .write = redisSSLWrite +}; + diff --git a/ext/hiredis-1.0.2/test.c b/ext/hiredis-1.0.2/test.c new file mode 100644 index 000000000..397f5640e --- /dev/null +++ b/ext/hiredis-1.0.2/test.c @@ -0,0 +1,1401 @@ +#include "fmacros.h" +#include "sockcompat.h" +#include +#include +#include +#ifndef _WIN32 +#include +#include +#endif +#include +#include +#include +#include + +#include "hiredis.h" +#include "async.h" +#ifdef HIREDIS_TEST_SSL +#include "hiredis_ssl.h" +#endif +#include "net.h" +#include "win32.h" + +enum connection_type { + CONN_TCP, + CONN_UNIX, + CONN_FD, + CONN_SSL +}; + +struct config { + enum connection_type type; + + struct { + const char *host; + int port; + struct timeval timeout; + } tcp; + + struct { + const char *path; + } unix_sock; + + struct { + const char *host; + int port; + const char *ca_cert; + const char *cert; + const char *key; + } ssl; +}; + +struct privdata { + int dtor_counter; +}; + +#ifdef HIREDIS_TEST_SSL +redisSSLContext *_ssl_ctx = NULL; +#endif + +/* The following lines make up our testing "framework" :) */ +static int tests = 0, fails = 0, skips = 0; +#define test(_s) { printf("#%02d ", ++tests); printf(_s); } +#define test_cond(_c) if(_c) printf("\033[0;32mPASSED\033[0;0m\n"); else {printf("\033[0;31mFAILED\033[0;0m\n"); fails++;} +#define test_skipped() { printf("\033[01;33mSKIPPED\033[0;0m\n"); skips++; } + +static long long usec(void) { +#ifndef _MSC_VER + struct timeval tv; + gettimeofday(&tv,NULL); + return (((long long)tv.tv_sec)*1000000)+tv.tv_usec; +#else + FILETIME ft; + GetSystemTimeAsFileTime(&ft); + return (((long long)ft.dwHighDateTime << 32) | ft.dwLowDateTime) / 10; +#endif +} + +/* The assert() calls below have side effects, so we need assert() + * even if we are compiling without asserts (-DNDEBUG). */ +#ifdef NDEBUG +#undef assert +#define assert(e) (void)(e) +#endif + +/* Helper to extract Redis version information. Aborts on any failure. */ +#define REDIS_VERSION_FIELD "redis_version:" +void get_redis_version(redisContext *c, int *majorptr, int *minorptr) { + redisReply *reply; + char *eptr, *s, *e; + int major, minor; + + reply = redisCommand(c, "INFO"); + if (reply == NULL || c->err || reply->type != REDIS_REPLY_STRING) + goto abort; + if ((s = strstr(reply->str, REDIS_VERSION_FIELD)) == NULL) + goto abort; + + s += strlen(REDIS_VERSION_FIELD); + + /* We need a field terminator and at least 'x.y.z' (5) bytes of data */ + if ((e = strstr(s, "\r\n")) == NULL || (e - s) < 5) + goto abort; + + /* Extract version info */ + major = strtol(s, &eptr, 10); + if (*eptr != '.') goto abort; + minor = strtol(eptr+1, NULL, 10); + + /* Push info the caller wants */ + if (majorptr) *majorptr = major; + if (minorptr) *minorptr = minor; + + freeReplyObject(reply); + return; + +abort: + freeReplyObject(reply); + fprintf(stderr, "Error: Cannot determine Redis version, aborting\n"); + exit(1); +} + +static redisContext *select_database(redisContext *c) { + redisReply *reply; + + /* Switch to DB 9 for testing, now that we know we can chat. 
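+ * The suite treats DB 9 as scratch space: it must start out empty (checked
+ * with DBSIZE below) and disconnect() flushes it again once a test is done.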
*/ + reply = redisCommand(c,"SELECT 9"); + assert(reply != NULL); + freeReplyObject(reply); + + /* Make sure the DB is emtpy */ + reply = redisCommand(c,"DBSIZE"); + assert(reply != NULL); + if (reply->type == REDIS_REPLY_INTEGER && reply->integer == 0) { + /* Awesome, DB 9 is empty and we can continue. */ + freeReplyObject(reply); + } else { + printf("Database #9 is not empty, test can not continue\n"); + exit(1); + } + + return c; +} + +/* Switch protocol */ +static void send_hello(redisContext *c, int version) { + redisReply *reply; + int expected; + + reply = redisCommand(c, "HELLO %d", version); + expected = version == 3 ? REDIS_REPLY_MAP : REDIS_REPLY_ARRAY; + assert(reply != NULL && reply->type == expected); + freeReplyObject(reply); +} + +/* Togggle client tracking */ +static void send_client_tracking(redisContext *c, const char *str) { + redisReply *reply; + + reply = redisCommand(c, "CLIENT TRACKING %s", str); + assert(reply != NULL && reply->type == REDIS_REPLY_STATUS); + freeReplyObject(reply); +} + +static int disconnect(redisContext *c, int keep_fd) { + redisReply *reply; + + /* Make sure we're on DB 9. */ + reply = redisCommand(c,"SELECT 9"); + assert(reply != NULL); + freeReplyObject(reply); + reply = redisCommand(c,"FLUSHDB"); + assert(reply != NULL); + freeReplyObject(reply); + + /* Free the context as well, but keep the fd if requested. */ + if (keep_fd) + return redisFreeKeepFd(c); + redisFree(c); + return -1; +} + +static void do_ssl_handshake(redisContext *c) { +#ifdef HIREDIS_TEST_SSL + redisInitiateSSLWithContext(c, _ssl_ctx); + if (c->err) { + printf("SSL error: %s\n", c->errstr); + redisFree(c); + exit(1); + } +#else + (void) c; +#endif +} + +static redisContext *do_connect(struct config config) { + redisContext *c = NULL; + + if (config.type == CONN_TCP) { + c = redisConnect(config.tcp.host, config.tcp.port); + } else if (config.type == CONN_SSL) { + c = redisConnect(config.ssl.host, config.ssl.port); + } else if (config.type == CONN_UNIX) { + c = redisConnectUnix(config.unix_sock.path); + } else if (config.type == CONN_FD) { + /* Create a dummy connection just to get an fd to inherit */ + redisContext *dummy_ctx = redisConnectUnix(config.unix_sock.path); + if (dummy_ctx) { + int fd = disconnect(dummy_ctx, 1); + printf("Connecting to inherited fd %d\n", fd); + c = redisConnectFd(fd); + } + } else { + assert(NULL); + } + + if (c == NULL) { + printf("Connection error: can't allocate redis context\n"); + exit(1); + } else if (c->err) { + printf("Connection error: %s\n", c->errstr); + redisFree(c); + exit(1); + } + + if (config.type == CONN_SSL) { + do_ssl_handshake(c); + } + + return select_database(c); +} + +static void do_reconnect(redisContext *c, struct config config) { + redisReconnect(c); + + if (config.type == CONN_SSL) { + do_ssl_handshake(c); + } +} + +static void test_format_commands(void) { + char *cmd; + int len; + + test("Format command without interpolation: "); + len = redisFormatCommand(&cmd,"SET foo bar"); + test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n",len) == 0 && + len == 4+4+(3+2)+4+(3+2)+4+(3+2)); + hi_free(cmd); + + test("Format command with %%s string interpolation: "); + len = redisFormatCommand(&cmd,"SET %s %s","foo","bar"); + test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n",len) == 0 && + len == 4+4+(3+2)+4+(3+2)+4+(3+2)); + hi_free(cmd); + + test("Format command with %%s and an empty string: "); + len = redisFormatCommand(&cmd,"SET %s %s","foo",""); + 
test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$0\r\n\r\n",len) == 0 && + len == 4+4+(3+2)+4+(3+2)+4+(0+2)); + hi_free(cmd); + + test("Format command with an empty string in between proper interpolations: "); + len = redisFormatCommand(&cmd,"SET %s %s","","foo"); + test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$0\r\n\r\n$3\r\nfoo\r\n",len) == 0 && + len == 4+4+(3+2)+4+(0+2)+4+(3+2)); + hi_free(cmd); + + test("Format command with %%b string interpolation: "); + len = redisFormatCommand(&cmd,"SET %b %b","foo",(size_t)3,"b\0r",(size_t)3); + test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nb\0r\r\n",len) == 0 && + len == 4+4+(3+2)+4+(3+2)+4+(3+2)); + hi_free(cmd); + + test("Format command with %%b and an empty string: "); + len = redisFormatCommand(&cmd,"SET %b %b","foo",(size_t)3,"",(size_t)0); + test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$0\r\n\r\n",len) == 0 && + len == 4+4+(3+2)+4+(3+2)+4+(0+2)); + hi_free(cmd); + + test("Format command with literal %%: "); + len = redisFormatCommand(&cmd,"SET %% %%"); + test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$1\r\n%\r\n$1\r\n%\r\n",len) == 0 && + len == 4+4+(3+2)+4+(1+2)+4+(1+2)); + hi_free(cmd); + + /* Vararg width depends on the type. These tests make sure that the + * width is correctly determined using the format and subsequent varargs + * can correctly be interpolated. */ +#define INTEGER_WIDTH_TEST(fmt, type) do { \ + type value = 123; \ + test("Format command with printf-delegation (" #type "): "); \ + len = redisFormatCommand(&cmd,"key:%08" fmt " str:%s", value, "hello"); \ + test_cond(strncmp(cmd,"*2\r\n$12\r\nkey:00000123\r\n$9\r\nstr:hello\r\n",len) == 0 && \ + len == 4+5+(12+2)+4+(9+2)); \ + hi_free(cmd); \ +} while(0) + +#define FLOAT_WIDTH_TEST(type) do { \ + type value = 123.0; \ + test("Format command with printf-delegation (" #type "): "); \ + len = redisFormatCommand(&cmd,"key:%08.3f str:%s", value, "hello"); \ + test_cond(strncmp(cmd,"*2\r\n$12\r\nkey:0123.000\r\n$9\r\nstr:hello\r\n",len) == 0 && \ + len == 4+5+(12+2)+4+(9+2)); \ + hi_free(cmd); \ +} while(0) + + INTEGER_WIDTH_TEST("d", int); + INTEGER_WIDTH_TEST("hhd", char); + INTEGER_WIDTH_TEST("hd", short); + INTEGER_WIDTH_TEST("ld", long); + INTEGER_WIDTH_TEST("lld", long long); + INTEGER_WIDTH_TEST("u", unsigned int); + INTEGER_WIDTH_TEST("hhu", unsigned char); + INTEGER_WIDTH_TEST("hu", unsigned short); + INTEGER_WIDTH_TEST("lu", unsigned long); + INTEGER_WIDTH_TEST("llu", unsigned long long); + FLOAT_WIDTH_TEST(float); + FLOAT_WIDTH_TEST(double); + + test("Format command with invalid printf format: "); + len = redisFormatCommand(&cmd,"key:%08p %b",(void*)1234,"foo",(size_t)3); + test_cond(len == -1); + + const char *argv[3]; + argv[0] = "SET"; + argv[1] = "foo\0xxx"; + argv[2] = "bar"; + size_t lens[3] = { 3, 7, 3 }; + int argc = 3; + + test("Format command by passing argc/argv without lengths: "); + len = redisFormatCommandArgv(&cmd,argc,argv,NULL); + test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n",len) == 0 && + len == 4+4+(3+2)+4+(3+2)+4+(3+2)); + hi_free(cmd); + + test("Format command by passing argc/argv with lengths: "); + len = redisFormatCommandArgv(&cmd,argc,argv,lens); + test_cond(strncmp(cmd,"*3\r\n$3\r\nSET\r\n$7\r\nfoo\0xxx\r\n$3\r\nbar\r\n",len) == 0 && + len == 4+4+(3+2)+4+(7+2)+4+(3+2)); + hi_free(cmd); + + sds sds_cmd; + + sds_cmd = NULL; + test("Format command into sds by passing argc/argv without lengths: "); + len = redisFormatSdsCommandArgv(&sds_cmd,argc,argv,NULL); + 
test_cond(strncmp(sds_cmd,"*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n",len) == 0 && + len == 4+4+(3+2)+4+(3+2)+4+(3+2)); + sdsfree(sds_cmd); + + sds_cmd = NULL; + test("Format command into sds by passing argc/argv with lengths: "); + len = redisFormatSdsCommandArgv(&sds_cmd,argc,argv,lens); + test_cond(strncmp(sds_cmd,"*3\r\n$3\r\nSET\r\n$7\r\nfoo\0xxx\r\n$3\r\nbar\r\n",len) == 0 && + len == 4+4+(3+2)+4+(7+2)+4+(3+2)); + sdsfree(sds_cmd); +} + +static void test_append_formatted_commands(struct config config) { + redisContext *c; + redisReply *reply; + char *cmd; + int len; + + c = do_connect(config); + + test("Append format command: "); + + len = redisFormatCommand(&cmd, "SET foo bar"); + + test_cond(redisAppendFormattedCommand(c, cmd, len) == REDIS_OK); + + assert(redisGetReply(c, (void*)&reply) == REDIS_OK); + + hi_free(cmd); + freeReplyObject(reply); + + disconnect(c, 0); +} + +static void test_reply_reader(void) { + redisReader *reader; + void *reply, *root; + int ret; + int i; + + test("Error handling in reply parser: "); + reader = redisReaderCreate(); + redisReaderFeed(reader,(char*)"@foo\r\n",6); + ret = redisReaderGetReply(reader,NULL); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr,"Protocol error, got \"@\" as reply type byte") == 0); + redisReaderFree(reader); + + /* when the reply already contains multiple items, they must be free'd + * on an error. valgrind will bark when this doesn't happen. */ + test("Memory cleanup in reply parser: "); + reader = redisReaderCreate(); + redisReaderFeed(reader,(char*)"*2\r\n",4); + redisReaderFeed(reader,(char*)"$5\r\nhello\r\n",11); + redisReaderFeed(reader,(char*)"@foo\r\n",6); + ret = redisReaderGetReply(reader,NULL); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr,"Protocol error, got \"@\" as reply type byte") == 0); + redisReaderFree(reader); + + reader = redisReaderCreate(); + test("Can handle arbitrarily nested multi-bulks: "); + for (i = 0; i < 128; i++) { + redisReaderFeed(reader,(char*)"*1\r\n", 4); + } + redisReaderFeed(reader,(char*)"$6\r\nLOLWUT\r\n",12); + ret = redisReaderGetReply(reader,&reply); + root = reply; /* Keep track of the root reply */ + test_cond(ret == REDIS_OK && + ((redisReply*)reply)->type == REDIS_REPLY_ARRAY && + ((redisReply*)reply)->elements == 1); + + test("Can parse arbitrarily nested multi-bulks correctly: "); + while(i--) { + assert(reply != NULL && ((redisReply*)reply)->type == REDIS_REPLY_ARRAY); + reply = ((redisReply*)reply)->element[0]; + } + test_cond(((redisReply*)reply)->type == REDIS_REPLY_STRING && + !memcmp(((redisReply*)reply)->str, "LOLWUT", 6)); + freeReplyObject(root); + redisReaderFree(reader); + + test("Correctly parses LLONG_MAX: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, ":9223372036854775807\r\n",22); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_OK && + ((redisReply*)reply)->type == REDIS_REPLY_INTEGER && + ((redisReply*)reply)->integer == LLONG_MAX); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Set error when > LLONG_MAX: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, ":9223372036854775808\r\n",22); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr,"Bad integer value") == 0); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Correctly parses LLONG_MIN: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, ":-9223372036854775808\r\n",23); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == 
REDIS_OK && + ((redisReply*)reply)->type == REDIS_REPLY_INTEGER && + ((redisReply*)reply)->integer == LLONG_MIN); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Set error when < LLONG_MIN: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, ":-9223372036854775809\r\n",23); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr,"Bad integer value") == 0); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Set error when array < -1: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, "*-2\r\n+asdf\r\n",12); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr,"Multi-bulk length out of range") == 0); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Set error when bulk < -1: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, "$-2\r\nasdf\r\n",11); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr,"Bulk string length out of range") == 0); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Can configure maximum multi-bulk elements: "); + reader = redisReaderCreate(); + reader->maxelements = 1024; + redisReaderFeed(reader, "*1025\r\n", 7); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr, "Multi-bulk length out of range") == 0); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Multi-bulk never overflows regardless of maxelements: "); + size_t bad_mbulk_len = (SIZE_MAX / sizeof(void *)) + 3; + char bad_mbulk_reply[100]; + snprintf(bad_mbulk_reply, sizeof(bad_mbulk_reply), "*%llu\r\n+asdf\r\n", + (unsigned long long) bad_mbulk_len); + + reader = redisReaderCreate(); + reader->maxelements = 0; /* Don't rely on default limit */ + redisReaderFeed(reader, bad_mbulk_reply, strlen(bad_mbulk_reply)); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && strcasecmp(reader->errstr, "Out of memory") == 0); + freeReplyObject(reply); + redisReaderFree(reader); + +#if LLONG_MAX > SIZE_MAX + test("Set error when array > SIZE_MAX: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, "*9223372036854775807\r\n+asdf\r\n",29); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr,"Multi-bulk length out of range") == 0); + freeReplyObject(reply); + redisReaderFree(reader); + + test("Set error when bulk > SIZE_MAX: "); + reader = redisReaderCreate(); + redisReaderFeed(reader, "$9223372036854775807\r\nasdf\r\n",28); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && + strcasecmp(reader->errstr,"Bulk string length out of range") == 0); + freeReplyObject(reply); + redisReaderFree(reader); +#endif + + test("Works with NULL functions for reply: "); + reader = redisReaderCreate(); + reader->fn = NULL; + redisReaderFeed(reader,(char*)"+OK\r\n",5); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_OK && reply == (void*)REDIS_REPLY_STATUS); + redisReaderFree(reader); + + test("Works when a single newline (\\r\\n) covers two calls to feed: "); + reader = redisReaderCreate(); + reader->fn = NULL; + redisReaderFeed(reader,(char*)"+OK\r",4); + ret = redisReaderGetReply(reader,&reply); + assert(ret == REDIS_OK && reply == NULL); + redisReaderFeed(reader,(char*)"\n",1); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_OK && reply == (void*)REDIS_REPLY_STATUS); + redisReaderFree(reader); + 
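+    /* Aside (illustrative sketch, not part of the upstream test suite): the
+     * tests above drive the reader piece by piece. In normal use the same API
+     * sits behind a socket read loop, feeding whatever bytes arrived and
+     * draining replies until the parser asks for more data. `fd` below is an
+     * assumed, already-connected descriptor; the block is kept under #if 0 so
+     * it does not change the behavior of this test. */
+#if 0
+    {
+        redisReader *r = redisReaderCreate();
+        char buf[4096];
+        ssize_t n;
+        while ((n = read(fd, buf, sizeof(buf))) > 0) {
+            void *msg = NULL;
+            /* Hand the raw bytes to the parser; it buffers partial input. */
+            if (redisReaderFeed(r, buf, (size_t)n) != REDIS_OK)
+                break;
+            /* REDIS_OK with msg == NULL simply means "need more bytes". */
+            while (redisReaderGetReply(r, &msg) == REDIS_OK && msg != NULL) {
+                freeReplyObject(msg);
+                msg = NULL;
+            }
+        }
+        redisReaderFree(r);
+    }
+#endif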
+ test("Don't reset state after protocol error: "); + reader = redisReaderCreate(); + reader->fn = NULL; + redisReaderFeed(reader,(char*)"x",1); + ret = redisReaderGetReply(reader,&reply); + assert(ret == REDIS_ERR); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_ERR && reply == NULL); + redisReaderFree(reader); + + /* Regression test for issue #45 on GitHub. */ + test("Don't do empty allocation for empty multi bulk: "); + reader = redisReaderCreate(); + redisReaderFeed(reader,(char*)"*0\r\n",4); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_OK && + ((redisReply*)reply)->type == REDIS_REPLY_ARRAY && + ((redisReply*)reply)->elements == 0); + freeReplyObject(reply); + redisReaderFree(reader); + + /* RESP3 verbatim strings (GitHub issue #802) */ + test("Can parse RESP3 verbatim strings: "); + reader = redisReaderCreate(); + redisReaderFeed(reader,(char*)"=10\r\ntxt:LOLWUT\r\n",17); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_OK && + ((redisReply*)reply)->type == REDIS_REPLY_VERB && + !memcmp(((redisReply*)reply)->str,"LOLWUT", 6)); + freeReplyObject(reply); + redisReaderFree(reader); + + /* RESP3 push messages (Github issue #815) */ + test("Can parse RESP3 push messages: "); + reader = redisReaderCreate(); + redisReaderFeed(reader,(char*)">2\r\n$6\r\nLOLWUT\r\n:42\r\n",21); + ret = redisReaderGetReply(reader,&reply); + test_cond(ret == REDIS_OK && + ((redisReply*)reply)->type == REDIS_REPLY_PUSH && + ((redisReply*)reply)->elements == 2 && + ((redisReply*)reply)->element[0]->type == REDIS_REPLY_STRING && + !memcmp(((redisReply*)reply)->element[0]->str,"LOLWUT",6) && + ((redisReply*)reply)->element[1]->type == REDIS_REPLY_INTEGER && + ((redisReply*)reply)->element[1]->integer == 42); + freeReplyObject(reply); + redisReaderFree(reader); +} + +static void test_free_null(void) { + void *redisCtx = NULL; + void *reply = NULL; + + test("Don't fail when redisFree is passed a NULL value: "); + redisFree(redisCtx); + test_cond(redisCtx == NULL); + + test("Don't fail when freeReplyObject is passed a NULL value: "); + freeReplyObject(reply); + test_cond(reply == NULL); +} + +static void *hi_malloc_fail(size_t size) { + (void)size; + return NULL; +} + +static void *hi_calloc_fail(size_t nmemb, size_t size) { + (void)nmemb; + (void)size; + return NULL; +} + +static void *hi_realloc_fail(void *ptr, size_t size) { + (void)ptr; + (void)size; + return NULL; +} + +static void test_allocator_injection(void) { + hiredisAllocFuncs ha = { + .mallocFn = hi_malloc_fail, + .callocFn = hi_calloc_fail, + .reallocFn = hi_realloc_fail, + .strdupFn = strdup, + .freeFn = free, + }; + + // Override hiredis allocators + hiredisSetAllocators(&ha); + + test("redisContext uses injected allocators: "); + redisContext *c = redisConnect("localhost", 6379); + test_cond(c == NULL); + + test("redisReader uses injected allocators: "); + redisReader *reader = redisReaderCreate(); + test_cond(reader == NULL); + + // Return allocators to default + hiredisResetAllocators(); +} + +#define HIREDIS_BAD_DOMAIN "idontexist-noreally.com" +static void test_blocking_connection_errors(void) { + redisContext *c; + struct addrinfo hints = {.ai_family = AF_INET}; + struct addrinfo *ai_tmp = NULL; + + int rv = getaddrinfo(HIREDIS_BAD_DOMAIN, "6379", &hints, &ai_tmp); + if (rv != 0) { + // Address does *not* exist + test("Returns error when host cannot be resolved: "); + // First see if this domain name *actually* resolves to NXDOMAIN + c = redisConnect(HIREDIS_BAD_DOMAIN, 6379); + 
test_cond( + c->err == REDIS_ERR_OTHER && + (strcmp(c->errstr, "Name or service not known") == 0 || + strcmp(c->errstr, "Can't resolve: " HIREDIS_BAD_DOMAIN) == 0 || + strcmp(c->errstr, "Name does not resolve") == 0 || + strcmp(c->errstr, "nodename nor servname provided, or not known") == 0 || + strcmp(c->errstr, "No address associated with hostname") == 0 || + strcmp(c->errstr, "Temporary failure in name resolution") == 0 || + strcmp(c->errstr, "hostname nor servname provided, or not known") == 0 || + strcmp(c->errstr, "no address associated with name") == 0 || + strcmp(c->errstr, "No such host is known. ") == 0)); + redisFree(c); + } else { + printf("Skipping NXDOMAIN test. Found evil ISP!\n"); + freeaddrinfo(ai_tmp); + } + +#ifndef _WIN32 + test("Returns error when the port is not open: "); + c = redisConnect((char*)"localhost", 1); + test_cond(c->err == REDIS_ERR_IO && + strcmp(c->errstr,"Connection refused") == 0); + redisFree(c); + + test("Returns error when the unix_sock socket path doesn't accept connections: "); + c = redisConnectUnix((char*)"/tmp/idontexist.sock"); + test_cond(c->err == REDIS_ERR_IO); /* Don't care about the message... */ + redisFree(c); +#endif +} + +/* Dummy push handler */ +void push_handler(void *privdata, void *reply) { + int *counter = privdata; + freeReplyObject(reply); + *counter += 1; +} + +/* Dummy function just to test setting a callback with redisOptions */ +void push_handler_async(redisAsyncContext *ac, void *reply) { + (void)ac; + (void)reply; +} + +static void test_resp3_push_handler(redisContext *c) { + redisPushFn *old = NULL; + redisReply *reply; + void *privdata; + int n = 0; + + /* Switch to RESP3 and turn on client tracking */ + send_hello(c, 3); + send_client_tracking(c, "ON"); + privdata = c->privdata; + c->privdata = &n; + + reply = redisCommand(c, "GET key:0"); + assert(reply != NULL); + freeReplyObject(reply); + + test("RESP3 PUSH messages are handled out of band by default: "); + reply = redisCommand(c, "SET key:0 val:0"); + test_cond(reply != NULL && reply->type == REDIS_REPLY_STATUS); + freeReplyObject(reply); + + assert((reply = redisCommand(c, "GET key:0")) != NULL); + freeReplyObject(reply); + + old = redisSetPushCallback(c, push_handler); + test("We can set a custom RESP3 PUSH handler: "); + reply = redisCommand(c, "SET key:0 val:0"); + test_cond(reply != NULL && reply->type == REDIS_REPLY_STATUS && n == 1); + freeReplyObject(reply); + + /* Unset the push callback and generate an invalidate message making + * sure it is not handled out of band. 
*/ + test("With no handler, PUSH replies come in-band: "); + redisSetPushCallback(c, NULL); + assert((reply = redisCommand(c, "GET key:0")) != NULL); + freeReplyObject(reply); + assert((reply = redisCommand(c, "SET key:0 invalid")) != NULL); + test_cond(reply->type == REDIS_REPLY_PUSH); + freeReplyObject(reply); + + test("With no PUSH handler, no replies are lost: "); + assert(redisGetReply(c, (void**)&reply) == REDIS_OK); + test_cond(reply != NULL && reply->type == REDIS_REPLY_STATUS); + freeReplyObject(reply); + + /* Return to the originally set PUSH handler */ + assert(old != NULL); + redisSetPushCallback(c, old); + + /* Switch back to RESP2 and disable tracking */ + c->privdata = privdata; + send_client_tracking(c, "OFF"); + send_hello(c, 2); +} + +redisOptions get_redis_tcp_options(struct config config) { + redisOptions options = {0}; + REDIS_OPTIONS_SET_TCP(&options, config.tcp.host, config.tcp.port); + return options; +} + +static void test_resp3_push_options(struct config config) { + redisAsyncContext *ac; + redisContext *c; + redisOptions options; + + test("We set a default RESP3 handler for redisContext: "); + options = get_redis_tcp_options(config); + assert((c = redisConnectWithOptions(&options)) != NULL); + test_cond(c->push_cb != NULL); + redisFree(c); + + test("We don't set a default RESP3 push handler for redisAsyncContext: "); + options = get_redis_tcp_options(config); + assert((ac = redisAsyncConnectWithOptions(&options)) != NULL); + test_cond(ac->c.push_cb == NULL); + redisAsyncFree(ac); + + test("Our REDIS_OPT_NO_PUSH_AUTOFREE flag works: "); + options = get_redis_tcp_options(config); + options.options |= REDIS_OPT_NO_PUSH_AUTOFREE; + assert((c = redisConnectWithOptions(&options)) != NULL); + test_cond(c->push_cb == NULL); + redisFree(c); + + test("We can use redisOptions to set a custom PUSH handler for redisContext: "); + options = get_redis_tcp_options(config); + options.push_cb = push_handler; + assert((c = redisConnectWithOptions(&options)) != NULL); + test_cond(c->push_cb == push_handler); + redisFree(c); + + test("We can use redisOptions to set a custom PUSH handler for redisAsyncContext: "); + options = get_redis_tcp_options(config); + options.async_push_cb = push_handler_async; + assert((ac = redisAsyncConnectWithOptions(&options)) != NULL); + test_cond(ac->push_cb == push_handler_async); + redisAsyncFree(ac); +} + +void free_privdata(void *privdata) { + struct privdata *data = privdata; + data->dtor_counter++; +} + +static void test_privdata_hooks(struct config config) { + struct privdata data = {0}; + redisOptions options; + redisContext *c; + + test("We can use redisOptions to set privdata: "); + options = get_redis_tcp_options(config); + REDIS_OPTIONS_SET_PRIVDATA(&options, &data, free_privdata); + assert((c = redisConnectWithOptions(&options)) != NULL); + test_cond(c->privdata == &data); + + test("Our privdata destructor fires when we free the context: "); + redisFree(c); + test_cond(data.dtor_counter == 1); +} + +static void test_blocking_connection(struct config config) { + redisContext *c; + redisReply *reply; + int major; + + c = do_connect(config); + + test("Is able to deliver commands: "); + reply = redisCommand(c,"PING"); + test_cond(reply->type == REDIS_REPLY_STATUS && + strcasecmp(reply->str,"pong") == 0) + freeReplyObject(reply); + + test("Is a able to send commands verbatim: "); + reply = redisCommand(c,"SET foo bar"); + test_cond (reply->type == REDIS_REPLY_STATUS && + strcasecmp(reply->str,"ok") == 0) + freeReplyObject(reply); + + test("%%s 
String interpolation works: "); + reply = redisCommand(c,"SET %s %s","foo","hello world"); + freeReplyObject(reply); + reply = redisCommand(c,"GET foo"); + test_cond(reply->type == REDIS_REPLY_STRING && + strcmp(reply->str,"hello world") == 0); + freeReplyObject(reply); + + test("%%b String interpolation works: "); + reply = redisCommand(c,"SET %b %b","foo",(size_t)3,"hello\x00world",(size_t)11); + freeReplyObject(reply); + reply = redisCommand(c,"GET foo"); + test_cond(reply->type == REDIS_REPLY_STRING && + memcmp(reply->str,"hello\x00world",11) == 0) + + test("Binary reply length is correct: "); + test_cond(reply->len == 11) + freeReplyObject(reply); + + test("Can parse nil replies: "); + reply = redisCommand(c,"GET nokey"); + test_cond(reply->type == REDIS_REPLY_NIL) + freeReplyObject(reply); + + /* test 7 */ + test("Can parse integer replies: "); + reply = redisCommand(c,"INCR mycounter"); + test_cond(reply->type == REDIS_REPLY_INTEGER && reply->integer == 1) + freeReplyObject(reply); + + test("Can parse multi bulk replies: "); + freeReplyObject(redisCommand(c,"LPUSH mylist foo")); + freeReplyObject(redisCommand(c,"LPUSH mylist bar")); + reply = redisCommand(c,"LRANGE mylist 0 -1"); + test_cond(reply->type == REDIS_REPLY_ARRAY && + reply->elements == 2 && + !memcmp(reply->element[0]->str,"bar",3) && + !memcmp(reply->element[1]->str,"foo",3)) + freeReplyObject(reply); + + /* m/e with multi bulk reply *before* other reply. + * specifically test ordering of reply items to parse. */ + test("Can handle nested multi bulk replies: "); + freeReplyObject(redisCommand(c,"MULTI")); + freeReplyObject(redisCommand(c,"LRANGE mylist 0 -1")); + freeReplyObject(redisCommand(c,"PING")); + reply = (redisCommand(c,"EXEC")); + test_cond(reply->type == REDIS_REPLY_ARRAY && + reply->elements == 2 && + reply->element[0]->type == REDIS_REPLY_ARRAY && + reply->element[0]->elements == 2 && + !memcmp(reply->element[0]->element[0]->str,"bar",3) && + !memcmp(reply->element[0]->element[1]->str,"foo",3) && + reply->element[1]->type == REDIS_REPLY_STATUS && + strcasecmp(reply->element[1]->str,"pong") == 0); + freeReplyObject(reply); + + /* Make sure passing NULL to redisGetReply is safe */ + test("Can pass NULL to redisGetReply: "); + assert(redisAppendCommand(c, "PING") == REDIS_OK); + test_cond(redisGetReply(c, NULL) == REDIS_OK); + + get_redis_version(c, &major, NULL); + if (major >= 6) test_resp3_push_handler(c); + test_resp3_push_options(config); + + test_privdata_hooks(config); + + disconnect(c, 0); +} + +/* Send DEBUG SLEEP 0 to detect if we have this command */ +static int detect_debug_sleep(redisContext *c) { + int detected; + redisReply *reply = redisCommand(c, "DEBUG SLEEP 0\r\n"); + + if (reply == NULL || c->err) { + const char *cause = c->err ? 
c->errstr : "(none)"; + fprintf(stderr, "Error testing for DEBUG SLEEP (Redis error: %s), exiting\n", cause); + exit(-1); + } + + detected = reply->type == REDIS_REPLY_STATUS; + freeReplyObject(reply); + + return detected; +} + +static void test_blocking_connection_timeouts(struct config config) { + redisContext *c; + redisReply *reply; + ssize_t s; + const char *sleep_cmd = "DEBUG SLEEP 3\r\n"; + struct timeval tv; + + c = do_connect(config); + test("Successfully completes a command when the timeout is not exceeded: "); + reply = redisCommand(c,"SET foo fast"); + freeReplyObject(reply); + tv.tv_sec = 0; + tv.tv_usec = 10000; + redisSetTimeout(c, tv); + reply = redisCommand(c, "GET foo"); + test_cond(reply != NULL && reply->type == REDIS_REPLY_STRING && memcmp(reply->str, "fast", 4) == 0); + freeReplyObject(reply); + disconnect(c, 0); + + c = do_connect(config); + test("Does not return a reply when the command times out: "); + if (detect_debug_sleep(c)) { + redisAppendFormattedCommand(c, sleep_cmd, strlen(sleep_cmd)); + s = c->funcs->write(c); + tv.tv_sec = 0; + tv.tv_usec = 10000; + redisSetTimeout(c, tv); + reply = redisCommand(c, "GET foo"); +#ifndef _WIN32 + test_cond(s > 0 && reply == NULL && c->err == REDIS_ERR_IO && + strcmp(c->errstr, "Resource temporarily unavailable") == 0); +#else + test_cond(s > 0 && reply == NULL && c->err == REDIS_ERR_TIMEOUT && + strcmp(c->errstr, "recv timeout") == 0); +#endif + freeReplyObject(reply); + } else { + test_skipped(); + } + + test("Reconnect properly reconnects after a timeout: "); + do_reconnect(c, config); + reply = redisCommand(c, "PING"); + test_cond(reply != NULL && reply->type == REDIS_REPLY_STATUS && strcmp(reply->str, "PONG") == 0); + freeReplyObject(reply); + + test("Reconnect properly uses owned parameters: "); + config.tcp.host = "foo"; + config.unix_sock.path = "foo"; + do_reconnect(c, config); + reply = redisCommand(c, "PING"); + test_cond(reply != NULL && reply->type == REDIS_REPLY_STATUS && strcmp(reply->str, "PONG") == 0); + freeReplyObject(reply); + + disconnect(c, 0); +} + +static void test_blocking_io_errors(struct config config) { + redisContext *c; + redisReply *reply; + void *_reply; + int major, minor; + + /* Connect to target given by config. */ + c = do_connect(config); + get_redis_version(c, &major, &minor); + + test("Returns I/O error when the connection is lost: "); + reply = redisCommand(c,"QUIT"); + if (major > 2 || (major == 2 && minor > 0)) { + /* > 2.0 returns OK on QUIT and read() should be issued once more + * to know the descriptor is at EOF. */ + test_cond(strcasecmp(reply->str,"OK") == 0 && + redisGetReply(c,&_reply) == REDIS_ERR); + freeReplyObject(reply); + } else { + test_cond(reply == NULL); + } + +#ifndef _WIN32 + /* On 2.0, QUIT will cause the connection to be closed immediately and + * the read(2) for the reply on QUIT will set the error to EOF. + * On >2.0, QUIT will return with OK and another read(2) needed to be + * issued to find out the socket was closed by the server. In both + * conditions, the error will be set to EOF. 
*/ + assert(c->err == REDIS_ERR_EOF && + strcmp(c->errstr,"Server closed the connection") == 0); +#endif + redisFree(c); + + c = do_connect(config); + test("Returns I/O error on socket timeout: "); + struct timeval tv = { 0, 1000 }; + assert(redisSetTimeout(c,tv) == REDIS_OK); + int respcode = redisGetReply(c,&_reply); +#ifndef _WIN32 + test_cond(respcode == REDIS_ERR && c->err == REDIS_ERR_IO && errno == EAGAIN); +#else + test_cond(respcode == REDIS_ERR && c->err == REDIS_ERR_TIMEOUT); +#endif + redisFree(c); +} + +static void test_invalid_timeout_errors(struct config config) { + redisContext *c; + + test("Set error when an invalid timeout usec value is given to redisConnectWithTimeout: "); + + config.tcp.timeout.tv_sec = 0; + config.tcp.timeout.tv_usec = 10000001; + + c = redisConnectWithTimeout(config.tcp.host, config.tcp.port, config.tcp.timeout); + + test_cond(c->err == REDIS_ERR_IO && strcmp(c->errstr, "Invalid timeout specified") == 0); + redisFree(c); + + test("Set error when an invalid timeout sec value is given to redisConnectWithTimeout: "); + + config.tcp.timeout.tv_sec = (((LONG_MAX) - 999) / 1000) + 1; + config.tcp.timeout.tv_usec = 0; + + c = redisConnectWithTimeout(config.tcp.host, config.tcp.port, config.tcp.timeout); + + test_cond(c->err == REDIS_ERR_IO && strcmp(c->errstr, "Invalid timeout specified") == 0); + redisFree(c); +} + +/* Wrap malloc to abort on failure so OOM checks don't make the test logic + * harder to follow. */ +void *hi_malloc_safe(size_t size) { + void *ptr = hi_malloc(size); + if (ptr == NULL) { + fprintf(stderr, "Error: Out of memory\n"); + exit(-1); + } + + return ptr; +} + +static void test_throughput(struct config config) { + redisContext *c = do_connect(config); + redisReply **replies; + int i, num; + long long t1, t2; + + test("Throughput:\n"); + for (i = 0; i < 500; i++) + freeReplyObject(redisCommand(c,"LPUSH mylist foo")); + + num = 1000; + replies = hi_malloc_safe(sizeof(redisReply*)*num); + t1 = usec(); + for (i = 0; i < num; i++) { + replies[i] = redisCommand(c,"PING"); + assert(replies[i] != NULL && replies[i]->type == REDIS_REPLY_STATUS); + } + t2 = usec(); + for (i = 0; i < num; i++) freeReplyObject(replies[i]); + hi_free(replies); + printf("\t(%dx PING: %.3fs)\n", num, (t2-t1)/1000000.0); + + replies = hi_malloc_safe(sizeof(redisReply*)*num); + t1 = usec(); + for (i = 0; i < num; i++) { + replies[i] = redisCommand(c,"LRANGE mylist 0 499"); + assert(replies[i] != NULL && replies[i]->type == REDIS_REPLY_ARRAY); + assert(replies[i] != NULL && replies[i]->elements == 500); + } + t2 = usec(); + for (i = 0; i < num; i++) freeReplyObject(replies[i]); + hi_free(replies); + printf("\t(%dx LRANGE with 500 elements: %.3fs)\n", num, (t2-t1)/1000000.0); + + replies = hi_malloc_safe(sizeof(redisReply*)*num); + t1 = usec(); + for (i = 0; i < num; i++) { + replies[i] = redisCommand(c, "INCRBY incrkey %d", 1000000); + assert(replies[i] != NULL && replies[i]->type == REDIS_REPLY_INTEGER); + } + t2 = usec(); + for (i = 0; i < num; i++) freeReplyObject(replies[i]); + hi_free(replies); + printf("\t(%dx INCRBY: %.3fs)\n", num, (t2-t1)/1000000.0); + + num = 10000; + replies = hi_malloc_safe(sizeof(redisReply*)*num); + for (i = 0; i < num; i++) + redisAppendCommand(c,"PING"); + t1 = usec(); + for (i = 0; i < num; i++) { + assert(redisGetReply(c, (void*)&replies[i]) == REDIS_OK); + assert(replies[i] != NULL && replies[i]->type == REDIS_REPLY_STATUS); + } + t2 = usec(); + for (i = 0; i < num; i++) freeReplyObject(replies[i]); + hi_free(replies); + 
printf("\t(%dx PING (pipelined): %.3fs)\n", num, (t2-t1)/1000000.0); + + replies = hi_malloc_safe(sizeof(redisReply*)*num); + for (i = 0; i < num; i++) + redisAppendCommand(c,"LRANGE mylist 0 499"); + t1 = usec(); + for (i = 0; i < num; i++) { + assert(redisGetReply(c, (void*)&replies[i]) == REDIS_OK); + assert(replies[i] != NULL && replies[i]->type == REDIS_REPLY_ARRAY); + assert(replies[i] != NULL && replies[i]->elements == 500); + } + t2 = usec(); + for (i = 0; i < num; i++) freeReplyObject(replies[i]); + hi_free(replies); + printf("\t(%dx LRANGE with 500 elements (pipelined): %.3fs)\n", num, (t2-t1)/1000000.0); + + replies = hi_malloc_safe(sizeof(redisReply*)*num); + for (i = 0; i < num; i++) + redisAppendCommand(c,"INCRBY incrkey %d", 1000000); + t1 = usec(); + for (i = 0; i < num; i++) { + assert(redisGetReply(c, (void*)&replies[i]) == REDIS_OK); + assert(replies[i] != NULL && replies[i]->type == REDIS_REPLY_INTEGER); + } + t2 = usec(); + for (i = 0; i < num; i++) freeReplyObject(replies[i]); + hi_free(replies); + printf("\t(%dx INCRBY (pipelined): %.3fs)\n", num, (t2-t1)/1000000.0); + + disconnect(c, 0); +} + +// static long __test_callback_flags = 0; +// static void __test_callback(redisContext *c, void *privdata) { +// ((void)c); +// /* Shift to detect execution order */ +// __test_callback_flags <<= 8; +// __test_callback_flags |= (long)privdata; +// } +// +// static void __test_reply_callback(redisContext *c, redisReply *reply, void *privdata) { +// ((void)c); +// /* Shift to detect execution order */ +// __test_callback_flags <<= 8; +// __test_callback_flags |= (long)privdata; +// if (reply) freeReplyObject(reply); +// } +// +// static redisContext *__connect_nonblock() { +// /* Reset callback flags */ +// __test_callback_flags = 0; +// return redisConnectNonBlock("127.0.0.1", port, NULL); +// } +// +// static void test_nonblocking_connection() { +// redisContext *c; +// int wdone = 0; +// +// test("Calls command callback when command is issued: "); +// c = __connect_nonblock(); +// redisSetCommandCallback(c,__test_callback,(void*)1); +// redisCommand(c,"PING"); +// test_cond(__test_callback_flags == 1); +// redisFree(c); +// +// test("Calls disconnect callback on redisDisconnect: "); +// c = __connect_nonblock(); +// redisSetDisconnectCallback(c,__test_callback,(void*)2); +// redisDisconnect(c); +// test_cond(__test_callback_flags == 2); +// redisFree(c); +// +// test("Calls disconnect callback and free callback on redisFree: "); +// c = __connect_nonblock(); +// redisSetDisconnectCallback(c,__test_callback,(void*)2); +// redisSetFreeCallback(c,__test_callback,(void*)4); +// redisFree(c); +// test_cond(__test_callback_flags == ((2 << 8) | 4)); +// +// test("redisBufferWrite against empty write buffer: "); +// c = __connect_nonblock(); +// test_cond(redisBufferWrite(c,&wdone) == REDIS_OK && wdone == 1); +// redisFree(c); +// +// test("redisBufferWrite against not yet connected fd: "); +// c = __connect_nonblock(); +// redisCommand(c,"PING"); +// test_cond(redisBufferWrite(c,NULL) == REDIS_ERR && +// strncmp(c->error,"write:",6) == 0); +// redisFree(c); +// +// test("redisBufferWrite against closed fd: "); +// c = __connect_nonblock(); +// redisCommand(c,"PING"); +// redisDisconnect(c); +// test_cond(redisBufferWrite(c,NULL) == REDIS_ERR && +// strncmp(c->error,"write:",6) == 0); +// redisFree(c); +// +// test("Process callbacks in the right sequence: "); +// c = __connect_nonblock(); +// redisCommandWithCallback(c,__test_reply_callback,(void*)1,"PING"); +// 
redisCommandWithCallback(c,__test_reply_callback,(void*)2,"PING"); +// redisCommandWithCallback(c,__test_reply_callback,(void*)3,"PING"); +// +// /* Write output buffer */ +// wdone = 0; +// while(!wdone) { +// usleep(500); +// redisBufferWrite(c,&wdone); +// } +// +// /* Read until at least one callback is executed (the 3 replies will +// * arrive in a single packet, causing all callbacks to be executed in +// * a single pass). */ +// while(__test_callback_flags == 0) { +// assert(redisBufferRead(c) == REDIS_OK); +// redisProcessCallbacks(c); +// } +// test_cond(__test_callback_flags == 0x010203); +// redisFree(c); +// +// test("redisDisconnect executes pending callbacks with NULL reply: "); +// c = __connect_nonblock(); +// redisSetDisconnectCallback(c,__test_callback,(void*)1); +// redisCommandWithCallback(c,__test_reply_callback,(void*)2,"PING"); +// redisDisconnect(c); +// test_cond(__test_callback_flags == 0x0201); +// redisFree(c); +// } + +int main(int argc, char **argv) { + struct config cfg = { + .tcp = { + .host = "127.0.0.1", + .port = 6379 + }, + .unix_sock = { + .path = "/tmp/redis.sock" + } + }; + int throughput = 1; + int test_inherit_fd = 1; + int skips_as_fails = 0; + int test_unix_socket; + + /* Parse command line options. */ + argv++; argc--; + while (argc) { + if (argc >= 2 && !strcmp(argv[0],"-h")) { + argv++; argc--; + cfg.tcp.host = argv[0]; + } else if (argc >= 2 && !strcmp(argv[0],"-p")) { + argv++; argc--; + cfg.tcp.port = atoi(argv[0]); + } else if (argc >= 2 && !strcmp(argv[0],"-s")) { + argv++; argc--; + cfg.unix_sock.path = argv[0]; + } else if (argc >= 1 && !strcmp(argv[0],"--skip-throughput")) { + throughput = 0; + } else if (argc >= 1 && !strcmp(argv[0],"--skip-inherit-fd")) { + test_inherit_fd = 0; + } else if (argc >= 1 && !strcmp(argv[0],"--skips-as-fails")) { + skips_as_fails = 1; +#ifdef HIREDIS_TEST_SSL + } else if (argc >= 2 && !strcmp(argv[0],"--ssl-port")) { + argv++; argc--; + cfg.ssl.port = atoi(argv[0]); + } else if (argc >= 2 && !strcmp(argv[0],"--ssl-host")) { + argv++; argc--; + cfg.ssl.host = argv[0]; + } else if (argc >= 2 && !strcmp(argv[0],"--ssl-ca-cert")) { + argv++; argc--; + cfg.ssl.ca_cert = argv[0]; + } else if (argc >= 2 && !strcmp(argv[0],"--ssl-cert")) { + argv++; argc--; + cfg.ssl.cert = argv[0]; + } else if (argc >= 2 && !strcmp(argv[0],"--ssl-key")) { + argv++; argc--; + cfg.ssl.key = argv[0]; +#endif + } else { + fprintf(stderr, "Invalid argument: %s\n", argv[0]); + exit(1); + } + argv++; argc--; + } + +#ifndef _WIN32 + /* Ignore broken pipe signal (for I/O error tests). 
*/ + signal(SIGPIPE, SIG_IGN); + + test_unix_socket = access(cfg.unix_sock.path, F_OK) == 0; + +#else + /* Unix sockets don't exist in Windows */ + test_unix_socket = 0; +#endif + + test_allocator_injection(); + + test_format_commands(); + test_reply_reader(); + test_blocking_connection_errors(); + test_free_null(); + + printf("\nTesting against TCP connection (%s:%d):\n", cfg.tcp.host, cfg.tcp.port); + cfg.type = CONN_TCP; + test_blocking_connection(cfg); + test_blocking_connection_timeouts(cfg); + test_blocking_io_errors(cfg); + test_invalid_timeout_errors(cfg); + test_append_formatted_commands(cfg); + if (throughput) test_throughput(cfg); + + printf("\nTesting against Unix socket connection (%s): ", cfg.unix_sock.path); + if (test_unix_socket) { + printf("\n"); + cfg.type = CONN_UNIX; + test_blocking_connection(cfg); + test_blocking_connection_timeouts(cfg); + test_blocking_io_errors(cfg); + if (throughput) test_throughput(cfg); + } else { + test_skipped(); + } + +#ifdef HIREDIS_TEST_SSL + if (cfg.ssl.port && cfg.ssl.host) { + + redisInitOpenSSL(); + _ssl_ctx = redisCreateSSLContext(cfg.ssl.ca_cert, NULL, cfg.ssl.cert, cfg.ssl.key, NULL, NULL); + assert(_ssl_ctx != NULL); + + printf("\nTesting against SSL connection (%s:%d):\n", cfg.ssl.host, cfg.ssl.port); + cfg.type = CONN_SSL; + + test_blocking_connection(cfg); + test_blocking_connection_timeouts(cfg); + test_blocking_io_errors(cfg); + test_invalid_timeout_errors(cfg); + test_append_formatted_commands(cfg); + if (throughput) test_throughput(cfg); + + redisFreeSSLContext(_ssl_ctx); + _ssl_ctx = NULL; + } +#endif + + if (test_inherit_fd) { + printf("\nTesting against inherited fd (%s): ", cfg.unix_sock.path); + if (test_unix_socket) { + printf("\n"); + cfg.type = CONN_FD; + test_blocking_connection(cfg); + } else { + test_skipped(); + } + } + + if (fails || (skips_as_fails && skips)) { + printf("*** %d TESTS FAILED ***\n", fails); + if (skips) { + printf("*** %d TESTS SKIPPED ***\n", skips); + } + return 1; + } + + printf("ALL TESTS PASSED (%d skipped)\n", skips); + return 0; +} diff --git a/ext/hiredis-1.0.2/test.sh b/ext/hiredis-1.0.2/test.sh new file mode 100755 index 000000000..c72bcb0dc --- /dev/null +++ b/ext/hiredis-1.0.2/test.sh @@ -0,0 +1,78 @@ +#!/bin/sh -ue + +REDIS_SERVER=${REDIS_SERVER:-redis-server} +REDIS_PORT=${REDIS_PORT:-56379} +REDIS_SSL_PORT=${REDIS_SSL_PORT:-56443} +TEST_SSL=${TEST_SSL:-0} +SKIPS_AS_FAILS=${SKIPS_AS_FAILS-:0} +SSL_TEST_ARGS= +SKIPS_ARG= + +tmpdir=$(mktemp -d) +PID_FILE=${tmpdir}/hiredis-test-redis.pid +SOCK_FILE=${tmpdir}/hiredis-test-redis.sock + +if [ "$TEST_SSL" = "1" ]; then + SSL_CA_CERT=${tmpdir}/ca.crt + SSL_CA_KEY=${tmpdir}/ca.key + SSL_CERT=${tmpdir}/redis.crt + SSL_KEY=${tmpdir}/redis.key + + openssl genrsa -out ${tmpdir}/ca.key 4096 + openssl req \ + -x509 -new -nodes -sha256 \ + -key ${SSL_CA_KEY} \ + -days 3650 \ + -subj '/CN=Hiredis Test CA' \ + -out ${SSL_CA_CERT} + openssl genrsa -out ${SSL_KEY} 2048 + openssl req \ + -new -sha256 \ + -key ${SSL_KEY} \ + -subj '/CN=Hiredis Test Cert' | \ + openssl x509 \ + -req -sha256 \ + -CA ${SSL_CA_CERT} \ + -CAkey ${SSL_CA_KEY} \ + -CAserial ${tmpdir}/ca.txt \ + -CAcreateserial \ + -days 365 \ + -out ${SSL_CERT} + + SSL_TEST_ARGS="--ssl-host 127.0.0.1 --ssl-port ${REDIS_SSL_PORT} --ssl-ca-cert ${SSL_CA_CERT} --ssl-cert ${SSL_CERT} --ssl-key ${SSL_KEY}" +fi + +cleanup() { + set +e + kill $(cat ${PID_FILE}) + rm -rf ${tmpdir} +} +trap cleanup INT TERM EXIT + +cat > ${tmpdir}/redis.conf <> ${tmpdir}/redis.conf < /* for struct timeval */ + +#ifndef 
inline +#define inline __inline +#endif + +#ifndef strcasecmp +#define strcasecmp stricmp +#endif + +#ifndef strncasecmp +#define strncasecmp strnicmp +#endif + +#ifndef va_copy +#define va_copy(d,s) ((d) = (s)) +#endif + +#ifndef snprintf +#define snprintf c99_snprintf + +__inline int c99_vsnprintf(char* str, size_t size, const char* format, va_list ap) +{ + int count = -1; + + if (size != 0) + count = _vsnprintf_s(str, size, _TRUNCATE, format, ap); + if (count == -1) + count = _vscprintf(format, ap); + + return count; +} + +__inline int c99_snprintf(char* str, size_t size, const char* format, ...) +{ + int count; + va_list ap; + + va_start(ap, format); + count = c99_vsnprintf(str, size, format, ap); + va_end(ap); + + return count; +} +#endif +#endif /* _MSC_VER */ + +#ifdef _WIN32 +#define strerror_r(errno,buf,len) strerror_s(buf,len,errno) +#endif /* _WIN32 */ + +#endif /* _WIN32_HELPER_INCLUDE */ diff --git a/ext/inja/LICENSE b/ext/inja/LICENSE new file mode 100644 index 000000000..9e06bea07 --- /dev/null +++ b/ext/inja/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018-2021 Berscheid + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/ext/inja/README.md b/ext/inja/README.md new file mode 100644 index 000000000..9d66a2c28 --- /dev/null +++ b/ext/inja/README.md @@ -0,0 +1,391 @@ +[
](https://github.com/pantor/inja/releases)
+
+<!-- Badges: CI Status, Documentation Status, Github Releases, Github Issues, GitHub License -->
+
+ +Inja is a template engine for modern C++, loosely inspired by [jinja](http://jinja.pocoo.org) for python. It has an easy and yet powerful template syntax with all variables, loops, conditions, includes, callbacks, and comments you need, nested and combined as you like. Inja uses the wonderful [json](https://github.com/nlohmann/json) library by nlohmann for data input. Most importantly, inja needs only two header files, which is (nearly) as trivial as integration in C++ can get. Of course, everything is tested on all relevant compilers. Here is what it looks like: + +```.cpp +json data; +data["name"] = "world"; + +inja::render("Hello {{ name }}!", data); // Returns "Hello world!" +``` + +## Integration + +Inja is a headers only library, which can be downloaded from the [releases](https://github.com/pantor/inja/releases) or directly from the `include/` or `single_include/` folder. Inja uses `nlohmann/json.hpp` (>= v3.8.0) as its single dependency, so make sure it can be included from `inja.hpp`. json can be downloaded [here](https://github.com/nlohmann/json/releases). Then integration is as easy as: + +```.cpp +#include + +// Just for convenience +using namespace inja; +``` + +If you are using the [Meson Build System](http://mesonbuild.com), then you can wrap this repository as a subproject. + +If you are using [Conan](https://conan.io) to manage your dependencies, have a look at [this repository](https://github.com/DEGoodmanWilson/conan-inja). Please file issues [here](https://github.com/DEGoodmanWilson/conan-inja/issues) if you experience problems with the packages. + +You can also integrate inja in your project using [Hunter](https://github.com/cpp-pm/hunter), a package manager for C++. + +If you are using [vcpkg](https://github.com/Microsoft/vcpkg) on your project for external dependencies, then you can use the [inja package](https://github.com/Microsoft/vcpkg/tree/master/ports/inja). Please see the vcpkg project for any issues regarding the packaging. + +If you are using [cget](https://cget.readthedocs.io/en/latest/), you can install the latest development version with `cget install pantor/inja`. A specific version can be installed with `cget install pantor/inja@v2.1.0`. + +On macOS, you can install inja via [Homebrew](https://formulae.brew.sh/formula/inja#default) and `brew install inja`. + +If you are using [conda](https://docs.conda.io/en/latest/), you can install the latest version from [conda-forge](https://anaconda.org/conda-forge/inja) with `conda install -c conda-forge inja`. + +## Tutorial + +This tutorial will give you an idea how to use inja. It will explain the most important concepts and give practical advices using examples and executable code. Beside this tutorial, you may check out the [documentation](https://pantor.github.io/inja). + +### Template Rendering + +The basic template rendering takes a template as a `std::string` and a `json` object for all data. It returns the rendered template as an `std::string`. + +```.cpp +json data; +data["name"] = "world"; + +render("Hello {{ name }}!", data); // Returns std::string "Hello world!" +render_to(std::cout, "Hello {{ name }}!", data); // Writes "Hello world!" to stream +``` + +For more advanced usage, an environment is recommended. +```.cpp +Environment env; + +// Render a string with json data +std::string result = env.render("Hello {{ name }}!", data); // "Hello world!" 
+ +// Or directly read a template file +Template temp = env.parse_template("./templates/greeting.txt"); +std::string result = env.render(temp, data); // "Hello world!" + +data["name"] = "Inja"; +std::string result = env.render(temp, data); // "Hello Inja!" + +// Or read the template file (and/or the json file) directly from the environment +result = env.render_file("./templates/greeting.txt", data); +result = env.render_file_with_json_file("./templates/greeting.txt", "./data.json"); + +// Or write a rendered template file +env.write(temp, data, "./result.txt"); +env.write_with_json_file("./templates/greeting.txt", "./data.json", "./result.txt"); +``` + +The environment class can be configured to your needs. +```.cpp +// With default settings +Environment env_default; + +// With global path to template files and where files will be saved +Environment env_1 {"../path/templates/"}; + +// With separate input and output path +Environment env_2 {"../path/templates/", "../path/results/"}; + +// With other opening and closing strings (here the defaults) +env.set_expression("{{", "}}"); // Expressions +env.set_comment("{#", "#}"); // Comments +env.set_statement("{%", "%}"); // Statements {% %} for many things, see below +env.set_line_statement("##"); // Line statements ## (just an opener) +``` + +### Variables + +Variables are rendered within the `{{ ... }}` expressions. +```.cpp +json data; +data["neighbour"] = "Peter"; +data["guests"] = {"Jeff", "Tom", "Patrick"}; +data["time"]["start"] = 16; +data["time"]["end"] = 22; + +// Indexing in array +render("{{ guests.1 }}", data); // "Tom" + +// Objects +render("{{ time.start }} to {{ time.end + 1 }}pm", data); // "16 to 23pm" +``` +If no variable is found, valid JSON is printed directly, otherwise an `inja::RenderError` is thrown. + +### Statements + +Statements can be written either with the `{% ... %}` syntax or the `##` syntax for entire lines. Note that `##` needs to start the line without indentation. The most important statements are loops, conditions and file includes. All statements can be nested. + +#### Loops + +```.cpp +// Combining loops and line statements +render(R"(Guest List: +## for guest in guests + {{ loop.index1 }}: {{ guest }} +## endfor )", data) + +/* Guest List: + 1: Jeff + 2: Tom + 3: Patrick */ +``` +In a loop, the special variables `loop.index (number)`, `loop.index1 (number)`, `loop.is_first (boolean)` and `loop.is_last (boolean)` are defined. In nested loops, the parent loop variables are available e.g. via `loop.parent.index`. You can also iterate over objects like `{% for key, value in time %}`. + +#### Conditions + +Conditions support the typical if, else if and else statements. Following conditions are for example possible: +```.cpp +// Standard comparisons with a variable +render("{% if time.hour >= 20 %}Serve{% else if time.hour >= 18 %}Make{% endif %} dinner.", data); // Serve dinner. + +// Variable in list +render("{% if neighbour in guests %}Turn up the music!{% endif %}", data); // Turn up the music! + +// Logical operations +render("{% if guest_count < (3+2) and all_tired %}Sleepy...{% else %}Keep going...{% endif %}", data); // Sleepy... + +// Negations +render("{% if not guest_count %}The End{% endif %}", data); // The End +``` + +#### Includes + +You can either include other in-memory templates or from the file system. 
+```.cpp +// To include in-memory templates, add them to the environment first +inja::Template content_template = env.parse("Hello {{ neighbour }}!"); +env.include_template("content", content_template); +env.render("Content: {% include \"content\" %}", data); // "Content: Hello Peter!" + +// Other template files are included relative from the current file location +render("{% include \"footer.html\" %}", data); +``` +If a corresponding template could not be found in the file system, the *include callback* is called: +```.cpp +// The callback takes the current path and the wanted include name and returns a template +env.set_include_callback([&env](const std::string& path, const std::string& template_name) { + return env.parse("Hello {{ neighbour }} from " + template_name); +}); + +// You can disable to search for templates in the file system via +env.set_search_included_templates_in_files(false); +``` + +Inja will throw an `inja::RenderError` if an included file is not found and no callback is specified. To disable this error, you can call `env.set_throw_at_missing_includes(false)`. + +#### Assignments + +Variables can also be defined within the template using the set statment. +```.cpp +render("{% set new_hour=23 %}{{ new_hour }}pm", data); // "23pm" +render("{% set time.start=18 %}{{ time.start }}pm", data); // using json pointers +``` + +Assignments only set the value within the rendering context; they do not modify the json object passed into the `render` call. + +### Functions + +A few functions are implemented within the inja template syntax. They can be called with +```.cpp +// Upper and lower function, for string cases +render("Hello {{ upper(neighbour) }}!", data); // "Hello PETER!" +render("Hello {{ lower(neighbour) }}!", data); // "Hello peter!" + +// Range function, useful for loops +render("{% for i in range(4) %}{{ loop.index1 }}{% endfor %}", data); // "1234" +render("{% for i in range(3) %}{{ at(guests, i) }} {% endfor %}", data); // "Jeff Tom Patrick " + +// Length function (please don't combine with range, use list directly...) +render("I count {{ length(guests) }} guests.", data); // "I count 3 guests." + +// Get first and last element in a list +render("{{ first(guests) }} was first.", data); // "Jeff was first." +render("{{ last(guests) }} was last.", data); // "Patir was last." + +// Sort a list +render("{{ sort([3,2,1]) }}", data); // "[1,2,3]" +render("{{ sort(guests) }}", data); // "[\"Jeff\", \"Patrick\", \"Tom\"]" + +// Join a list with a separator +render("{{ join([1,2,3], \" + \") }}", data); // "1 + 2 + 3" +render("{{ join(guests, \", \") }}", data); // "Jeff, Patrick, Tom" + +// Round numbers to a given precision +render("{{ round(3.1415, 0) }}", data); // 3 +render("{{ round(3.1415, 3) }}", data); // 3.142 + +// Check if a value is odd, even or divisible by a number +render("{{ odd(42) }}", data); // false +render("{{ even(42) }}", data); // true +render("{{ divisibleBy(42, 7) }}", data); // true + +// Maximum and minimum values from a list +render("{{ max([1, 2, 3]) }}", data); // 3 +render("{{ min([-2.4, -1.2, 4.5]) }}", data); // -2.4 + +// Convert strings to numbers +render("{{ int(\"2\") == 2 }}", data); // true +render("{{ float(\"1.8\") > 2 }}", data); // false + +// Set default values if variables are not defined +render("Hello {{ default(neighbour, \"my friend\") }}!", data); // "Hello Peter!" +render("Hello {{ default(colleague, \"my friend\") }}!", data); // "Hello my friend!" 
+ +// Access an objects value dynamically +render("{{ at(time, \"start\") }} to {{ time.end }}", data); // "16 to 22" + +// Check if a key exists in an object +render("{{ exists(\"guests\") }}", data); // "true" +render("{{ exists(\"city\") }}", data); // "false" +render("{{ existsIn(time, \"start\") }}", data); // "true" +render("{{ existsIn(time, neighbour) }}", data); // "false" + +// Check if a key is a specific type +render("{{ isString(neighbour) }}", data); // "true" +render("{{ isArray(guests) }}", data); // "true" +// Implemented type checks: isArray, isBoolean, isFloat, isInteger, isNumber, isObject, isString, +``` + +### Callbacks + +You can create your own and more complex functions with callbacks. These are implemented with `std::function`, so you can for example use C++ lambdas. Inja `Arguments` are a vector of json pointers. +```.cpp +Environment env; + +/* + * Callbacks are defined by its: + * - name, + * - (optional) number of arguments, + * - callback function. + */ +env.add_callback("double", 1, [](Arguments& args) { + int number = args.at(0)->get(); // Adapt the index and type of the argument + return 2 * number; +}); + +// You can then use a callback like a regular function +env.render("{{ double(16) }}", data); // "32" + +// Inja falls back to variadic callbacks if the number of expected arguments is omitted. +env.add_callback("argmax", [](Arguments& args) { + auto result = std::max_element(args.begin(), args.end(), [](const json* a, const json* b) { return *a < *b;}); + return std::distance(args.begin(), result); +}); +env.render("{{ argmax(4, 2, 6) }}", data); // "2" +env.render("{{ argmax(0, 2, 6, 8, 3) }}", data); // "3" + +// A callback without argument can be used like a dynamic variable: +std::string greet = "Hello"; +env.add_callback("double-greetings", 0, [greet](Arguments args) { + return greet + " " + greet + "!"; +}); +env.render("{{ double-greetings }}", data); // "Hello Hello!" +``` +You can also add a void callback without return variable, e.g. for debugging: +```.cpp +env.add_void_callback("log", 1, [greet](Arguments args) { + std::cout << "logging: " << args[0] << std::endl; +}); +env.render("{{ log(neighbour) }}", data); // Prints nothing to result, only to cout... +``` + +### Template Inheritance + +Template inheritance allows you to build a base *skeleton* template that contains all the common elements and defines blocks that child templates can override. Lets show an example: The base template +```.html + + + + {% block head %} + + {% block title %}{% endblock %} - My Webpage + {% endblock %} + + +
{% block content %}{% endblock %}
+
+
+```
+contains three `blocks` that child templates can fill in. The child template
+```.html
+{% extends "base.html" %}
+{% block title %}Index{% endblock %}
+{% block head %}
+  {{ super() }}
+  <style type="text/css">
+    .important { color: #336699; }
+  </style>
+{% endblock %}
+{% block content %}
+  <h1>Index</h1>
+  <p class="important">
+    Welcome to my blog!
+  </p>
+{% endblock %} +``` +calls a parent template with the `extends` keyword; it should be the first element in the template. It is possible to render the contents of the parent block by calling `super()`. In the case of multiple levels of `{% extends %}`, super references may be called with an argument (e.g. `super(2)`) to skip levels in the inheritance tree. + +### Whitespace Control + +In the default configuration, no whitespace is removed while rendering the file. To support a more readable template style, you can configure the environment to control whitespaces before and after a statement automatically. While enabling `set_trim_blocks` removes the first newline after a statement, `set_lstrip_blocks` strips tabs and spaces from the beginning of a line to the start of a block. + +```.cpp +Environment env; +env.set_trim_blocks(true); +env.set_lstrip_blocks(true); +``` + +With both `trim_blocks` and `lstrip_blocks` enabled, you can put statements on their own lines. Furthermore, you can also strip whitespaces for both statements and expressions by hand. If you add a minus sign (`-`) to the start or end, the whitespaces before or after that block will be removed: + +```.cpp +render("Hello {{- name -}} !", data); // "Hello Inja!" +render("{% if neighbour in guests -%} I was there{% endif -%} !", data); // Renders without any whitespaces +``` + +Stripping behind a statement or expression also removes any newlines. + +### Comments + +Comments can be written with the `{# ... #}` syntax. +```.cpp +render("Hello{# Todo #}!", data); // "Hello!" +``` + +### Exceptions + +Inja uses exceptions to handle ill-formed template input. However, exceptions can be switched off with either using the compiler flag `-fno-exceptions` or by defining the symbol `INJA_NOEXCEPTION`. In this case, exceptions are replaced by `abort()` calls. + + +## Supported compilers + +Inja uses the `string_view` feature of the C++17 STL. Currently, the following compilers are tested: + +- GCC 7 - 11 (and possibly later) +- Clang 5 - 12 (and possibly later) +- Microsoft Visual C++ 2017 15.0 - 2022 (and possibly later) + +A list of supported compiler / os versions can be found in the [CI definition](https://github.com/pantor/inja/blob/master/.github/workflows/ci.yml). diff --git a/ext/inja/inja.hpp b/ext/inja/inja.hpp new file mode 100644 index 000000000..5b469745f --- /dev/null +++ b/ext/inja/inja.hpp @@ -0,0 +1,2949 @@ +/* + ___ _ Version 3.3 + |_ _|_ __ (_) __ _ https://github.com/pantor/inja + | || '_ \ | |/ _` | Licensed under the MIT License . + | || | | || | (_| | + |___|_| |_|/ |\__,_| Copyright (c) 2018-2021 Lars Berscheid + |__/ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +#ifndef INCLUDE_INJA_INJA_HPP_ +#define INCLUDE_INJA_INJA_HPP_ + +#include + +namespace inja { +#ifndef INJA_DATA_TYPE +using json = nlohmann::json; +#else +using json = INJA_DATA_TYPE; +#endif +} // namespace inja + +#if (defined(__cpp_exceptions) || defined(__EXCEPTIONS) || defined(_CPPUNWIND)) && !defined(INJA_NOEXCEPTION) +#ifndef INJA_THROW +#define INJA_THROW(exception) throw exception +#endif +#else +#include +#ifndef INJA_THROW +#define INJA_THROW(exception) \ + std::abort(); \ + std::ignore = exception +#endif +#ifndef INJA_NOEXCEPTION +#define INJA_NOEXCEPTION +#endif +#endif + +// #include "environment.hpp" +#ifndef INCLUDE_INJA_ENVIRONMENT_HPP_ +#define INCLUDE_INJA_ENVIRONMENT_HPP_ + +#include +#include +#include +#include +#include +#include + +// #include "config.hpp" +#ifndef INCLUDE_INJA_CONFIG_HPP_ +#define INCLUDE_INJA_CONFIG_HPP_ + +#include +#include + +// #include "template.hpp" +#ifndef INCLUDE_INJA_TEMPLATE_HPP_ +#define INCLUDE_INJA_TEMPLATE_HPP_ + +#include +#include +#include +#include + +// #include "node.hpp" +#ifndef INCLUDE_INJA_NODE_HPP_ +#define INCLUDE_INJA_NODE_HPP_ + +#include +#include +#include + +// #include "function_storage.hpp" +#ifndef INCLUDE_INJA_FUNCTION_STORAGE_HPP_ +#define INCLUDE_INJA_FUNCTION_STORAGE_HPP_ + +#include +#include + +namespace inja { + +using Arguments = std::vector; +using CallbackFunction = std::function; +using VoidCallbackFunction = std::function; + +/*! + * \brief Class for builtin functions and user-defined callbacks. 
+ */ +class FunctionStorage { +public: + enum class Operation { + Not, + And, + Or, + In, + Equal, + NotEqual, + Greater, + GreaterEqual, + Less, + LessEqual, + Add, + Subtract, + Multiplication, + Division, + Power, + Modulo, + AtId, + At, + Default, + DivisibleBy, + Even, + Exists, + ExistsInObject, + First, + Float, + Int, + IsArray, + IsBoolean, + IsFloat, + IsInteger, + IsNumber, + IsObject, + IsString, + Last, + Length, + Lower, + Max, + Min, + Odd, + Range, + Round, + Sort, + Upper, + Super, + Join, + Callback, + ParenLeft, + ParenRight, + None, + }; + + struct FunctionData { + explicit FunctionData(const Operation& op, const CallbackFunction& cb = CallbackFunction {}): operation(op), callback(cb) {} + const Operation operation; + const CallbackFunction callback; + }; + +private: + const int VARIADIC {-1}; + + std::map, FunctionData> function_storage = { + {std::make_pair("at", 2), FunctionData {Operation::At}}, + {std::make_pair("default", 2), FunctionData {Operation::Default}}, + {std::make_pair("divisibleBy", 2), FunctionData {Operation::DivisibleBy}}, + {std::make_pair("even", 1), FunctionData {Operation::Even}}, + {std::make_pair("exists", 1), FunctionData {Operation::Exists}}, + {std::make_pair("existsIn", 2), FunctionData {Operation::ExistsInObject}}, + {std::make_pair("first", 1), FunctionData {Operation::First}}, + {std::make_pair("float", 1), FunctionData {Operation::Float}}, + {std::make_pair("int", 1), FunctionData {Operation::Int}}, + {std::make_pair("isArray", 1), FunctionData {Operation::IsArray}}, + {std::make_pair("isBoolean", 1), FunctionData {Operation::IsBoolean}}, + {std::make_pair("isFloat", 1), FunctionData {Operation::IsFloat}}, + {std::make_pair("isInteger", 1), FunctionData {Operation::IsInteger}}, + {std::make_pair("isNumber", 1), FunctionData {Operation::IsNumber}}, + {std::make_pair("isObject", 1), FunctionData {Operation::IsObject}}, + {std::make_pair("isString", 1), FunctionData {Operation::IsString}}, + {std::make_pair("last", 1), FunctionData {Operation::Last}}, + {std::make_pair("length", 1), FunctionData {Operation::Length}}, + {std::make_pair("lower", 1), FunctionData {Operation::Lower}}, + {std::make_pair("max", 1), FunctionData {Operation::Max}}, + {std::make_pair("min", 1), FunctionData {Operation::Min}}, + {std::make_pair("odd", 1), FunctionData {Operation::Odd}}, + {std::make_pair("range", 1), FunctionData {Operation::Range}}, + {std::make_pair("round", 2), FunctionData {Operation::Round}}, + {std::make_pair("sort", 1), FunctionData {Operation::Sort}}, + {std::make_pair("upper", 1), FunctionData {Operation::Upper}}, + {std::make_pair("super", 0), FunctionData {Operation::Super}}, + {std::make_pair("super", 1), FunctionData {Operation::Super}}, + {std::make_pair("join", 2), FunctionData {Operation::Join}}, + }; + +public: + void add_builtin(std::string_view name, int num_args, Operation op) { + function_storage.emplace(std::make_pair(static_cast(name), num_args), FunctionData {op}); + } + + void add_callback(std::string_view name, int num_args, const CallbackFunction& callback) { + function_storage.emplace(std::make_pair(static_cast(name), num_args), FunctionData {Operation::Callback, callback}); + } + + FunctionData find_function(std::string_view name, int num_args) const { + auto it = function_storage.find(std::make_pair(static_cast(name), num_args)); + if (it != function_storage.end()) { + return it->second; + + // Find variadic function + } else if (num_args > 0) { + it = function_storage.find(std::make_pair(static_cast(name), VARIADIC)); 
+ if (it != function_storage.end()) { + return it->second; + } + } + + return FunctionData {Operation::None}; + } +}; + +} // namespace inja + +#endif // INCLUDE_INJA_FUNCTION_STORAGE_HPP_ + +// #include "utils.hpp" +#ifndef INCLUDE_INJA_UTILS_HPP_ +#define INCLUDE_INJA_UTILS_HPP_ + +#include +#include +#include +#include +#include + +// #include "exceptions.hpp" +#ifndef INCLUDE_INJA_EXCEPTIONS_HPP_ +#define INCLUDE_INJA_EXCEPTIONS_HPP_ + +#include +#include + +namespace inja { + +struct SourceLocation { + size_t line; + size_t column; +}; + +struct InjaError : public std::runtime_error { + const std::string type; + const std::string message; + + const SourceLocation location; + + explicit InjaError(const std::string& type, const std::string& message) + : std::runtime_error("[inja.exception." + type + "] " + message), type(type), message(message), location({0, 0}) {} + + explicit InjaError(const std::string& type, const std::string& message, SourceLocation location) + : std::runtime_error("[inja.exception." + type + "] (at " + std::to_string(location.line) + ":" + std::to_string(location.column) + ") " + message), + type(type), message(message), location(location) {} +}; + +struct ParserError : public InjaError { + explicit ParserError(const std::string& message, SourceLocation location): InjaError("parser_error", message, location) {} +}; + +struct RenderError : public InjaError { + explicit RenderError(const std::string& message, SourceLocation location): InjaError("render_error", message, location) {} +}; + +struct FileError : public InjaError { + explicit FileError(const std::string& message): InjaError("file_error", message) {} + explicit FileError(const std::string& message, SourceLocation location): InjaError("file_error", message, location) {} +}; + +struct DataError : public InjaError { + explicit DataError(const std::string& message, SourceLocation location): InjaError("data_error", message, location) {} +}; + +} // namespace inja + +#endif // INCLUDE_INJA_EXCEPTIONS_HPP_ + + +namespace inja { + +namespace string_view { +inline std::string_view slice(std::string_view view, size_t start, size_t end) { + start = std::min(start, view.size()); + end = std::min(std::max(start, end), view.size()); + return view.substr(start, end - start); +} + +inline std::pair split(std::string_view view, char Separator) { + size_t idx = view.find(Separator); + if (idx == std::string_view::npos) { + return std::make_pair(view, std::string_view()); + } + return std::make_pair(slice(view, 0, idx), slice(view, idx + 1, std::string_view::npos)); +} + +inline bool starts_with(std::string_view view, std::string_view prefix) { + return (view.size() >= prefix.size() && view.compare(0, prefix.size(), prefix) == 0); +} +} // namespace string_view + +inline SourceLocation get_source_location(std::string_view content, size_t pos) { + // Get line and offset position (starts at 1:1) + auto sliced = string_view::slice(content, 0, pos); + std::size_t last_newline = sliced.rfind("\n"); + + if (last_newline == std::string_view::npos) { + return {1, sliced.length() + 1}; + } + + // Count newlines + size_t count_lines = 0; + size_t search_start = 0; + while (search_start <= sliced.size()) { + search_start = sliced.find("\n", search_start) + 1; + if (search_start == 0) { + break; + } + count_lines += 1; + } + + return {count_lines + 1, sliced.length() - last_newline}; +} + +inline void replace_substring(std::string& s, const std::string& f, const std::string& t) { + if (f.empty()) { + return; + } + for (auto pos = 
s.find(f); // find first occurrence of f + pos != std::string::npos; // make sure f was found + s.replace(pos, f.size(), t), // replace with t, and + pos = s.find(f, pos + t.size())) // find next occurrence of f + {} +} + +} // namespace inja + +#endif // INCLUDE_INJA_UTILS_HPP_ + + +namespace inja { + +class NodeVisitor; +class BlockNode; +class TextNode; +class ExpressionNode; +class LiteralNode; +class DataNode; +class FunctionNode; +class ExpressionListNode; +class StatementNode; +class ForStatementNode; +class ForArrayStatementNode; +class ForObjectStatementNode; +class IfStatementNode; +class IncludeStatementNode; +class ExtendsStatementNode; +class BlockStatementNode; +class SetStatementNode; + +class NodeVisitor { +public: + virtual ~NodeVisitor() = default; + + virtual void visit(const BlockNode& node) = 0; + virtual void visit(const TextNode& node) = 0; + virtual void visit(const ExpressionNode& node) = 0; + virtual void visit(const LiteralNode& node) = 0; + virtual void visit(const DataNode& node) = 0; + virtual void visit(const FunctionNode& node) = 0; + virtual void visit(const ExpressionListNode& node) = 0; + virtual void visit(const StatementNode& node) = 0; + virtual void visit(const ForStatementNode& node) = 0; + virtual void visit(const ForArrayStatementNode& node) = 0; + virtual void visit(const ForObjectStatementNode& node) = 0; + virtual void visit(const IfStatementNode& node) = 0; + virtual void visit(const IncludeStatementNode& node) = 0; + virtual void visit(const ExtendsStatementNode& node) = 0; + virtual void visit(const BlockStatementNode& node) = 0; + virtual void visit(const SetStatementNode& node) = 0; +}; + +/*! + * \brief Base node class for the abstract syntax tree (AST). + */ +class AstNode { +public: + virtual void accept(NodeVisitor& v) const = 0; + + size_t pos; + + AstNode(size_t pos): pos(pos) {} + virtual ~AstNode() {} +}; + +class BlockNode : public AstNode { +public: + std::vector> nodes; + + explicit BlockNode(): AstNode(0) {} + + void accept(NodeVisitor& v) const { + v.visit(*this); + } +}; + +class TextNode : public AstNode { +public: + const size_t length; + + explicit TextNode(size_t pos, size_t length): AstNode(pos), length(length) {} + + void accept(NodeVisitor& v) const { + v.visit(*this); + } +}; + +class ExpressionNode : public AstNode { +public: + explicit ExpressionNode(size_t pos): AstNode(pos) {} + + void accept(NodeVisitor& v) const { + v.visit(*this); + } +}; + +class LiteralNode : public ExpressionNode { +public: + const json value; + + explicit LiteralNode(std::string_view data_text, size_t pos): ExpressionNode(pos), value(json::parse(data_text)) {} + + void accept(NodeVisitor& v) const { + v.visit(*this); + } +}; + +class DataNode : public ExpressionNode { +public: + const std::string name; + const json::json_pointer ptr; + + static std::string convert_dot_to_ptr(std::string_view ptr_name) { + std::string result; + do { + std::string_view part; + std::tie(part, ptr_name) = string_view::split(ptr_name, '.'); + result.push_back('/'); + result.append(part.begin(), part.end()); + } while (!ptr_name.empty()); + return result; + } + + explicit DataNode(std::string_view ptr_name, size_t pos): ExpressionNode(pos), name(ptr_name), ptr(json::json_pointer(convert_dot_to_ptr(ptr_name))) {} + + void accept(NodeVisitor& v) const { + v.visit(*this); + } +}; + +class FunctionNode : public ExpressionNode { + using Op = FunctionStorage::Operation; + +public: + enum class Associativity { + Left, + Right, + }; + + unsigned int precedence; + 
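+  // Precedence and associativity drive the operator-precedence (shunting-yard style) loop in
+  // Parser::parse_expression below: operators already on the stack are reduced first when they
+  // bind tighter, or bind equally tight and the incoming operator is left-associative.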
Associativity associativity; + + Op operation; + + std::string name; + int number_args; // Should also be negative -> -1 for unknown number + std::vector> arguments; + CallbackFunction callback; + + explicit FunctionNode(std::string_view name, size_t pos) + : ExpressionNode(pos), precedence(8), associativity(Associativity::Left), operation(Op::Callback), name(name), number_args(1) {} + explicit FunctionNode(Op operation, size_t pos): ExpressionNode(pos), operation(operation), number_args(1) { + switch (operation) { + case Op::Not: { + number_args = 1; + precedence = 4; + associativity = Associativity::Left; + } break; + case Op::And: { + number_args = 2; + precedence = 1; + associativity = Associativity::Left; + } break; + case Op::Or: { + number_args = 2; + precedence = 1; + associativity = Associativity::Left; + } break; + case Op::In: { + number_args = 2; + precedence = 2; + associativity = Associativity::Left; + } break; + case Op::Equal: { + number_args = 2; + precedence = 2; + associativity = Associativity::Left; + } break; + case Op::NotEqual: { + number_args = 2; + precedence = 2; + associativity = Associativity::Left; + } break; + case Op::Greater: { + number_args = 2; + precedence = 2; + associativity = Associativity::Left; + } break; + case Op::GreaterEqual: { + number_args = 2; + precedence = 2; + associativity = Associativity::Left; + } break; + case Op::Less: { + number_args = 2; + precedence = 2; + associativity = Associativity::Left; + } break; + case Op::LessEqual: { + number_args = 2; + precedence = 2; + associativity = Associativity::Left; + } break; + case Op::Add: { + number_args = 2; + precedence = 3; + associativity = Associativity::Left; + } break; + case Op::Subtract: { + number_args = 2; + precedence = 3; + associativity = Associativity::Left; + } break; + case Op::Multiplication: { + number_args = 2; + precedence = 4; + associativity = Associativity::Left; + } break; + case Op::Division: { + number_args = 2; + precedence = 4; + associativity = Associativity::Left; + } break; + case Op::Power: { + number_args = 2; + precedence = 5; + associativity = Associativity::Right; + } break; + case Op::Modulo: { + number_args = 2; + precedence = 4; + associativity = Associativity::Left; + } break; + case Op::AtId: { + number_args = 2; + precedence = 8; + associativity = Associativity::Left; + } break; + default: { + precedence = 1; + associativity = Associativity::Left; + } + } + } + + void accept(NodeVisitor& v) const { + v.visit(*this); + } +}; + +class ExpressionListNode : public AstNode { +public: + std::shared_ptr root; + + explicit ExpressionListNode(): AstNode(0) {} + explicit ExpressionListNode(size_t pos): AstNode(pos) {} + + void accept(NodeVisitor& v) const { + v.visit(*this); + } +}; + +class StatementNode : public AstNode { +public: + StatementNode(size_t pos): AstNode(pos) {} + + virtual void accept(NodeVisitor& v) const = 0; +}; + +class ForStatementNode : public StatementNode { +public: + ExpressionListNode condition; + BlockNode body; + BlockNode* const parent; + + ForStatementNode(BlockNode* const parent, size_t pos): StatementNode(pos), parent(parent) {} + + virtual void accept(NodeVisitor& v) const = 0; +}; + +class ForArrayStatementNode : public ForStatementNode { +public: + const std::string value; + + explicit ForArrayStatementNode(const std::string& value, BlockNode* const parent, size_t pos): ForStatementNode(parent, pos), value(value) {} + + void accept(NodeVisitor& v) const { + v.visit(*this); + } +}; + +class ForObjectStatementNode : public 
ForStatementNode { +public: + const std::string key; + const std::string value; + + explicit ForObjectStatementNode(const std::string& key, const std::string& value, BlockNode* const parent, size_t pos) + : ForStatementNode(parent, pos), key(key), value(value) {} + + void accept(NodeVisitor& v) const { + v.visit(*this); + } +}; + +class IfStatementNode : public StatementNode { +public: + ExpressionListNode condition; + BlockNode true_statement; + BlockNode false_statement; + BlockNode* const parent; + + const bool is_nested; + bool has_false_statement {false}; + + explicit IfStatementNode(BlockNode* const parent, size_t pos): StatementNode(pos), parent(parent), is_nested(false) {} + explicit IfStatementNode(bool is_nested, BlockNode* const parent, size_t pos): StatementNode(pos), parent(parent), is_nested(is_nested) {} + + void accept(NodeVisitor& v) const { + v.visit(*this); + } +}; + +class IncludeStatementNode : public StatementNode { +public: + const std::string file; + + explicit IncludeStatementNode(const std::string& file, size_t pos): StatementNode(pos), file(file) {} + + void accept(NodeVisitor& v) const { + v.visit(*this); + } +}; + +class ExtendsStatementNode : public StatementNode { +public: + const std::string file; + + explicit ExtendsStatementNode(const std::string& file, size_t pos): StatementNode(pos), file(file) {} + + void accept(NodeVisitor& v) const { + v.visit(*this); + }; +}; + +class BlockStatementNode : public StatementNode { +public: + const std::string name; + BlockNode block; + BlockNode* const parent; + + explicit BlockStatementNode(BlockNode* const parent, const std::string& name, size_t pos): StatementNode(pos), name(name), parent(parent) {} + + void accept(NodeVisitor& v) const { + v.visit(*this); + }; +}; + +class SetStatementNode : public StatementNode { +public: + const std::string key; + ExpressionListNode expression; + + explicit SetStatementNode(const std::string& key, size_t pos): StatementNode(pos), key(key) {} + + void accept(NodeVisitor& v) const { + v.visit(*this); + } +}; + +} // namespace inja + +#endif // INCLUDE_INJA_NODE_HPP_ + +// #include "statistics.hpp" +#ifndef INCLUDE_INJA_STATISTICS_HPP_ +#define INCLUDE_INJA_STATISTICS_HPP_ + +// #include "node.hpp" + + +namespace inja { + +/*! + * \brief A class for counting statistics on a Template. 
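+ * Implements NodeVisitor and walks the AST, incrementing variable_counter for every DataNode;
+ * used by Template::count_variables() below.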
+ */ +class StatisticsVisitor : public NodeVisitor { + void visit(const BlockNode& node) { + for (auto& n : node.nodes) { + n->accept(*this); + } + } + + void visit(const TextNode&) {} + void visit(const ExpressionNode&) {} + void visit(const LiteralNode&) {} + + void visit(const DataNode&) { + variable_counter += 1; + } + + void visit(const FunctionNode& node) { + for (auto& n : node.arguments) { + n->accept(*this); + } + } + + void visit(const ExpressionListNode& node) { + node.root->accept(*this); + } + + void visit(const StatementNode&) {} + void visit(const ForStatementNode&) {} + + void visit(const ForArrayStatementNode& node) { + node.condition.accept(*this); + node.body.accept(*this); + } + + void visit(const ForObjectStatementNode& node) { + node.condition.accept(*this); + node.body.accept(*this); + } + + void visit(const IfStatementNode& node) { + node.condition.accept(*this); + node.true_statement.accept(*this); + node.false_statement.accept(*this); + } + + void visit(const IncludeStatementNode&) {} + + void visit(const ExtendsStatementNode&) {} + + void visit(const BlockStatementNode& node) { + node.block.accept(*this); + } + + void visit(const SetStatementNode&) {} + +public: + unsigned int variable_counter; + + explicit StatisticsVisitor(): variable_counter(0) {} +}; + +} // namespace inja + +#endif // INCLUDE_INJA_STATISTICS_HPP_ + + +namespace inja { + +/*! + * \brief The main inja Template. + */ +struct Template { + BlockNode root; + std::string content; + std::map> block_storage; + + explicit Template() {} + explicit Template(const std::string& content): content(content) {} + + /// Return number of variables (total number, not distinct ones) in the template + int count_variables() { + auto statistic_visitor = StatisticsVisitor(); + root.accept(statistic_visitor); + return statistic_visitor.variable_counter; + } +}; + +using TemplateStorage = std::map; + +} // namespace inja + +#endif // INCLUDE_INJA_TEMPLATE_HPP_ + + +namespace inja { + +/*! + * \brief Class for lexer configuration. 
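+ * Holds the delimiter strings the lexer recognizes. The defaults follow the usual Jinja-style
+ * markers: {{ ... }} expressions, {% ... %} statements, {# ... #} comments and ## line statements,
+ * with the -/+ variants controlling whitespace stripping around a block.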
+ */ +struct LexerConfig { + std::string statement_open {"{%"}; + std::string statement_open_no_lstrip {"{%+"}; + std::string statement_open_force_lstrip {"{%-"}; + std::string statement_close {"%}"}; + std::string statement_close_force_rstrip {"-%}"}; + std::string line_statement {"##"}; + std::string expression_open {"{{"}; + std::string expression_open_force_lstrip {"{{-"}; + std::string expression_close {"}}"}; + std::string expression_close_force_rstrip {"-}}"}; + std::string comment_open {"{#"}; + std::string comment_open_force_lstrip {"{#-"}; + std::string comment_close {"#}"}; + std::string comment_close_force_rstrip {"-#}"}; + std::string open_chars {"#{"}; + + bool trim_blocks {false}; + bool lstrip_blocks {false}; + + void update_open_chars() { + open_chars = ""; + if (open_chars.find(line_statement[0]) == std::string::npos) { + open_chars += line_statement[0]; + } + if (open_chars.find(statement_open[0]) == std::string::npos) { + open_chars += statement_open[0]; + } + if (open_chars.find(statement_open_no_lstrip[0]) == std::string::npos) { + open_chars += statement_open_no_lstrip[0]; + } + if (open_chars.find(statement_open_force_lstrip[0]) == std::string::npos) { + open_chars += statement_open_force_lstrip[0]; + } + if (open_chars.find(expression_open[0]) == std::string::npos) { + open_chars += expression_open[0]; + } + if (open_chars.find(expression_open_force_lstrip[0]) == std::string::npos) { + open_chars += expression_open_force_lstrip[0]; + } + if (open_chars.find(comment_open[0]) == std::string::npos) { + open_chars += comment_open[0]; + } + if (open_chars.find(comment_open_force_lstrip[0]) == std::string::npos) { + open_chars += comment_open_force_lstrip[0]; + } + } +}; + +/*! + * \brief Class for parser configuration. + */ +struct ParserConfig { + bool search_included_templates_in_files {true}; + + std::function include_callback; +}; + +/*! + * \brief Class for render configuration. + */ +struct RenderConfig { + bool throw_at_missing_includes {true}; +}; + +} // namespace inja + +#endif // INCLUDE_INJA_CONFIG_HPP_ + +// #include "function_storage.hpp" + +// #include "parser.hpp" +#ifndef INCLUDE_INJA_PARSER_HPP_ +#define INCLUDE_INJA_PARSER_HPP_ + +#include +#include +#include +#include +#include + +// #include "config.hpp" + +// #include "exceptions.hpp" + +// #include "function_storage.hpp" + +// #include "lexer.hpp" +#ifndef INCLUDE_INJA_LEXER_HPP_ +#define INCLUDE_INJA_LEXER_HPP_ + +#include +#include + +// #include "config.hpp" + +// #include "token.hpp" +#ifndef INCLUDE_INJA_TOKEN_HPP_ +#define INCLUDE_INJA_TOKEN_HPP_ + +#include +#include + +namespace inja { + +/*! + * \brief Helper-class for the inja Lexer. + */ +struct Token { + enum class Kind { + Text, + ExpressionOpen, // {{ + ExpressionClose, // }} + LineStatementOpen, // ## + LineStatementClose, // \n + StatementOpen, // {% + StatementClose, // %} + CommentOpen, // {# + CommentClose, // #} + Id, // this, this.foo + Number, // 1, 2, -1, 5.2, -5.3 + String, // "this" + Plus, // + + Minus, // - + Times, // * + Slash, // / + Percent, // % + Power, // ^ + Comma, // , + Dot, // . 
+ Colon, // : + LeftParen, // ( + RightParen, // ) + LeftBracket, // [ + RightBracket, // ] + LeftBrace, // { + RightBrace, // } + Equal, // == + NotEqual, // != + GreaterThan, // > + GreaterEqual, // >= + LessThan, // < + LessEqual, // <= + Unknown, + Eof, + }; + + Kind kind {Kind::Unknown}; + std::string_view text; + + explicit constexpr Token() = default; + explicit constexpr Token(Kind kind, std::string_view text): kind(kind), text(text) {} + + std::string describe() const { + switch (kind) { + case Kind::Text: + return ""; + case Kind::LineStatementClose: + return ""; + case Kind::Eof: + return ""; + default: + return static_cast(text); + } + } +}; + +} // namespace inja + +#endif // INCLUDE_INJA_TOKEN_HPP_ + +// #include "utils.hpp" + + +namespace inja { + +/*! + * \brief Class for lexing an inja Template. + */ +class Lexer { + enum class State { + Text, + ExpressionStart, + ExpressionStartForceLstrip, + ExpressionBody, + LineStart, + LineBody, + StatementStart, + StatementStartNoLstrip, + StatementStartForceLstrip, + StatementBody, + CommentStart, + CommentStartForceLstrip, + CommentBody, + }; + + enum class MinusState { + Operator, + Number, + }; + + const LexerConfig& config; + + State state; + MinusState minus_state; + std::string_view m_in; + size_t tok_start; + size_t pos; + + Token scan_body(std::string_view close, Token::Kind closeKind, std::string_view close_trim = std::string_view(), bool trim = false) { + again: + // skip whitespace (except for \n as it might be a close) + if (tok_start >= m_in.size()) { + return make_token(Token::Kind::Eof); + } + const char ch = m_in[tok_start]; + if (ch == ' ' || ch == '\t' || ch == '\r') { + tok_start += 1; + goto again; + } + + // check for close + if (!close_trim.empty() && inja::string_view::starts_with(m_in.substr(tok_start), close_trim)) { + state = State::Text; + pos = tok_start + close_trim.size(); + const Token tok = make_token(closeKind); + skip_whitespaces_and_newlines(); + return tok; + } + + if (inja::string_view::starts_with(m_in.substr(tok_start), close)) { + state = State::Text; + pos = tok_start + close.size(); + const Token tok = make_token(closeKind); + if (trim) { + skip_whitespaces_and_first_newline(); + } + return tok; + } + + // skip \n + if (ch == '\n') { + tok_start += 1; + goto again; + } + + pos = tok_start + 1; + if (std::isalpha(ch)) { + minus_state = MinusState::Operator; + return scan_id(); + } + + const MinusState current_minus_state = minus_state; + if (minus_state == MinusState::Operator) { + minus_state = MinusState::Number; + } + + switch (ch) { + case '+': + return make_token(Token::Kind::Plus); + case '-': + if (current_minus_state == MinusState::Operator) { + return make_token(Token::Kind::Minus); + } + return scan_number(); + case '*': + return make_token(Token::Kind::Times); + case '/': + return make_token(Token::Kind::Slash); + case '^': + return make_token(Token::Kind::Power); + case '%': + return make_token(Token::Kind::Percent); + case '.': + return make_token(Token::Kind::Dot); + case ',': + return make_token(Token::Kind::Comma); + case ':': + return make_token(Token::Kind::Colon); + case '(': + return make_token(Token::Kind::LeftParen); + case ')': + minus_state = MinusState::Operator; + return make_token(Token::Kind::RightParen); + case '[': + return make_token(Token::Kind::LeftBracket); + case ']': + minus_state = MinusState::Operator; + return make_token(Token::Kind::RightBracket); + case '{': + return make_token(Token::Kind::LeftBrace); + case '}': + minus_state = MinusState::Operator; 
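+      // MinusState tracks how a following '-' should be lexed: after an identifier, number or
+      // closing bracket/paren/brace it is the binary minus operator; otherwise it starts a
+      // (negative) number literal. Closing delimiters therefore reset the state to Operator.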
+ return make_token(Token::Kind::RightBrace); + case '>': + if (pos < m_in.size() && m_in[pos] == '=') { + pos += 1; + return make_token(Token::Kind::GreaterEqual); + } + return make_token(Token::Kind::GreaterThan); + case '<': + if (pos < m_in.size() && m_in[pos] == '=') { + pos += 1; + return make_token(Token::Kind::LessEqual); + } + return make_token(Token::Kind::LessThan); + case '=': + if (pos < m_in.size() && m_in[pos] == '=') { + pos += 1; + return make_token(Token::Kind::Equal); + } + return make_token(Token::Kind::Unknown); + case '!': + if (pos < m_in.size() && m_in[pos] == '=') { + pos += 1; + return make_token(Token::Kind::NotEqual); + } + return make_token(Token::Kind::Unknown); + case '\"': + return scan_string(); + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + minus_state = MinusState::Operator; + return scan_number(); + case '_': + case '@': + case '$': + minus_state = MinusState::Operator; + return scan_id(); + default: + return make_token(Token::Kind::Unknown); + } + } + + Token scan_id() { + for (;;) { + if (pos >= m_in.size()) { + break; + } + const char ch = m_in[pos]; + if (!std::isalnum(ch) && ch != '.' && ch != '/' && ch != '_' && ch != '-') { + break; + } + pos += 1; + } + return make_token(Token::Kind::Id); + } + + Token scan_number() { + for (;;) { + if (pos >= m_in.size()) { + break; + } + const char ch = m_in[pos]; + // be very permissive in lexer (we'll catch errors when conversion happens) + if (!(std::isdigit(ch) || ch == '.' || ch == 'e' || ch == 'E' || (ch == '+' && (pos == 0 || m_in[pos-1] == 'e' || m_in[pos-1] == 'E')) || (ch == '-' && (pos == 0 || m_in[pos-1] == 'e' || m_in[pos-1] == 'E')))) { + break; + } + pos += 1; + } + return make_token(Token::Kind::Number); + } + + Token scan_string() { + bool escape {false}; + for (;;) { + if (pos >= m_in.size()) { + break; + } + const char ch = m_in[pos++]; + if (ch == '\\') { + escape = true; + } else if (!escape && ch == m_in[tok_start]) { + break; + } else { + escape = false; + } + } + return make_token(Token::Kind::String); + } + + Token make_token(Token::Kind kind) const { + return Token(kind, string_view::slice(m_in, tok_start, pos)); + } + + void skip_whitespaces_and_newlines() { + if (pos < m_in.size()) { + while (pos < m_in.size() && (m_in[pos] == ' ' || m_in[pos] == '\t' || m_in[pos] == '\n' || m_in[pos] == '\r')) { + pos += 1; + } + } + } + + void skip_whitespaces_and_first_newline() { + if (pos < m_in.size()) { + while (pos < m_in.size() && (m_in[pos] == ' ' || m_in[pos] == '\t')) { + pos += 1; + } + } + + if (pos < m_in.size()) { + const char ch = m_in[pos]; + if (ch == '\n') { + pos += 1; + } else if (ch == '\r') { + pos += 1; + if (pos < m_in.size() && m_in[pos] == '\n') { + pos += 1; + } + } + } + } + + static std::string_view clear_final_line_if_whitespace(std::string_view text) { + std::string_view result = text; + while (!result.empty()) { + const char ch = result.back(); + if (ch == ' ' || ch == '\t') { + result.remove_suffix(1); + } else if (ch == '\n' || ch == '\r') { + break; + } else { + return text; + } + } + return result; + } + +public: + explicit Lexer(const LexerConfig& config): config(config), state(State::Text), minus_state(MinusState::Number) {} + + SourceLocation current_position() const { + return get_source_location(m_in, tok_start); + } + + void start(std::string_view input) { + m_in = input; + tok_start = 0; + pos = 0; + state = State::Text; + minus_state = MinusState::Number; + + // Consume byte order 
mark (BOM) for UTF-8 + if (inja::string_view::starts_with(m_in, "\xEF\xBB\xBF")) { + m_in = m_in.substr(3); + } + } + + Token scan() { + tok_start = pos; + + again: + if (tok_start >= m_in.size()) { + return make_token(Token::Kind::Eof); + } + + switch (state) { + default: + case State::Text: { + // fast-scan to first open character + const size_t open_start = m_in.substr(pos).find_first_of(config.open_chars); + if (open_start == std::string_view::npos) { + // didn't find open, return remaining text as text token + pos = m_in.size(); + return make_token(Token::Kind::Text); + } + pos += open_start; + + // try to match one of the opening sequences, and get the close + std::string_view open_str = m_in.substr(pos); + bool must_lstrip = false; + if (inja::string_view::starts_with(open_str, config.expression_open)) { + if (inja::string_view::starts_with(open_str, config.expression_open_force_lstrip)) { + state = State::ExpressionStartForceLstrip; + must_lstrip = true; + } else { + state = State::ExpressionStart; + } + } else if (inja::string_view::starts_with(open_str, config.statement_open)) { + if (inja::string_view::starts_with(open_str, config.statement_open_no_lstrip)) { + state = State::StatementStartNoLstrip; + } else if (inja::string_view::starts_with(open_str, config.statement_open_force_lstrip)) { + state = State::StatementStartForceLstrip; + must_lstrip = true; + } else { + state = State::StatementStart; + must_lstrip = config.lstrip_blocks; + } + } else if (inja::string_view::starts_with(open_str, config.comment_open)) { + if (inja::string_view::starts_with(open_str, config.comment_open_force_lstrip)) { + state = State::CommentStartForceLstrip; + must_lstrip = true; + } else { + state = State::CommentStart; + must_lstrip = config.lstrip_blocks; + } + } else if ((pos == 0 || m_in[pos - 1] == '\n') && inja::string_view::starts_with(open_str, config.line_statement)) { + state = State::LineStart; + } else { + pos += 1; // wasn't actually an opening sequence + goto again; + } + + std::string_view text = string_view::slice(m_in, tok_start, pos); + if (must_lstrip) { + text = clear_final_line_if_whitespace(text); + } + + if (text.empty()) { + goto again; // don't generate empty token + } + return Token(Token::Kind::Text, text); + } + case State::ExpressionStart: { + state = State::ExpressionBody; + pos += config.expression_open.size(); + return make_token(Token::Kind::ExpressionOpen); + } + case State::ExpressionStartForceLstrip: { + state = State::ExpressionBody; + pos += config.expression_open_force_lstrip.size(); + return make_token(Token::Kind::ExpressionOpen); + } + case State::LineStart: { + state = State::LineBody; + pos += config.line_statement.size(); + return make_token(Token::Kind::LineStatementOpen); + } + case State::StatementStart: { + state = State::StatementBody; + pos += config.statement_open.size(); + return make_token(Token::Kind::StatementOpen); + } + case State::StatementStartNoLstrip: { + state = State::StatementBody; + pos += config.statement_open_no_lstrip.size(); + return make_token(Token::Kind::StatementOpen); + } + case State::StatementStartForceLstrip: { + state = State::StatementBody; + pos += config.statement_open_force_lstrip.size(); + return make_token(Token::Kind::StatementOpen); + } + case State::CommentStart: { + state = State::CommentBody; + pos += config.comment_open.size(); + return make_token(Token::Kind::CommentOpen); + } + case State::CommentStartForceLstrip: { + state = State::CommentBody; + pos += config.comment_open_force_lstrip.size(); + return 
make_token(Token::Kind::CommentOpen); + } + case State::ExpressionBody: + return scan_body(config.expression_close, Token::Kind::ExpressionClose, config.expression_close_force_rstrip); + case State::LineBody: + return scan_body("\n", Token::Kind::LineStatementClose); + case State::StatementBody: + return scan_body(config.statement_close, Token::Kind::StatementClose, config.statement_close_force_rstrip, config.trim_blocks); + case State::CommentBody: { + // fast-scan to comment close + const size_t end = m_in.substr(pos).find(config.comment_close); + if (end == std::string_view::npos) { + pos = m_in.size(); + return make_token(Token::Kind::Eof); + } + + // Check for trim pattern + const bool must_rstrip = inja::string_view::starts_with(m_in.substr(pos + end - 1), config.comment_close_force_rstrip); + + // return the entire comment in the close token + state = State::Text; + pos += end + config.comment_close.size(); + Token tok = make_token(Token::Kind::CommentClose); + + if (must_rstrip || config.trim_blocks) { + skip_whitespaces_and_first_newline(); + } + return tok; + } + } + } + + const LexerConfig& get_config() const { + return config; + } +}; + +} // namespace inja + +#endif // INCLUDE_INJA_LEXER_HPP_ + +// #include "node.hpp" + +// #include "template.hpp" + +// #include "token.hpp" + +// #include "utils.hpp" + + +namespace inja { + +/*! + * \brief Class for parsing an inja Template. + */ +class Parser { + const ParserConfig& config; + + Lexer lexer; + TemplateStorage& template_storage; + const FunctionStorage& function_storage; + + Token tok, peek_tok; + bool have_peek_tok {false}; + + size_t current_paren_level {0}; + size_t current_bracket_level {0}; + size_t current_brace_level {0}; + + std::string_view literal_start; + + BlockNode* current_block {nullptr}; + ExpressionListNode* current_expression_list {nullptr}; + std::stack> function_stack; + std::vector> arguments; + + std::stack> operator_stack; + std::stack if_statement_stack; + std::stack for_statement_stack; + std::stack block_statement_stack; + + inline void throw_parser_error(const std::string& message) const { + INJA_THROW(ParserError(message, lexer.current_position())); + } + + inline void get_next_token() { + if (have_peek_tok) { + tok = peek_tok; + have_peek_tok = false; + } else { + tok = lexer.scan(); + } + } + + inline void get_peek_token() { + if (!have_peek_tok) { + peek_tok = lexer.scan(); + have_peek_tok = true; + } + } + + inline void add_literal(const char* content_ptr) { + std::string_view data_text(literal_start.data(), tok.text.data() - literal_start.data() + tok.text.size()); + arguments.emplace_back(std::make_shared(data_text, data_text.data() - content_ptr)); + } + + inline void add_operator() { + auto function = operator_stack.top(); + operator_stack.pop(); + + for (int i = 0; i < function->number_args; ++i) { + function->arguments.insert(function->arguments.begin(), arguments.back()); + arguments.pop_back(); + } + arguments.emplace_back(function); + } + + void add_to_template_storage(std::string_view path, std::string& template_name) { + if (template_storage.find(template_name) != template_storage.end()) { + return; + } + + std::string original_path = static_cast(path); + std::string original_name = template_name; + + if (config.search_included_templates_in_files) { + // Build the relative path + template_name = original_path + original_name; + if (template_name.compare(0, 2, "./") == 0) { + template_name.erase(0, 2); + } + + if (template_storage.find(template_name) == template_storage.end()) { + // 
Load file + std::ifstream file; + file.open(template_name); + if (!file.fail()) { + std::string text((std::istreambuf_iterator(file)), std::istreambuf_iterator()); + + auto include_template = Template(text); + template_storage.emplace(template_name, include_template); + parse_into_template(template_storage[template_name], template_name); + return; + } else if (!config.include_callback) { + INJA_THROW(FileError("failed accessing file at '" + template_name + "'")); + } + } + } + + // Try include callback + if (config.include_callback) { + auto include_template = config.include_callback(original_path, original_name); + template_storage.emplace(template_name, include_template); + } + } + + std::string parse_filename(const Token& tok) const { + if (tok.kind != Token::Kind::String) { + throw_parser_error("expected string, got '" + tok.describe() + "'"); + } + + if (tok.text.length() < 2) { + throw_parser_error("expected filename, got '" + static_cast(tok.text) + "'"); + } + + // Remove first and last character "" + return std::string {tok.text.substr(1, tok.text.length() - 2)}; + } + + bool parse_expression(Template& tmpl, Token::Kind closing) { + while (tok.kind != closing && tok.kind != Token::Kind::Eof) { + // Literals + switch (tok.kind) { + case Token::Kind::String: { + if (current_brace_level == 0 && current_bracket_level == 0) { + literal_start = tok.text; + add_literal(tmpl.content.c_str()); + } + } break; + case Token::Kind::Number: { + if (current_brace_level == 0 && current_bracket_level == 0) { + literal_start = tok.text; + add_literal(tmpl.content.c_str()); + } + } break; + case Token::Kind::LeftBracket: { + if (current_brace_level == 0 && current_bracket_level == 0) { + literal_start = tok.text; + } + current_bracket_level += 1; + } break; + case Token::Kind::LeftBrace: { + if (current_brace_level == 0 && current_bracket_level == 0) { + literal_start = tok.text; + } + current_brace_level += 1; + } break; + case Token::Kind::RightBracket: { + if (current_bracket_level == 0) { + throw_parser_error("unexpected ']'"); + } + + current_bracket_level -= 1; + if (current_brace_level == 0 && current_bracket_level == 0) { + add_literal(tmpl.content.c_str()); + } + } break; + case Token::Kind::RightBrace: { + if (current_brace_level == 0) { + throw_parser_error("unexpected '}'"); + } + + current_brace_level -= 1; + if (current_brace_level == 0 && current_bracket_level == 0) { + add_literal(tmpl.content.c_str()); + } + } break; + case Token::Kind::Id: { + get_peek_token(); + + // Data Literal + if (tok.text == static_cast("true") || tok.text == static_cast("false") || + tok.text == static_cast("null")) { + if (current_brace_level == 0 && current_bracket_level == 0) { + literal_start = tok.text; + add_literal(tmpl.content.c_str()); + } + + // Operator + } else if (tok.text == "and" || tok.text == "or" || tok.text == "in" || tok.text == "not") { + goto parse_operator; + + // Functions + } else if (peek_tok.kind == Token::Kind::LeftParen) { + operator_stack.emplace(std::make_shared(static_cast(tok.text), tok.text.data() - tmpl.content.c_str())); + function_stack.emplace(operator_stack.top().get(), current_paren_level); + + // Variables + } else { + arguments.emplace_back(std::make_shared(static_cast(tok.text), tok.text.data() - tmpl.content.c_str())); + } + + // Operators + } break; + case Token::Kind::Equal: + case Token::Kind::NotEqual: + case Token::Kind::GreaterThan: + case Token::Kind::GreaterEqual: + case Token::Kind::LessThan: + case Token::Kind::LessEqual: + case Token::Kind::Plus: + case 
Token::Kind::Minus: + case Token::Kind::Times: + case Token::Kind::Slash: + case Token::Kind::Power: + case Token::Kind::Percent: + case Token::Kind::Dot: { + + parse_operator: + FunctionStorage::Operation operation; + switch (tok.kind) { + case Token::Kind::Id: { + if (tok.text == "and") { + operation = FunctionStorage::Operation::And; + } else if (tok.text == "or") { + operation = FunctionStorage::Operation::Or; + } else if (tok.text == "in") { + operation = FunctionStorage::Operation::In; + } else if (tok.text == "not") { + operation = FunctionStorage::Operation::Not; + } else { + throw_parser_error("unknown operator in parser."); + } + } break; + case Token::Kind::Equal: { + operation = FunctionStorage::Operation::Equal; + } break; + case Token::Kind::NotEqual: { + operation = FunctionStorage::Operation::NotEqual; + } break; + case Token::Kind::GreaterThan: { + operation = FunctionStorage::Operation::Greater; + } break; + case Token::Kind::GreaterEqual: { + operation = FunctionStorage::Operation::GreaterEqual; + } break; + case Token::Kind::LessThan: { + operation = FunctionStorage::Operation::Less; + } break; + case Token::Kind::LessEqual: { + operation = FunctionStorage::Operation::LessEqual; + } break; + case Token::Kind::Plus: { + operation = FunctionStorage::Operation::Add; + } break; + case Token::Kind::Minus: { + operation = FunctionStorage::Operation::Subtract; + } break; + case Token::Kind::Times: { + operation = FunctionStorage::Operation::Multiplication; + } break; + case Token::Kind::Slash: { + operation = FunctionStorage::Operation::Division; + } break; + case Token::Kind::Power: { + operation = FunctionStorage::Operation::Power; + } break; + case Token::Kind::Percent: { + operation = FunctionStorage::Operation::Modulo; + } break; + case Token::Kind::Dot: { + operation = FunctionStorage::Operation::AtId; + } break; + default: { + throw_parser_error("unknown operator in parser."); + } + } + auto function_node = std::make_shared(operation, tok.text.data() - tmpl.content.c_str()); + + while (!operator_stack.empty() && + ((operator_stack.top()->precedence > function_node->precedence) || + (operator_stack.top()->precedence == function_node->precedence && function_node->associativity == FunctionNode::Associativity::Left)) && + (operator_stack.top()->operation != FunctionStorage::Operation::ParenLeft)) { + add_operator(); + } + + operator_stack.emplace(function_node); + } break; + case Token::Kind::Comma: { + if (current_brace_level == 0 && current_bracket_level == 0) { + if (function_stack.empty()) { + throw_parser_error("unexpected ','"); + } + + function_stack.top().first->number_args += 1; + } + } break; + case Token::Kind::Colon: { + if (current_brace_level == 0 && current_bracket_level == 0) { + throw_parser_error("unexpected ':'"); + } + } break; + case Token::Kind::LeftParen: { + current_paren_level += 1; + operator_stack.emplace(std::make_shared(FunctionStorage::Operation::ParenLeft, tok.text.data() - tmpl.content.c_str())); + + get_peek_token(); + if (peek_tok.kind == Token::Kind::RightParen) { + if (!function_stack.empty() && function_stack.top().second == current_paren_level - 1) { + function_stack.top().first->number_args = 0; + } + } + } break; + case Token::Kind::RightParen: { + current_paren_level -= 1; + while (!operator_stack.empty() && operator_stack.top()->operation != FunctionStorage::Operation::ParenLeft) { + add_operator(); + } + + if (!operator_stack.empty() && operator_stack.top()->operation == FunctionStorage::Operation::ParenLeft) { + 
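+          // Discard the ParenLeft marker; if this ')' closes a call, the matching entry on
+          // function_stack is resolved against FunctionStorage::find_function just below.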
operator_stack.pop(); + } + + if (!function_stack.empty() && function_stack.top().second == current_paren_level) { + auto func = function_stack.top().first; + auto function_data = function_storage.find_function(func->name, func->number_args); + if (function_data.operation == FunctionStorage::Operation::None) { + throw_parser_error("unknown function " + func->name); + } + func->operation = function_data.operation; + if (function_data.operation == FunctionStorage::Operation::Callback) { + func->callback = function_data.callback; + } + + if (operator_stack.empty()) { + throw_parser_error("internal error at function " + func->name); + } + + add_operator(); + function_stack.pop(); + } + } + default: + break; + } + + get_next_token(); + } + + while (!operator_stack.empty()) { + add_operator(); + } + + if (arguments.size() == 1) { + current_expression_list->root = arguments[0]; + arguments = {}; + } else if (arguments.size() > 1) { + throw_parser_error("malformed expression"); + } + + return true; + } + + bool parse_statement(Template& tmpl, Token::Kind closing, std::string_view path) { + if (tok.kind != Token::Kind::Id) { + return false; + } + + if (tok.text == static_cast("if")) { + get_next_token(); + + auto if_statement_node = std::make_shared(current_block, tok.text.data() - tmpl.content.c_str()); + current_block->nodes.emplace_back(if_statement_node); + if_statement_stack.emplace(if_statement_node.get()); + current_block = &if_statement_node->true_statement; + current_expression_list = &if_statement_node->condition; + + if (!parse_expression(tmpl, closing)) { + return false; + } + } else if (tok.text == static_cast("else")) { + if (if_statement_stack.empty()) { + throw_parser_error("else without matching if"); + } + auto& if_statement_data = if_statement_stack.top(); + get_next_token(); + + if_statement_data->has_false_statement = true; + current_block = &if_statement_data->false_statement; + + // Chained else if + if (tok.kind == Token::Kind::Id && tok.text == static_cast("if")) { + get_next_token(); + + auto if_statement_node = std::make_shared(true, current_block, tok.text.data() - tmpl.content.c_str()); + current_block->nodes.emplace_back(if_statement_node); + if_statement_stack.emplace(if_statement_node.get()); + current_block = &if_statement_node->true_statement; + current_expression_list = &if_statement_node->condition; + + if (!parse_expression(tmpl, closing)) { + return false; + } + } + } else if (tok.text == static_cast("endif")) { + if (if_statement_stack.empty()) { + throw_parser_error("endif without matching if"); + } + + // Nested if statements + while (if_statement_stack.top()->is_nested) { + if_statement_stack.pop(); + } + + auto& if_statement_data = if_statement_stack.top(); + get_next_token(); + + current_block = if_statement_data->parent; + if_statement_stack.pop(); + } else if (tok.text == static_cast("block")) { + get_next_token(); + + if (tok.kind != Token::Kind::Id) { + throw_parser_error("expected block name, got '" + tok.describe() + "'"); + } + + const std::string block_name = static_cast(tok.text); + + auto block_statement_node = std::make_shared(current_block, block_name, tok.text.data() - tmpl.content.c_str()); + current_block->nodes.emplace_back(block_statement_node); + block_statement_stack.emplace(block_statement_node.get()); + current_block = &block_statement_node->block; + auto success = tmpl.block_storage.emplace(block_name, block_statement_node); + if (!success.second) { + throw_parser_error("block with the name '" + block_name + "' does already exist"); 
+ } + + get_next_token(); + } else if (tok.text == static_cast("endblock")) { + if (block_statement_stack.empty()) { + throw_parser_error("endblock without matching block"); + } + + auto& block_statement_data = block_statement_stack.top(); + get_next_token(); + + current_block = block_statement_data->parent; + block_statement_stack.pop(); + } else if (tok.text == static_cast("for")) { + get_next_token(); + + // options: for a in arr; for a, b in obj + if (tok.kind != Token::Kind::Id) { + throw_parser_error("expected id, got '" + tok.describe() + "'"); + } + + Token value_token = tok; + get_next_token(); + + // Object type + std::shared_ptr for_statement_node; + if (tok.kind == Token::Kind::Comma) { + get_next_token(); + if (tok.kind != Token::Kind::Id) { + throw_parser_error("expected id, got '" + tok.describe() + "'"); + } + + Token key_token = std::move(value_token); + value_token = tok; + get_next_token(); + + for_statement_node = std::make_shared(static_cast(key_token.text), static_cast(value_token.text), + current_block, tok.text.data() - tmpl.content.c_str()); + + // Array type + } else { + for_statement_node = + std::make_shared(static_cast(value_token.text), current_block, tok.text.data() - tmpl.content.c_str()); + } + + current_block->nodes.emplace_back(for_statement_node); + for_statement_stack.emplace(for_statement_node.get()); + current_block = &for_statement_node->body; + current_expression_list = &for_statement_node->condition; + + if (tok.kind != Token::Kind::Id || tok.text != static_cast("in")) { + throw_parser_error("expected 'in', got '" + tok.describe() + "'"); + } + get_next_token(); + + if (!parse_expression(tmpl, closing)) { + return false; + } + } else if (tok.text == static_cast("endfor")) { + if (for_statement_stack.empty()) { + throw_parser_error("endfor without matching for"); + } + + auto& for_statement_data = for_statement_stack.top(); + get_next_token(); + + current_block = for_statement_data->parent; + for_statement_stack.pop(); + } else if (tok.text == static_cast("include")) { + get_next_token(); + + std::string template_name = parse_filename(tok); + add_to_template_storage(path, template_name); + + current_block->nodes.emplace_back(std::make_shared(template_name, tok.text.data() - tmpl.content.c_str())); + + get_next_token(); + } else if (tok.text == static_cast("extends")) { + get_next_token(); + + std::string template_name = parse_filename(tok); + add_to_template_storage(path, template_name); + + current_block->nodes.emplace_back(std::make_shared(template_name, tok.text.data() - tmpl.content.c_str())); + + get_next_token(); + } else if (tok.text == static_cast("set")) { + get_next_token(); + + if (tok.kind != Token::Kind::Id) { + throw_parser_error("expected variable name, got '" + tok.describe() + "'"); + } + + std::string key = static_cast(tok.text); + get_next_token(); + + auto set_statement_node = std::make_shared(key, tok.text.data() - tmpl.content.c_str()); + current_block->nodes.emplace_back(set_statement_node); + current_expression_list = &set_statement_node->expression; + + if (tok.text != static_cast("=")) { + throw_parser_error("expected '=', got '" + tok.describe() + "'"); + } + get_next_token(); + + if (!parse_expression(tmpl, closing)) { + return false; + } + } else { + return false; + } + return true; + } + + void parse_into(Template& tmpl, std::string_view path) { + lexer.start(tmpl.content); + current_block = &tmpl.root; + + for (;;) { + get_next_token(); + switch (tok.kind) { + case Token::Kind::Eof: { + if (!if_statement_stack.empty()) 
{ + throw_parser_error("unmatched if"); + } + if (!for_statement_stack.empty()) { + throw_parser_error("unmatched for"); + } + } + return; + case Token::Kind::Text: { + current_block->nodes.emplace_back(std::make_shared(tok.text.data() - tmpl.content.c_str(), tok.text.size())); + } break; + case Token::Kind::StatementOpen: { + get_next_token(); + if (!parse_statement(tmpl, Token::Kind::StatementClose, path)) { + throw_parser_error("expected statement, got '" + tok.describe() + "'"); + } + if (tok.kind != Token::Kind::StatementClose) { + throw_parser_error("expected statement close, got '" + tok.describe() + "'"); + } + } break; + case Token::Kind::LineStatementOpen: { + get_next_token(); + if (!parse_statement(tmpl, Token::Kind::LineStatementClose, path)) { + throw_parser_error("expected statement, got '" + tok.describe() + "'"); + } + if (tok.kind != Token::Kind::LineStatementClose && tok.kind != Token::Kind::Eof) { + throw_parser_error("expected line statement close, got '" + tok.describe() + "'"); + } + } break; + case Token::Kind::ExpressionOpen: { + get_next_token(); + + auto expression_list_node = std::make_shared(tok.text.data() - tmpl.content.c_str()); + current_block->nodes.emplace_back(expression_list_node); + current_expression_list = expression_list_node.get(); + + if (!parse_expression(tmpl, Token::Kind::ExpressionClose)) { + throw_parser_error("expected expression, got '" + tok.describe() + "'"); + } + + if (tok.kind != Token::Kind::ExpressionClose) { + throw_parser_error("expected expression close, got '" + tok.describe() + "'"); + } + } break; + case Token::Kind::CommentOpen: { + get_next_token(); + if (tok.kind != Token::Kind::CommentClose) { + throw_parser_error("expected comment close, got '" + tok.describe() + "'"); + } + } break; + default: { + throw_parser_error("unexpected token '" + tok.describe() + "'"); + } break; + } + } + } + +public: + explicit Parser(const ParserConfig& parser_config, const LexerConfig& lexer_config, TemplateStorage& template_storage, + const FunctionStorage& function_storage) + : config(parser_config), lexer(lexer_config), template_storage(template_storage), function_storage(function_storage) {} + + Template parse(std::string_view input, std::string_view path) { + auto result = Template(static_cast(input)); + parse_into(result, path); + return result; + } + + Template parse(std::string_view input) { + return parse(input, "./"); + } + + void parse_into_template(Template& tmpl, std::string_view filename) { + std::string_view path = filename.substr(0, filename.find_last_of("/\\") + 1); + + // StringRef path = sys::path::parent_path(filename); + auto sub_parser = Parser(config, lexer.get_config(), template_storage, function_storage); + sub_parser.parse_into(tmpl, path); + } + + std::string load_file(const std::string& filename) { + std::ifstream file; + file.open(filename); + if (file.fail()) { + INJA_THROW(FileError("failed accessing file at '" + filename + "'")); + } + std::string text((std::istreambuf_iterator(file)), std::istreambuf_iterator()); + return text; + } +}; + +} // namespace inja + +#endif // INCLUDE_INJA_PARSER_HPP_ + +// #include "renderer.hpp" +#ifndef INCLUDE_INJA_RENDERER_HPP_ +#define INCLUDE_INJA_RENDERER_HPP_ + +#include +#include +#include +#include +#include + +// #include "config.hpp" + +// #include "exceptions.hpp" + +// #include "node.hpp" + +// #include "template.hpp" + +// #include "utils.hpp" + + +namespace inja { + +/*! + * \brief Class for rendering a Template with data. 
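+ * Another NodeVisitor: walks the parsed AST, evaluates expressions against the supplied json
+ * data (plus loop/set data kept in additional_data) and writes the result to an output stream.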
+ */ +class Renderer : public NodeVisitor { + using Op = FunctionStorage::Operation; + + const RenderConfig config; + const TemplateStorage& template_storage; + const FunctionStorage& function_storage; + + const Template* current_template; + size_t current_level {0}; + std::vector template_stack; + std::vector block_statement_stack; + + const json* data_input; + std::ostream* output_stream; + + json additional_data; + json* current_loop_data = &additional_data["loop"]; + + std::vector> data_tmp_stack; + std::stack data_eval_stack; + std::stack not_found_stack; + + bool break_rendering {false}; + + static bool truthy(const json* data) { + if (data->is_boolean()) { + return data->get(); + } else if (data->is_number()) { + return (*data != 0); + } else if (data->is_null()) { + return false; + } + return !data->empty(); + } + + void print_data(const std::shared_ptr value) { + if (value->is_string()) { + *output_stream << value->get_ref(); + } else if (value->is_number_integer()) { + *output_stream << value->get(); + } else if (value->is_null()) { + } else { + *output_stream << value->dump(); + } + } + + const std::shared_ptr eval_expression_list(const ExpressionListNode& expression_list) { + if (!expression_list.root) { + throw_renderer_error("empty expression", expression_list); + } + + expression_list.root->accept(*this); + + if (data_eval_stack.empty()) { + throw_renderer_error("empty expression", expression_list); + } else if (data_eval_stack.size() != 1) { + throw_renderer_error("malformed expression", expression_list); + } + + const auto result = data_eval_stack.top(); + data_eval_stack.pop(); + + if (!result) { + if (not_found_stack.empty()) { + throw_renderer_error("expression could not be evaluated", expression_list); + } + + auto node = not_found_stack.top(); + not_found_stack.pop(); + + throw_renderer_error("variable '" + static_cast(node->name) + "' not found", *node); + } + return std::make_shared(*result); + } + + void throw_renderer_error(const std::string& message, const AstNode& node) { + SourceLocation loc = get_source_location(current_template->content, node.pos); + INJA_THROW(RenderError(message, loc)); + } + + void make_result(const json&& result) { + auto result_ptr = std::make_shared(result); + data_tmp_stack.push_back(result_ptr); + data_eval_stack.push(result_ptr.get()); + } + + template std::array get_arguments(const FunctionNode& node) { + if (node.arguments.size() < N_start + N) { + throw_renderer_error("function needs " + std::to_string(N_start + N) + " variables, but has only found " + std::to_string(node.arguments.size()), node); + } + + for (size_t i = N_start; i < N_start + N; i += 1) { + node.arguments[i]->accept(*this); + } + + if (data_eval_stack.size() < N) { + throw_renderer_error("function needs " + std::to_string(N) + " variables, but has only found " + std::to_string(data_eval_stack.size()), node); + } + + std::array result; + for (size_t i = 0; i < N; i += 1) { + result[N - i - 1] = data_eval_stack.top(); + data_eval_stack.pop(); + + if (!result[N - i - 1]) { + const auto data_node = not_found_stack.top(); + not_found_stack.pop(); + + if (throw_not_found) { + throw_renderer_error("variable '" + static_cast(data_node->name) + "' not found", *data_node); + } + } + } + return result; + } + + template Arguments get_argument_vector(const FunctionNode& node) { + const size_t N = node.arguments.size(); + for (auto a : node.arguments) { + a->accept(*this); + } + + if (data_eval_stack.size() < N) { + throw_renderer_error("function needs " + std::to_string(N) + 
" variables, but has only found " + std::to_string(data_eval_stack.size()), node); + } + + Arguments result {N}; + for (size_t i = 0; i < N; i += 1) { + result[N - i - 1] = data_eval_stack.top(); + data_eval_stack.pop(); + + if (!result[N - i - 1]) { + const auto data_node = not_found_stack.top(); + not_found_stack.pop(); + + if (throw_not_found) { + throw_renderer_error("variable '" + static_cast(data_node->name) + "' not found", *data_node); + } + } + } + return result; + } + + void visit(const BlockNode& node) { + for (auto& n : node.nodes) { + n->accept(*this); + + if (break_rendering) { + break; + } + } + } + + void visit(const TextNode& node) { + output_stream->write(current_template->content.c_str() + node.pos, node.length); + } + + void visit(const ExpressionNode&) {} + + void visit(const LiteralNode& node) { + data_eval_stack.push(&node.value); + } + + void visit(const DataNode& node) { + if (additional_data.contains(node.ptr)) { + data_eval_stack.push(&(additional_data[node.ptr])); + } else if (data_input->contains(node.ptr)) { + data_eval_stack.push(&(*data_input)[node.ptr]); + } else { + // Try to evaluate as a no-argument callback + const auto function_data = function_storage.find_function(node.name, 0); + if (function_data.operation == FunctionStorage::Operation::Callback) { + Arguments empty_args {}; + const auto value = std::make_shared(function_data.callback(empty_args)); + data_tmp_stack.push_back(value); + data_eval_stack.push(value.get()); + } else { + data_eval_stack.push(nullptr); + not_found_stack.emplace(&node); + } + } + } + + void visit(const FunctionNode& node) { + switch (node.operation) { + case Op::Not: { + const auto args = get_arguments<1>(node); + make_result(!truthy(args[0])); + } break; + case Op::And: { + make_result(truthy(get_arguments<1, 0>(node)[0]) && truthy(get_arguments<1, 1>(node)[0])); + } break; + case Op::Or: { + make_result(truthy(get_arguments<1, 0>(node)[0]) || truthy(get_arguments<1, 1>(node)[0])); + } break; + case Op::In: { + const auto args = get_arguments<2>(node); + make_result(std::find(args[1]->begin(), args[1]->end(), *args[0]) != args[1]->end()); + } break; + case Op::Equal: { + const auto args = get_arguments<2>(node); + make_result(*args[0] == *args[1]); + } break; + case Op::NotEqual: { + const auto args = get_arguments<2>(node); + make_result(*args[0] != *args[1]); + } break; + case Op::Greater: { + const auto args = get_arguments<2>(node); + make_result(*args[0] > *args[1]); + } break; + case Op::GreaterEqual: { + const auto args = get_arguments<2>(node); + make_result(*args[0] >= *args[1]); + } break; + case Op::Less: { + const auto args = get_arguments<2>(node); + make_result(*args[0] < *args[1]); + } break; + case Op::LessEqual: { + const auto args = get_arguments<2>(node); + make_result(*args[0] <= *args[1]); + } break; + case Op::Add: { + const auto args = get_arguments<2>(node); + if (args[0]->is_string() && args[1]->is_string()) { + make_result(args[0]->get_ref() + args[1]->get_ref()); + } else if (args[0]->is_number_integer() && args[1]->is_number_integer()) { + make_result(args[0]->get() + args[1]->get()); + } else { + make_result(args[0]->get() + args[1]->get()); + } + } break; + case Op::Subtract: { + const auto args = get_arguments<2>(node); + if (args[0]->is_number_integer() && args[1]->is_number_integer()) { + make_result(args[0]->get() - args[1]->get()); + } else { + make_result(args[0]->get() - args[1]->get()); + } + } break; + case Op::Multiplication: { + const auto args = get_arguments<2>(node); + if 
(args[0]->is_number_integer() && args[1]->is_number_integer()) { + make_result(args[0]->get() * args[1]->get()); + } else { + make_result(args[0]->get() * args[1]->get()); + } + } break; + case Op::Division: { + const auto args = get_arguments<2>(node); + if (args[1]->get() == 0) { + throw_renderer_error("division by zero", node); + } + make_result(args[0]->get() / args[1]->get()); + } break; + case Op::Power: { + const auto args = get_arguments<2>(node); + if (args[0]->is_number_integer() && args[1]->get() >= 0) { + int result = static_cast(std::pow(args[0]->get(), args[1]->get())); + make_result(result); + } else { + double result = std::pow(args[0]->get(), args[1]->get()); + make_result(result); + } + } break; + case Op::Modulo: { + const auto args = get_arguments<2>(node); + make_result(args[0]->get() % args[1]->get()); + } break; + case Op::AtId: { + const auto container = get_arguments<1, 0, false>(node)[0]; + node.arguments[1]->accept(*this); + if (not_found_stack.empty()) { + throw_renderer_error("could not find element with given name", node); + } + const auto id_node = not_found_stack.top(); + not_found_stack.pop(); + data_eval_stack.pop(); + data_eval_stack.push(&container->at(id_node->name)); + } break; + case Op::At: { + const auto args = get_arguments<2>(node); + if (args[0]->is_object()) { + data_eval_stack.push(&args[0]->at(args[1]->get())); + } else { + data_eval_stack.push(&args[0]->at(args[1]->get())); + } + } break; + case Op::Default: { + const auto test_arg = get_arguments<1, 0, false>(node)[0]; + data_eval_stack.push(test_arg ? test_arg : get_arguments<1, 1>(node)[0]); + } break; + case Op::DivisibleBy: { + const auto args = get_arguments<2>(node); + const int divisor = args[1]->get(); + make_result((divisor != 0) && (args[0]->get() % divisor == 0)); + } break; + case Op::Even: { + make_result(get_arguments<1>(node)[0]->get() % 2 == 0); + } break; + case Op::Exists: { + auto&& name = get_arguments<1>(node)[0]->get_ref(); + make_result(data_input->contains(json::json_pointer(DataNode::convert_dot_to_ptr(name)))); + } break; + case Op::ExistsInObject: { + const auto args = get_arguments<2>(node); + auto&& name = args[1]->get_ref(); + make_result(args[0]->find(name) != args[0]->end()); + } break; + case Op::First: { + const auto result = &get_arguments<1>(node)[0]->front(); + data_eval_stack.push(result); + } break; + case Op::Float: { + make_result(std::stod(get_arguments<1>(node)[0]->get_ref())); + } break; + case Op::Int: { + make_result(std::stoi(get_arguments<1>(node)[0]->get_ref())); + } break; + case Op::Last: { + const auto result = &get_arguments<1>(node)[0]->back(); + data_eval_stack.push(result); + } break; + case Op::Length: { + const auto val = get_arguments<1>(node)[0]; + if (val->is_string()) { + make_result(val->get_ref().length()); + } else { + make_result(val->size()); + } + } break; + case Op::Lower: { + std::string result = get_arguments<1>(node)[0]->get(); + std::transform(result.begin(), result.end(), result.begin(), ::tolower); + make_result(std::move(result)); + } break; + case Op::Max: { + const auto args = get_arguments<1>(node); + const auto result = std::max_element(args[0]->begin(), args[0]->end()); + data_eval_stack.push(&(*result)); + } break; + case Op::Min: { + const auto args = get_arguments<1>(node); + const auto result = std::min_element(args[0]->begin(), args[0]->end()); + data_eval_stack.push(&(*result)); + } break; + case Op::Odd: { + make_result(get_arguments<1>(node)[0]->get() % 2 != 0); + } break; + case Op::Range: { + 
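+        // range(n) builds the array [0, 1, ..., n-1] via std::iota, so e.g.
+        // {% for i in range(3) %}...{% endfor %} iterates with i = 0, 1, 2.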
std::vector result(get_arguments<1>(node)[0]->get()); + std::iota(result.begin(), result.end(), 0); + make_result(std::move(result)); + } break; + case Op::Round: { + const auto args = get_arguments<2>(node); + const int precision = args[1]->get(); + const double result = std::round(args[0]->get() * std::pow(10.0, precision)) / std::pow(10.0, precision); + if (precision == 0) { + make_result(int(result)); + } else { + make_result(result); + } + } break; + case Op::Sort: { + auto result_ptr = std::make_shared(get_arguments<1>(node)[0]->get>()); + std::sort(result_ptr->begin(), result_ptr->end()); + data_tmp_stack.push_back(result_ptr); + data_eval_stack.push(result_ptr.get()); + } break; + case Op::Upper: { + std::string result = get_arguments<1>(node)[0]->get(); + std::transform(result.begin(), result.end(), result.begin(), ::toupper); + make_result(std::move(result)); + } break; + case Op::IsBoolean: { + make_result(get_arguments<1>(node)[0]->is_boolean()); + } break; + case Op::IsNumber: { + make_result(get_arguments<1>(node)[0]->is_number()); + } break; + case Op::IsInteger: { + make_result(get_arguments<1>(node)[0]->is_number_integer()); + } break; + case Op::IsFloat: { + make_result(get_arguments<1>(node)[0]->is_number_float()); + } break; + case Op::IsObject: { + make_result(get_arguments<1>(node)[0]->is_object()); + } break; + case Op::IsArray: { + make_result(get_arguments<1>(node)[0]->is_array()); + } break; + case Op::IsString: { + make_result(get_arguments<1>(node)[0]->is_string()); + } break; + case Op::Callback: { + auto args = get_argument_vector(node); + make_result(node.callback(args)); + } break; + case Op::Super: { + const auto args = get_argument_vector(node); + const size_t old_level = current_level; + const size_t level_diff = (args.size() == 1) ? 
args[0]->get() : 1; + const size_t level = current_level + level_diff; + + if (block_statement_stack.empty()) { + throw_renderer_error("super() call is not within a block", node); + } + + if (level < 1 || level > template_stack.size() - 1) { + throw_renderer_error("level of super() call does not match parent templates (between 1 and " + std::to_string(template_stack.size() - 1) + ")", node); + } + + const auto current_block_statement = block_statement_stack.back(); + const Template* new_template = template_stack.at(level); + const Template* old_template = current_template; + const auto block_it = new_template->block_storage.find(current_block_statement->name); + if (block_it != new_template->block_storage.end()) { + current_template = new_template; + current_level = level; + block_it->second->block.accept(*this); + current_level = old_level; + current_template = old_template; + } else { + throw_renderer_error("could not find block with name '" + current_block_statement->name + "'", node); + } + make_result(nullptr); + } break; + case Op::Join: { + const auto args = get_arguments<2>(node); + const auto separator = args[1]->get(); + std::ostringstream os; + std::string sep; + for (const auto& value : *args[0]) { + os << sep; + if (value.is_string()) { + os << value.get(); // otherwise the value is surrounded with "" + } else { + os << value.dump(); + } + sep = separator; + } + make_result(os.str()); + } break; + case Op::ParenLeft: + case Op::ParenRight: + case Op::None: + break; + } + } + + void visit(const ExpressionListNode& node) { + print_data(eval_expression_list(node)); + } + + void visit(const StatementNode&) {} + + void visit(const ForStatementNode&) {} + + void visit(const ForArrayStatementNode& node) { + const auto result = eval_expression_list(node.condition); + if (!result->is_array()) { + throw_renderer_error("object must be an array", node); + } + + if (!current_loop_data->empty()) { + auto tmp = *current_loop_data; // Because of clang-3 + (*current_loop_data)["parent"] = std::move(tmp); + } + + size_t index = 0; + (*current_loop_data)["is_first"] = true; + (*current_loop_data)["is_last"] = (result->size() <= 1); + for (auto it = result->begin(); it != result->end(); ++it) { + additional_data[static_cast(node.value)] = *it; + + (*current_loop_data)["index"] = index; + (*current_loop_data)["index1"] = index + 1; + if (index == 1) { + (*current_loop_data)["is_first"] = false; + } + if (index == result->size() - 1) { + (*current_loop_data)["is_last"] = true; + } + + node.body.accept(*this); + ++index; + } + + additional_data[static_cast(node.value)].clear(); + if (!(*current_loop_data)["parent"].empty()) { + const auto tmp = (*current_loop_data)["parent"]; + *current_loop_data = std::move(tmp); + } else { + current_loop_data = &additional_data["loop"]; + } + } + + void visit(const ForObjectStatementNode& node) { + const auto result = eval_expression_list(node.condition); + if (!result->is_object()) { + throw_renderer_error("object must be an object", node); + } + + if (!current_loop_data->empty()) { + (*current_loop_data)["parent"] = std::move(*current_loop_data); + } + + size_t index = 0; + (*current_loop_data)["is_first"] = true; + (*current_loop_data)["is_last"] = (result->size() <= 1); + for (auto it = result->begin(); it != result->end(); ++it) { + additional_data[static_cast(node.key)] = it.key(); + additional_data[static_cast(node.value)] = it.value(); + + (*current_loop_data)["index"] = index; + (*current_loop_data)["index1"] = index + 1; + if (index == 1) { + 
(*current_loop_data)["is_first"] = false; + } + if (index == result->size() - 1) { + (*current_loop_data)["is_last"] = true; + } + + node.body.accept(*this); + ++index; + } + + additional_data[static_cast(node.key)].clear(); + additional_data[static_cast(node.value)].clear(); + if (!(*current_loop_data)["parent"].empty()) { + *current_loop_data = std::move((*current_loop_data)["parent"]); + } else { + current_loop_data = &additional_data["loop"]; + } + } + + void visit(const IfStatementNode& node) { + const auto result = eval_expression_list(node.condition); + if (truthy(result.get())) { + node.true_statement.accept(*this); + } else if (node.has_false_statement) { + node.false_statement.accept(*this); + } + } + + void visit(const IncludeStatementNode& node) { + auto sub_renderer = Renderer(config, template_storage, function_storage); + const auto included_template_it = template_storage.find(node.file); + if (included_template_it != template_storage.end()) { + sub_renderer.render_to(*output_stream, included_template_it->second, *data_input, &additional_data); + } else if (config.throw_at_missing_includes) { + throw_renderer_error("include '" + node.file + "' not found", node); + } + } + + void visit(const ExtendsStatementNode& node) { + const auto included_template_it = template_storage.find(node.file); + if (included_template_it != template_storage.end()) { + const Template* parent_template = &included_template_it->second; + render_to(*output_stream, *parent_template, *data_input, &additional_data); + break_rendering = true; + } else if (config.throw_at_missing_includes) { + throw_renderer_error("extends '" + node.file + "' not found", node); + } + } + + void visit(const BlockStatementNode& node) { + const size_t old_level = current_level; + current_level = 0; + current_template = template_stack.front(); + const auto block_it = current_template->block_storage.find(node.name); + if (block_it != current_template->block_storage.end()) { + block_statement_stack.emplace_back(&node); + block_it->second->block.accept(*this); + block_statement_stack.pop_back(); + } + current_level = old_level; + current_template = template_stack.back(); + } + + void visit(const SetStatementNode& node) { + std::string ptr = node.key; + replace_substring(ptr, ".", "/"); + ptr = "/" + ptr; + additional_data[json::json_pointer(ptr)] = *eval_expression_list(node.expression); + } + +public: + Renderer(const RenderConfig& config, const TemplateStorage& template_storage, const FunctionStorage& function_storage) + : config(config), template_storage(template_storage), function_storage(function_storage) {} + + void render_to(std::ostream& os, const Template& tmpl, const json& data, json* loop_data = nullptr) { + output_stream = &os; + current_template = &tmpl; + data_input = &data; + if (loop_data) { + additional_data = *loop_data; + current_loop_data = &additional_data["loop"]; + } + + template_stack.emplace_back(current_template); + current_template->root.accept(*this); + + data_tmp_stack.clear(); + } +}; + +} // namespace inja + +#endif // INCLUDE_INJA_RENDERER_HPP_ + +// #include "template.hpp" + +// #include "utils.hpp" + + +namespace inja { + +/*! + * \brief Class for changing the configuration. 
+ */ +class Environment { + std::string input_path; + std::string output_path; + + LexerConfig lexer_config; + ParserConfig parser_config; + RenderConfig render_config; + + FunctionStorage function_storage; + TemplateStorage template_storage; + +public: + Environment(): Environment("") {} + + explicit Environment(const std::string& global_path): input_path(global_path), output_path(global_path) {} + + Environment(const std::string& input_path, const std::string& output_path): input_path(input_path), output_path(output_path) {} + + /// Sets the opener and closer for template statements + void set_statement(const std::string& open, const std::string& close) { + lexer_config.statement_open = open; + lexer_config.statement_open_no_lstrip = open + "+"; + lexer_config.statement_open_force_lstrip = open + "-"; + lexer_config.statement_close = close; + lexer_config.statement_close_force_rstrip = "-" + close; + lexer_config.update_open_chars(); + } + + /// Sets the opener for template line statements + void set_line_statement(const std::string& open) { + lexer_config.line_statement = open; + lexer_config.update_open_chars(); + } + + /// Sets the opener and closer for template expressions + void set_expression(const std::string& open, const std::string& close) { + lexer_config.expression_open = open; + lexer_config.expression_open_force_lstrip = open + "-"; + lexer_config.expression_close = close; + lexer_config.expression_close_force_rstrip = "-" + close; + lexer_config.update_open_chars(); + } + + /// Sets the opener and closer for template comments + void set_comment(const std::string& open, const std::string& close) { + lexer_config.comment_open = open; + lexer_config.comment_open_force_lstrip = open + "-"; + lexer_config.comment_close = close; + lexer_config.comment_close_force_rstrip = "-" + close; + lexer_config.update_open_chars(); + } + + /// Sets whether to remove the first newline after a block + void set_trim_blocks(bool trim_blocks) { + lexer_config.trim_blocks = trim_blocks; + } + + /// Sets whether to strip the spaces and tabs from the start of a line to a block + void set_lstrip_blocks(bool lstrip_blocks) { + lexer_config.lstrip_blocks = lstrip_blocks; + } + + /// Sets the element notation syntax + void set_search_included_templates_in_files(bool search_in_files) { + parser_config.search_included_templates_in_files = search_in_files; + } + + /// Sets whether a missing include will throw an error + void set_throw_at_missing_includes(bool will_throw) { + render_config.throw_at_missing_includes = will_throw; + } + + Template parse(std::string_view input) { + Parser parser(parser_config, lexer_config, template_storage, function_storage); + return parser.parse(input); + } + + Template parse_template(const std::string& filename) { + Parser parser(parser_config, lexer_config, template_storage, function_storage); + auto result = Template(parser.load_file(input_path + static_cast(filename))); + parser.parse_into_template(result, input_path + static_cast(filename)); + return result; + } + + Template parse_file(const std::string& filename) { + return parse_template(filename); + } + + std::string render(std::string_view input, const json& data) { + return render(parse(input), data); + } + + std::string render(const Template& tmpl, const json& data) { + std::stringstream os; + render_to(os, tmpl, data); + return os.str(); + } + + std::string render_file(const std::string& filename, const json& data) { + return render(parse_template(filename), data); + } + + std::string 
render_file_with_json_file(const std::string& filename, const std::string& filename_data) { + const json data = load_json(filename_data); + return render_file(filename, data); + } + + void write(const std::string& filename, const json& data, const std::string& filename_out) { + std::ofstream file(output_path + filename_out); + file << render_file(filename, data); + file.close(); + } + + void write(const Template& temp, const json& data, const std::string& filename_out) { + std::ofstream file(output_path + filename_out); + file << render(temp, data); + file.close(); + } + + void write_with_json_file(const std::string& filename, const std::string& filename_data, const std::string& filename_out) { + const json data = load_json(filename_data); + write(filename, data, filename_out); + } + + void write_with_json_file(const Template& temp, const std::string& filename_data, const std::string& filename_out) { + const json data = load_json(filename_data); + write(temp, data, filename_out); + } + + std::ostream& render_to(std::ostream& os, const Template& tmpl, const json& data) { + Renderer(render_config, template_storage, function_storage).render_to(os, tmpl, data); + return os; + } + + std::string load_file(const std::string& filename) { + Parser parser(parser_config, lexer_config, template_storage, function_storage); + return parser.load_file(input_path + filename); + } + + json load_json(const std::string& filename) { + std::ifstream file; + file.open(input_path + filename); + if (file.fail()) { + INJA_THROW(FileError("failed accessing file at '" + input_path + filename + "'")); + } + + return json::parse(std::istreambuf_iterator(file), std::istreambuf_iterator()); + } + + /*! + @brief Adds a variadic callback + */ + void add_callback(const std::string& name, const CallbackFunction& callback) { + add_callback(name, -1, callback); + } + + /*! + @brief Adds a variadic void callback + */ + void add_void_callback(const std::string& name, const VoidCallbackFunction& callback) { + add_void_callback(name, -1, callback); + } + + /*! + @brief Adds a callback with given number or arguments + */ + void add_callback(const std::string& name, int num_args, const CallbackFunction& callback) { + function_storage.add_callback(name, num_args, callback); + } + + /*! + @brief Adds a void callback with given number or arguments + */ + void add_void_callback(const std::string& name, int num_args, const VoidCallbackFunction& callback) { + function_storage.add_callback(name, num_args, [callback](Arguments& args) { + callback(args); + return json(); + }); + } + + /** Includes a template with a given name into the environment. + * Then, a template can be rendered in another template using the + * include "" syntax. + */ + void include_template(const std::string& name, const Template& tmpl) { + template_storage[name] = tmpl; + } + + /*! + @brief Sets a function that is called when an included file is not found + */ + void set_include_callback(const std::function& callback) { + parser_config.include_callback = callback; + } +}; + +/*! +@brief render with default settings to a string +*/ +inline std::string render(std::string_view input, const json& data) { + return Environment().render(input, data); +} + +/*! 
+@brief render with default settings to the given output stream
+*/
+inline void render_to(std::ostream& os, std::string_view input, const json& data) {
+  Environment env;
+  env.render_to(os, env.parse(input), data);
+}
+
+} // namespace inja
+
+#endif // INCLUDE_INJA_ENVIRONMENT_HPP_
+
+// #include "exceptions.hpp"
+
+// #include "parser.hpp"
+
+// #include "renderer.hpp"
+
+// #include "template.hpp"
+
+
+#endif // INCLUDE_INJA_INJA_HPP_
diff --git a/ext/installfiles/mac/ZeroTier One.pkgproj b/ext/installfiles/mac/ZeroTier One.pkgproj
index 764a0caf0..70cedf9f9 100755
--- a/ext/installfiles/mac/ZeroTier One.pkgproj
+++ b/ext/installfiles/mac/ZeroTier One.pkgproj
@@ -701,7 +701,7 @@
 USE_HFS+_COMPRESSION VERSION
- 1.8.9
+ 1.14.2
 TYPE 0
diff --git a/ext/installfiles/windows/Prerequisites/WebView2/MicrosoftEdgeWebview2Setup.exe b/ext/installfiles/windows/Prerequisites/WebView2/MicrosoftEdgeWebview2Setup.exe
deleted file mode 100644
index f24fedaeb..000000000
Binary files a/ext/installfiles/windows/Prerequisites/WebView2/MicrosoftEdgeWebview2Setup.exe and /dev/null differ
diff --git a/ext/installfiles/windows/ZeroTier One.aip b/ext/installfiles/windows/ZeroTier One.aip
index 17bec1042..3066eb788 100644
--- a/ext/installfiles/windows/ZeroTier One.aip
+++ b/ext/installfiles/windows/ZeroTier One.aip
[hunk bodies omitted: the .aip XML markup did not survive extraction, leaving only bare +/- markers]
diff --git a/ext/installfiles/windows/ZeroTier One.back.aip b/ext/installfiles/windows/ZeroTier One.back.aip
new file mode 100644
index 000000000..9ad818277
--- /dev/null
+++ b/ext/installfiles/windows/ZeroTier One.back.aip
@@ -0,0 +1,558 @@
[file content omitted: the .aip XML markup did not survive extraction, leaving only bare + markers]
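For orientation only: the `inja::Environment` class in the vendored header above is the template engine's public entry point. A minimal usage sketch follows; the include path, the template strings, and the `twice` callback are invented for illustration and are not part of this patch.

```cpp
#include <iostream>

// Path of the bundled single-header inja; adjust to where the header
// actually lives in this tree (an assumption for this sketch).
#include <inja/inja.hpp>

int main() {
  inja::Environment env;

  // Plain rendering: data is an nlohmann::json object (aliased as inja::json).
  inja::json data;
  data["name"] = "world";
  std::cout << env.render("Hello {{ name }}!\n", data);

  // A callback with a fixed argument count, registered through
  // Environment::add_callback(name, num_args, fn) as declared above.
  env.add_callback("twice", 1, [](inja::Arguments& args) {
    return 2 * args.at(0)->get<int>();
  });
  std::cout << env.render("{{ twice(21) }}\n", data); // prints 42
}
```

Rendering to a stream or from a file goes through `render_to` and `render_file` on the same class.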
+ + + + + + + + + + + + + + + + + + + diff --git a/ext/libpqxx-7.7.3/.circleci/config.yml b/ext/libpqxx-7.7.3/.circleci/config.yml new file mode 100644 index 000000000..038d7adb0 --- /dev/null +++ b/ext/libpqxx-7.7.3/.circleci/config.yml @@ -0,0 +1,60 @@ +# CircleCI config for automated test builds triggered from Github. +version: 2 +jobs: + build: + docker: + - image: debian:testing +# - image: postgres:latest + environment: + - PGHOST: "/tmp" + steps: + - checkout + - run: + name: Configure apt archives + command: apt update + - run: + name: Install + command: apt install -y lsb-release python3 cmake postgresql libpq-dev postgresql-server-dev-all build-essential autoconf dh-autoreconf autoconf-archive automake cppcheck + - run: + name: Identify + command: lsb_release -a && c++ --version + - run: + name: Prepare postgres + command: | + mkdir /tmp/db && + chown postgres /tmp/db && + su postgres -c '/usr/lib/postgresql/*/bin/initdb --pgdata /tmp/db --auth trust --nosync' + - run: + name: Run postgres + command: (su postgres -c '/usr/lib/postgresql/*/bin/postgres -D /tmp/db -k /tmp' &) && sleep 5 + - run: + name: Create postgres user + command: su postgres -c "createuser -w -d root" + - run: + name: Set up database + command: createdb root + - run: + name: Autogen + command: NOCONFIGURE=1 ./autogen.sh + - run: + name: Configure + command: | + ./configure \ + --disable-documentation \ + --enable-maintainer-mode \ + --enable-audit \ + --enable-shared --disable-static \ + CXXFLAGS=-O3 + - store_artifacts: + path: config.log + - run: + name: Make + command: make -j$(nproc) + - run: + name: Test + command: PGDATA=db/data make check + - run: + name: Analyse + command: ./tools/lint --full >lint.log + - store_artifacts: + path: lint.log diff --git a/ext/libpqxx-7.7.3/.clang-format b/ext/libpqxx-7.7.3/.clang-format new file mode 100644 index 000000000..97823d8ff --- /dev/null +++ b/ext/libpqxx-7.7.3/.clang-format @@ -0,0 +1,71 @@ +Language: Cpp + +AlignAfterOpenBracket: AlwaysBreak +# AllowAllArgumentsOnNextLine: true +# AllowAllConstructorInitializersOnNextLine: true +AllowAllParametersOfDeclarationOnNextLine: true +AllowShortBlocksOnASingleLine: true +AllowShortCaseLabelsOnASingleLine: true +AllowShortFunctionsOnASingleLine: Inline +# AllowShortIfStatementsOnASingleLine: WithoutElse +# AllowShortLambdasOnASingleLine: All +AllowShortLoopsOnASingleLine: true +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: true +# AlwaysBreakTemplateDeclarations: No +BinPackArguments: true +BinPackParameters: true +BreakBeforeBraces: Custom +BraceWrapping: + # AfterCaseLabel: true + AfterClass: true + AfterControlStatement: true + AfterEnum: true + AfterExternBlock: true + AfterFunction: true + AfterNamespace: true + AfterStruct: true + BeforeCatch: true + BeforeElse: true + IndentBraces: false + SplitEmptyFunction: false + SplitEmptyNamespace: false + SplitEmptyRecord: false +BreakBeforeBinaryOperators: None +BreakBeforeTernaryOperators: false +BreakConstructorInitializers: AfterColon +# BreakInheritanceList: AfterColon +BreakStringLiterals: true +ColumnLimit: 79 +ConstructorInitializerAllOnOneLineOrOnePerLine: true +ConstructorInitializerIndentWidth: 8 +ContinuationIndentWidth: 2 +Cpp11BracedListStyle: true +FixNamespaceComments: true +IncludeBlocks: Preserve +IndentCaseLabels: false +IndentPPDirectives: AfterHash +IndentWidth: 2 +IndentWrappedFunctionNames: false +KeepEmptyLinesAtTheStartOfBlocks: false +MaxEmptyLinesToKeep: 2 +# NamespaceIndentation: All +SortIncludes: true 
+SortUsingDeclarations: true +SpaceAfterCStyleCast: false +SpaceAfterTemplateKeyword: false +SpaceBeforeAssignmentOperators: true +# SpaceBeforeCpp11BracedList: false +# SpaceBeforeCtorInitializerColon: true +# SpaceBeforeInheritanceColon: true +# SpaceBeforeParents: ControlStatements +# SpaceBeforeRangedBasedForLoopColon: true +SpaceInEmptyParentheses: false +SpacesInAngles: false +SpacesInCStyleCastParentheses: false +SpacesInContainerLiterals: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +Standard: Cpp11 +UseTab: Never +--- diff --git a/ext/libpqxx-7.7.3/.cmake-format b/ext/libpqxx-7.7.3/.cmake-format new file mode 100644 index 000000000..af00ef936 --- /dev/null +++ b/ext/libpqxx-7.7.3/.cmake-format @@ -0,0 +1,184 @@ +format: + _help_max_prefix_chars: + - !!python/unicode 'If the statement spelling length (including space and' + - !!python/unicode 'parenthesis) is larger than the tab width by more than this' + - !!python/unicode 'amount, then force reject un-nested layouts.' + max_prefix_chars: 10 + _help_dangle_align: + - !!python/unicode 'If the trailing parenthesis must be ''dangled'' on its own' + - !!python/unicode 'line, then align it to this reference: `prefix`: the start' + - !!python/unicode 'of the statement, `prefix-indent`: the start of the' + - !!python/unicode 'statement, plus one indentation level, `child`: align to' + - !!python/unicode 'the column of the arguments' + dangle_align: !!python/unicode 'prefix' + _help_max_subgroups_hwrap: + - !!python/unicode 'If an argument group contains more than this many sub-groups' + - !!python/unicode '(parg or kwarg groups) then force it to a vertical layout.' + max_subgroups_hwrap: 2 + _help_min_prefix_chars: + - !!python/unicode 'If the statement spelling length (including space and' + - !!python/unicode 'parenthesis) is smaller than this amount, then force reject' + - !!python/unicode 'nested layouts.' + min_prefix_chars: 4 + _help_max_pargs_hwrap: + - !!python/unicode 'If a positional argument group contains more than this many' + - !!python/unicode 'arguments, then force it to a vertical layout.' + max_pargs_hwrap: 6 + _help_max_lines_hwrap: + - !!python/unicode 'If a candidate layout is wrapped horizontally but it exceeds' + - !!python/unicode 'this many lines, then reject the layout.' + max_lines_hwrap: 2 + _help_autosort: + - !!python/unicode 'If true, the parsers may infer whether or not an argument' + - !!python/unicode 'list is sortable (without annotation).' + autosort: false + _help_line_ending: + - !!python/unicode 'What style line endings to use in the output.' + line_ending: !!python/unicode 'unix' + _help_line_width: + - !!python/unicode 'How wide to allow formatted cmake files' + line_width: 80 + _help_dangle_parens: + - !!python/unicode 'If a statement is wrapped to more than one line, then dangle' + - !!python/unicode 'the closing parenthesis on its own line.' + dangle_parens: true + _help_tab_size: + - !!python/unicode 'How many spaces to tab for indent' + tab_size: 4 + _help_always_wrap: + - !!python/unicode 'A list of command names which should always be wrapped' + always_wrap: [] + _help_require_valid_layout: + - !!python/unicode 'By default, if cmake-format cannot successfully fit' + - !!python/unicode 'everything into the desired linewidth it will apply the' + - !!python/unicode 'last, most agressive attempt that it made. 
If this flag is' + - !!python/unicode 'True, however, cmake-format will print error, exit with non-' + - !!python/unicode 'zero status code, and write-out nothing' + require_valid_layout: true + _help_keyword_case: + - !!python/unicode 'Format keywords consistently as ''lower'' or ''upper'' case' + keyword_case: !!python/unicode 'unchanged' + _help_layout_passes: + - !!python/unicode 'A dictionary mapping layout nodes to a list of wrap' + - !!python/unicode 'decisions. See the documentation for more information.' + layout_passes: {} + _help_enable_sort: + - !!python/unicode 'If true, the argument lists which are known to be sortable' + - !!python/unicode 'will be sorted lexicographically' + enable_sort: true +_help_markup: !!python/unicode 'Options affecting comment reflow and formatting.' +markup: + _help_literal_comment_pattern: + - !!python/unicode 'If comment markup is enabled, don''t reflow any comment block' + - !!python/unicode 'which matches this (regex) pattern. Default is `None`' + - !!python/unicode '(disabled).' + literal_comment_pattern: null + _help_hashruler_min_length: + - !!python/unicode 'If a comment line starts with at least this many consecutive' + - !!python/unicode 'hash characters, then don''t lstrip() them off. This allows' + - !!python/unicode 'for lazy hash rulers where the first hash char is not' + - !!python/unicode 'separated by space' + hashruler_min_length: 10 + _help_fence_pattern: + - !!python/unicode 'Regular expression to match preformat fences in comments' + - !!python/unicode 'default=r''^\s*([`~]{3}[`~]*)(.*)$''' + fence_pattern: !!python/unicode '^\s*([`~]{3}[`~]*)(.*)$' + _help_canonicalize_hashrulers: + - !!python/unicode 'If true, then insert a space between the first hash char and' + - !!python/unicode 'remaining hash chars in a hash ruler, and normalize its' + - !!python/unicode 'length to fill the column' + canonicalize_hashrulers: true + _help_explicit_trailing_pattern: + - !!python/unicode 'If a comment line matches starts with this pattern then it' + - !!python/unicode 'is explicitly a trailing comment for the preceeding' + - !!python/unicode 'argument. Default is ''#<''' + explicit_trailing_pattern: !!python/unicode '#<' + _help_first_comment_is_literal: + - !!python/unicode 'If comment markup is enabled, don''t reflow the first comment' + - !!python/unicode 'block in each listfile. Use this to preserve formatting of' + - !!python/unicode 'your copyright/license statements.' + first_comment_is_literal: false + _help_enable_markup: + - !!python/unicode 'enable comment markup parsing and reflow' + enable_markup: true + _help_ruler_pattern: + - !!python/unicode 'Regular expression to match rulers in comments' + - !!python/unicode 'default=r''^\s*[^\w\s]{3}.*[^\w\s]{3}$''' + ruler_pattern: !!python/unicode '^\s*[^\w\s]{3}.*[^\w\s]{3}$' + _help_enum_char: + - !!python/unicode 'What character to use as punctuation after numerals in an' + - !!python/unicode 'enumerated list' + enum_char: . 
+ _help_bullet_char: + - !!python/unicode 'What character to use for bulleted lists' + bullet_char: '*' +_help_lint: !!python/unicode 'Options affecting the linter' +lint: + _help_function_pattern: + - !!python/unicode 'regular expression pattern describing valid function names' + function_pattern: !!python/unicode '[0-9a-z_]+' + _help_disabled_codes: + - !!python/unicode 'a list of lint codes to disable' + disabled_codes: [] + _help_min_statement_spacing: + - !!python/unicode 'Require at least this many newlines between statements' + min_statement_spacing: 1 + _help_macro_pattern: + - !!python/unicode 'regular expression pattern describing valid macro names' + macro_pattern: !!python/unicode '[0-9A-Z_]+' + _help_public_var_pattern: + - !!python/unicode 'regular expression pattern describing valid names for' + - !!python/unicode 'publicdirectory variables' + public_var_pattern: !!python/unicode '[0-9A-Z][0-9A-Z_]+' + max_statements: 50 + _help_max_conditionals_custom_parser: + - !!python/unicode 'In the heuristic for C0201, how many conditionals to match' + - !!python/unicode 'within a loop in before considering the loop a parser.' + max_conditionals_custom_parser: 2 + _help_global_var_pattern: + - !!python/unicode 'regular expression pattern describing valid names for' + - !!python/unicode 'variables with global scope' + global_var_pattern: !!python/unicode '[0-9A-Z][0-9A-Z_]+' + _help_keyword_pattern: + - !!python/unicode 'regular expression pattern describing valid names for' + - !!python/unicode 'keywords used in functions or macros' + keyword_pattern: !!python/unicode '[0-9A-Z_]+' + max_arguments: 5 + _help_private_var_pattern: + - !!python/unicode 'regular expression pattern describing valid names for' + - !!python/unicode 'privatedirectory variables' + private_var_pattern: !!python/unicode '_[0-9a-z_]+' + max_localvars: 15 + max_branches: 12 + _help_local_var_pattern: + - !!python/unicode 'regular expression pattern describing valid names for' + - !!python/unicode 'variables with local scope' + local_var_pattern: !!python/unicode '[0-9a-z_]+' + _help_max_statement_spacing: + - !!python/unicode 'Require no more than this many newlines between statements' + max_statement_spacing: 1 + _help_internal_var_pattern: + - !!python/unicode 'regular expression pattern describing valid names for' + - !!python/unicode 'variables with global scope (but internal semantic)' + internal_var_pattern: !!python/unicode '_[0-9A-Z][0-9A-Z_]+' + max_returns: 6 +_help_misc: !!python/unicode 'Miscellaneous configurations options.' +misc: + _help_per_command: + - !!python/unicode 'A dictionary containing any per-command configuration' + - !!python/unicode 'overrides. Currently only `command_case` is supported.' 
+ per_command: {} +_help_parse: !!python/unicode 'Options affecting listfile parsing' +parse: + _help_additional_commands: + - !!python/unicode 'Specify structure for custom cmake functions' + additional_commands: + !!python/unicode 'foo': + !!python/unicode 'flags': + - !!python/unicode 'BAR' + - !!python/unicode 'BAZ' + !!python/unicode 'kwargs': + !!python/unicode 'HEADERS': !!python/unicode '*' + !!python/unicode 'DEPENDS': !!python/unicode '*' + !!python/unicode 'SOURCES': !!python/unicode '*' +_help_encode: !!python/unicode 'Options effecting file encoding' diff --git a/ext/libpqxx-7.7.3/.github/workflows/stale.yml b/ext/libpqxx-7.7.3/.github/workflows/stale.yml new file mode 100644 index 000000000..0983a3e54 --- /dev/null +++ b/ext/libpqxx-7.7.3/.github/workflows/stale.yml @@ -0,0 +1,19 @@ +name: Mark stale issues and pull requests + +on: + schedule: + - cron: "30 1 * * *" + +jobs: + stale: + + runs-on: ubuntu-latest + + steps: + - uses: actions/stale@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + stale-issue-message: 'There has been no activity on this ticket. Consider closing it.' + stale-pr-message: 'There has been no activity on this pull request. Complete it or drop it.' + stale-issue-label: 'no-issue-activity' + stale-pr-label: 'no-pr-activity' diff --git a/ext/libpqxx-7.7.3/.gitignore b/ext/libpqxx-7.7.3/.gitignore new file mode 100644 index 000000000..d798c9dca --- /dev/null +++ b/ext/libpqxx-7.7.3/.gitignore @@ -0,0 +1,49 @@ +autom4te.cache +build-*.out +ChangeLog +CMakeFiles/CMakeTmp +confdefs.h +config.log +config.status +conftest +conftest.cpp +conftest.err +doc/_build +doc/Doxyfile +doc/html/Reference/*.css +doc/html/Reference/*.html +doc/html/Reference/*.js +doc/html/Reference/*.png +doc/html/Reference/*.map +doc/html/Reference/*.md5 +doc/reference-stamp +include/pqxx/config-*-*.h +include/pqxx/config.h +include/pqxx/stamp-h1 +libpqxx.pc +libpqxx-*.tar.gz +libtool +pqxx-config +pqxxlo.txt +test/pqxxlo.txt +tools/pqxxthreadsafety +tools/rmlo +README +win32/common +**/Makefile +**/*.la +**/*.lo +**/*.o +**/*.out +**/.*.swp +**/.swp +**/*.tmp +**/.deps +**/.libs +**/*~ +**/lint.log +**/lint.trs +**/runner +**/runner.log +**/runner.trs +**/test-suite.log diff --git a/ext/libpqxx-7.7.3/.lgtm.yml b/ext/libpqxx-7.7.3/.lgtm.yml new file mode 100644 index 000000000..d53664eb4 --- /dev/null +++ b/ext/libpqxx-7.7.3/.lgtm.yml @@ -0,0 +1,9 @@ +# Config file for lgtm.com static analysis. + +path_classifiers: + test: + - test + generated: + - aclocal.m4 + - configure + - ltmain.sh diff --git a/ext/libpqxx-7.7.3/.lift/ignoreFiles b/ext/libpqxx-7.7.3/.lift/ignoreFiles new file mode 100644 index 000000000..8c6c7a103 --- /dev/null +++ b/ext/libpqxx-7.7.3/.lift/ignoreFiles @@ -0,0 +1,3 @@ +# Make Sonatype Lift ignore these generated files. +configure +config/* diff --git a/ext/libpqxx-7.7.3/AUTHORS b/ext/libpqxx-7.7.3/AUTHORS new file mode 100644 index 000000000..6a922e950 --- /dev/null +++ b/ext/libpqxx-7.7.3/AUTHORS @@ -0,0 +1,4 @@ +Jeroen T. Vermeulen. Wrote the code. +Ray Dassen. Did most of the autoconf etc. stuff. + +Lots of others helped with various other contributions. diff --git a/ext/libpqxx-7.7.3/BUILDING-cmake.md b/ext/libpqxx-7.7.3/BUILDING-cmake.md new file mode 100644 index 000000000..48b1e7930 --- /dev/null +++ b/ext/libpqxx-7.7.3/BUILDING-cmake.md @@ -0,0 +1,272 @@ +Building using CMake +==================== + +The build requires the full PostgreSQL development package. That package must +be installed before you can build libpqxx. 
+ +The instructions will assume that you're working from a command-line shell. +If you prefer to work from an IDE, you'll have to know how your IDE likes to +do things, and you'll want to follow the shell instructions as a guide. + +I'm not too familiar with CMake, and this build relies heavily on contributions +from users. If you see something wrong here, please file a bug and explain, in +simple words, what needs changing and why. + + +Quick start +----------- + +If you just want to get it built and installed quickly, run `cmake` from the +root of the libpqxx source tree. This configures your build. + +Then compile libpqxx by running: + +```shell + cmake --build . +``` + +To install in the default location: + +```shell + cmake --install . +``` + + +Stages +------ + +I'll explain the main build steps in more detail below, but here's a quick +rundown: +1. Configure +2. Compile +3. Test +4. Install +5. Use + +The Test step is optional. + + +Configure +--------- + +Run `cmake` to configure your build. It figures out various parameters, such +as where libpq and its headers are, which C++ features your compiler supports, +and which options your compiler needs. CMake generates configuration for your +build tool: `Makefile`s for `make`, or a Solution (".sln") file for MSVC's +`msbuild`, and so on. + +At this stage you can also override those options yourself. e.g. to instruct +the compiler to look for libpq in a non-standard place, or to use a different +compiler, or pass different compiler flags. Don't try to specify those while +doing the actual compile; set them once when running `cmake`. + +Let's say `$BUILD` is the directory where you want to build libpqxx, and +`$SRC` is where its source code is. So for example, the readme file will be at +`$SRC/README.md`. + +In the simplest case, you just do: + +```shell + cd $BUILD + cmake $SRC +``` + +Add CMake options as needed. There's more about the options below. I'll also +explain the two directories. + + +### Cheat sheet + +Here are some popular `cmake` options for libpqxx: +* `-DSKIP_BUILD_TEST=on` skips compiling libpqxx's tests. +* `-DBUILD_SHARED_LIBS=on` to build a shared library. +* `-DBUILD_SHARED_LIBS=off` to build a static library. +* `-DBUILD_DOC=on` to build documentation. +* `-DINSTALL_TEST=on` to install test executor binary. + +On Windows, I recommend building libpqxx as a shared library and bundling it +with your application. On other platforms I would prefer a static library. + +Building the documentation requires some tools to be installed. It takes at +least Doxygen, but there's no list of requirements. The way to get this set up +is to just try it and see what it's missing. + + +### Generators + +You can also choose your own build tool by telling CMake to use a particular +"generator." For example, here's how to force use of `make`: + +```shell + cmake -G 'Unix Makefiles' +``` + +Or if you prefer to build using `ninja` instead: + +```shell + cmake -G Ninja +``` + +There are many more options. You may prefer yet a different build tool. + + +### Finding libpq + +The CMake step tries to figure out where libpq is, using Cmake's `find_package` +function. If that doesn't work, or if you want a libpq in a different location +from the one it finds, there are two ways to override it. + +The first is to set the individual include and link paths. 
+ +To make the build look for the libpq headers in some directory `$DIR`, add +these options: +* `-DPostgreSQL_TYPE_INCLUDE_DIR=$DIR` +* `-DPostgreSQL_INCLUDE_DIR=$DIR` + +To make the build look for the libpq library binary in a directory `$DIR`, add +this option: +* `-DPostgreSQL_LIBRARY_DIR=$DIR` + +The second, easier way requires CMake 3.12 or better. Here, you specify a path +to a full PostgreSQL build tree. You do this (again for some directory `$DIR`) +by simply passing this cmake option: `-DPostgreSQL_ROOT=$DIR` + + +### Source and Build trees + +Where should you run `cmake`? + +Two directories matter when building libpqxx: the _source tree_ (where the +libpqxx source code is) and the _build tree_ (where you want your build +artefacts). Here I will call them `$SRC` and `$BUILD`, but you can call them +anything you like. + +They can be one and the same, if you like. It's convenient, but less clean, as +source code and build artefacts will exist in the same directory tree. If +you're going to delete the source tree after installing, of course it's fine to +make a mess in there. + + +Compile +------- + +To compile, run: + +```shell + cmake --build $BUILD +``` + +(Where `$BUILD` is again the directory where you wish to do the build.) + +This command will invoke your build tool. Other ways to do the same thing +would be... +* With Unix Makefiles: `make` +* With Ninja: `ninja` +* With Visual Studio: `msbuild libpqxx.sln` +* etc. + +Depending on your build tool, you may want to speed this up by adding an option +like `-j 16`, where `16` is an example of how many processes you might want to +run in parallel. The optimal number depends on your available CPUs and memory. +If you have enough memory, usually the number of CPUs will be a good starting +point for the right number. Don't use this option with Ninja though. It +figures things out for itself. + + +Test +---- + +Of course libpqxx comes with a test suite, to check that the library is +functioning correctly. + +You can run it, but there's one caveat: you need to give it a database where it +can log in, without a password or any other parameters, and try out various +things. + +And when I say you need to "give" it a database, I really mean "give." The +test suite will create and drop tables. Those will all have names prefixed +with "pqxx", so it's probably safe to use a database you already had, but if +any of the items in your database happen to have names starting with `pqxx`, +tough luck. They're fair game. + +Enter this in your shell to build and run the tests: + +```shell + test/runner +``` + + +### Configuring the test database + +But what if you do need a password to log into your test database? Or, what if +it's running on a different system so you need to pass that machine's address? +What if it's not running on the default port? + +You can set these parameters for the test suite, or for any other libpq-based +application, using the following environment variables. (They only set default +values, so they won't override parameters that the application sets in some +other way.) +* `PGHOST` — the IP address where we can contact the database's socket. Or + for a Unix domain socket, its absolute path on the filesystem. +* `PGPORT` — TCP port number on which we can connect to the database. +* `PGDATABASE` — the name of the database to which you wish to connect. +* `PGUSER` — user name under which you wish to log in on the database. +* `PGPASSWORD` — user name's password for accessing the database. 
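These variables are read by libpq itself, so they apply to any libpqxx-based program, not just the test runner. As a rough sketch (not part of this change set; error handling kept minimal), a client that relies purely on those defaults could look like this:

```cpp
#include <iostream>
#include <pqxx/pqxx>

int main() {
  try {
    // Empty connection options: host, port, database, user and password
    // all fall back to PGHOST/PGPORT/PGDATABASE/PGUSER/PGPASSWORD.
    pqxx::connection conn;
    pqxx::work tx{conn};
    pqxx::row r = tx.exec1("SELECT current_database()");
    std::cout << "connected to " << r[0].as<std::string>() << '\n';
    tx.commit();
  } catch (std::exception const& e) {
    std::cerr << "connection failed: " << e.what() << '\n';
    return 1;
  }
}
```

Exporting, say, `PGHOST=/tmp` before running it changes the target without touching the code, which is how the test suite is pointed at its database as well.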
+ +See the full list at https://www.postgresql.org/docs/current/libpq-envars.html + +**Be careful with passwords,** by the way. Depending on your operating system +and configuration, an attacker with access to your machine could try to read +your password if you set it on the command line: +* Your shell may keep a log of the commands you have entered. +* Environment variables may be visible to other users on the system. + +If at all possible, rely on postgres "peer authentication." Once set up, it is +both more secure and more convenient than passwords. + + +Install +------- + +Once you've built libpqxx, CMake can also help you install the library and +headers on your system. The default installation location will vary from one +operating system to another, but you can set it explicitly. + +Let's say you've got your finished build in `$BUILD`, and you want to install +it to your system's default install location. The command for this is: + +```shell + cmake --install $BUILD +``` + +But you may want to install to some other location. Let's call it `$DEST`. +`$DEST` might be something like `/usr/local` on a Unix-like system, or +something like `D:\Software` on a Windows system. + +To install to `$DEST`, run: + +```shell + cmake --install $BUILD --prefix $DEST +``` + + +Use +--- + +Other projects can include libpqxx in their CMake builds. + +`@abrownsword` uses this configuration: + +```cmake + set(libpqxxdir "libpqxx-${LIBVERSION}") # LIBVERSION set above + set(SKIP_BUILD_TEST on) + set(BUILD_SHARED_LIBS OFF) + + # Used this instead of FindLibrary. + # Setting PostgresSQL_INCLUDE_DIRS externally. + set(PostgreSQL_FOUND true) + set(PostgresSQL_INCLUDE_DIR ${PostgresSQL_INCLUDE_DIRS}) + set(PostgresSQL_TYPE_INCLUDE_DIR ${PostgresSQL_INCLUDE_DIRS}) + + add_subdirectory(${libpqxxdir}) +``` diff --git a/ext/libpqxx-7.7.3/BUILDING-configure.md b/ext/libpqxx-7.7.3/BUILDING-configure.md new file mode 100644 index 000000000..7c63f5b4c --- /dev/null +++ b/ext/libpqxx-7.7.3/BUILDING-configure.md @@ -0,0 +1,275 @@ +Building using `configure` +========================== + +The build requires `libpq`, the C client library for PostgreSQL. This library +must be installed before you can build libpqxx. You'll need the headers as +well as the library binary. + +The instructions will assume that you're working from a command-line shell. +If you prefer to work from an IDE, you'll have to know how your IDE likes to +do things, and you'll want to follow the shell instructions as a guide. + + +Quick start +----------- + +If you just want to get it built and installed quickly, try: + +```shell + ./configure + make + sudo make install +``` + +Want more detail? Read on. + + +Stages +------ + +I'll explain the main build steps in more detail below, but here's a quick +overview: +1. Configure +2. Compile +3. Test +4. Install + +The Test step is optional. + + +Configure +--------- + +The `configure` script configures your build. It figures out various +parameters, such as where libpq and its headers are, which C++ features your +compiler supports, and which options your compiler needs. It generates +Makefiles, which in turn tell the `make` utility how to perform tasks such as +compiling libpqxx, running tests, cleaning up after itself, and installing +libpqxx. + +The `configure` step is also where you can set these options, e.g. to instruct +the compiler to look for libpq in a non-standard place, or to use a different +compiler, or pass different compiler flags. 
Don't try to specify those while +doing the actual compile; set them once when running `configure`. + +Let's say `$BUILD` is the directory where you want to build libpqxx, and +`$SRC` is where its source code is. So for example, the readme file will be at +`$SRC/README.md`. + +In the simplest case, you just do: + +```shell + cd $BUILD + $SRC/configure +``` + +Add `configure` options as needed. There's more about the options below, or in +the output of `configure --help`. I'll also explain the two directories. + + +### Cheat sheet + +Here are some popular `configure` options: +* `--disable-documentation` skips building of the documentation. +* `CXXFLAGS=-O0` disables optimisation. Slower code, but faster build. +* `CXXFLAGS=-O3` asks for _more_ optimisation. Faster code, slower build. +* `CXX=clang++` compiles with `clang++` as the compiler. +* `--enable-maintainer-mode` makes the compiler more pedantic about the code. +* `--enable-audit` enables expensive run-time checks for debugging. +* `--with-postgres-lib=$DIR` looks for libpq in `$DIR`. +* `--with-postgres-include=$DIR` looks for the libpq headers in `$DIR`. +* `--prefix=$PATH` prepares to install libpqxx in `$PATH`. +* `--enable-shared` enables compilation of libpqxx as a shared library. +* `--disable-shared` disbles compilation of libpqxx as a shared library. +* `--enable-static` enables compilation of libpqxx as a static library. +* `--disable-static` disables compilation of libpqxx as a static library. +* `--help` shows you a lot more of the options. + +So for example, to get a very quick build but produce very inefficient code: + +```shell + ./configure --disable-documentation CXXFLAGS=-O0 +``` + + +Or if you want to pull out all the stops to find problems in the code: + +```shell + ./configure --enable-maintainer-mode --enable-audit CXXFLAGS=-O3 +``` + +(Requesting `-O3` optimisation will make some compilers perform extra analysis +which may, as a side effect, cause them to notice and warn about certain kinds +of mistakes in the code, such as occasionally-unused variables.) + + +### Finding libpq + +One of `configure`'s most important jobs in the libpqxx build is to find the +headers and library for libpq. It has three ways of finding those: +1. Asking a popular tool called `pkg-config`, if installed. +2. Asking postgres' deprecated `pg_config` tool, if installed. +3. Through explicit command-line options to `configure`. + +The explicit command-line options are `--with-postgres-lib` (for the libpq +library binary) and `--with-postgres-include` (for the libpq headers). + +If you want to use a version of libpq that's not installed in a standard +location, e.g. if you're cross-compiling to produce a binary for a different +CPU architecture than your native system's, use the explicit options. + + +### Where does the `configure` script come from? + +I didn't write the `configure` script. It was generated by GNU `autoconf` and +related GNU tools. There's a script to re-generate it, called `autogen.sh`. + +The contents of `configure` are based on a higher-level script called +`configure.ac`. This is where I script checks for specific features in libpq +or the compiler. The `configure` script adds a lot of built-in items that I +don't need to worry about, such as figuring out exactly how your build tools +work. + +Don't try to debug `configure` yourself if you can help it. 
It's very hard to +read, partly because it's automatically generated, but also because it is +engineered to work with an extremely broad range of shells, compilers, tools, +and operating systems. If you're going to do a "deep dive," try looking at +`configure.ac` instead. + + +### Source and Build trees + +Where should you run `configure`? + +Two directories matter when building libpqxx: the _source tree_ (where the +libpqxx source code is) and the _build tree_ (where you want your build +artefacts). Here I will call them `$SRC` and `$BUILD`, but you can call them +anything you like. + +They can be one and the same, if you like. It's convenient, but less clean, as +source code and build artefacts will exist in the same directory tree. If +you're going to delete the source tree after installing, of course it's fine to +make a mess in there. + + +Compile +------- + +To start the compile, run the `make` tool. It will go through all the steps to +produce a libpqxx library binary. + +Beware though, it only runs _one_ compiler process at a time. That could take +a while. Use the `-j` option to make it run concurrent processes, e.g.: + +```shell + make -j8 +``` + +Very roughly speaking, it's probably fastest if you run one process per CPU +core in your system. If you have the `nproc` utility installed: + +```shell + make -j$(nproc) +``` + +If you want a very fast build and don't mind missing out on efficient code or +documentation, tweak the Configure step above by adding `configure` options +like `CXXFLAGS=-O0` and `--disable-documentation`. + + +Test +---- + +Of course libpqxx comes with a test suite, to check that the library is +functioning correctly. + +You can run it, but there's one caveat: you need to give it a database where it +can log in, without a password or any other parameters, and try out various +things. + +And when I say you need to "give" it a database, I really mean "give." The +test suite will create and drop tables. Those will all have names prefixed +with "pqxx", so it's probably safe to use a database you already had, but if +any of the items in your database happen to have names starting with `pqxx`, +tough luck. They're fair game. + +Enter this in your shell to build and run the tests: + +```shell + make check +``` + +As with compiling, use the `-j` option to make better use of your CPUs. For +example: + +```shell + make check -j$(nproc) +``` + + +### Configuring the test database + +But what if you do need a password to log into your test database? Or, what if +it's running on a different system so you need to pass that machine's address? +What if it's not running on the default port? + +You can set these parameters for the test suite, or for any other libpq-based +application, using the following environment variables. (They only set default +values, so they won't override parameters that the application sets in some +other way.) +* `PGHOST` — the IP address where we can contact the database's socket. Or + for a Unix domain socket, its absolute path on the filesystem. +* `PGPORT` — +* `PGDATABASE` — the name of the database to which you wish to connect. +* `PGUSER` — user name under which you wish to log in on the database. +* `PGPASSWORD` — user name's password for accessing the database. + +See the full list at https://www.postgresql.org/docs/current/libpq-envars.html + +**Be careful with passwords,** by the way. 
Depending on your operating system
+and configuration, an attacker with access to your machine could try to read
+your password if you set it on the command line:
+* Your shell may keep a log of the commands you have entered.
+* Environment variables may be visible to other users on the system.
+
+If at all possible, rely on postgres "peer authentication." Once set up, it is
+both more secure and more convenient than passwords.
+
+
+Install
+-------
+
+Installing libpqxx will install the library and headers in a location chosen at
+the time you ran the `configure` script. On some systems it defaults to the
+`/usr/local/` tree, but it may be different in your environment. Or, use the
+`configure` script's `--prefix` option to set an install location.
+
+(If you want to see exactly what happens, you can run any `make` command line
+with the `-n` option, which means: don't actually do this, but print all the
+commands you would execute if you did. It's a lot of output though.)
+
+To install, ensure that you have sufficient privileges to write the files to
+their install locations, and run:
+
+```shell
+    make install
+```
+
+Save your build tree somewhere, so that you will be able to undo the
+installation in the future:
+
+```shell
+    make uninstall
+```
+
+When using the library, make sure the libpqxx headers are in your compiler's
+include path. (You will no longer need the libpq headers at that time.)
+
+Also, when building an application which uses libpqxx, make sure the libpqxx
+library binary is in your compiler's library search path. And if the library
+binary is a shared library, you'll also need it in your loader's search path
+when running your application.
+
+This last part goes for libpq as well: when using libpq, make sure you have
+the libpq library binary in your compiler's library search path, and if it's a
+shared library, also have it in your loader's search path when running.
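A quick way to confirm that the include path, library search path and loader path described above are all in order is to build a small throwaway program against the installed library. A sketch follows; the compile line in the comment is only an example (exact flags depend on your installation prefix):

```cpp
#include <iostream>
#include <pqxx/pqxx>

// Example build line (adjust paths and flags for your setup):
//   c++ -std=c++17 check.cxx -lpqxx -lpq -o check
int main() {
  try {
    pqxx::connection conn;
    pqxx::work tx{conn};
    pqxx::row r = tx.exec1("SELECT version()");
    std::cout << r[0].as<std::string>()
              << " (server " << conn.server_version() << ")\n";
    tx.commit();
  } catch (pqxx::broken_connection const& e) {
    // Could not reach or log into the database at all.
    std::cerr << "connection problem: " << e.what() << '\n';
    return 1;
  } catch (pqxx::sql_error const& e) {
    // The server rejected a query.
    std::cerr << "query '" << e.query() << "' failed: " << e.what() << '\n';
    return 1;
  }
}
```

If this compiles, links, and runs, the compiler, linker, and loader are all finding libpqxx (and libpq) where they should.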
diff --git a/ext/libpqxx-7.7.3/CMakeLists.txt b/ext/libpqxx-7.7.3/CMakeLists.txt new file mode 100644 index 000000000..55b3c4d44 --- /dev/null +++ b/ext/libpqxx-7.7.3/CMakeLists.txt @@ -0,0 +1,66 @@ +cmake_minimum_required(VERSION 3.8) + +file(READ VERSION VER_FILE_CONTENT) +string(STRIP ${VER_FILE_CONTENT} VER_FILE_CONTENT) + +project( + libpqxx + VERSION ${VER_FILE_CONTENT} + LANGUAGES CXX +) + +if(NOT "${CMAKE_CXX_STANDARD}") + set(CMAKE_CXX_STANDARD 17) +endif() +set(CMAKE_CXX_STANDARD_REQUIRED ON) +set(CMAKE_CXX_EXTENSIONS OFF) +set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake) + +option(BUILD_DOC "Build documentation" OFF) + +if(NOT SKIP_BUILD_TEST) + option(BUILD_TEST "Build all test cases" ON) +endif() + +include(GNUInstallDirs) +include(CMakePackageConfigHelpers) +include(config) + +add_subdirectory(src) +add_subdirectory(include) +if(BUILD_DOC) + add_subdirectory(doc) +endif() +if(BUILD_TEST) + add_subdirectory(test) +endif() + +# installation +write_basic_package_version_file( + "${CMAKE_CURRENT_BINARY_DIR}/libpqxx-config-version.cmake" + VERSION ${PROJECT_VERSION} + COMPATIBILITY SameMajorVersion +) +install(FILES cmake/libpqxx-config.cmake + "${CMAKE_CURRENT_BINARY_DIR}/libpqxx-config-version.cmake" + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/libpqxx +) +install( + EXPORT libpqxx-targets + NAMESPACE libpqxx:: + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/libpqxx +) +# Build tree export +export( + EXPORT libpqxx-targets + NAMESPACE libpqxx:: + FILE ${CMAKE_CURRENT_BINARY_DIR}/libpqxx-targets.cmake +) +configure_file( + cmake/libpqxx-config.cmake ${CMAKE_CURRENT_BINARY_DIR}/libpqxx-config.cmake + COPYONLY +) +# Package generation +set(CPACK_GENERATOR TGZ) +set(CPACK_PACKAGE_VERSION ${PROJECT_VERSION}) +include(CPack) diff --git a/ext/libpqxx-7.7.3/COPYING b/ext/libpqxx-7.7.3/COPYING new file mode 100644 index 000000000..8a566aabe --- /dev/null +++ b/ext/libpqxx-7.7.3/COPYING @@ -0,0 +1,27 @@ +Copyright (c) 2000-2022 Jeroen T. Vermeulen. + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the author, nor the names of other contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ diff --git a/ext/libpqxx-7.7.3/Makefile.am b/ext/libpqxx-7.7.3/Makefile.am new file mode 100644 index 000000000..cd93a57ef --- /dev/null +++ b/ext/libpqxx-7.7.3/Makefile.am @@ -0,0 +1,23 @@ +SUBDIRS = include src test tools config doc +EXTRA_DIST = autogen.sh configitems README.md VERSION requirements.json + +MAINTAINERCLEANFILES = \ + Makefile.in aclocal.m4 config.h.in config.log configure stamp-h.in + +pkgconfigdir = $(libdir)/pkgconfig +pkgconfig_DATA = libpqxx.pc + +TESTS = tools/lint + + +# Generate ChangeLog from git history. It goes all the way back through +# the project's git, bzr, svn, and cvs days. +dist-hook: ChangeLog + +ChangeLog: configure.ac + git log --stat --name-only --date=short --abbrev-commit >$@ + + +# We use README.md, but automake expects plain README. +README: README.md + ln -s $< $@ diff --git a/ext/libpqxx-7.7.3/Makefile.in b/ext/libpqxx-7.7.3/Makefile.in new file mode 100644 index 000000000..f6798aa75 --- /dev/null +++ b/ext/libpqxx-7.7.3/Makefile.in @@ -0,0 +1,1253 @@ +# Makefile.in generated by automake 1.16.4 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2021 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = . 
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/config/m4/libtool.m4 \ + $(top_srcdir)/config/m4/ltoptions.m4 \ + $(top_srcdir)/config/m4/ltsugar.m4 \ + $(top_srcdir)/config/m4/ltversion.m4 \ + $(top_srcdir)/config/m4/lt~obsolete.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \ + $(am__configure_deps) $(am__DIST_COMMON) +am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ + configure.lineno config.status.lineno +mkinstalldirs = $(SHELL) $(top_srcdir)/config/mkinstalldirs +CONFIG_HEADER = $(top_builddir)/include/pqxx/config.h +CONFIG_CLEAN_FILES = libpqxx.pc compile_flags +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(pkgconfigdir)" +DATA = $(pkgconfig_DATA) +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + cscope check recheck distdir distdir-am dist dist-all \ + distcheck +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. 
Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. +am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +am__tty_colors_dummy = \ + mgn= red= grn= lgn= blu= brg= std=; \ + am__color_tests=no +am__tty_colors = { \ + $(am__tty_colors_dummy); \ + if test "X$(AM_COLOR_TESTS)" = Xno; then \ + am__color_tests=no; \ + elif test "X$(AM_COLOR_TESTS)" = Xalways; then \ + am__color_tests=yes; \ + elif test "X$$TERM" != Xdumb && { test -t 1; } 2>/dev/null; then \ + am__color_tests=yes; \ + fi; \ + if test $$am__color_tests = yes; then \ + red=''; \ + grn=''; \ + lgn=''; \ + blu=''; \ + mgn=''; \ + brg=''; \ + std=''; \ + fi; \ +} +am__recheck_rx = ^[ ]*:recheck:[ ]* +am__global_test_result_rx = ^[ ]*:global-test-result:[ ]* +am__copy_in_global_log_rx = ^[ ]*:copy-in-global-log:[ ]* +# A command that, given a newline-separated list of test names on the +# standard input, print the name of the tests that are to be re-run +# upon "make recheck". +am__list_recheck_tests = $(AWK) '{ \ + recheck = 1; \ + while ((rc = (getline line < ($$0 ".trs"))) != 0) \ + { \ + if (rc < 0) \ + { \ + if ((getline line2 < ($$0 ".log")) < 0) \ + recheck = 0; \ + break; \ + } \ + else if (line ~ /$(am__recheck_rx)[nN][Oo]/) \ + { \ + recheck = 0; \ + break; \ + } \ + else if (line ~ /$(am__recheck_rx)[yY][eE][sS]/) \ + { \ + break; \ + } \ + }; \ + if (recheck) \ + print $$0; \ + close ($$0 ".trs"); \ + close ($$0 ".log"); \ +}' +# A command that, given a newline-separated list of test names on the +# standard input, create the global log from their .trs and .log files. +am__create_global_log = $(AWK) ' \ +function fatal(msg) \ +{ \ + print "fatal: making $@: " msg | "cat >&2"; \ + exit 1; \ +} \ +function rst_section(header) \ +{ \ + print header; \ + len = length(header); \ + for (i = 1; i <= len; i = i + 1) \ + printf "="; \ + printf "\n\n"; \ +} \ +{ \ + copy_in_global_log = 1; \ + global_test_result = "RUN"; \ + while ((rc = (getline line < ($$0 ".trs"))) != 0) \ + { \ + if (rc < 0) \ + fatal("failed to read from " $$0 ".trs"); \ + if (line ~ /$(am__global_test_result_rx)/) \ + { \ + sub("$(am__global_test_result_rx)", "", line); \ + sub("[ ]*$$", "", line); \ + global_test_result = line; \ + } \ + else if (line ~ /$(am__copy_in_global_log_rx)[nN][oO]/) \ + copy_in_global_log = 0; \ + }; \ + if (copy_in_global_log) \ + { \ + rst_section(global_test_result ": " $$0); \ + while ((rc = (getline line < ($$0 ".log"))) != 0) \ + { \ + if (rc < 0) \ + fatal("failed to read from " $$0 ".log"); \ + print line; \ + }; \ + printf "\n"; \ + }; \ + close ($$0 ".trs"); \ + close ($$0 ".log"); \ +}' +# Restructured Text title. +am__rst_title = { sed 's/.*/ & /;h;s/./=/g;p;x;s/ *$$//;p;g' && echo; } +# Solaris 10 'make', and several other traditional 'make' implementations, +# pass "-e" to $(SHELL), and POSIX 2008 even requires this. Work around it +# by disabling -e (using the XSI extension "set +e") if it's set. +am__sh_e_setup = case $$- in *e*) set +e;; esac +# Default flags passed to test drivers. 
+am__common_driver_flags = \ + --color-tests "$$am__color_tests" \ + --enable-hard-errors "$$am__enable_hard_errors" \ + --expect-failure "$$am__expect_failure" +# To be inserted before the command running the test. Creates the +# directory for the log if needed. Stores in $dir the directory +# containing $f, in $tst the test, in $log the log. Executes the +# developer- defined test setup AM_TESTS_ENVIRONMENT (if any), and +# passes TESTS_ENVIRONMENT. Set up options for the wrapper that +# will run the test scripts (or their associated LOG_COMPILER, if +# thy have one). +am__check_pre = \ +$(am__sh_e_setup); \ +$(am__vpath_adj_setup) $(am__vpath_adj) \ +$(am__tty_colors); \ +srcdir=$(srcdir); export srcdir; \ +case "$@" in \ + */*) am__odir=`echo "./$@" | sed 's|/[^/]*$$||'`;; \ + *) am__odir=.;; \ +esac; \ +test "x$$am__odir" = x"." || test -d "$$am__odir" \ + || $(MKDIR_P) "$$am__odir" || exit $$?; \ +if test -f "./$$f"; then dir=./; \ +elif test -f "$$f"; then dir=; \ +else dir="$(srcdir)/"; fi; \ +tst=$$dir$$f; log='$@'; \ +if test -n '$(DISABLE_HARD_ERRORS)'; then \ + am__enable_hard_errors=no; \ +else \ + am__enable_hard_errors=yes; \ +fi; \ +case " $(XFAIL_TESTS) " in \ + *[\ \ ]$$f[\ \ ]* | *[\ \ ]$$dir$$f[\ \ ]*) \ + am__expect_failure=yes;; \ + *) \ + am__expect_failure=no;; \ +esac; \ +$(AM_TESTS_ENVIRONMENT) $(TESTS_ENVIRONMENT) +# A shell command to get the names of the tests scripts with any registered +# extension removed (i.e., equivalently, the names of the test logs, with +# the '.log' extension removed). The result is saved in the shell variable +# '$bases'. This honors runtime overriding of TESTS and TEST_LOGS. Sadly, +# we cannot use something simpler, involving e.g., "$(TEST_LOGS:.log=)", +# since that might cause problem with VPATH rewrites for suffix-less tests. +# See also 'test-harness-vpath-rewrite.sh' and 'test-trs-basic.sh'. +am__set_TESTS_bases = \ + bases='$(TEST_LOGS)'; \ + bases=`for i in $$bases; do echo $$i; done | sed 's/\.log$$//'`; \ + bases=`echo $$bases` +AM_TESTSUITE_SUMMARY_HEADER = ' for $(PACKAGE_STRING)' +RECHECK_LOGS = $(TEST_LOGS) +TEST_SUITE_LOG = test-suite.log +TEST_EXTENSIONS = @EXEEXT@ .test +LOG_DRIVER = $(SHELL) $(top_srcdir)/config/test-driver +LOG_COMPILE = $(LOG_COMPILER) $(AM_LOG_FLAGS) $(LOG_FLAGS) +am__set_b = \ + case '$@' in \ + */*) \ + case '$*' in \ + */*) b='$*';; \ + *) b=`echo '$@' | sed 's/\.log$$//'`; \ + esac;; \ + *) \ + b='$*';; \ + esac +am__test_logs1 = $(TESTS:=.log) +am__test_logs2 = $(am__test_logs1:@EXEEXT@.log=.log) +TEST_LOGS = $(am__test_logs2:.test.log=.log) +TEST_LOG_DRIVER = $(SHELL) $(top_srcdir)/config/test-driver +TEST_LOG_COMPILE = $(TEST_LOG_COMPILER) $(AM_TEST_LOG_FLAGS) \ + $(TEST_LOG_FLAGS) +DIST_SUBDIRS = $(SUBDIRS) +am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/compile_flags.in \ + $(srcdir)/libpqxx.pc.in $(top_srcdir)/config/compile \ + $(top_srcdir)/config/config.guess \ + $(top_srcdir)/config/config.sub \ + $(top_srcdir)/config/install-sh $(top_srcdir)/config/ltmain.sh \ + $(top_srcdir)/config/missing \ + $(top_srcdir)/config/mkinstalldirs \ + $(top_srcdir)/config/test-driver AUTHORS COPYING ChangeLog \ + INSTALL NEWS README README.md +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +distdir = $(PACKAGE)-$(VERSION) +top_distdir = $(distdir) +am__remove_distdir = \ + if test -d "$(distdir)"; then \ + find "$(distdir)" -type d ! 
-perm -200 -exec chmod u+w {} ';' \ + && rm -rf "$(distdir)" \ + || { sleep 5 && rm -rf "$(distdir)"; }; \ + else :; fi +am__post_remove_distdir = $(am__remove_distdir) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +DIST_ARCHIVES = $(distdir).tar.gz +GZIP_ENV = --best +DIST_TARGETS = dist-gzip +# Exists only to be overridden by the user if desired. +AM_DISTCHECK_DVI_TARGET = dvi +distuninstallcheck_listfiles = find . -type f -print +am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \ + | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$' +distcleancheck_listfiles = find . -type f -print +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CSCOPE = @CSCOPE@ +CTAGS = @CTAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DOXYGEN = @DOXYGEN@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ETAGS = @ETAGS@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GREP = @GREP@ +HAVE_DOT = @HAVE_DOT@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MKDIR = @MKDIR@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PG_CONFIG = @PG_CONFIG@ +PKG_CONFIG = @PKG_CONFIG@ +POSTGRES_INCLUDE = @POSTGRES_INCLUDE@ +PQXXVERSION = @PQXXVERSION@ +PQXX_ABI = @PQXX_ABI@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ 
+build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +with_postgres_lib = @with_postgres_lib@ +SUBDIRS = include src test tools config doc +EXTRA_DIST = autogen.sh configitems README.md VERSION requirements.json +MAINTAINERCLEANFILES = \ + Makefile.in aclocal.m4 config.h.in config.log configure stamp-h.in + +pkgconfigdir = $(libdir)/pkgconfig +pkgconfig_DATA = libpqxx.pc +TESTS = tools/lint +all: all-recursive + +.SUFFIXES: +.SUFFIXES: .log .test .test$(EXEEXT) .trs +am--refresh: Makefile + @: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + echo ' cd $(srcdir) && $(AUTOMAKE) --gnu'; \ + $(am__cd) $(srcdir) && $(AUTOMAKE) --gnu \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' 
in \ + *config.status*) \ + echo ' $(SHELL) ./config.status'; \ + $(SHELL) ./config.status;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + $(SHELL) ./config.status --recheck + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + $(am__cd) $(srcdir) && $(AUTOCONF) +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) +$(am__aclocal_m4_deps): +libpqxx.pc: $(top_builddir)/config.status $(srcdir)/libpqxx.pc.in + cd $(top_builddir) && $(SHELL) ./config.status $@ +compile_flags: $(top_builddir)/config.status $(srcdir)/compile_flags.in + cd $(top_builddir) && $(SHELL) ./config.status $@ + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +distclean-libtool: + -rm -f libtool config.lt +install-pkgconfigDATA: $(pkgconfig_DATA) + @$(NORMAL_INSTALL) + @list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(pkgconfigdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(pkgconfigdir)" || exit 1; \ + fi; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(pkgconfigdir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(pkgconfigdir)" || exit $$?; \ + done + +uninstall-pkgconfigDATA: + @$(NORMAL_UNINSTALL) + @list='$(pkgconfig_DATA)'; test -n "$(pkgconfigdir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(pkgconfigdir)'; $(am__uninstall_files_from_dir) + +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. +# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. +$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! 
-f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscope: cscope.files + test ! -s cscope.files \ + || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS) +clean-cscope: + -rm -f cscope.files +cscope.files: clean-cscope cscopelist +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + -rm -f cscope.out cscope.in.out cscope.po.out cscope.files + +# Recover from deleted '.trs' file; this should ensure that +# "rm -f foo.log; make foo.trs" re-run 'foo.test', and re-create +# both 'foo.log' and 'foo.trs'. Break the recipe in two subshells +# to avoid problems with "make -n". +.log.trs: + rm -f $< $@ + $(MAKE) $(AM_MAKEFLAGS) $< + +# Leading 'am--fnord' is there to ensure the list of targets does not +# expand to empty, as could happen e.g. with make check TESTS=''. 
+am--fnord $(TEST_LOGS) $(TEST_LOGS:.log=.trs): $(am__force_recheck) +am--force-recheck: + @: + +$(TEST_SUITE_LOG): $(TEST_LOGS) + @$(am__set_TESTS_bases); \ + am__f_ok () { test -f "$$1" && test -r "$$1"; }; \ + redo_bases=`for i in $$bases; do \ + am__f_ok $$i.trs && am__f_ok $$i.log || echo $$i; \ + done`; \ + if test -n "$$redo_bases"; then \ + redo_logs=`for i in $$redo_bases; do echo $$i.log; done`; \ + redo_results=`for i in $$redo_bases; do echo $$i.trs; done`; \ + if $(am__make_dryrun); then :; else \ + rm -f $$redo_logs && rm -f $$redo_results || exit 1; \ + fi; \ + fi; \ + if test -n "$$am__remaking_logs"; then \ + echo "fatal: making $(TEST_SUITE_LOG): possible infinite" \ + "recursion detected" >&2; \ + elif test -n "$$redo_logs"; then \ + am__remaking_logs=yes $(MAKE) $(AM_MAKEFLAGS) $$redo_logs; \ + fi; \ + if $(am__make_dryrun); then :; else \ + st=0; \ + errmsg="fatal: making $(TEST_SUITE_LOG): failed to create"; \ + for i in $$redo_bases; do \ + test -f $$i.trs && test -r $$i.trs \ + || { echo "$$errmsg $$i.trs" >&2; st=1; }; \ + test -f $$i.log && test -r $$i.log \ + || { echo "$$errmsg $$i.log" >&2; st=1; }; \ + done; \ + test $$st -eq 0 || exit 1; \ + fi + @$(am__sh_e_setup); $(am__tty_colors); $(am__set_TESTS_bases); \ + ws='[ ]'; \ + results=`for b in $$bases; do echo $$b.trs; done`; \ + test -n "$$results" || results=/dev/null; \ + all=` grep "^$$ws*:test-result:" $$results | wc -l`; \ + pass=` grep "^$$ws*:test-result:$$ws*PASS" $$results | wc -l`; \ + fail=` grep "^$$ws*:test-result:$$ws*FAIL" $$results | wc -l`; \ + skip=` grep "^$$ws*:test-result:$$ws*SKIP" $$results | wc -l`; \ + xfail=`grep "^$$ws*:test-result:$$ws*XFAIL" $$results | wc -l`; \ + xpass=`grep "^$$ws*:test-result:$$ws*XPASS" $$results | wc -l`; \ + error=`grep "^$$ws*:test-result:$$ws*ERROR" $$results | wc -l`; \ + if test `expr $$fail + $$xpass + $$error` -eq 0; then \ + success=true; \ + else \ + success=false; \ + fi; \ + br='==================='; br=$$br$$br$$br$$br; \ + result_count () \ + { \ + if test x"$$1" = x"--maybe-color"; then \ + maybe_colorize=yes; \ + elif test x"$$1" = x"--no-color"; then \ + maybe_colorize=no; \ + else \ + echo "$@: invalid 'result_count' usage" >&2; exit 4; \ + fi; \ + shift; \ + desc=$$1 count=$$2; \ + if test $$maybe_colorize = yes && test $$count -gt 0; then \ + color_start=$$3 color_end=$$std; \ + else \ + color_start= color_end=; \ + fi; \ + echo "$${color_start}# $$desc $$count$${color_end}"; \ + }; \ + create_testsuite_report () \ + { \ + result_count $$1 "TOTAL:" $$all "$$brg"; \ + result_count $$1 "PASS: " $$pass "$$grn"; \ + result_count $$1 "SKIP: " $$skip "$$blu"; \ + result_count $$1 "XFAIL:" $$xfail "$$lgn"; \ + result_count $$1 "FAIL: " $$fail "$$red"; \ + result_count $$1 "XPASS:" $$xpass "$$red"; \ + result_count $$1 "ERROR:" $$error "$$mgn"; \ + }; \ + { \ + echo "$(PACKAGE_STRING): $(subdir)/$(TEST_SUITE_LOG)" | \ + $(am__rst_title); \ + create_testsuite_report --no-color; \ + echo; \ + echo ".. 
contents:: :depth: 2"; \ + echo; \ + for b in $$bases; do echo $$b; done \ + | $(am__create_global_log); \ + } >$(TEST_SUITE_LOG).tmp || exit 1; \ + mv $(TEST_SUITE_LOG).tmp $(TEST_SUITE_LOG); \ + if $$success; then \ + col="$$grn"; \ + else \ + col="$$red"; \ + test x"$$VERBOSE" = x || cat $(TEST_SUITE_LOG); \ + fi; \ + echo "$${col}$$br$${std}"; \ + echo "$${col}Testsuite summary"$(AM_TESTSUITE_SUMMARY_HEADER)"$${std}"; \ + echo "$${col}$$br$${std}"; \ + create_testsuite_report --maybe-color; \ + echo "$$col$$br$$std"; \ + if $$success; then :; else \ + echo "$${col}See $(subdir)/$(TEST_SUITE_LOG)$${std}"; \ + if test -n "$(PACKAGE_BUGREPORT)"; then \ + echo "$${col}Please report to $(PACKAGE_BUGREPORT)$${std}"; \ + fi; \ + echo "$$col$$br$$std"; \ + fi; \ + $$success || exit 1 + +check-TESTS: + @list='$(RECHECK_LOGS)'; test -z "$$list" || rm -f $$list + @list='$(RECHECK_LOGS:.log=.trs)'; test -z "$$list" || rm -f $$list + @test -z "$(TEST_SUITE_LOG)" || rm -f $(TEST_SUITE_LOG) + @set +e; $(am__set_TESTS_bases); \ + log_list=`for i in $$bases; do echo $$i.log; done`; \ + trs_list=`for i in $$bases; do echo $$i.trs; done`; \ + log_list=`echo $$log_list`; trs_list=`echo $$trs_list`; \ + $(MAKE) $(AM_MAKEFLAGS) $(TEST_SUITE_LOG) TEST_LOGS="$$log_list"; \ + exit $$?; +recheck: all + @test -z "$(TEST_SUITE_LOG)" || rm -f $(TEST_SUITE_LOG) + @set +e; $(am__set_TESTS_bases); \ + bases=`for i in $$bases; do echo $$i; done \ + | $(am__list_recheck_tests)` || exit 1; \ + log_list=`for i in $$bases; do echo $$i.log; done`; \ + log_list=`echo $$log_list`; \ + $(MAKE) $(AM_MAKEFLAGS) $(TEST_SUITE_LOG) \ + am__force_recheck=am--force-recheck \ + TEST_LOGS="$$log_list"; \ + exit $$? +tools/lint.log: tools/lint + @p='tools/lint'; \ + b='tools/lint'; \ + $(am__check_pre) $(LOG_DRIVER) --test-name "$$f" \ + --log-file $$b.log --trs-file $$b.trs \ + $(am__common_driver_flags) $(AM_LOG_DRIVER_FLAGS) $(LOG_DRIVER_FLAGS) -- $(LOG_COMPILE) \ + "$$tst" $(AM_TESTS_FD_REDIRECT) +.test.log: + @p='$<'; \ + $(am__set_b); \ + $(am__check_pre) $(TEST_LOG_DRIVER) --test-name "$$f" \ + --log-file $$b.log --trs-file $$b.trs \ + $(am__common_driver_flags) $(AM_TEST_LOG_DRIVER_FLAGS) $(TEST_LOG_DRIVER_FLAGS) -- $(TEST_LOG_COMPILE) \ + "$$tst" $(AM_TESTS_FD_REDIRECT) +@am__EXEEXT_TRUE@.test$(EXEEXT).log: +@am__EXEEXT_TRUE@ @p='$<'; \ +@am__EXEEXT_TRUE@ $(am__set_b); \ +@am__EXEEXT_TRUE@ $(am__check_pre) $(TEST_LOG_DRIVER) --test-name "$$f" \ +@am__EXEEXT_TRUE@ --log-file $$b.log --trs-file $$b.trs \ +@am__EXEEXT_TRUE@ $(am__common_driver_flags) $(AM_TEST_LOG_DRIVER_FLAGS) $(TEST_LOG_DRIVER_FLAGS) -- $(TEST_LOG_COMPILE) \ +@am__EXEEXT_TRUE@ "$$tst" $(AM_TESTS_FD_REDIRECT) +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am + +distdir-am: $(DISTFILES) + $(am__remove_distdir) + test -d "$(distdir)" || mkdir "$(distdir)" + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find 
"$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$(top_distdir)" distdir="$(distdir)" \ + dist-hook + -test -n "$(am__skip_mode_fix)" \ + || find "$(distdir)" -type d ! -perm -755 \ + -exec chmod u+rwx,go+rx {} \; -o \ + ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ + ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ + ! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ + || chmod -R a+r "$(distdir)" +dist-gzip: distdir + tardir=$(distdir) && $(am__tar) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).tar.gz + $(am__post_remove_distdir) + +dist-bzip2: distdir + tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2 + $(am__post_remove_distdir) + +dist-lzip: distdir + tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz + $(am__post_remove_distdir) + +dist-xz: distdir + tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz + $(am__post_remove_distdir) + +dist-zstd: distdir + tardir=$(distdir) && $(am__tar) | zstd -c $${ZSTD_CLEVEL-$${ZSTD_OPT--19}} >$(distdir).tar.zst + $(am__post_remove_distdir) + +dist-tarZ: distdir + @echo WARNING: "Support for distribution archives compressed with" \ + "legacy program 'compress' is deprecated." >&2 + @echo WARNING: "It will be removed altogether in Automake 2.0" >&2 + tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z + $(am__post_remove_distdir) + +dist-shar: distdir + @echo WARNING: "Support for shar distribution archives is" \ + "deprecated." >&2 + @echo WARNING: "It will be removed altogether in Automake 2.0" >&2 + shar $(distdir) | eval GZIP= gzip $(GZIP_ENV) -c >$(distdir).shar.gz + $(am__post_remove_distdir) + +dist-zip: distdir + -rm -f $(distdir).zip + zip -rq $(distdir).zip $(distdir) + $(am__post_remove_distdir) + +dist dist-all: + $(MAKE) $(AM_MAKEFLAGS) $(DIST_TARGETS) am__post_remove_distdir='@:' + $(am__post_remove_distdir) + +# This target untars the dist file and tries a VPATH configuration. Then +# it guarantees that the distribution is self-contained by making another +# tarfile. 
+distcheck: dist + case '$(DIST_ARCHIVES)' in \ + *.tar.gz*) \ + eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).tar.gz | $(am__untar) ;;\ + *.tar.bz2*) \ + bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\ + *.tar.lz*) \ + lzip -dc $(distdir).tar.lz | $(am__untar) ;;\ + *.tar.xz*) \ + xz -dc $(distdir).tar.xz | $(am__untar) ;;\ + *.tar.Z*) \ + uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ + *.shar.gz*) \ + eval GZIP= gzip $(GZIP_ENV) -dc $(distdir).shar.gz | unshar ;;\ + *.zip*) \ + unzip $(distdir).zip ;;\ + *.tar.zst*) \ + zstd -dc $(distdir).tar.zst | $(am__untar) ;;\ + esac + chmod -R a-w $(distdir) + chmod u+w $(distdir) + mkdir $(distdir)/_build $(distdir)/_build/sub $(distdir)/_inst + chmod a-w $(distdir) + test -d $(distdir)/_build || exit 0; \ + dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ + && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ + && am__cwd=`pwd` \ + && $(am__cd) $(distdir)/_build/sub \ + && ../../configure \ + $(AM_DISTCHECK_CONFIGURE_FLAGS) \ + $(DISTCHECK_CONFIGURE_FLAGS) \ + --srcdir=../.. --prefix="$$dc_install_base" \ + && $(MAKE) $(AM_MAKEFLAGS) \ + && $(MAKE) $(AM_MAKEFLAGS) $(AM_DISTCHECK_DVI_TARGET) \ + && $(MAKE) $(AM_MAKEFLAGS) check \ + && $(MAKE) $(AM_MAKEFLAGS) install \ + && $(MAKE) $(AM_MAKEFLAGS) installcheck \ + && $(MAKE) $(AM_MAKEFLAGS) uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ + distuninstallcheck \ + && chmod -R a-w "$$dc_install_base" \ + && ({ \ + (cd ../.. && umask 077 && mkdir "$$dc_destdir") \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ + distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ + } || { rm -rf "$$dc_destdir"; exit 1; }) \ + && rm -rf "$$dc_destdir" \ + && $(MAKE) $(AM_MAKEFLAGS) dist \ + && rm -rf $(DIST_ARCHIVES) \ + && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \ + && cd "$$am__cwd" \ + || exit 1 + $(am__post_remove_distdir) + @(echo "$(distdir) archives ready for distribution: "; \ + list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ + sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' +distuninstallcheck: + @test -n '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: trying to run $@ with an empty' \ + '$$(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + $(am__cd) '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \ + || { echo "ERROR: files left after uninstall:" ; \ + if test -n "$(DESTDIR)"; then \ + echo " (check DESTDIR support)"; \ + fi ; \ + $(distuninstallcheck_listfiles) ; \ + exit 1; } >&2 +distcleancheck: distclean + @if test '$(srcdir)' = . 
; then \ + echo "ERROR: distcleancheck can only run from a VPATH build" ; \ + exit 1 ; \ + fi + @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ + || { echo "ERROR: files left in build directory after distclean:" ; \ + $(distcleancheck_listfiles) ; \ + exit 1; } >&2 +check-am: all-am + $(MAKE) $(AM_MAKEFLAGS) check-TESTS +check: check-recursive +all-am: Makefile $(DATA) +installdirs: installdirs-recursive +installdirs-am: + for dir in "$(DESTDIR)$(pkgconfigdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + -test -z "$(TEST_LOGS)" || rm -f $(TEST_LOGS) + -test -z "$(TEST_LOGS:.log=.trs)" || rm -f $(TEST_LOGS:.log=.trs) + -test -z "$(TEST_SUITE_LOG)" || rm -f $(TEST_SUITE_LOG) + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-recursive + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-recursive + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-libtool \ + distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +html-am: + +info: info-recursive + +info-am: + +install-data-am: install-pkgconfigDATA + +install-dvi: install-dvi-recursive + +install-dvi-am: + +install-exec-am: + +install-html: install-html-recursive + +install-html-am: + +install-info: install-info-recursive + +install-info-am: + +install-man: + +install-pdf: install-pdf-recursive + +install-pdf-am: + +install-ps: install-ps-recursive + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -rf $(top_srcdir)/autom4te.cache + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: uninstall-pkgconfigDATA + +.MAKE: $(am__recursive_targets) check-am install-am install-strip + +.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am \ + am--refresh check check-TESTS check-am clean clean-cscope \ + clean-generic clean-libtool cscope cscopelist-am ctags \ + ctags-am dist dist-all dist-bzip2 dist-gzip dist-hook \ + dist-lzip dist-shar dist-tarZ dist-xz dist-zip dist-zstd \ + distcheck distclean distclean-generic distclean-libtool \ + distclean-tags distcleancheck distdir distuninstallcheck dvi \ + dvi-am html html-am info info-am install install-am \ + install-data 
install-data-am install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info install-info-am install-man install-pdf \ + install-pdf-am install-pkgconfigDATA install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + installdirs-am maintainer-clean maintainer-clean-generic \ + mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \ + ps ps-am recheck tags tags-am uninstall uninstall-am \ + uninstall-pkgconfigDATA + +.PRECIOUS: Makefile + + +# Generate ChangeLog from git history. It goes all the way back through +# the project's git, bzr, svn, and cvs days. +dist-hook: ChangeLog + +ChangeLog: configure.ac + git log --stat --name-only --date=short --abbrev-commit >$@ + +# We use README.md, but automake expects plain README. +README: README.md + ln -s $< $@ + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/ext/libpqxx-7.7.3/NEWS b/ext/libpqxx-7.7.3/NEWS new file mode 100644 index 000000000..102716c2e --- /dev/null +++ b/ext/libpqxx-7.7.3/NEWS @@ -0,0 +1,1040 @@ +7.7.3 + - Fix up more damage done by auto-formatting. + - New `result::for_each()`: simple iteration and conversion of rows. (#528) + - Add some missing headers in ``. (#551) + - More strictness in `header-pre.hxx`/`header-post.hxx` checking. + - Disallow nesting of `ignore-deprecated` blocks. + - Deprecate `exec` functions' `desc` parameter. + - Fix `placeholders` documentation. (#557) + - Strip `const` and references from `value_type`. (#558) + - Get tests running on appveyor. (#560) + - Fix broken nonblocking connection on Windows. (#560) +7.7.2 + - Fix up damage done by auto-formatting. +7.7.1 + - When you build libpqxx, configure the compiler's C++ version yourself! + - Finally fix a long-standing silly warning in autogen. + - Fix `digit_to_number` not being found on some comilers. (#518, #519) + - In audit mode, define `_FORTIFY_SOURCE` to enable some extra checks. + - Make more functions `constexpr`. Nothing particularly useful though. + - Make more functions `noexcept`. + - Move constructor & assignment for `result`. + - Support LGTM.com and Facebook "infer" static analysis. + - Deprecated `set_variable`/`get_variable` on `transaction_base`. + - (Design unearthed warts in SQL variables, which were then fixed.) + - Set/get session variables on `connection`: `set_session_var`/`get_var`. + - Set/get local variables: execute SQL statements. + - When using `select()`, include `` if available. +7.7.0 + - Fix `stream_to` for differing table/client encodings. (#473) + - Use `[[likely]]` & `[[unlikely]]` only in C++20, to silence warnings. + - Fix clang "not a const expression" error in Windows. (#472) + - Fix warnings about `[[likely]]` in `if constexpr`. (#475) + - Clearer error for ambiguous string conversion of `char` type. (#481) + - Pseudo-statement in `prepare()` error was for the wrong statement. (#488) + - New class, `connecting` for nonblocking connection to the database. (#487) + - New class, `range` for SQL range types. (#490) + - Replace `std::isdigit` with a safer alternative. + - Support string conversions for `std::chrono::year_month_day`. (#492) + - Helper for implementing string traits: `generic_to_buf`. + - Support `result::at(row_num, col_num)`. + - Support `result[row_num, col_num]` if the compiler allows it. + - Work around broken `std::filesystem::path` in MinGW. (#498) + - Fix leak when getting client encoding fails. 
(#500) + - Use `std::cmp_greater` etc. when available. Saves some ugly casts. + - Move `result_iterator.hxx` into `pqxx/include/internal/`. + - Move `compiler-public.hxx` into `pqxx/include/internal/`. + - Add script for updating copyright strings. + - Make `tools/lint` figure out source directory by itself. + - Pass the actual C++ version to `tools/lint`, not the baseline one. + - Re-enable pyflakes testing in `tools/lint`. + - Make more functions `[[nodiscard]]`. + - Qualified some member functions as lvalue or rvalue. + - Don't run clang-tidy by default. Compatibility issues with gcc options. + - Describe version requirements in a JSON file, `requirements.json`. + - Doxygen documentation now uses the (Doxygen-extended) Markdown format. + - Build docs in `doc/html/`, no longer in `doc/html/Reference/`. + - Disable some `std::filesystem` features on Windows. + - Shut up stupid Visual Studio warnings. + - On gcc, mark rarely-used functions as "cold," to be optimised for size. + - Glyph scanning for GB18030 encoding was utterly broken. (#517) +7.6.0 + - Removed bad string conversion to `std::basic_string_view`. (#463) + - Add C++20 concepts: `binary`, `char_string`, `char_strings`. + - Generalise binary strings to any contiguous range of `std::byte`. + - Mark `zview` as a view and as a borrowed range. + - Save a copy step on string fields in `stream_to`. + - New helper: `pqxx::value_type`. + - New helper: `pqxx::binary_cast`. (#450) + - Some escaping functions now have "into my buffer" variants. + - More generic escaping functions, supporting more types of binary. + - In C++20, accept generic columns list for `stream_to`. (#447) + - Renamed `` to ``. + - Deprecated `dynamic_params` in favour of `params`. + - `pqxx::params::append_multi()` now calls `reserve()` if possible. + - `pqxx::params::size()` is now `noexcept` (but sadly, `ssize()` is not). + - Helper for generating parameter placeholders `$1`, `$2`, etc. (#443) + - Now requires support for C++14 `[[deprecated]]` attribute. + - Deprecated `unesc_raw()` with `unesc_bin()` variants. + - Once `unesc_raw()` is gone, we'll support only the hex escape format. + - Work around broken `thread_local` in MinGW gcc < 11.1. + - `pqxx::blob` now supports `std::filesystem::path`. + - Fixed check against header/lib version mismatch: `check_pqxx_version_7_6` + - Deprecated result slicing. Expect `row::slice()` to disappear. + - More complete documentation, of cursors in particular. +7.5.2 + - **Actual serious bug.** `blob::read(std::vector<...>)` was broken. +7.5.1 + - Fixed some Visual Studio warnings. + - Missed a bit in working around MinGW's broken ``. + - Deprecated more obsolete representations of binary data. Use `std::byte`. + - New script `tools/deprecations` lists files that use deprecated code. + - Added automake-generated `config/compile` to revision control. +7.5.0 + - Now requires `std::variant` support! No longer works with gcc7. + - When implementing a string conversion, consider specialising `param_format`. + - Stop "aborting" nontransaction on closing. (#419) + - Silence an overzealous Visual C++ warning. (#418) + - Starting support for C++20 `std::span`. + - New `blob::read()` using `std::span`. (#429) + - New `params` class lets you build parameter lists incrementally. (#387) + - Pass `std::vector` params in binary format. + - Dropped legacy tests 31 and 49 to work around clang C++20 bug. + - Fixed `stream_to` test failure in non-English locales. (#440) + - Clarify `transaction_base::stream` documentation. 
(#423) + - Avoid `` on MinGW; it's broken there. (#336, #398, #424, #441) +7.4.1 + - Missing includes; broke macOS clang build. (#416) +7.4.0 + - Work around Visual Studio 2017 bug with `[[deprecated]]`. (#405, #406) + - Work around eternal Windows bug with `max` macro yet again. (#101) + - Prepare for C++20 `std::ssize()`. + - Dropped test12, which didn't add much and tried to read null strings. + - Support string conversion for `std::monostate`. (#409) + - Consistent "named constructors" for `stream_to` and `stream_from`. + - New `table_path` type. + - New `connection` methods `quote_table` and `quote_columns`. + - Lots of deprecated stream constructors. + - `pqxx::row::swap()` now finally has the `deprecated` attribute. + - Finally deprecated a bunch of `field`, `row`, and `result` constructors. + - Exposed `transaction_focus` marker class. +7.3.1 + - New, simpler API for large objects: `blob` ("binary large object"). + - `largeobject` and friends are now deprecated. + - Fix visibility issue on gcc/clang, especially on macOS. (#395) + - Use "pure" & "visibility" attributes if they work, regardless of compiler. + - More deprecated items now have the `[[deprecated]]` attribute. + - Document the concept of transaction focus. + - Error messages more often report query description, if given. + - Suppress spurious deprecation messages on Visual Studio. (#402) + - Correct error when prepared/param statement clashes with tx focus. (#401) + - `from_stream` with `from_query` now has a convenient factory function. + - Removed some obsolete scripts from the `tools/` directory. +7.3.0 + - `stream_to` now quotes and escapes its table name. + - Removed `transaction_base::classname()`. Did anyone ever use it? + - Internal reorg of the `transaction` and `transactionfocus` hierarchies. + - Removed the only case of virtual inheritance, related to `namedclass`. + - Internal `concat()` for faster, simpler string concatentation. + - Fix compile omission in string conversions for `nullptr_t`. + - `pqxx::size_buffer()` can now size multiple values at once. + - `multi_to_string()` to convert multiple values into one `std::string`. + - Implicit `zview` constructor from `char const *`. (#389) + - Many `std::string&` parameters are now `zview` or `std::string_view`. + - Now checking statement parameter lengths for overflow. + - `#include ` in connection.cxx. (#394) +7.2.1 + - Fix infinite loop in converting `char *` to string. (#377) + - Deprecated `namedclass`. + - Convert an entire row using `row::as()`. + - Internal rework of `field::to()` and `field::as()` functions. + - Some more warning options in maintainer mode. + - Removed the old, DocBook-based tutorial. + - Fixed wrong `query` and SQLSTATE params to some exceptions. (#378) +7.2.0 + - You can now implicitly convert a `const std::string &` to `zview`. + - Replaced some overloads for C strings and C++ strings with `zview`. + - Deprecating `binarystring`. Use `std::basic_string` instead! + - Array parser did not recognise escaping in unquoted values. + - gcc10 test build fix: a result iterator is not the same thing as a `row`. + - Doc fix: field size does _not_ include terminating zero. (#356) + - Fix error message in `demangle_type_name`: printed result, not raw name. + - Fix compile warning in `demangle_type_name` on GNU systems. + - Document that string conversions assume non-null values. + - Start playing with C++20 _concepts._ + - Sketch out concepts-based `PQconnectdbParams` support. (#343) + - Add missing link to "datatypes" documentation. 
(#346) + - Supports `to_string`, `stream_to`, etc. for `binarystring`. (#312) + - Fixed infinite recursion when using `std::optional` in `stream_to`. (#364) + - Home-rolled hex-escaping. Saves an allocation. + - Catch floating-point negative overflow in `check_cast`, not underflow. + - Bit more work on CMake build doc. (#318) + - Typo in `datatypes.md`: `nullness`, not `nullness_traits`. (#353) + - Fixed test names map in `tests/runner.cxx`. (#354) + - Integral `from_string` now accept leading whitespace, as in composite types. + - Experimental support basics for composite types. (#355) + - Use `stream_from` without knowing the number of fields. (#357) + - Global `size_buffer` function. + - `quote()` now works for always-null types without conversions defined. + - `std::nullopt` now converts to an SQL null. + - Skip quoting and escaping array/composite fields of "safe" types. + - New type trait: `is_unquoted_safe`. + - Forbid invalid specialisations of `query_value`. + - Fixed `mktemp` invocation that broke on FreeBSD. + - Avoid unneeded encode/decode step on more binary data. + - If `__cxa_demangle` fails, fall back on raw type name. (#361) +7.1.2 + - Document build in `BUILDING-configure.md` / `BUILDING-cmake.md`. + - Work around silly clang warning in `test_errorhandler.cxx`. + - Fix install error with some internal headers. (#322) + - Fix "No object selected" error message in large objects. (#324) + - If error has no SQLSTATE, throw `broken_connection`. (#280) + - Fix argument order in `encrypt_password`. (#333, #334) + - Fix underestimate of buffer size for `to_string` for floats. (#328) +7.1.1 + - Compile fix for Visual Studio. + - Warning fix for clang. + - Also install `transaction_focus.hxx`. (#320) +7.1.0 + - Query tuples straight into `std::tuple` using `transaction::stream()`! + - And, `stream_from` now supports more or less arbitrary queries. + - Instead of a tuple of fields, you can pass `stream_to` a container as well. + - `string_traits::size_buffer()` must now be `noexcept`. + - New `nullness` member: `always_null`. + - There is now `to_buf` support for string literals. + - Streaming data is now more efficient. + - The table name in `stream_from` is now escaped. + - You can now "convert" strings to `std::string_view`. Mind your lifetimes! + - A `std::variant` will now convert to string, if its member types do. + - If a `stream_from` row fails to convert, you can no longer retry it. + - `from_string(field const &)` now handles null values properly. + - Obsolete Windows build docs are gone. + - Added `row::to(std::tuple<...>)`. + - Unified the test suites. We no longer need `test/unit/unit_runner`. + - New helper: `strip_t<...>` to remove a type's constness and reference. + - Replace custom templating with CMake glob in `src/CMakeLists.txt`. + - Replace custom templating with CMake glob in `doc/CMakeLists.txt`. + - Replace custom templating with CMake glob in `test/unit/CMakeLists.txt`. +7.0.8 + - Inline `type_name` in `PQXX_DECLARE_ENUM_CONVERSION`. +7.0.7 + - Fix broken `--with-postgres-lib` option in `configure` script (#311) + - Should now build even if neither `pkg-config` nor `pg_config` is available. + - CMake accepts `PostgreSQL_ROOT`, if it's a sufficiently recent version. +7.0.6 + - Prefer `pg_config` over `pkg-config` for include path (#291). + - Try to plod on if we don't know the PostgreSQL include path. + - Fix error message when starting overlapping transactions and such (#303). + - Fix potential crashes when converting invalid strings to floats (#307, #308). 
+7.0.5 + - Compile fix for g++ 10: include `` (#292). + - Cleaned up error-checking of PG results. (#280). + - The `esc()` methods now also take `std::string_view` (#295). +7.0.4 + - Fix possible crash in `connection::connection_string` (#290). + - Fix filtering of default values in `connection::connection_string` (#288). + - Deprecate `row::swap` and public inheritance of iterators from `row`. + - More copy/move/default constructors/assignments on result iterators. + - More copy/move/default constructors/assignments on row iterators. +7.0.3 + - Fixed misreporting of broken connection as `sql_error` (#280). + - Replaced non-ASCII test texts with escape codes (#282, #283). + - `ilostream` could truncate at `0xff` byte at buffer boundary (#284, #286). +7.0.2 + - New query function: `query_value`, queries and converts a single value. + - A `stream_from` stream can now be iterated. + - More callable types qualify as transactors, thanks to `std::invoke`. +7.0.1 + - Windows build fixes. + - Documentation for writing your own string conversions. + - `transaction_rollback` and children are now `sql_error`, not just `failure`. +7.0.0 + - Bumped minimum required C++ version to C++17. + - Everything has changed. If you're porting from older versions, be careful! + - There is now only one connection class: `connection`. + - Removed tablereader/tablewriter, replaced by stream_from/stream_to. + - Removed obsolete transactor framework, replaced by post-C++11 one. + - Removed old ways of invoking parameterised and prepared statements. + - Session variables are no longer cached. + - If you do weird stuff with setting/getting variables, it may break. + - Reading a variable written from raw SQL, procedures, etc, will now work. + - Prepared statements are now registered immediately. + - If you do weird stuff with preparing/unpreparing statements, it may break. + - Changed exceptions and errors for many error situations. + - Mishandling prepared statements will now break the connection. + - Many string references and const char pointers are now `std::string_view`. + - New `zview` class wraps `string_view` for string with terminating zero. + - The `stream_base` abstract base class is gone. + - Transactions no longer have an `isolation_tag` nested type. + - The `isolation_traits` type is gone. + - There's no more `pqxx_exception`. Complicated things too much. + - `pqxx::string_traits<>::name()` has been replaced with `pqxx::type_name`. + - `to_string()` can now handle `std::vector` (to produce an SQL array). + - All `from_string()` that took a C string now take a `std::string_view`. + - There are now separate `string_traits` and `null_traits` templates. + - Enums outside classes are now scoped enums. + - `error_verbosity` is no longer nested in connection. + - `connection::get_verbosity` is gone. + - Some enums have changed names: `accesspolicy` to `access_policy`, and so on. + - `dynamic_params` now accepts an accessor. + - Fixed "const" support in arguments to parameterised/prepared statements. + - Connection objects can now be moved. + - `connections::options()` has been replaced by `connection_string()`. + - Replaced pqxx-fulltest with `test_all.py`. + - Some `size_type` have changed to different types, to match current libpq. + - Internal overflows are more consistently caught and reported. + - Deprecated items have been removed. + - Large objects now require backend version 9.3 or better. + - Seeking inside large objects now supports 64-bit sizes. 
+ - Visual Studio project files and sample headers are gone. Use CMake. + - MinGW Makefiles are gone. Use `configure` or CMake. + - Need PostgreSQL 10 to use robusttransaction. + - `robusttransaction` no longer uses a log table. Feel free to drop it. +6.4.4 + - Use pkg-config if pg-config is not available. + - In CMake build, prefer CMake's config headers over any found in source tree. +6.4.3 + - Updated copyright strings. + - Added missing "stream" headers to autotools-based install. + - Added stream headers to pqxx/pqxx header. +6.4.2 + - Fix mistake for Windows in 6.4.1: use PQfreemem, not std::free. + - Easily enable runtime checks in configure: "--enable-audit". + - Enable optimisation in CircleCI build. +6.4.1 + - Fixed more memory leaks. +6.4.0 + - Half fix, half work around nasty notice processor life time bug. + - Fix selective running of tests: "test/unit/runner test_infinities". + - Added some missing `std::` qualifications. +6.3.3 + - Throw more appropriate error when unable to read client encoding. + - CMake build fixes. +6.3.2 + - Conversion errors no longer throw pqxx::failure; always conversion_error! + - Use C++17's built-in numeric string conversions, if available. + - Query numeric precision in a more sensible, standard way. + - Avoid "dead code" warning. + - Replace obsolete autoconf macros. + - Remove all "using namespace std". + - CMake build fixes. +6.3.1 + - Windows compile fix (CALLBACK is a macro there). + - Work around Visual Studio 2017 not supporting ISO 646. +6.3.0 + - New "table stream" classes by Joseph Durel: stream_from/stream_to. + - Support weird characters in more identifiers: cursors, notifcations, etc. + - Connection policies are deprecated. It'll all be one class in 7.0! + - Connection deactivation/reactivation is deprecated. + - Some items that were documented as deprecated are now also declared as such. + - Fix Windows bug where WSAPoll was never used. Thanks dpolunin. + - Fix Windows CMake build to link to socket libraries. Thanks dpolunin. + - Various other changes to the CMake build. + - Fix failure when doing multiple string conversions with Visual C++. + - Fix nested project builds in CMake. Thanks Andrew Brownsword. + - In Visual Studio, build for host architecture, not "x64". + - Fix string conversion link error in Visual Studio. Thanks Ivan Poiakov. + - Fix string conversion to bool for "1". Thanks Amaracs. + - Fix in escaping of strings in arrays. Thanks smirql. + - Faster copying of results of large queries. Thanks Vsevolod Strukchinsky. + - Starting to handle encodings properly! Thanks to Joseph Durel. + - No longer using std::iterator (deprecated in C++17). +6.2.5 + - Removed deprecated pqxx-config. + - Build fix on Visual C++ when not defining NOMINMAX. + - Reduce setup code for string conversions, hopefully improving speed. + - Allow nul bytes in tablereader. + - Support defining string conversions for enum types. + - Fixed const/pure attributes warning in gcc 8. + - Updated build documentation to mention CMake option. + - Fixed a floating-point string conversion failure with Visual Studio 2017. +6.2.4 + - Fix builds in paths containing non-ASCII characters. + - New macro: PQXX_HIDE_EXP_OPTIONAL (to work around a client build error). +6.2.3 + - Build fixes. +6.2.2 + - Variable number of arguments to prepared or parameterised statement (#75). + - Windows build fix (#76). +6.2.1 + - Compile fixes. +6.2.0 + - At last! A check against version mismatch between library and headers. + - Small compile fixes. +6.1.1 + - Small compile fixes. 
+ - A particular error string would always come out empty. +6.1.0 + - Dependencies among headers have changed. You may need extra includes. + - Library headers now include "*.hxx" directly, not the user-level headers. + - Supports parsing of SQL arrays, when using "ASCII-like" encodings. +6.0.0 + - C++11 is now required. Your compiler must have shared_ptr, noexcept, etc. + - Removed configure.ac.in; we now use configure.ac directly. + - Removed pqxx::items. Use the new C++11 initialiser syntax. + - Removed maketemporary. We weren't using it. + - Can now be built outside the source tree. + - New, simpler, lambda-friendly transactor framework. + - New, simpler, prepared statements and parameterised statements. + - Result rows can be passed around independently. + - New exec0(): perform query, expect zero rows of data. + - New exec1(): perform query, expect (and return) a single row of data. + - New exec_n(): perform query, expect exactly n rows of data. + - No longer defines Visual Studio's NOMINMAX in headers. + - Much faster configure script. + - Most configuration items are gone. + - Retired all existing capability flags. + - Uses WSAPoll() on Windows. + - Documentation on readthedocs.org, thanks Tim Sheerman-Chase. +5.1.0 + - Releases after this will require C++11! + - Internal simplification to pqxx::result. + - A row object now keeps its result object alive. + - New exec() variants: "expect & return 1 row," "expect no rows," "expect n." + - ChangeLog is gone. It was a drag on maintenance. +5.0.1 + - Exposed sqlstate in sql_error exception class. +5.0 + - The PGSTD namespace alias is gone. Use the std namespace directly. + - pqxx::tuple is now pqxx::row, to avoid clashes with std::tuple. + - Deprecated escape_binary functions dropped. + - Deprecated notify_listener class dropped. + - Support for many old compilers dropped. + - Support for "long long" and "long double" types is always enabled. + - No longer uses obsolete std::tr1 namespace; use plain std instead. + - Now requires libpq 9.1 or better. + - Requires server version 9.1 or better. + - Support for REPEATABLE READ isolation level added. + - Makefile fixes for Visual Studio 2013. + - Supports C++11 and C++14. + - No longer has obsolete debian & RPM packaging built in. + - Fixed failure to abort uncommitted subtransactions on destruction. + - Fixed failure to detect some integer overflows during conversion. + - Build tooling uses /usr/bin/env python instead of /usr/bin/python. + - New configure options: --with-postgres-include and --with-postgres-lib. + - In g++ or compatible compilers, non-exported items are no longer accessible. + - Many build fixes for various platforms and compilers. +4.0 + - API change: noticers are gone! Use errorhandlers to capture error output. + - API change: tablereaders and tablewriters are gone; they weren't safe. + - API change: prepared statements are now weakly-typed, and much simpler. + - API change: fields and tuples are now stand-alone classes in ::pqxx. + - API change: thread-safety field have_strerror_r is now have_safe_strerror. + - API change: notify_listener has been replaced with notification_receiver. + - notification_receiver takes a payload parameter. + - Easier Visual C++ setup. + - Absolutely requires a libpq version with PQescapeStringConn. + - Absolutely requires libpq 8.0 or better. + - Changes for C++0x. + - Supports clang++. + - Visual C++ makefiles now support new-style unit tests. + - Sample headers for more recent Visual Studio versions. 
+ - Fixes binary-data escaping problems with postgres 9.0. + - Fixes problems with binary-string handling and escaping. + - Fixes compatibility problems between 9.x libpq and 7.x backend. + - quote_name to escape SQL identifiers for use in queries. + - syntax_error reports error's approximate location in the query. + - On Windows, now uses ws2_32 instead of wsock32. + - Various Windows build fixes. + - Updated for gcc 4.6.0. + - configure script supports --enable-documentation/--disable-documentation. + - Streamlined test/release toolchain. +3.1 + - Shared libraries are now versioned by ABI: 3.1 instead of 3.1.0 etc. + - Threading behaviour is now documented, and can be queried. + - Version information available at compile time. + - Supports parameterized statements. + - Result tuples now support slicing. + - Configure with --with-tr1=boost to use BOOST shared_ptr. + - String conversion now has its own header file. + - Supports read-only transactions. + - Fixed breakage with Solaris "make". + - Uses shared_ptr if available. + - binarystring::str() is no longer cached; no longer returns reference. + - Fixed problems in Visual C++ Makefile for test suite. + - Fixed problems with RPM packaging. + - Fixed build problem on RedHat/CentOS 5. + - Lets you check whether a prepared statement has been defined. + - "Varargs" prepared statements. + - Unnamed prepared statements now supported. + - Results have iterator as well as const_iterator. + - Rewrite of robusttransaction logic; may actually do its job now. + - Connections support async query cancel from signal handler or thread. + - More documentation for performance features. +3.0 + - Website is now at http://pqxx.org/ (no redirects) + - Completely replaced cursor classes + - More helpful error messages on failed connections + - More detailed hierarchy of constraint-violation exception classes + - trigger is now called notify_listener, trigger header is now notify-listen + - New mixin base class pqxx_exception distinguishes libpqxx exception types + - Quoting is back! transaction_base::quote() & connection_base::quote() + - Several build & documentation problems with Visual C++ fixed + - Compile fixes for gcc 4.2, 4.3 + - Compile fixes for Sun Studio Express 5.9 + - Uses strlcpy() where available, instead of strncpy() + - Keeps better track of applicable text encodings + - Fixed bug with prepared statement parameters in separate C++ statements + - robusttransaction now works for multiple users + - Pipeline lets you cancel ongoing queries, e.g. because they run for too long + - Fixed broken escaping of binary values in tablewriter + - Floating-point types now represented with full precision + - Proper unit tests for new functionality + - New traits-based system for adding data types + - Floating-point infinities now supported + - Flushing/completing a pipeline now frees up the transaction for other use + - Completely reworked test suite, builds and runs much faster + - tablewriter supports writing of raw lines +2.6.9 + - Removed old 1.x API (that means all identifiers with capital letters!) + - Tested with all current libpq versions and oldest/newest supported backends + - No longer have old OnCommit()/OnAbort()/OnDoubt() callbacks in transactor! 
+ - Fixes failure when closing cursors with upper-case letters in their names + - Fixes bug when adding triggers to connections that aren't open yet + - Fixes bug when removing triggers + - Fixes small memory leak when preparing statements + - Fixes many problems with older backends + - Fixes bug in result::swap(): protocol versions were not swapped + - Some errors went undetected when using certain libpq versions + - Fixes prepared statements on new libpq versions talking to old backends + - Can estimate server version if libpq does not know how to obtain it + - Greatly reduced memory usage while escaping strings + - With Visual C++, creates lib/ directory if not already present + - Useful error messages when preparing statements + - Allows prepared statements to be registered explicitly + - Support for "long long" types; enable with PQXX_ALLOW_LONG_LONG macro + - Compilation errors for older libpq versions fixed + - Some new small utility classes for disabling notice processing etc. + - Result sets remember the queries that yielded them + - New test script, pqxx-fulltest, tests against all current postgres versions + - Connections can simulate failure + - Adds password encryption function +2.6.8 + - Fixes bug: binary parameters to prepared statements truncated at nul bytes + - New, more specific exception types to distinguish errors from server + - Resolved serious problems with generated reference documentation + - Automatically detect Windows socket library with MinGW + - Windows "make" fixed to run from main directory, not win32 + - Fixes "mktemp" problems on some BSD-based platforms + - pqxx-config is deprecated; use pkg-config instead + - On GNU/Linux, uses poll() instead of select() to avoid file descriptor limit + - Will provide server and protocol version information where available + - New cursor class, absolute_cursor +2.6.7 + - New escape functions for binary data: transaction_base::esc_raw() + - Improved detection of socket libraries, especially for MinGW + - Works around bug in some versions of GNU grep 2.5.1 + - Fixes problem with configuration headers + - Fixes PQprepare() detection + - Fixes incomplete Visual C++ Makefile + - Fixes compile error in workaround for older libpq versions + - Removes "rpath" link option +2.6.6 + - New, encoding-safe string-escaping functions + - Upper-case letters now allowed in prepared-statement names + - Fixes crash in test005 + - More Visual C++ improvements + - Removed collaboration diagrams from reference docs + - New templating system for generating Windows Makefiles etc. +2.6.5 + - Visual C++ users: copy win32/common-sample to win32/common before editing it + - Should fix problems finding socket library on MinGW + - Even more work on Visual C++ problems + - Updated documentation for Visual C++ users + - Fixed bug in prepared statements (mostly visible on Visual C++) + - Nested transactions work harder to detect backend support +2.6.4 + - Massively improved compatibility with Windows and Visual C++ + - Fixed late initialization of "direct" connection state + - Fixed problem with initialization of connection capabilities + - Fixed configuration bug for libpq in nonstandard locations + - Sample configuration header for libpq found in PostgreSQL 8.1 +2.6.3 + - Radical rework of prepared statements; INCOMPATIBLE INTERFACE CHANGE! 
+ - Dropped support for g++ 2.95 + - Emulate prepared statements support on old libpq or old backend + - Bug fix: missing tutorial (release script now tests for this) + - Automatically links in socket library on Windows or Solaris, if needed + - Bug fix: check for std namespace didn't work + - Fixes for Cygwin/MSYS/MinGW +2.6.2 + - Bug fix: connection state was not set up properly in some common cases + - Bug fix: headers were installed in "include" instead of "include/pqxx" + - Bug fix: sqlesc(string) broke with multibyte or multiple encodings + - namedclass is now used as a virtual base; affects all subclass constructors + - Initial implementation of subtransactions + - Detect more connection capabilities + - Standard library namespace can be set from configure script's command line + - Completely reworked connection hierarchy, with separate policy objects + - Clients can now define their own connection policies + - Paved the way for client-defined thread synchronization + - Now lives at http://thaiopensource.org/development/libpqxx/ +2.6.1 + - Hugely improved recognition of different strerror_r() versions + - Resolved link problems with gcc 4.0 and shared library +2.6.0 + - New macro PQXX_SHARED defines whether to use/build libpqxx as shared library + - Robusttransaction compatible with PostgreSQL 8.1 + - Infrastructure for querying connection/backend capabilities at runtime + - Greatly improved cursor support + - Connection reactivation can be inhibited explicitly + - Tries even harder to make sense of conflicting strerror_r() definitions + - Detects connection failures that libpq glosses over + - Reference documentation grouped into more coherent sections + - Assumes strerror() is threadsafe on systems that have no strerror_r() + - Now allows connection's socket number to be queried + - New internal_error class for libpqxx-internal errors + - With Visual C++, doesn't redefine NOMINMAX if it is defined already + - Several compatibility improvements for Visual C++ + - Fixes and workarounds for HP-UX and HP aCC compiler + - Phased old cursor interface out of test suite; tests ported to new interface + - Added documentation on thread safety + - New thread safety model + - Large objects have functions to tell current position + - Minor updates to tutorial (somebody pay me and I'll do more :) + - No longer needs libpq-fs.h header + - Meaningful error messages for ambiguous string conversions fixed +2.5.6 + - Support null parameters to prepared statements (use C-style char pointers) +2.5.5 + - Diagnoses connection failure during result transfer + - Fixes invalid -R link option in pqxx-config +2.5.4 + - Fix workaround code for older libpq versions without PQunescapeBytea() + - Work around grep bug in Fedora Core 4 that broke configure in UTF-8 locales + - In Visual C++, assume libpqxx is a DLL when linking to std library as DLL + - Missing documentation in distribution archive is back again + - Export fewer symbols from library binary with gcc 4.0 + - Releases now automatically tested against gcc 4.0 + - Meaningful link errors for additional ambiguous string conversions + - DLL symbol exports now automatically tested before each release +2.5.3 + - Greatly improved builds on MinGW with MSYS + - All known problems with MinGW fixed + - Fix bugs in stream classes that caused failures and crashes with STLport + - Detects and uses STLport automatically +2.5.2 + - Fix memory leaks + - Fix problems with NaN (not-a-number values) on some compilers +2.5.1 + - Fix configure script; broke when very 
recent libpqxx was already installed + - Fix cursor breakage when "long" is more than 32 bits + - Fix cases where new-style abort/doubt handlers are used + - Fix for division-by-zero error in Visual C++ (changed sample headers) + - Improved checking for strerror_r in configure script + - Fix for problem MinGW has with configure script + - Fix spurious failure of Oid check in configure script +2.5.0 + - Fix race condition in removing triggers + - Fix binary string conversion with older libpq + - Fix some error strings that may previously have come out wrong + - No longer includes any libpq headers while compiling client code + - Improved thread safety: avoid strerror() where possible + - Prepared statements + - Translate more error conditions to std::bad_alloc exception + - Clearer and more specific explanations for configuration failures + - Improved documentation + - Looks for standard library in global namespace as well as std + - Accepts standard C library in std namespace + - Release script automatically tests with a range of compilers, not just one + - Compatible with g++ 2.95 again; this time it's tested automatically +2.4.4 + - Fix problems building shared library in Visual C++ + - Fix autobuild in Debian, which was broken by mistake in BSD grep workaround + - Fix conversion of string to floating-point type NaN + - Remove stray CVS directories from distribution archive + - Workaround for Visual C++ problem when issuing messages from destructors + - Yet more workarounds for Visual C++ bugs + - Fix situation where connection state might not be restored after failure + - Fix configuration problem on SunOS + - Network speedup in connection setup with pending variables and/or triggers +2.4.3 + - Yet more workarounds for bugs in Visual C++ .NET 2003 + - Fixes for SunC++ 5.5 + - On Visual C++, now defines NOMINMAX, fixing large object support + - Workaround for BSD grep + - Improvements for builds from CVS + - Sample config headers for Sun ONE Studio 8 +2.4.2 + - Fix minor problems with Apple's version of g++ 3.3 + - Fix problem with MingW on Windows + - Workarounds and fixes for Visual C++.NET 2003 + - Renewed compatibility with g++ 2.95 + - More sample configuration headers + - Updated reference documentation + - Removed assert code +2.4.1 + - Several bugs in icursor_iterator fixed; incompatible interface changes + - Tightens throw specifications on begin(), end(), size(), capacity() + - Containers define reference and pointer types + - Implements swap() in all container types + - Implements == and != in all container types + - Stabilizes new (but still limited) cursor interface + - icursor_iterator thinks purely in stride granularity + - Introduces />= comparisons for icursor_iterators + - Allows "adopted SQL cursors" in new cursor interface + - Reference-counting in binarystrings, so they can be copied (and efficiently) + - Fixes reference-to-temporary problem with std::reverse_iterator in results + - Result/tuple reverse_iterators no longer require std::reverse_iterator + - Includes some sample config headers (in config/sample-headers) + - Replaces iffy autoconf checks (avoid failures with maintainer mode's -Werror) + - Fixes incompatibility with some implementations of Unix "cut" program (again) +2.4.0 + - Fixes incompatibility with some implementations of Unix "cut" program + - Fixes "ptrdiff_t redefinition" problem in some environments + - More container-like tuples, so fields can be iterated + - All size_type types are now unsigned + - More conservative robusttransaction--thanks 
Tom Lane + - Stream-like extraction operator for result field conversion + - Warnings about deprecated headers now suppressed while compiling library + - Iterator constructors and copy assignments now have empty throw specs +2.3.0 + - Generates MinGW Makefile automatically + - Documents MinGW build + - Workaround for missing prepared-statement support + - Potential bug fixed in closing of connections + - Fixed incompatibility between new cursor streams and older backends + - Removed pqxxbench +2.2.9 + - Bugfix in removing trigger + - Added "failed connection" to regression test + - Some changes to throw specifications + - Putting libpq in its own namespace is optional +2.2.8 + - Moved libpq into pqxx::internal::pq namespace + - New config system separates compiler-related items from libpq-related ones + - Auto-generates Visual C++ Makefile, should always remain up-to-date now +2.2.7 + - Bugfix: from_string() didn't handle LONG_MIN--thanks Yannick Boivin +2.2.6 + - Complete "pipeline" rewrite, for better exception safety + - New garbage collection scheme for "result;" constructors now exception-free +2.2.5 + - First new cursor classes! + - Fixed strange failure in tablewriter during large insertions + - Updated tutorial +2.2.4 + - New utility class template, items<> for easy container initialization + - New utility function template, separated_list() + - Error handling bugfix in tablewriter + - Fixed tablereader handling of lines ending in empty fields + - tablereader lines no longer end in newline with old libpq versions +2.2.3 + - Trigger names no longer need to be proper identifiers + - Compile fixes for g++ 3.4.0 and other modern compilers + - Tablestreams may specify column lists + - Deprecated Quote() in favour of sqlesc(); improved quoting + - Fixed generation of libpqxx.spec +2.2.2 + - Bugfix in fieldstream w.r.t. reading strings on some systems + - Renamed config.h to internalconfig.h to avoid confusion + - New connection functions allow client to sleep until notification arrives + - Notification functions return number of notifications received + - Even fewer client-visible macros exported by libconfig.h +2.2.1 + - New, 2.x-style string conversions without locale problem + - Documentation improvements + - Implemented result::swap() +2.2.0 + - Installs to /usr/local by default, NOT to /usr/local/pqxx like before! + - Uses Postgres-provided script to find Postgres (thanks Peter Eisentraut) + - Which means no more configure arguments required on Irix (thanks Arjen Baart) + - Fixes long-standing bug in result class! 
+ - New pipeline class for throughput optimization + - New field stream class: read result field as C++ stream + - Separate namespace pqxx::internal for definitions not relevant to the user + - More Windows compilation fixes + - SUN Workshop 6 compile fixes and workarounds (thanks Jon Meinecke) + - Implemented reverse_iterator for result class + - Checks for functional std::reverse_iterator template + - Preliminary Makefile for MinGW compiler (thanks Pasquale Fersini) + - Changed the way unique<> works + - Checks for functional std::count_if() + - Bugs fixed & test programs added +2.1.3 + - Makefile fixes for Visual C++, thanks Paresh Patel + - Library ABI versioning implemented, thanks Roger Leigh + - Uses old SQL isolation level syntax for compatibility, thanks koun@sina.com + - tablestreams can explicitly complete() before destructor + - Bugfix in robusttransaction: forgot to set isolation level + - Fixed off-by-ones in tablewriter escape code + - tablestreams now use \n-style escape sequences + - tablestreams support octal numbers + - Freely definable "null" strings in tablestreams, as originally intended + - Improved Debian packaging, thanks Roger Leigh + - tablestreams use libpq's new-style COPY functions, if available + - Extended automation of build/release procedure + - tablewriter writes in nonblocking mode to help hide communication latency + - Can get backend variables as well as set them + - More configuration macro cleanups + - Workaround for missing clear() in standard string + - Merry Christmas! +2.1.2 + - Compile fix for gcc libstdc++ 2.9, thanks Jaroslaw Staniek + - Moved deprecated functions below current ones + - Cleanups for Debian packaging (thanks Roger Leigh, new Debian maintainer!) + - Updated authors listings + - Bumped ABI version number for the first time (now 2:0:1) +2.1.1 + - More workarounds for gcc 2.95 + - Automated tools keep test makefiles up to date +2.1.0 + - Asynchronous connections + - Fixed configure --includedir option (thanks Ray Dassen!) + - Compile fixes for SUN Workshop 6, and one for gcc on FreeBSD 4.8 +2.0.0 + - New stable release! + - Includes all changes since 1.5 release. + - Workarounds for Microsoft Visual C++ 7 problems. Thanks Costin Musteata! + - No longer need to define PQXX_NO_PARTIAL_CLASS_TEMPLATE_SPECIALISATION + - Integrated Windows configuration into regular configuration + - Only uses #warning if preprocessor supports it + - Works on libpq versions without PQ[un]escapeBytea() +1.9.9 + - Minor documentation changes +1.9.8 + - Workaround for compile problem with postgres 7.3 + - Convenience typedef for transaction<>: "work" +1.9.7 + - binarystring rewritten and moved to its own file + - binarystring::size() does not include terminating null byte! 
+ - Implemented escaping of binary strings + - Fix in workaround for missing numeric_limits on some compilers + - String conversion supported for unsigned char * + - More helpful link errors for unsupported string conversions + - Complete test coverage +1.9.6 + - Fixes in "field table" support + - Improved coexistence with client program's config.h, if any + - Prefixed autoconf macros used in headers with "PQXX_" +1.9.5 + - Header file contents moved to .hxx files for editor filetype recognition + - Fixes wrong timestamp for include/pqxx/result in 1.9.4 distribution +1.9.4 + - Fixes Visual C++ build problem when compiling as library +1.9.3 + - Quick release for various minor changes +1.9.2 + - Renamed most public member functions to all-lower-case names + - (previously is now called +1.9.1 + - tablestream destructor crashed if table didn't exist (thanks Sean [Rogers?]) + - Renamed all header files to remove ".h" suffix + - Tables created by regression test now prefixed with "pqxx" for safety + - Large objects now considered stable + - Migrated tutorial from SGML to DocBook XML (thanks Wichert Akkerman) + - Added tests 57-59 + - Fixed compile error in largeobject + - Updated Windows makefiles +1.9.0 + - EVERYTHING HAS CHANGED. Read the list or run into trouble! + - CURSOR HAS INCOMPATIBLE CHANGES AND MAY BE REPLACED COMPLETELY + - CACHEDRESULT HAS INCOMPATIBLE CHANGES (won't compile without changes) + - REVISE YOUR TRANSACTORS; now templatized on transaction type + - Finally got license file in order + - Incompatible change in setting transactor quality of service + - Cursors require serializable isolation level (checked at link time) + - Renamed Connection_base to connection_base, Connection to connection, + LazyConnection to lazyconnection + - Renamed LargeObject to largeobject, LargeObjectAccess to largeobjectaccess + - Renamed Noticer to noticer + - Renamed Trigger to trigger + - Renamed Result to result, Tuple to tuple, Field to field + - Renamed Unique<> to unique<> + - Renamed CachedResult to cachedresult + - Transformed Transaction Taxonomy (TTT): + - Renamed Transaction_base to transaction_base + - Renamed Transaction to transaction + - Renamed Transactor to transactor<> (now a template) + - Implemented transaction isolation levels as compile-time static properties + - transaction and robusttransaction now templatized on their isolation levels + - cachedresult requires serializable isolation level (checked at link time) + - Now need to include pqxx/transactor.h yourself if you need transactors + - Large objects require real backend transaction at compile time + - New type oid and constant oid_none for row identifiers resp. null oid + - Added some forgotten PQXX_LIBEXPORTs + - Tweaked documentation in many places +1.8.1 + - By popular request: more convenient way to read field values + - Documented locale sensitivity of ToString(), FromString(), Field::to() +1.8.0 + - Compiles on gcc 2.95 again (heavy streambuf workarounds in largeobject.h) + - ConnectionItf renamed to Connection_base, TransactionItf to Transaction_base + - connectionitf.h is now connection_base.h, transactionitf.h connection_base.h +1.7.8 + - BinaryString class for unescaping bytea strings + - PQAlloc template keeps track of libpq-allocated objects + - Removed some consts in Unique<>, ConnectionItf, sorry! 
+ - Can now set session variables on connections, transactions +1.7.7 + - ./configure also looks for postgres in /usr/local/pgsql + - test007 now uses SQL_ASCII as its test encoding + - integrated Greg Hookey's Debian packaging +1.7.6 + - added postgres library (libpq) to dynamic link path +1.7.5 + - added test052 - test055 + - added Result::Tuple::ColumnNumber() + - also test setting of client encodings + - removed superfluous versions of to_file() from large object classes +1.7.4 + - new exception class, sql_error, remembers query text + - moved exception classes to new file include/pqxx/except.h + - test cases report texts of any failed queries + - added tools/rmlo.cxx +1.7.3 + - default constructors for connection classes + - revamped seeking operations on large objects + - better error messages in large objects + - added test050, test051 +1.7.2 + - more workarounds for Sun CC 5.1, thanks Jeroen van Erp! + - preliminary support for "named" queries + - can now Quote() string constants + - included Doxyfile in distribution archive + - helps avoid Windows memory allocation problem in DLLs + - allows setting of client character set encoding +1.7.1 + - regenerated documentation +1.7.0 + - removed all deprecated features + - connection string documentation in README + - separate Connection, LazyConnection classes + - made test001 more concise + - added test049 +1.6.4 + - configure script now respects different std namespace +1.6.3 + - olostream, lostream now flush themselves before closing + - fixed compilation problems when using ToString<>() on a plain char * + - compilation fixes for Sun compiler (thanks Jeroen van Erp!) + - added .pc file for pkgconfig (thanks Ray Dassen!) +1.6.2 + - Debian packaging added to distribution archive + - new ilostream, olostream, lostream classes +1.6.1 + - large object's cunlink() replaced by remove() + - default constructor for LargeObject +1.6.0 + - new large objects interface + - added test048 +1.5.0 + - allow result fields to be written to streams + - removed confusing CachedResult::clear() + - minor documentation updates + - added test046, test047 + - added convenience header +1.4.5 + - fixed crash CachedResult that was less shallow than I thought + - fixed quoting problem with adopted SQL cursors +1.4.4 + - (forgot to save cursor.cxx with new constructor in 1.4.4, sorry) +1.4.3 + - all tests now have three-digit numbers + - Cursor can adopt SQL cursor returned by a function +1.4.2 + - bugfix in CachedResult when accessing empty Results + - minor documentation improvements +1.4.1 + - documents new homepage: http://pqxx.tk/ + - Connection constructor accepts null connect string + - Exec() now also takes queries as C++ strings +1.4.0 + - Connection::IsOpen() renamed to is_open() + - NoticeProcessor replaced by Noticer (with C++ linkage) +1.3.7: + - detects nasty rare problem case with Cursors in unknown positions +1.3.6: + - fixed detection of missing PQescapeString(). Thanks David Wright! +v1.3.5: + - documented Windows build procedure + - fixed problem with upper-case letters in cursor names. Thanks key88! +2003-01-19 16:00, v1.3.4: + - support long double type + - clarified some error messages +2003-01-08 18:45, v1.3.3: + - fix missing include in test13 +2003-01-07 02:30, v1.3.2: + - configure looks for postgres includes/library in more places, thanks Ray! 
+2003-01-02 23:00, v1.3.1: + - bugfix in Cursor positioning +2003-01-02 20:30, v1.3.0: + - absolute positioning for Cursor + - better documentation on cursors + - reduced, but improved test suite output +2002-12-23 17:30, v1.2.8: + - Cursor::Move() returns number of rows skipped + - new typedef Cursor::size_type +2002-12-14 23:30, v1.2.7: + - test suite now distinguishes expected errors from unexpected ones +2002-12-09 20:00, v1.2.6: + - fixed some Cursor test cases for change in postgres 7.3 + - added important warning to Cursor +2002-12-09 02:00, v1.2.5: + - added important warning to CachedResult +2002-12-08 14:14, v1.2.4: + - fixed compile error on some systems in include/pqxx/util.h +2002-12-04 12:00, v1.2.3: + - workaround for broken on some systems + - fixed Quote() bug +2002-12-03 01:30, v1.2.2: + - fixed serious CachedResult bug + - added test41 +2002-12-02 17:00, v1.2.1: + - hopefully fixed cursor bug with PostgreSQL 7.3 +2002-12-01 22:00, v1.2.0: + - new CachedResult class +2002-11-07 13:15, v1.1.4: + - workaround for missing InvalidOid definition +2002-10-23 16:00, v1.1.3: + - Cursor & TableStream hierarchy now work on any transaction type + - get no. of affected rows & oid of inserted row from Result + - increased test coverage +2002-10-21 01:30, v1.1.2: + - updated build procedure + - Debian packaging improvements +2002-09-25 03:00, v1.1.1: + - supports activating/deactivating of connections + - various Connection getters now activate deferred connection first +2002-09-23 01:00, v1.1.0: + - supports lazy connections (added 19 test cases just for these) + - greatly reduced performance overhead for RobustTransaction + - removed id field from RobustTransaction's transaction log tables +2002-09-14 20:00, v1.0.1: + - now lives on GBorg + - various packaging updates +2002-06-12 17:30, v0.5.1: + - no longer have to destroy one transaction before creating the next +2002-06-07 17:15, v0.5.0: + - "make install" now finally installs headers! + - distribution now includes SGML (DocBook) version of tutorial +2002-06-04 15:00, v0.4.4: + - may now have multiple triggers with same name on single connection +2002-06-02 23:00, v0.4.3: + - fixed TableReader problem with \t and \n +2002-06-01 21:00, v0.4.2: + - hopefully fixes compile problem with broken std::iterator + - configure no longer requires --with-postgres-include=/usr/include/postgresql +2002-05-29 22:00, v0.4.1: + - can now also handle bool, unsigned char, short field types +2002-05-27 22:30, v0.4.0: + - RENAMED Transactor::TRANSACTIONTYPE to argument_type for STL conformance + - RENAMED Result::Field::name() to Name() + - documentation improvements + - minor optimizations +2002-05-18 00:00, v0.3.1: + - removed broken postgres_fe.h dependency (hopefully permanent fix) +2002-05-12 22:45, v0.3.0: + - also looks for postgres_fe.h in postgres' internal/ directory (tmp fix) +2002-05-05 01:30, v0.2.3: + - extensive build instructions in README + - make check now controlled through PG environment variables +2002-05-04 19:30, v0.2.2: + - more STL conformance + - fixed regression test + - test6 now copies "orgevents" to "events" by default +2002-04-28 23:45 Version bumped to 0.2 +2002-04-28 23:45 Self-generated distribution archive +2002-04-27 14:20 Replaced automake symlinks with actual files +2002-04-07 02:30 Released with configure script +2002-03-29 01:15 Not yet released. Still integrating autogen stuff... 
diff --git a/ext/libpqxx-7.7.3/README.md b/ext/libpqxx-7.7.3/README.md new file mode 100644 index 000000000..3a1c17ded --- /dev/null +++ b/ext/libpqxx-7.7.3/README.md @@ -0,0 +1,199 @@ +libpqxx +======= + +Welcome to libpqxx, the C++ API to the PostgreSQL database management system. + +Home page: http://pqxx.org/development/libpqxx/ + +Find libpqxx on Github: https://github.com/jtv/libpqxx + +Documentation on Read The Docs: https://libpqxx.readthedocs.io + +Compiling this package requires PostgreSQL to be installed -- or at least the C +headers and library for client development. The library builds on top of +PostgreSQL's standard C API, libpq, though your code won't notice. + +If you're getting the code straight from the Git repo, the head of the `master` +branch represents the current _development version._ Releases are tags on +commits in `master`. For example, to get version 7.1.1: + + git checkout 7.1.1 + + +Upgrade notes +------------- + +**The 7.x versions require at least C++17.** Make sure your compiler is up to +date. For libpqxx 8.x you will need at least C++20. + +Also, **7.0 makes some breaking changes in rarely used APIs:** +* There is just a single `connection` class. It connects immediately. +* Custom `connection` classes are no longer supported. +* It's no longer possible to reactivate a connection once it's been closed. +* The API for defining string conversions has changed. + +If you're defining your own type conversions, **7.1 requires one additional +field in your `nullness` traits.** + + +Building libpqxx +---------------- + +There are two different ways of building libpqxx from the command line: +1. Using CMake, on any system which supports it. +2. On Unix-like systems, using a `configure` script. + +"Unix-like" systems include GNU/Linux, Apple macOS and the BSD family, AIX, +HP-UX, Irix, Solaris, etc. Even on Microsoft Windows, a Unix-like environment +such as WSL, Cygwin, or MinGW should work. + +You'll find detailed build and install instructions in `BUILDING-configure.md` +and `BUILDING-cmake.md`, respectively. + +And if you're working with Microsoft Visual Studio, have a look at Gordon +Elliott's +[ + Easy-PQXX Build for Windows Visual Studio +](https://github.com/GordonLElliott/Easy-PQXX-Build-for-Windows-Visual-Studio) +project. + + +Documentation +------------- + +Building the library, if you have the right tools installed, generates HTML +documentation in the `doc/` directory. It is based on the headers in +`include/pqxx/` and text in `include/pqxx/doc/`. This documentation is also +available online at [readthedocs](https://libpqxx.readthedocs.io). + + +Programming with libpqxx +------------------------ + +Your first program will involve the libpqxx classes `connection` (see the +`pqxx/connection.hxx` header), and `work` (a convenience alias for +`transaction<>` which conforms to the interface defined in +`pqxx/transaction_base.hxx`). + +These `*.hxx` headers are not the ones you include in your program. Instead, +include the versions without filename suffix (e.g. `pqxx/connection`). Those +will include the actual .hxx files for you. This was done so that includes are +in standard C++ style (as in `` etc.), but an editor will still +recognize them as files containing C++ code. + +Continuing the list of classes, you will most likely also need the result class +(`pqxx/result.hxx`). 
In a nutshell, you create a `connection` based on a +Postgres connection string (see below), create a `work` in the context of that +connection, and run one or more queries on the work which return `result` +objects. The results are containers of rows of data, each of which you can +treat as an array of strings: one for each field in the row. It's that simple. + +Here is a simple example program to get you going, with full error handling: + +```c++ + #include <iostream> + #include <pqxx/pqxx> + + int main() + { + try + { + // Connect to the database. + pqxx::connection C; + std::cout << "Connected to " << C.dbname() << '\n'; + + // Start a transaction. + pqxx::work W{C}; + + // Perform a query and retrieve all results. + pqxx::result R{W.exec("SELECT name FROM employee")}; + + // Iterate over results. + std::cout << "Found " << R.size() << " employees:\n"; + for (auto row: R) + std::cout << row[0].c_str() << '\n'; + + // Perform a query and check that it returns no result. + std::cout << "Doubling all employees' salaries...\n"; + W.exec0("UPDATE employee SET salary = salary*2"); + + // Commit the transaction. + std::cout << "Making changes definite: "; + W.commit(); + std::cout << "OK.\n"; + } + catch (std::exception const &e) + { + std::cerr << e.what() << '\n'; + return 1; + } + return 0; + } +``` + + +Connection strings +------------------ + +Postgres connection strings state which database server you wish to connect to, +under which username, using which password, and so on. Their format is defined +in the documentation for libpq, the C client interface for PostgreSQL. +Alternatively, these values may be defined by setting certain environment +variables as documented in e.g. the manual for psql, the command line interface +to PostgreSQL. Again the definitions are the same for libpqxx-based programs. + +The connection strings and variables are not fully and definitively documented +here; this document will tell you just enough to get going. Check the +PostgreSQL documentation for authoritative information. + +The connection string consists of attribute=value pairs separated by spaces, +e.g. "user=john password=1x2y3z4". The valid attributes include: +* `host` — + Name of server to connect to, or the full file path (beginning with a + slash) to a Unix-domain socket on the local machine. Defaults to + "/tmp". Equivalent to (but overrides) environment variable PGHOST. +* `hostaddr` — + IP address of a server to connect to; mutually exclusive with "host". +* `port` — + Port number at the server host to connect to, or socket file name + extension for Unix-domain connections. Equivalent to (but overrides) + environment variable PGPORT. +* `dbname` — + Name of the database to connect to. A single server may host multiple + databases. Defaults to the same name as the current user's name. + Equivalent to (but overrides) environment variable PGDATABASE. +* `user` — + User name to connect under. This defaults to the name of the current + user, although PostgreSQL users are not necessarily the same thing as + system users. +* `requiressl` — + If set to 1, demands an encrypted SSL connection (and fails if no SSL + connection can be created). + +Settings in the connection strings override the environment variables, which in +turn override the default, on a variable-by-variable basis. You only need to +define those variables that require non-default values. + + +Linking with libpqxx +-------------------- + +To link your final program, make sure you link to both the C-level libpq library +and the actual C++ library, libpqxx.
With most Unix-style compilers, you'd do +this using the options + + -lpqxx -lpq + +while linking. Both libraries must be in your link path, so the linker knows +where to find them. Any dynamic libraries you use must also be in a place +where the loader can find them when loading your program at runtime. + +Some users have reported problems using the above syntax, however, particularly +when multiple versions of libpqxx are partially or incorrectly installed on the +system. If you get massive link errors, try removing the "-lpqxx" argument from +the command line and replacing it with the name of the libpqxx library binary +instead. That's typically libpqxx.a, but you'll have to add the path to its +location as well, e.g. /usr/local/pqxx/lib/libpqxx.a. This will ensure that the +linker will use that exact version of the library rather than one found +elsewhere on the system, and eliminate worries about the exact right version of +the library being installed with your program.. diff --git a/ext/libpqxx-7.7.3/VERSION b/ext/libpqxx-7.7.3/VERSION new file mode 100644 index 000000000..d92ba80de --- /dev/null +++ b/ext/libpqxx-7.7.3/VERSION @@ -0,0 +1 @@ +7.7.3 diff --git a/ext/libpqxx-7.7.3/aclocal.m4 b/ext/libpqxx-7.7.3/aclocal.m4 new file mode 100644 index 000000000..2c938cff6 --- /dev/null +++ b/ext/libpqxx-7.7.3/aclocal.m4 @@ -0,0 +1,1187 @@ +# generated automatically by aclocal 1.16.4 -*- Autoconf -*- + +# Copyright (C) 1996-2021 Free Software Foundation, Inc. + +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])]) +m4_ifndef([AC_AUTOCONF_VERSION], + [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl +m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.69],, +[m4_warning([this file was generated for autoconf 2.69. +You have another version of autoconf. It may work, but is not guaranteed to. +If you have problems, you may need to regenerate the build system entirely. +To do so, use the procedure documented by the package, typically 'autoreconf'.])]) + +# Copyright (C) 2002-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_AUTOMAKE_VERSION(VERSION) +# ---------------------------- +# Automake X.Y traces this macro to ensure aclocal.m4 has been +# generated from the m4 files accompanying Automake X.Y. +# (This private macro should not be called outside this file.) +AC_DEFUN([AM_AUTOMAKE_VERSION], +[am__api_version='1.16' +dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to +dnl require some minimum version. Point them to the right macro. +m4_if([$1], [1.16.4], [], + [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl +]) + +# _AM_AUTOCONF_VERSION(VERSION) +# ----------------------------- +# aclocal traces this macro to find the Autoconf version. +# This is a private macro too. Using m4_define simplifies +# the logic in aclocal, which can simply ignore this definition. 
+m4_define([_AM_AUTOCONF_VERSION], []) + +# AM_SET_CURRENT_AUTOMAKE_VERSION +# ------------------------------- +# Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. +# This function is AC_REQUIREd by AM_INIT_AUTOMAKE. +AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], +[AM_AUTOMAKE_VERSION([1.16.4])dnl +m4_ifndef([AC_AUTOCONF_VERSION], + [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl +_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) + +# AM_AUX_DIR_EXPAND -*- Autoconf -*- + +# Copyright (C) 2001-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets +# $ac_aux_dir to '$srcdir/foo'. In other projects, it is set to +# '$srcdir', '$srcdir/..', or '$srcdir/../..'. +# +# Of course, Automake must honor this variable whenever it calls a +# tool from the auxiliary directory. The problem is that $srcdir (and +# therefore $ac_aux_dir as well) can be either absolute or relative, +# depending on how configure is run. This is pretty annoying, since +# it makes $ac_aux_dir quite unusable in subdirectories: in the top +# source directory, any form will work fine, but in subdirectories a +# relative path needs to be adjusted first. +# +# $ac_aux_dir/missing +# fails when called from a subdirectory if $ac_aux_dir is relative +# $top_srcdir/$ac_aux_dir/missing +# fails if $ac_aux_dir is absolute, +# fails when called from a subdirectory in a VPATH build with +# a relative $ac_aux_dir +# +# The reason of the latter failure is that $top_srcdir and $ac_aux_dir +# are both prefixed by $srcdir. In an in-source build this is usually +# harmless because $srcdir is '.', but things will broke when you +# start a VPATH build or use an absolute $srcdir. +# +# So we could use something similar to $top_srcdir/$ac_aux_dir/missing, +# iff we strip the leading $srcdir from $ac_aux_dir. That would be: +# am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` +# and then we would define $MISSING as +# MISSING="\${SHELL} $am_aux_dir/missing" +# This will work as long as MISSING is not called from configure, because +# unfortunately $(top_srcdir) has no meaning in configure. +# However there are other variables, like CC, which are often used in +# configure, and could therefore not use this "fixed" $ac_aux_dir. +# +# Another solution, used here, is to always expand $ac_aux_dir to an +# absolute PATH. The drawback is that using absolute paths prevent a +# configured tree to be moved without reconfiguration. + +AC_DEFUN([AM_AUX_DIR_EXPAND], +[AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl +# Expand $ac_aux_dir to an absolute path. +am_aux_dir=`cd "$ac_aux_dir" && pwd` +]) + +# AM_CONDITIONAL -*- Autoconf -*- + +# Copyright (C) 1997-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_CONDITIONAL(NAME, SHELL-CONDITION) +# ------------------------------------- +# Define a conditional. 
+AC_DEFUN([AM_CONDITIONAL], +[AC_PREREQ([2.52])dnl + m4_if([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])], + [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl +AC_SUBST([$1_TRUE])dnl +AC_SUBST([$1_FALSE])dnl +_AM_SUBST_NOTMAKE([$1_TRUE])dnl +_AM_SUBST_NOTMAKE([$1_FALSE])dnl +m4_define([_AM_COND_VALUE_$1], [$2])dnl +if $2; then + $1_TRUE= + $1_FALSE='#' +else + $1_TRUE='#' + $1_FALSE= +fi +AC_CONFIG_COMMANDS_PRE( +[if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then + AC_MSG_ERROR([[conditional "$1" was never defined. +Usually this means the macro was only invoked conditionally.]]) +fi])]) + +# Copyright (C) 1999-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + + +# There are a few dirty hacks below to avoid letting 'AC_PROG_CC' be +# written in clear, in which case automake, when reading aclocal.m4, +# will think it sees a *use*, and therefore will trigger all it's +# C support machinery. Also note that it means that autoscan, seeing +# CC etc. in the Makefile, will ask for an AC_PROG_CC use... + + +# _AM_DEPENDENCIES(NAME) +# ---------------------- +# See how the compiler implements dependency checking. +# NAME is "CC", "CXX", "OBJC", "OBJCXX", "UPC", or "GJC". +# We try a few techniques and use that to set a single cache variable. +# +# We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was +# modified to invoke _AM_DEPENDENCIES(CC); we would have a circular +# dependency, and given that the user is not expected to run this macro, +# just rely on AC_PROG_CC. +AC_DEFUN([_AM_DEPENDENCIES], +[AC_REQUIRE([AM_SET_DEPDIR])dnl +AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl +AC_REQUIRE([AM_MAKE_INCLUDE])dnl +AC_REQUIRE([AM_DEP_TRACK])dnl + +m4_if([$1], [CC], [depcc="$CC" am_compiler_list=], + [$1], [CXX], [depcc="$CXX" am_compiler_list=], + [$1], [OBJC], [depcc="$OBJC" am_compiler_list='gcc3 gcc'], + [$1], [OBJCXX], [depcc="$OBJCXX" am_compiler_list='gcc3 gcc'], + [$1], [UPC], [depcc="$UPC" am_compiler_list=], + [$1], [GCJ], [depcc="$GCJ" am_compiler_list='gcc3 gcc'], + [depcc="$$1" am_compiler_list=]) + +AC_CACHE_CHECK([dependency style of $depcc], + [am_cv_$1_dependencies_compiler_type], +[if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named 'D' -- because '-MD' means "put the output + # in D". + rm -rf conftest.dir + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. 
+ mkdir sub + + am_cv_$1_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp` + fi + am__universal=false + m4_case([$1], [CC], + [case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac], + [CXX], + [case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac]) + + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with + # Solaris 10 /bin/sh. + echo '/* dummy */' > sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + # We check with '-c' and '-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle '-M -o', and we need to detect this. Also, some Intel + # versions had trouble with output in subdirs. + am__obj=sub/conftest.${OBJEXT-o} + am__minus_obj="-o $am__obj" + case $depmode in + gcc) + # This depmode causes a compiler race in universal mode. + test "$am__universal" = false || continue + ;; + nosideeffect) + # After this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested. + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + msvc7 | msvc7msys | msvisualcpp | msvcmsys) + # This compiler won't grok '-c -o', but also, the minuso test has + # not run yet. These depmodes are late enough in the game, and + # so weak that their functioning should not be impacted. + am__obj=conftest.${OBJEXT-o} + am__minus_obj= + ;; + none) break ;; + esac + if depmode=$depmode \ + source=sub/conftest.c object=$am__obj \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep $am__obj sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_$1_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. + rm -rf conftest.dir +else + am_cv_$1_dependencies_compiler_type=none +fi +]) +AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type]) +AM_CONDITIONAL([am__fastdep$1], [ + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_$1_dependencies_compiler_type" = gcc3]) +]) + + +# AM_SET_DEPDIR +# ------------- +# Choose a directory name for dependency files. 
+# This macro is AC_REQUIREd in _AM_DEPENDENCIES. +AC_DEFUN([AM_SET_DEPDIR], +[AC_REQUIRE([AM_SET_LEADING_DOT])dnl +AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl +]) + + +# AM_DEP_TRACK +# ------------ +AC_DEFUN([AM_DEP_TRACK], +[AC_ARG_ENABLE([dependency-tracking], [dnl +AS_HELP_STRING( + [--enable-dependency-tracking], + [do not reject slow dependency extractors]) +AS_HELP_STRING( + [--disable-dependency-tracking], + [speeds up one-time build])]) +if test "x$enable_dependency_tracking" != xno; then + am_depcomp="$ac_aux_dir/depcomp" + AMDEPBACKSLASH='\' + am__nodep='_no' +fi +AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno]) +AC_SUBST([AMDEPBACKSLASH])dnl +_AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl +AC_SUBST([am__nodep])dnl +_AM_SUBST_NOTMAKE([am__nodep])dnl +]) + +# Generate code to set up dependency tracking. -*- Autoconf -*- + +# Copyright (C) 1999-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# _AM_OUTPUT_DEPENDENCY_COMMANDS +# ------------------------------ +AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], +[{ + # Older Autoconf quotes --file arguments for eval, but not when files + # are listed without --file. Let's play safe and only enable the eval + # if we detect the quoting. + # TODO: see whether this extra hack can be removed once we start + # requiring Autoconf 2.70 or later. + AS_CASE([$CONFIG_FILES], + [*\'*], [eval set x "$CONFIG_FILES"], + [*], [set x $CONFIG_FILES]) + shift + # Used to flag and report bootstrapping failures. + am_rc=0 + for am_mf + do + # Strip MF so we end up with the name of the file. + am_mf=`AS_ECHO(["$am_mf"]) | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile which includes + # dependency-tracking related rules and includes. + # Grep'ing the whole file directly is not great: AIX grep has a line + # limit of 2048, but all sed's we know have understand at least 4000. + sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \ + || continue + am_dirpart=`AS_DIRNAME(["$am_mf"])` + am_filepart=`AS_BASENAME(["$am_mf"])` + AM_RUN_LOG([cd "$am_dirpart" \ + && sed -e '/# am--include-marker/d' "$am_filepart" \ + | $MAKE -f - am--depfiles]) || am_rc=$? + done + if test $am_rc -ne 0; then + AC_MSG_FAILURE([Something went wrong bootstrapping makefile fragments + for automatic dependency tracking. If GNU make was not used, consider + re-running the configure script with MAKE="gmake" (or whatever is + necessary). You can also try re-running configure with the + '--disable-dependency-tracking' option to at least be able to build + the package (albeit without support for automatic dependency tracking).]) + fi + AS_UNSET([am_dirpart]) + AS_UNSET([am_filepart]) + AS_UNSET([am_mf]) + AS_UNSET([am_rc]) + rm -f conftest-deps.mk +} +])# _AM_OUTPUT_DEPENDENCY_COMMANDS + + +# AM_OUTPUT_DEPENDENCY_COMMANDS +# ----------------------------- +# This macro should only be invoked once -- use via AC_REQUIRE. +# +# This code is only required when automatic dependency tracking is enabled. +# This creates each '.Po' and '.Plo' makefile fragment that we'll need in +# order to bootstrap the dependency handling code. +AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], +[AC_CONFIG_COMMANDS([depfiles], + [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS], + [AMDEP_TRUE="$AMDEP_TRUE" MAKE="${MAKE-make}"])]) + +# Do all the work for Automake. 
-*- Autoconf -*- + +# Copyright (C) 1996-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This macro actually does too much. Some checks are only needed if +# your package does certain things. But this isn't really a big deal. + +dnl Redefine AC_PROG_CC to automatically invoke _AM_PROG_CC_C_O. +m4_define([AC_PROG_CC], +m4_defn([AC_PROG_CC]) +[_AM_PROG_CC_C_O +]) + +# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) +# AM_INIT_AUTOMAKE([OPTIONS]) +# ----------------------------------------------- +# The call with PACKAGE and VERSION arguments is the old style +# call (pre autoconf-2.50), which is being phased out. PACKAGE +# and VERSION should now be passed to AC_INIT and removed from +# the call to AM_INIT_AUTOMAKE. +# We support both call styles for the transition. After +# the next Automake release, Autoconf can make the AC_INIT +# arguments mandatory, and then we can depend on a new Autoconf +# release and drop the old call support. +AC_DEFUN([AM_INIT_AUTOMAKE], +[AC_PREREQ([2.65])dnl +dnl Autoconf wants to disallow AM_ names. We explicitly allow +dnl the ones we care about. +m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl +AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl +AC_REQUIRE([AC_PROG_INSTALL])dnl +if test "`cd $srcdir && pwd`" != "`pwd`"; then + # Use -I$(srcdir) only when $(srcdir) != ., so that make's output + # is not polluted with repeated "-I." + AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl + # test to see if srcdir already configured + if test -f $srcdir/config.status; then + AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) + fi +fi + +# test whether we have cygpath +if test -z "$CYGPATH_W"; then + if (cygpath --version) >/dev/null 2>/dev/null; then + CYGPATH_W='cygpath -w' + else + CYGPATH_W=echo + fi +fi +AC_SUBST([CYGPATH_W]) + +# Define the identity of the package. +dnl Distinguish between old-style and new-style calls. +m4_ifval([$2], +[AC_DIAGNOSE([obsolete], + [$0: two- and three-arguments forms are deprecated.]) +m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl + AC_SUBST([PACKAGE], [$1])dnl + AC_SUBST([VERSION], [$2])], +[_AM_SET_OPTIONS([$1])dnl +dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. +m4_if( + m4_ifset([AC_PACKAGE_NAME], [ok]):m4_ifset([AC_PACKAGE_VERSION], [ok]), + [ok:ok],, + [m4_fatal([AC_INIT should be called with package and version arguments])])dnl + AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl + AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl + +_AM_IF_OPTION([no-define],, +[AC_DEFINE_UNQUOTED([PACKAGE], ["$PACKAGE"], [Name of package]) + AC_DEFINE_UNQUOTED([VERSION], ["$VERSION"], [Version number of package])])dnl + +# Some tools Automake needs. +AC_REQUIRE([AM_SANITY_CHECK])dnl +AC_REQUIRE([AC_ARG_PROGRAM])dnl +AM_MISSING_PROG([ACLOCAL], [aclocal-${am__api_version}]) +AM_MISSING_PROG([AUTOCONF], [autoconf]) +AM_MISSING_PROG([AUTOMAKE], [automake-${am__api_version}]) +AM_MISSING_PROG([AUTOHEADER], [autoheader]) +AM_MISSING_PROG([MAKEINFO], [makeinfo]) +AC_REQUIRE([AM_PROG_INSTALL_SH])dnl +AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl +AC_REQUIRE([AC_PROG_MKDIR_P])dnl +# For better backward compatibility. To be removed once Automake 1.9.x +# dies out for good. For more background, see: +# +# +AC_SUBST([mkdir_p], ['$(MKDIR_P)']) +# We need awk for the "check" target (and possibly the TAP driver). 
The +# system "awk" is bad on some platforms. +AC_REQUIRE([AC_PROG_AWK])dnl +AC_REQUIRE([AC_PROG_MAKE_SET])dnl +AC_REQUIRE([AM_SET_LEADING_DOT])dnl +_AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], + [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], + [_AM_PROG_TAR([v7])])]) +_AM_IF_OPTION([no-dependencies],, +[AC_PROVIDE_IFELSE([AC_PROG_CC], + [_AM_DEPENDENCIES([CC])], + [m4_define([AC_PROG_CC], + m4_defn([AC_PROG_CC])[_AM_DEPENDENCIES([CC])])])dnl +AC_PROVIDE_IFELSE([AC_PROG_CXX], + [_AM_DEPENDENCIES([CXX])], + [m4_define([AC_PROG_CXX], + m4_defn([AC_PROG_CXX])[_AM_DEPENDENCIES([CXX])])])dnl +AC_PROVIDE_IFELSE([AC_PROG_OBJC], + [_AM_DEPENDENCIES([OBJC])], + [m4_define([AC_PROG_OBJC], + m4_defn([AC_PROG_OBJC])[_AM_DEPENDENCIES([OBJC])])])dnl +AC_PROVIDE_IFELSE([AC_PROG_OBJCXX], + [_AM_DEPENDENCIES([OBJCXX])], + [m4_define([AC_PROG_OBJCXX], + m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl +]) +# Variables for tags utilities; see am/tags.am +if test -z "$CTAGS"; then + CTAGS=ctags +fi +AC_SUBST([CTAGS]) +if test -z "$ETAGS"; then + ETAGS=etags +fi +AC_SUBST([ETAGS]) +if test -z "$CSCOPE"; then + CSCOPE=cscope +fi +AC_SUBST([CSCOPE]) + +AC_REQUIRE([AM_SILENT_RULES])dnl +dnl The testsuite driver may need to know about EXEEXT, so add the +dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This +dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below. +AC_CONFIG_COMMANDS_PRE(dnl +[m4_provide_if([_AM_COMPILER_EXEEXT], + [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl + +# POSIX will say in a future version that running "rm -f" with no argument +# is OK; and we want to be able to make that assumption in our Makefile +# recipes. So use an aggressive probe to check that the usage we want is +# actually supported "in the wild" to an acceptable degree. +# See automake bug#10828. +# To make any issue more visible, cause the running configure to be aborted +# by default if the 'rm' program in use doesn't match our expectations; the +# user can still override this though. +if rm -f && rm -fr && rm -rf; then : OK; else + cat >&2 <<'END' +Oops! + +Your 'rm' program seems unable to run without file operands specified +on the command line, even when the '-f' option is present. This is contrary +to the behaviour of most rm programs out there, and not conforming with +the upcoming POSIX standard: + +Please tell bug-automake@gnu.org about your system, including the value +of your $PATH and any error possibly output before this message. This +can help us improve future automake versions. + +END + if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then + echo 'Configuration will proceed anyway, since you have set the' >&2 + echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 + echo >&2 + else + cat >&2 <<'END' +Aborting the configuration process, to ensure you take notice of the issue. + +You can download and install GNU coreutils to get an 'rm' implementation +that behaves properly: . + +If you want to complete the configuration process using your problematic +'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM +to "yes", and re-run configure. + +END + AC_MSG_ERROR([Your 'rm' program is bad, sorry.]) + fi +fi +dnl The trailing newline in this macro's definition is deliberate, for +dnl backward compatibility and to allow trailing 'dnl'-style comments +dnl after the AM_INIT_AUTOMAKE invocation. See automake bug#16841. +]) + +dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion. 
Do not +dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further +dnl mangled by Autoconf and run in a shell conditional statement. +m4_define([_AC_COMPILER_EXEEXT], +m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])]) + +# When config.status generates a header, we must update the stamp-h file. +# This file resides in the same directory as the config header +# that is generated. The stamp files are numbered to have different names. + +# Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the +# loop where config.status creates the headers, so we can generate +# our stamp files there. +AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], +[# Compute $1's index in $config_headers. +_am_arg=$1 +_am_stamp_count=1 +for _am_header in $config_headers :; do + case $_am_header in + $_am_arg | $_am_arg:* ) + break ;; + * ) + _am_stamp_count=`expr $_am_stamp_count + 1` ;; + esac +done +echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) + +# Copyright (C) 2001-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_PROG_INSTALL_SH +# ------------------ +# Define $install_sh. +AC_DEFUN([AM_PROG_INSTALL_SH], +[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +if test x"${install_sh+set}" != xset; then + case $am_aux_dir in + *\ * | *\ *) + install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; + *) + install_sh="\${SHELL} $am_aux_dir/install-sh" + esac +fi +AC_SUBST([install_sh])]) + +# Copyright (C) 2003-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# Check whether the underlying file-system supports filenames +# with a leading dot. For instance MS-DOS doesn't. +AC_DEFUN([AM_SET_LEADING_DOT], +[rm -rf .tst 2>/dev/null +mkdir .tst 2>/dev/null +if test -d .tst; then + am__leading_dot=. +else + am__leading_dot=_ +fi +rmdir .tst 2>/dev/null +AC_SUBST([am__leading_dot])]) + +# Add --enable-maintainer-mode option to configure. -*- Autoconf -*- +# From Jim Meyering + +# Copyright (C) 1996-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_MAINTAINER_MODE([DEFAULT-MODE]) +# ---------------------------------- +# Control maintainer-specific portions of Makefiles. +# Default is to disable them, unless 'enable' is passed literally. +# For symmetry, 'disable' may be passed as well. Anyway, the user +# can override the default with the --enable/--disable switch. 
+AC_DEFUN([AM_MAINTAINER_MODE], +[m4_case(m4_default([$1], [disable]), + [enable], [m4_define([am_maintainer_other], [disable])], + [disable], [m4_define([am_maintainer_other], [enable])], + [m4_define([am_maintainer_other], [enable]) + m4_warn([syntax], [unexpected argument to AM@&t@_MAINTAINER_MODE: $1])]) +AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles]) + dnl maintainer-mode's default is 'disable' unless 'enable' is passed + AC_ARG_ENABLE([maintainer-mode], + [AS_HELP_STRING([--]am_maintainer_other[-maintainer-mode], + am_maintainer_other[ make rules and dependencies not useful + (and sometimes confusing) to the casual installer])], + [USE_MAINTAINER_MODE=$enableval], + [USE_MAINTAINER_MODE=]m4_if(am_maintainer_other, [enable], [no], [yes])) + AC_MSG_RESULT([$USE_MAINTAINER_MODE]) + AM_CONDITIONAL([MAINTAINER_MODE], [test $USE_MAINTAINER_MODE = yes]) + MAINT=$MAINTAINER_MODE_TRUE + AC_SUBST([MAINT])dnl +] +) + +# Check to see how 'make' treats includes. -*- Autoconf -*- + +# Copyright (C) 2001-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_MAKE_INCLUDE() +# ----------------- +# Check whether make has an 'include' directive that can support all +# the idioms we need for our automatic dependency tracking code. +AC_DEFUN([AM_MAKE_INCLUDE], +[AC_MSG_CHECKING([whether ${MAKE-make} supports the include directive]) +cat > confinc.mk << 'END' +am__doit: + @echo this is the am__doit target >confinc.out +.PHONY: am__doit +END +am__include="#" +am__quote= +# BSD make does it like this. +echo '.include "confinc.mk" # ignored' > confmf.BSD +# Other make implementations (GNU, Solaris 10, AIX) do it like this. +echo 'include confinc.mk # ignored' > confmf.GNU +_am_result=no +for s in GNU BSD; do + AM_RUN_LOG([${MAKE-make} -f confmf.$s && cat confinc.out]) + AS_CASE([$?:`cat confinc.out 2>/dev/null`], + ['0:this is the am__doit target'], + [AS_CASE([$s], + [BSD], [am__include='.include' am__quote='"'], + [am__include='include' am__quote=''])]) + if test "$am__include" != "#"; then + _am_result="yes ($s style)" + break + fi +done +rm -f confinc.* confmf.* +AC_MSG_RESULT([${_am_result}]) +AC_SUBST([am__include])]) +AC_SUBST([am__quote])]) + +# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- + +# Copyright (C) 1997-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_MISSING_PROG(NAME, PROGRAM) +# ------------------------------ +AC_DEFUN([AM_MISSING_PROG], +[AC_REQUIRE([AM_MISSING_HAS_RUN]) +$1=${$1-"${am_missing_run}$2"} +AC_SUBST($1)]) + +# AM_MISSING_HAS_RUN +# ------------------ +# Define MISSING if not defined so far and test if it is modern enough. +# If it is, set am_missing_run to use it, otherwise, to nothing. +AC_DEFUN([AM_MISSING_HAS_RUN], +[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +AC_REQUIRE_AUX_FILE([missing])dnl +if test x"${MISSING+set}" != xset; then + MISSING="\${SHELL} '$am_aux_dir/missing'" +fi +# Use eval to expand $SHELL +if eval "$MISSING --is-lightweight"; then + am_missing_run="$MISSING " +else + am_missing_run= + AC_MSG_WARN(['missing' script is too old or missing]) +fi +]) + +# Helper functions for option handling. 
-*- Autoconf -*- + +# Copyright (C) 2001-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# _AM_MANGLE_OPTION(NAME) +# ----------------------- +AC_DEFUN([_AM_MANGLE_OPTION], +[[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) + +# _AM_SET_OPTION(NAME) +# -------------------- +# Set option NAME. Presently that only means defining a flag for this option. +AC_DEFUN([_AM_SET_OPTION], +[m4_define(_AM_MANGLE_OPTION([$1]), [1])]) + +# _AM_SET_OPTIONS(OPTIONS) +# ------------------------ +# OPTIONS is a space-separated list of Automake options. +AC_DEFUN([_AM_SET_OPTIONS], +[m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) + +# _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) +# ------------------------------------------- +# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. +AC_DEFUN([_AM_IF_OPTION], +[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) + +# Copyright (C) 1999-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# _AM_PROG_CC_C_O +# --------------- +# Like AC_PROG_CC_C_O, but changed for automake. We rewrite AC_PROG_CC +# to automatically call this. +AC_DEFUN([_AM_PROG_CC_C_O], +[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +AC_REQUIRE_AUX_FILE([compile])dnl +AC_LANG_PUSH([C])dnl +AC_CACHE_CHECK( + [whether $CC understands -c and -o together], + [am_cv_prog_cc_c_o], + [AC_LANG_CONFTEST([AC_LANG_PROGRAM([])]) + # Make sure it works both with $CC and with simple cc. + # Following AC_PROG_CC_C_O, we do the test twice because some + # compilers refuse to overwrite an existing .o file with -o, + # though they will create one. + am_cv_prog_cc_c_o=yes + for am_i in 1 2; do + if AM_RUN_LOG([$CC -c conftest.$ac_ext -o conftest2.$ac_objext]) \ + && test -f conftest2.$ac_objext; then + : OK + else + am_cv_prog_cc_c_o=no + break + fi + done + rm -f core conftest* + unset am_i]) +if test "$am_cv_prog_cc_c_o" != yes; then + # Losing compiler, so override with the script. + # FIXME: It is wrong to rewrite CC. + # But if we don't then we get into trouble of one sort or another. + # A longer-term fix would be to have automake use am__CC in this case, + # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" + CC="$am_aux_dir/compile $CC" +fi +AC_LANG_POP([C])]) + +# For backward compatibility. +AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])]) + +# Copyright (C) 2001-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_RUN_LOG(COMMAND) +# ------------------- +# Run COMMAND, save the exit status in ac_status, and log it. +# (This has been adapted from Autoconf's _AC_RUN_LOG macro.) +AC_DEFUN([AM_RUN_LOG], +[{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD + ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + (exit $ac_status); }]) + +# Check to make sure that the build environment is sane. -*- Autoconf -*- + +# Copyright (C) 1996-2021 Free Software Foundation, Inc. 
+# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_SANITY_CHECK +# --------------- +AC_DEFUN([AM_SANITY_CHECK], +[AC_MSG_CHECKING([whether build environment is sane]) +# Reject unsafe characters in $srcdir or the absolute working directory +# name. Accept space and tab only in the latter. +am_lf=' +' +case `pwd` in + *[[\\\"\#\$\&\'\`$am_lf]]*) + AC_MSG_ERROR([unsafe absolute working directory name]);; +esac +case $srcdir in + *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*) + AC_MSG_ERROR([unsafe srcdir value: '$srcdir']);; +esac + +# Do 'set' in a subshell so we don't clobber the current shell's +# arguments. Must try -L first in case configure is actually a +# symlink; some systems play weird games with the mod time of symlinks +# (eg FreeBSD returns the mod time of the symlink's containing +# directory). +if ( + am_has_slept=no + for am_try in 1 2; do + echo "timestamp, slept: $am_has_slept" > conftest.file + set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` + if test "$[*]" = "X"; then + # -L didn't work. + set X `ls -t "$srcdir/configure" conftest.file` + fi + if test "$[*]" != "X $srcdir/configure conftest.file" \ + && test "$[*]" != "X conftest.file $srcdir/configure"; then + + # If neither matched, then we have a broken ls. This can happen + # if, for instance, CONFIG_SHELL is bash and it inherits a + # broken ls alias from the environment. This has actually + # happened. Such a system could not be considered "sane". + AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken + alias in your environment]) + fi + if test "$[2]" = conftest.file || test $am_try -eq 2; then + break + fi + # Just in case. + sleep 1 + am_has_slept=yes + done + test "$[2]" = conftest.file + ) +then + # Ok. + : +else + AC_MSG_ERROR([newly created file is older than distributed files! +Check your system clock]) +fi +AC_MSG_RESULT([yes]) +# If we didn't sleep, we still need to ensure time stamps of config.status and +# generated files are strictly newer. +am_sleep_pid= +if grep 'slept: no' conftest.file >/dev/null 2>&1; then + ( sleep 1 ) & + am_sleep_pid=$! +fi +AC_CONFIG_COMMANDS_PRE( + [AC_MSG_CHECKING([that generated files are newer than configure]) + if test -n "$am_sleep_pid"; then + # Hide warnings about reused PIDs. + wait $am_sleep_pid 2>/dev/null + fi + AC_MSG_RESULT([done])]) +rm -f conftest.file +]) + +# Copyright (C) 2009-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_SILENT_RULES([DEFAULT]) +# -------------------------- +# Enable less verbose build rules; with the default set to DEFAULT +# ("yes" being less verbose, "no" or empty being verbose). +AC_DEFUN([AM_SILENT_RULES], +[AC_ARG_ENABLE([silent-rules], [dnl +AS_HELP_STRING( + [--enable-silent-rules], + [less verbose build output (undo: "make V=1")]) +AS_HELP_STRING( + [--disable-silent-rules], + [verbose build output (undo: "make V=0")])dnl +]) +case $enable_silent_rules in @%:@ ((( + yes) AM_DEFAULT_VERBOSITY=0;; + no) AM_DEFAULT_VERBOSITY=1;; + *) AM_DEFAULT_VERBOSITY=m4_if([$1], [yes], [0], [1]);; +esac +dnl +dnl A few 'make' implementations (e.g., NonStop OS and NextStep) +dnl do not support nested variable expansions. +dnl See automake bug#9928 and bug#10237. 
+am_make=${MAKE-make} +AC_CACHE_CHECK([whether $am_make supports nested variables], + [am_cv_make_support_nested_variables], + [if AS_ECHO([['TRUE=$(BAR$(V)) +BAR0=false +BAR1=true +V=1 +am__doit: + @$(TRUE) +.PHONY: am__doit']]) | $am_make -f - >/dev/null 2>&1; then + am_cv_make_support_nested_variables=yes +else + am_cv_make_support_nested_variables=no +fi]) +if test $am_cv_make_support_nested_variables = yes; then + dnl Using '$V' instead of '$(V)' breaks IRIX make. + AM_V='$(V)' + AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' +else + AM_V=$AM_DEFAULT_VERBOSITY + AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY +fi +AC_SUBST([AM_V])dnl +AM_SUBST_NOTMAKE([AM_V])dnl +AC_SUBST([AM_DEFAULT_V])dnl +AM_SUBST_NOTMAKE([AM_DEFAULT_V])dnl +AC_SUBST([AM_DEFAULT_VERBOSITY])dnl +AM_BACKSLASH='\' +AC_SUBST([AM_BACKSLASH])dnl +_AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl +]) + +# Copyright (C) 2001-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# AM_PROG_INSTALL_STRIP +# --------------------- +# One issue with vendor 'install' (even GNU) is that you can't +# specify the program used to strip binaries. This is especially +# annoying in cross-compiling environments, where the build's strip +# is unlikely to handle the host's binaries. +# Fortunately install-sh will honor a STRIPPROG variable, so we +# always use install-sh in "make install-strip", and initialize +# STRIPPROG with the value of the STRIP variable (set by the user). +AC_DEFUN([AM_PROG_INSTALL_STRIP], +[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl +# Installed binaries are usually stripped using 'strip' when the user +# run "make install-strip". However 'strip' might not be the right +# tool to use in cross-compilation environments, therefore Automake +# will honor the 'STRIP' environment variable to overrule this program. +dnl Don't test for $cross_compiling = yes, because it might be 'maybe'. +if test "$cross_compiling" != no; then + AC_CHECK_TOOL([STRIP], [strip], :) +fi +INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" +AC_SUBST([INSTALL_STRIP_PROGRAM])]) + +# Copyright (C) 2006-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# _AM_SUBST_NOTMAKE(VARIABLE) +# --------------------------- +# Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in. +# This macro is traced by Automake. +AC_DEFUN([_AM_SUBST_NOTMAKE]) + +# AM_SUBST_NOTMAKE(VARIABLE) +# -------------------------- +# Public sister of _AM_SUBST_NOTMAKE. +AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) + +# Check how to create a tarball. -*- Autoconf -*- + +# Copyright (C) 2004-2021 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# _AM_PROG_TAR(FORMAT) +# -------------------- +# Check how to create a tarball in format FORMAT. +# FORMAT should be one of 'v7', 'ustar', or 'pax'. +# +# Substitute a variable $(am__tar) that is a command +# writing to stdout a FORMAT-tarball containing the directory +# $tardir. +# tardir=directory && $(am__tar) > result.tar +# +# Substitute a variable $(am__untar) that extract such +# a tarball read from stdin. 
+# $(am__untar) < result.tar +# +AC_DEFUN([_AM_PROG_TAR], +[# Always define AMTAR for backward compatibility. Yes, it's still used +# in the wild :-( We should find a proper way to deprecate it ... +AC_SUBST([AMTAR], ['$${TAR-tar}']) + +# We'll loop over all known methods to create a tar archive until one works. +_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none' + +m4_if([$1], [v7], + [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'], + + [m4_case([$1], + [ustar], + [# The POSIX 1988 'ustar' format is defined with fixed-size fields. + # There is notably a 21 bits limit for the UID and the GID. In fact, + # the 'pax' utility can hang on bigger UID/GID (see automake bug#8343 + # and bug#13588). + am_max_uid=2097151 # 2^21 - 1 + am_max_gid=$am_max_uid + # The $UID and $GID variables are not portable, so we need to resort + # to the POSIX-mandated id(1) utility. Errors in the 'id' calls + # below are definitely unexpected, so allow the users to see them + # (that is, avoid stderr redirection). + am_uid=`id -u || echo unknown` + am_gid=`id -g || echo unknown` + AC_MSG_CHECKING([whether UID '$am_uid' is supported by ustar format]) + if test $am_uid -le $am_max_uid; then + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + _am_tools=none + fi + AC_MSG_CHECKING([whether GID '$am_gid' is supported by ustar format]) + if test $am_gid -le $am_max_gid; then + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + _am_tools=none + fi], + + [pax], + [], + + [m4_fatal([Unknown tar format])]) + + AC_MSG_CHECKING([how to create a $1 tar archive]) + + # Go ahead even if we have the value already cached. We do so because we + # need to set the values for the 'am__tar' and 'am__untar' variables. + _am_tools=${am_cv_prog_tar_$1-$_am_tools} + + for _am_tool in $_am_tools; do + case $_am_tool in + gnutar) + for _am_tar in tar gnutar gtar; do + AM_RUN_LOG([$_am_tar --version]) && break + done + am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"' + am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"' + am__untar="$_am_tar -xf -" + ;; + plaintar) + # Must skip GNU tar: if it does not support --format= it doesn't create + # ustar tarball either. + (tar --version) >/dev/null 2>&1 && continue + am__tar='tar chf - "$$tardir"' + am__tar_='tar chf - "$tardir"' + am__untar='tar xf -' + ;; + pax) + am__tar='pax -L -x $1 -w "$$tardir"' + am__tar_='pax -L -x $1 -w "$tardir"' + am__untar='pax -r' + ;; + cpio) + am__tar='find "$$tardir" -print | cpio -o -H $1 -L' + am__tar_='find "$tardir" -print | cpio -o -H $1 -L' + am__untar='cpio -i -H $1 -d' + ;; + none) + am__tar=false + am__tar_=false + am__untar=false + ;; + esac + + # If the value was cached, stop now. We just wanted to have am__tar + # and am__untar set. + test -n "${am_cv_prog_tar_$1}" && break + + # tar/untar a dummy directory, and stop if the command works. 
+ rm -rf conftest.dir + mkdir conftest.dir + echo GrepMe > conftest.dir/file + AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar]) + rm -rf conftest.dir + if test -s conftest.tar; then + AM_RUN_LOG([$am__untar <conftest.tar]) + AM_RUN_LOG([cat conftest.dir/file]) + grep GrepMe conftest.dir/file >/dev/null 2>&1 && break + fi + done + rm -rf conftest.dir + + AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool]) + AC_MSG_RESULT([$am_cv_prog_tar_$1])]) + +AC_SUBST([am__tar]) +AC_SUBST([am__untar]) +]) # _AM_PROG_TAR + +m4_include([config/m4/libtool.m4]) +m4_include([config/m4/ltoptions.m4]) +m4_include([config/m4/ltsugar.m4]) +m4_include([config/m4/ltversion.m4]) +m4_include([config/m4/lt~obsolete.m4]) diff --git a/ext/libpqxx-7.7.3/appveyor.yml b/ext/libpqxx-7.7.3/appveyor.yml new file mode 100644 index 000000000..110bd75ad --- /dev/null +++ b/ext/libpqxx-7.7.3/appveyor.yml @@ -0,0 +1,30 @@ +# Configuration for test runs in Appveyor. +version: 1.0.{build} +image: Visual Studio 2022 +services: postgresql13 +# Run CMake to build libpqxx.sln. +before_build: +- cmd: >- + call "C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Auxiliary\Build\vcvars64.bat" + + cmake -DBUILD_SHARED_LIBS=1 -DCMAKE_CXX_STANDARD=23 +configuration: Release +build: + parallel: true + project: libpqxx.sln +test_script: +- ps: >- + $env:Path += ";.\src\Release;C:\Program Files\PostgreSQL\13\bin" + + $env:PGUSER = "postgres" + + $env:PGPASSWORD = "Password12!" + + .\test\Release\runner.exe +notifications: +- provider: Email + subject: 'libpqxx: AppVeyor build failure' + message: The libpqxx AppVeyor build has failed. + on_build_success: false + on_build_failure: true + on_build_status_changed: false diff --git a/ext/libpqxx-7.7.3/autogen.sh b/ext/libpqxx-7.7.3/autogen.sh new file mode 100755 index 000000000..a09cb5ae7 --- /dev/null +++ b/ext/libpqxx-7.7.3/autogen.sh @@ -0,0 +1,44 @@ +#! /bin/sh +# Run this to generate the configure script etc. + +set -eu + +PQXXVERSION=$(./tools/extract_version) +PQXX_ABI=$(./tools/extract_version --abi) +PQXX_MAJOR=$(./tools/extract_version --major) +PQXX_MINOR=$(./tools/extract_version --minor) +echo "libpqxx version $PQXXVERSION" +echo "libpqxx ABI version $PQXX_ABI" + +substitute() { + sed -e "s/@PQXXVERSION@/$PQXXVERSION/g" \ + -e "s/@PQXX_MAJOR@/$PQXX_MAJOR/g" \ + -e "s/@PQXX_MINOR@/$PQXX_MINOR/g" \ + -e "s/@PQXX_ABI@/$PQXX_ABI/g" \ + "$1" +} + + +# Use templating system to generate various Makefiles. +expand_templates() { + for template in "$@" + do + ./tools/template2mak.py "$template" "${template%.template}" + done +} + + +# We have two kinds of templates. One uses our custom templating tool. And +# a few others simply have some substitutions done. +expand_templates $(find -name \*.template) +substitute include/pqxx/version.hxx.template >include/pqxx/version.hxx +substitute include/pqxx/doc/mainpage.md.template >include/pqxx/doc/mainpage.md + + +autoheader +libtoolize --force --automake --copy +aclocal -I . -I config/m4 +automake --add-missing --copy +autoconf + +echo "Done."
diff --git a/ext/libpqxx-7.7.3/cmake/config.cmake b/ext/libpqxx-7.7.3/cmake/config.cmake new file mode 100644 index 000000000..816ee4474 --- /dev/null +++ b/ext/libpqxx-7.7.3/cmake/config.cmake @@ -0,0 +1,157 @@ +function(detect_code_compiled code macro msg) + message(STATUS "Detecting ${msg}") + check_cxx_source_compiles("${code}" "${macro}" FAIL_REGEX "warning") + if(${macro}) + message(STATUS "Detecting ${msg} - supported") + else() + message(STATUS "Detecting ${msg} - not supported") + endif() +endfunction(detect_code_compiled) + +include(CheckIncludeFileCXX) +include(CheckFunctionExists) +include(CheckSymbolExists) +include(CMakeDetermineCompileFeatures) +include(CheckCXXSourceCompiles) +include(CMakeFindDependencyMacro) + +if(NOT PostgreSQL_FOUND) + if(POLICY CMP0074) + cmake_policy(PUSH) + # CMP0074 is `OLD` by `cmake_minimum_required(VERSION 3.7)`, + # sets `NEW` to enable support CMake variable `PostgreSQL_ROOT`. + cmake_policy(SET CMP0074 NEW) + endif() + + find_package(PostgreSQL REQUIRED) + + if(POLICY CMP0074) + cmake_policy(POP) + endif() +endif() + +check_function_exists("poll" PQXX_HAVE_POLL) + +set(CMAKE_REQUIRED_LIBRARIES pq) +check_symbol_exists( + PQencryptPasswordConn + "${PostgreSQL_INCLUDE_DIR}/libpq-fe.h" + PQXX_HAVE_PQENCRYPTPASSWORDCONN) +check_symbol_exists( + PQenterPipelineMode + "${PostgreSQL_INCLUDE_DIR}/libpq-fe.h" + PQXX_HAVE_PQ_PIPELINE) + +cmake_determine_compile_features(CXX) +cmake_policy(SET CMP0057 NEW) + +# check_cxx_source_compiles requires CMAKE_REQUIRED_DEFINITIONS to specify +# compiling arguments. +# Wordaround: Push CMAKE_REQUIRED_DEFINITIONS +if(CMAKE_REQUIRED_DEFINITIONS) + set(def "${CMAKE_REQUIRED_DEFINITIONS}") +endif() +set(CMAKE_REQUIRED_DEFINITIONS ${CMAKE_CXX${CMAKE_CXX_STANDARD}_STANDARD_COMPILE_OPTION}) +set(CMAKE_REQUIRED_QUIET ON) + +try_compile( + PQXX_HAVE_GCC_PURE + ${PROJECT_BINARY_DIR} + SOURCES ${PROJECT_SOURCE_DIR}/config-tests/gcc_pure.cxx) +try_compile( + PQXX_HAVE_GCC_VISIBILITY + ${PROJECT_BINARY_DIR} + SOURCES ${PROJECT_SOURCE_DIR}/config-tests/gcc_visibility.cxx) +try_compile( + PQXX_HAVE_LIKELY + ${PROJECT_BINARY_DIR} + SOURCES ${PROJECT_SOURCE_DIR}/config-tests/likely.cxx) +try_compile( + PQXX_HAVE_CXA_DEMANGLE + ${PROJECT_BINARY_DIR} + SOURCES ${PROJECT_SOURCE_DIR}/config-tests/cxa_demangle.cxx) +try_compile( + PQXX_HAVE_CONCEPTS + ${PROJECT_BINARY_DIR} + SOURCES ${PROJECT_SOURCE_DIR}/config-tests/concepts.cxx) +try_compile( + PQXX_HAVE_SPAN + ${PROJECT_BINARY_DIR} + SOURCES ${PROJECT_SOURCE_DIR}/config-tests/span.cxx) +try_compile( + PQXX_HAVE_MULTIDIMENSIONAL_SUBSCRIPT + ${PROJECT_BINARY_DIR} + SOURCES ${PROJECT_SOURCE_DIR}/config-tests/multidim-subscript.cxx) +try_compile( + PQXX_HAVE_CHARCONV_FLOAT + ${PROJECT_BINARY_DIR} + SOURCES ${PROJECT_SOURCE_DIR}/config-tests/charconv_float.cxx) +try_compile( + PQXX_HAVE_CHARCONV_INT + ${PROJECT_BINARY_DIR} + SOURCES ${PROJECT_SOURCE_DIR}/config-tests/charconv_int.cxx) +try_compile( + PQXX_HAVE_PATH + ${PROJECT_BINARY_DIR} + SOURCES ${PROJECT_SOURCE_DIR}/config-tests/fs.cxx) +try_compile( + PQXX_HAVE_THREAD_LOCAL + ${PROJECT_BINARY_DIR} + SOURCES ${PROJECT_SOURCE_DIR}/config-tests/thread_local.cxx) +try_compile( + PQXX_HAVE_SLEEP_FOR + ${PROJECT_BINARY_DIR} + SOURCES ${PROJECT_SOURCE_DIR}/config-tests/sleep_for.cxx) +try_compile( + PQXX_HAVE_STRERROR_R + ${PROJECT_BINARY_DIR} + SOURCES ${PROJECT_SOURCE_DIR}/config-tests/strerror_r.cxx) +try_compile( + PQXX_HAVE_STRERROR_S + ${PROJECT_BINARY_DIR} + SOURCES ${PROJECT_SOURCE_DIR}/config-tests/strerror_s.cxx) +try_compile( + 
PQXX_HAVE_YEAR_MONTH_DAY + ${PROJECT_BINARY_DIR} + SOURCES ${PROJECT_SOURCE_DIR}/config-tests/year_month_day.cxx) +try_compile( + PQXX_HAVE_CMP + ${PROJECT_BINARY_DIR} + SOURCES ${PROJECT_SOURCE_DIR}/config-tests/cmp.cxx) + +try_compile( + need_fslib + ${PROJECT_BINARY_DIR} + SOURCES ${PROJECT_SOURCE_DIR}/config-tests/need_fslib.cxx) +if(!need_fslib) + # TODO: This may work for gcc 8, but some clang versions may need -lc++fs. + link_libraries(stdc++fs) +endif() + +# check_cxx_source_compiles requires CMAKE_REQUIRED_DEFINITIONS to specify +# compiling arguments. +# Workaround: Pop CMAKE_REQUIRED_DEFINITIONS +if(def) + set(CMAKE_REQUIRED_DEFINITIONS ${def}) + unset(def CACHE) +else() + unset(CMAKE_REQUIRED_DEFINITIONS CACHE) +endif() +set(CMAKE_REQUIRED_QUIET OFF) + +set(AC_CONFIG_H_IN "${PROJECT_SOURCE_DIR}/include/pqxx/config.h.in") +set(CM_CONFIG_H_IN "${PROJECT_BINARY_DIR}/include/pqxx/config_cmake.h.in") +set(CM_CONFIG_PUB "${PROJECT_BINARY_DIR}/include/pqxx/config-public-compiler.h") +set(CM_CONFIG_INT "${PROJECT_BINARY_DIR}/include/pqxx/config-internal-compiler.h") +set(CM_CONFIG_PQ "${PROJECT_BINARY_DIR}/include/pqxx/config-internal-libpq.h") +message(STATUS "Generating config.h") +file(WRITE "${CM_CONFIG_H_IN}" "") +file(STRINGS "${AC_CONFIG_H_IN}" lines) +foreach(line ${lines}) + string(REGEX REPLACE "^#undef" "#cmakedefine" l "${line}") + file(APPEND "${CM_CONFIG_H_IN}" "${l}\n") +endforeach() +configure_file("${CM_CONFIG_H_IN}" "${CM_CONFIG_INT}" @ONLY) +configure_file("${CM_CONFIG_H_IN}" "${CM_CONFIG_PUB}" @ONLY) +configure_file("${CM_CONFIG_H_IN}" "${CM_CONFIG_PQ}" @ONLY) +message(STATUS "Generating config.h - done") diff --git a/ext/libpqxx-7.7.3/cmake/libpqxx-config.cmake b/ext/libpqxx-7.7.3/cmake/libpqxx-config.cmake new file mode 100644 index 000000000..cb25a05f2 --- /dev/null +++ b/ext/libpqxx-7.7.3/cmake/libpqxx-config.cmake @@ -0,0 +1,4 @@ +include(CMakeFindDependencyMacro) +find_dependency(PostgreSQL) + +include("${CMAKE_CURRENT_LIST_DIR}/libpqxx-targets.cmake") diff --git a/ext/libpqxx-7.7.3/compile_flags.in b/ext/libpqxx-7.7.3/compile_flags.in new file mode 100644 index 000000000..1304e8aa8 --- /dev/null +++ b/ext/libpqxx-7.7.3/compile_flags.in @@ -0,0 +1 @@ +@CPPFLAGS@ @CXXFLAGS@ diff --git a/ext/libpqxx-7.7.3/config-tests/README.md b/ext/libpqxx-7.7.3/config-tests/README.md new file mode 100644 index 000000000..b3865d0f0 --- /dev/null +++ b/ext/libpqxx-7.7.3/config-tests/README.md @@ -0,0 +1,22 @@ +Configuration tests +=================== + +Libpqxx comes with support for different build systems: the GNU autotools, +CMake, Visual Studio's "nmake", and raw GNU "make" on Windows. + +For several of these build systems, we need to test things like "does this +compiler environment support `std::to_chars` for floating-point types?" + +We test these things by trying to compile a particular snippet of code, and +seeing whether that succeeds. + +To avoid duplicating those snippets for multiple build systems, we put them +here. Both the autotools configuration and the CMake configuration can refer to +them that way. + +It took a bit of nasty magic to read a C++ source file into m4 and treat it as +a string literal, without macro expansion. There is every chance that I missed +something, so be prepared for tests failing for unexpected reasons! Some C++ +syntax may end up having an unforeseen meaning in m4, and screw up the handling +of the code snippet. Re-configure, and read your logs carefully after editing +these snippets. 
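To make the pattern concrete, here is a minimal sketch of what such a probe can look like. This snippet is illustrative only and is not one of the files in this directory; the feature it checks (C++23's monadic std::optional interface) and the idea of using it as a probe are just an assumed example of the style:

```cxx
// Hypothetical probe, written in the same style as the snippets in this
// directory: it compiles (and returns 0) only if the feature works.
#include <optional>

int main()
{
  std::optional<int> v{1};
  // std::optional::and_then() only exists when the C++23 monadic
  // operations are available, so older toolchains fail to compile this.
  auto w = v.and_then([](int x) { return std::optional<int>{x + 1}; });
  return w.value_or(0) == 2 ? 0 : 1;
}
```

Whichever build system drives the check (autoconf compiling it during configure, or CMake via try_compile), the only signal it needs is whether the compiler accepts the snippet.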
diff --git a/ext/libpqxx-7.7.3/config-tests/charconv_float.cxx b/ext/libpqxx-7.7.3/config-tests/charconv_float.cxx new file mode 100644 index 000000000..bc5d973e3 --- /dev/null +++ b/ext/libpqxx-7.7.3/config-tests/charconv_float.cxx @@ -0,0 +1,16 @@ +// Test for std::to_string/std::from_string for floating-point types. +#include <charconv> +#include <iterator> + +int main() +{ + char z[100]; + auto rt = std::to_chars(std::begin(z), std::end(z), 3.14159L); + if (rt.ec != std::errc{}) + return 1; + long double n; + auto rf = std::from_chars(std::cbegin(z), std::cend(z), n); + if (rf.ec != std::errc{}) + return 2; + return (n > 3 and n < 4) ? 0 : 1; +} diff --git a/ext/libpqxx-7.7.3/config-tests/charconv_int.cxx b/ext/libpqxx-7.7.3/config-tests/charconv_int.cxx new file mode 100644 index 000000000..076ee0de3 --- /dev/null +++ b/ext/libpqxx-7.7.3/config-tests/charconv_int.cxx @@ -0,0 +1,16 @@ +// Test for std::to_string/std::from_string for integral types. +#include <charconv> +#include <iterator> + +int main() +{ + char z[100]; + auto rt = std::to_chars(std::begin(z), std::end(z), 9ULL); + if (rt.ec != std::errc{}) + return 1; + unsigned long long n; + auto rf = std::from_chars(std::cbegin(z), std::cend(z), n); + if (rf.ec != std::errc{}) + return 2; + return (n == 9ULL) ? 0 : 1; +} diff --git a/ext/libpqxx-7.7.3/config-tests/cmp.cxx b/ext/libpqxx-7.7.3/config-tests/cmp.cxx new file mode 100644 index 000000000..7f5abaa5d --- /dev/null +++ b/ext/libpqxx-7.7.3/config-tests/cmp.cxx @@ -0,0 +1,8 @@ +// Test for C++20 std::cmp_greater etc. support. +#include <utility> + + +int main() +{ + return std::cmp_greater(-1, 2u) && std::cmp_less_equal(3, 0); +} diff --git a/ext/libpqxx-7.7.3/config-tests/concepts.cxx b/ext/libpqxx-7.7.3/config-tests/concepts.cxx new file mode 100644 index 000000000..5589b4e58 --- /dev/null +++ b/ext/libpqxx-7.7.3/config-tests/concepts.cxx @@ -0,0 +1,21 @@ +// Test for concepts support. Not just the language feature; also headers. +#include <iostream> +#include <ranges> +#include <vector> + + +template<typename T> +concept Foo = std::ranges::input_range<T>; + + +template<Foo F> auto foo(F const &r) +{ + return std::distance(std::begin(r), std::end(r)); +} + + +int main() +{ + std::vector<int> const v{1, 2, 3}; + std::cout << foo(v) << '\n'; +} diff --git a/ext/libpqxx-7.7.3/config-tests/cxa_demangle.cxx b/ext/libpqxx-7.7.3/config-tests/cxa_demangle.cxx new file mode 100644 index 000000000..4d4ee7e6b --- /dev/null +++ b/ext/libpqxx-7.7.3/config-tests/cxa_demangle.cxx @@ -0,0 +1,19 @@ +// Test for cross-vendor C++ ABI's __cxa_demangle function. +#include <cstdlib> +#include <cstring> +#include <stdexcept> +#include <typeinfo> + +#include <cxxabi.h> + +int main() +{ + int status = 0; + char *name = + abi::__cxa_demangle(typeid(10).name(), nullptr, nullptr, &status); + if (status != 0) + throw std::runtime_error("Demangle failed!"); + int result = std::strcmp(name, "int"); + std::free(name); + return result; +} diff --git a/ext/libpqxx-7.7.3/config-tests/fs.cxx b/ext/libpqxx-7.7.3/config-tests/fs.cxx new file mode 100644 index 000000000..d93d37f5d --- /dev/null +++ b/ext/libpqxx-7.7.3/config-tests/fs.cxx @@ -0,0 +1,9 @@ +// Check for working std::filesystem support. +#include <filesystem> + + +int main() +{ + // Apparently some versions of MinGW lack this comparison operator. + return std::filesystem::path{} != std::filesystem::path{}; +} diff --git a/ext/libpqxx-7.7.3/config-tests/gcc_pure.cxx b/ext/libpqxx-7.7.3/config-tests/gcc_pure.cxx new file mode 100644 index 000000000..4edd267c6 --- /dev/null +++ b/ext/libpqxx-7.7.3/config-tests/gcc_pure.cxx @@ -0,0 +1,10 @@ +// Test for gcc-style "pure" attribute.
+int __attribute__((pure)) f() +{ + return 0; +} + +int main() +{ + return f(); +} diff --git a/ext/libpqxx-7.7.3/config-tests/gcc_visibility.cxx b/ext/libpqxx-7.7.3/config-tests/gcc_visibility.cxx new file mode 100644 index 000000000..23d0111d7 --- /dev/null +++ b/ext/libpqxx-7.7.3/config-tests/gcc_visibility.cxx @@ -0,0 +1,12 @@ +// Test for gcc-style "visibility" attribute. +struct __attribute__((visibility("hidden"))) D +{ + D() {} + int f() { return 0; } +}; + +int main() +{ + D d; + return d.f(); +} diff --git a/ext/libpqxx-7.7.3/config-tests/likely.cxx b/ext/libpqxx-7.7.3/config-tests/likely.cxx new file mode 100644 index 000000000..9d0eca8d3 --- /dev/null +++ b/ext/libpqxx-7.7.3/config-tests/likely.cxx @@ -0,0 +1,15 @@ +// Test for C++20 [[likely]] and [[unlikely]] attributes. + +int main(int argc, char **) +{ +#if __cplusplus < 202002L + deliberately_fail(because, older, C++, standard); +#endif + + int x = 0; + if (argc == 1) [[likely]] + x = 0; + else + x = 1; + return x; +} diff --git a/ext/libpqxx-7.7.3/config-tests/multidim-subscript.cxx b/ext/libpqxx-7.7.3/config-tests/multidim-subscript.cxx new file mode 100644 index 000000000..f75d1aa7a --- /dev/null +++ b/ext/libpqxx-7.7.3/config-tests/multidim-subscript.cxx @@ -0,0 +1,14 @@ +// Test for multidimensional subscript operator support. +// Proposed for C++23: P2128R6. +struct table +{ + int width = 100; + + int operator[](int x, int y) const { return x + width * y; } +}; + + +int main() +{ + return table{}[0, 0]; +} diff --git a/ext/libpqxx-7.7.3/config-tests/need_fslib.cxx b/ext/libpqxx-7.7.3/config-tests/need_fslib.cxx new file mode 100644 index 000000000..041c46b43 --- /dev/null +++ b/ext/libpqxx-7.7.3/config-tests/need_fslib.cxx @@ -0,0 +1,21 @@ +// Check whether we need to link to the stdc++fs library. +// +// We assume that the presence of the <filesystem> header means that we have +// support for the basics of std::filesystem. This check will succeed if +// either there is no <filesystem> header, or there is one and it works without +// any special options. If the link fails, we assume that -lstdc++fs will fix +// it for us. + +#include <iostream> + +#if __has_include(<filesystem>) +# include <filesystem> +#endif + + +int main() +{ +#if __has_include(<filesystem>) + std::cout << std::filesystem::path{"foo.bar"}.c_str() << '\n'; +#endif +} diff --git a/ext/libpqxx-7.7.3/config-tests/poll.cxx b/ext/libpqxx-7.7.3/config-tests/poll.cxx new file mode 100644 index 000000000..d67cef55d --- /dev/null +++ b/ext/libpqxx-7.7.3/config-tests/poll.cxx @@ -0,0 +1,7 @@ +// Test for poll(). +#include <poll.h> + +int main() +{ + return poll(nullptr, 0, 0); +} diff --git a/ext/libpqxx-7.7.3/config-tests/sleep_for.cxx b/ext/libpqxx-7.7.3/config-tests/sleep_for.cxx new file mode 100644 index 000000000..f081fe0e5 --- /dev/null +++ b/ext/libpqxx-7.7.3/config-tests/sleep_for.cxx @@ -0,0 +1,28 @@ +// Test for std::this_thread::sleep_for(). +/* For some reason MinGW's <thread> header seems to be broken. + * + * But it gets worse. It looks as if we can include <thread> without problems + * in this configuration test. Why does it break when MinGW users try to build + * the library, but succeed when we try it here? + * + * To try and get close to the situation in the library code itself, we try + * including some standard headers that we don't strictly need here.
+ */ + +#if __has_include() +# include +#endif + +#include +#include +#include +#include +#include + +#include <chrono> +#include <thread> + +int main() +{ + std::this_thread::sleep_for(std::chrono::microseconds{10u}); +} diff --git a/ext/libpqxx-7.7.3/config-tests/span.cxx b/ext/libpqxx-7.7.3/config-tests/span.cxx new file mode 100644 index 000000000..8c13b97c9 --- /dev/null +++ b/ext/libpqxx-7.7.3/config-tests/span.cxx @@ -0,0 +1,8 @@ +// Test for std::span. +#include <span> + +int main(int argc, char **argv) +{ + std::span args{argv, static_cast<std::size_t>(argc)}; + return static_cast<int>(std::size(args) - 1u); +} diff --git a/ext/libpqxx-7.7.3/config-tests/strerror_r.cxx b/ext/libpqxx-7.7.3/config-tests/strerror_r.cxx new file mode 100644 index 000000000..604d3b899 --- /dev/null +++ b/ext/libpqxx-7.7.3/config-tests/strerror_r.cxx @@ -0,0 +1,14 @@ +// Check for strerror_r. +// It can be either the POSIX version (which returns int) or the GNU version +// (which returns char *). + +#include +#include + +int main() +{ + char buffer[200]; + auto res{strerror_r(1, buffer, 200)}; + // Sidestep type differences. We don't really care what the value is. + return not not res; +} diff --git a/ext/libpqxx-7.7.3/config-tests/strerror_s.cxx b/ext/libpqxx-7.7.3/config-tests/strerror_s.cxx new file mode 100644 index 000000000..f6f99a7a5 --- /dev/null +++ b/ext/libpqxx-7.7.3/config-tests/strerror_s.cxx @@ -0,0 +1,11 @@ +// Test for strerror_s, as defined in Windows and C11. +// Presumably this'll be part of the C++ standard some day. + +#include <cstring> + +int main() +{ + using namespace std; + char buf[200]; + return strerror_s(buf, 200, 1); +} diff --git a/ext/libpqxx-7.7.3/config-tests/thread_local.cxx b/ext/libpqxx-7.7.3/config-tests/thread_local.cxx new file mode 100644 index 000000000..b5a98b7b8 --- /dev/null +++ b/ext/libpqxx-7.7.3/config-tests/thread_local.cxx @@ -0,0 +1,15 @@ +// Test for std::to_string/std::from_string for floating-point types. +#include <iostream> +#include <sstream> + +int main(int argc, char **) +{ +#if defined(__MINGW32__) && defined(__GNUC__) +# if __GNUC__ < 11 || ((__GNUC__ == 11) && (__GNU_MINOR__ == 0)) +# error "On MinGW before gcc 11.1, thread_local breaks at run time." +# endif +#endif + thread_local std::stringstream s; + s << argc; + std::cout << s.str(); +} diff --git a/ext/libpqxx-7.7.3/config-tests/year_month_day.cxx b/ext/libpqxx-7.7.3/config-tests/year_month_day.cxx new file mode 100644 index 000000000..dbe543b55 --- /dev/null +++ b/ext/libpqxx-7.7.3/config-tests/year_month_day.cxx @@ -0,0 +1,7 @@ +// Test for std::chrono::year_month_day etc. +#include <chrono> + +int main() +{ + return int(std::chrono::year{1}); +} diff --git a/ext/libpqxx-7.7.3/config/Makefile.am b/ext/libpqxx-7.7.3/config/Makefile.am new file mode 100644 index 000000000..f34b3107d --- /dev/null +++ b/ext/libpqxx-7.7.3/config/Makefile.am @@ -0,0 +1,8 @@ +EXTRA_DIST=m4/Makefile.am sample-headers +MAINTAINERCLEANFILES=Makefile.in config.guess config.sub install-sh \ + ltmain.sh missing mkinstalldirs + +dist-hook: + find "${distdir}" -type d -name CVS -print0 | xargs -0 rm -rf + find "${distdir}" -type d -name .svn -print0 | xargs -0 rm -rf + diff --git a/ext/libpqxx-7.7.3/config/Makefile.in b/ext/libpqxx-7.7.3/config/Makefile.in new file mode 100644 index 000000000..bf7aa004a --- /dev/null +++ b/ext/libpqxx-7.7.3/config/Makefile.in @@ -0,0 +1,470 @@ +# Makefile.in generated by automake 1.16.4 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2021 Free Software Foundation, Inc.
+ +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = config +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/config/m4/libtool.m4 \ + $(top_srcdir)/config/m4/ltoptions.m4 \ + $(top_srcdir)/config/m4/ltsugar.m4 \ + $(top_srcdir)/config/m4/ltversion.m4 \ + $(top_srcdir)/config/m4/lt~obsolete.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) +mkinstalldirs = $(SHELL) $(top_srcdir)/config/mkinstalldirs +CONFIG_HEADER = $(top_builddir)/include/pqxx/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case 
$$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +am__DIST_COMMON = $(srcdir)/Makefile.in compile config.guess \ + config.sub install-sh ltmain.sh missing mkinstalldirs +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CSCOPE = @CSCOPE@ +CTAGS = @CTAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DOXYGEN = @DOXYGEN@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ETAGS = @ETAGS@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GREP = @GREP@ +HAVE_DOT = @HAVE_DOT@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MKDIR = @MKDIR@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PG_CONFIG = @PG_CONFIG@ +PKG_CONFIG = @PKG_CONFIG@ +POSTGRES_INCLUDE = @POSTGRES_INCLUDE@ +PQXXVERSION = @PQXXVERSION@ +PQXX_ABI = @PQXX_ABI@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir 
= @top_srcdir@ +with_postgres_lib = @with_postgres_lib@ +EXTRA_DIST = m4/Makefile.am sample-headers +MAINTAINERCLEANFILES = Makefile.in config.guess config.sub install-sh \ + ltmain.sh missing mkinstalldirs + +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu config/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu config/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am + +distdir-am: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$(top_distdir)" distdir="$(distdir)" \ + dist-hook +check-am: all-am +check: check-am +all-am: Makefile +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am check check-am clean clean-generic clean-libtool \ + cscopelist-am ctags-am dist-hook distclean distclean-generic \ + distclean-libtool distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags-am uninstall uninstall-am + +.PRECIOUS: Makefile + + +dist-hook: + find "${distdir}" -type d -name CVS -print0 | xargs -0 rm -rf + find "${distdir}" -type d -name .svn -print0 | xargs -0 rm -rf + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/ext/libpqxx-7.7.3/config/compile b/ext/libpqxx-7.7.3/config/compile new file mode 100755 index 000000000..df363c8fb --- /dev/null +++ b/ext/libpqxx-7.7.3/config/compile @@ -0,0 +1,348 @@ +#! 
/bin/sh +# Wrapper for compilers which do not understand '-c -o'. + +scriptversion=2018-03-07.03; # UTC + +# Copyright (C) 1999-2021 Free Software Foundation, Inc. +# Written by Tom Tromey . +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# This file is maintained in Automake, please report +# bugs to or send patches to +# . + +nl=' +' + +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent tools from complaining about whitespace usage. +IFS=" "" $nl" + +file_conv= + +# func_file_conv build_file lazy +# Convert a $build file to $host form and store it in $file +# Currently only supports Windows hosts. If the determined conversion +# type is listed in (the comma separated) LAZY, no conversion will +# take place. +func_file_conv () +{ + file=$1 + case $file in + / | /[!/]*) # absolute file, and not a UNC file + if test -z "$file_conv"; then + # lazily determine how to convert abs files + case `uname -s` in + MINGW*) + file_conv=mingw + ;; + CYGWIN* | MSYS*) + file_conv=cygwin + ;; + *) + file_conv=wine + ;; + esac + fi + case $file_conv/,$2, in + *,$file_conv,*) + ;; + mingw/*) + file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'` + ;; + cygwin/* | msys/*) + file=`cygpath -m "$file" || echo "$file"` + ;; + wine/*) + file=`winepath -w "$file" || echo "$file"` + ;; + esac + ;; + esac +} + +# func_cl_dashL linkdir +# Make cl look for libraries in LINKDIR +func_cl_dashL () +{ + func_file_conv "$1" + if test -z "$lib_path"; then + lib_path=$file + else + lib_path="$lib_path;$file" + fi + linker_opts="$linker_opts -LIBPATH:$file" +} + +# func_cl_dashl library +# Do a library search-path lookup for cl +func_cl_dashl () +{ + lib=$1 + found=no + save_IFS=$IFS + IFS=';' + for dir in $lib_path $LIB + do + IFS=$save_IFS + if $shared && test -f "$dir/$lib.dll.lib"; then + found=yes + lib=$dir/$lib.dll.lib + break + fi + if test -f "$dir/$lib.lib"; then + found=yes + lib=$dir/$lib.lib + break + fi + if test -f "$dir/lib$lib.a"; then + found=yes + lib=$dir/lib$lib.a + break + fi + done + IFS=$save_IFS + + if test "$found" != yes; then + lib=$lib.lib + fi +} + +# func_cl_wrapper cl arg... +# Adjust compile command to suit cl +func_cl_wrapper () +{ + # Assume a capable shell + lib_path= + shared=: + linker_opts= + for arg + do + if test -n "$eat"; then + eat= + else + case $1 in + -o) + # configure might choose to run compile as 'compile cc -o foo foo.c'. 
+ eat=1 + case $2 in + *.o | *.[oO][bB][jJ]) + func_file_conv "$2" + set x "$@" -Fo"$file" + shift + ;; + *) + func_file_conv "$2" + set x "$@" -Fe"$file" + shift + ;; + esac + ;; + -I) + eat=1 + func_file_conv "$2" mingw + set x "$@" -I"$file" + shift + ;; + -I*) + func_file_conv "${1#-I}" mingw + set x "$@" -I"$file" + shift + ;; + -l) + eat=1 + func_cl_dashl "$2" + set x "$@" "$lib" + shift + ;; + -l*) + func_cl_dashl "${1#-l}" + set x "$@" "$lib" + shift + ;; + -L) + eat=1 + func_cl_dashL "$2" + ;; + -L*) + func_cl_dashL "${1#-L}" + ;; + -static) + shared=false + ;; + -Wl,*) + arg=${1#-Wl,} + save_ifs="$IFS"; IFS=',' + for flag in $arg; do + IFS="$save_ifs" + linker_opts="$linker_opts $flag" + done + IFS="$save_ifs" + ;; + -Xlinker) + eat=1 + linker_opts="$linker_opts $2" + ;; + -*) + set x "$@" "$1" + shift + ;; + *.cc | *.CC | *.cxx | *.CXX | *.[cC]++) + func_file_conv "$1" + set x "$@" -Tp"$file" + shift + ;; + *.c | *.cpp | *.CPP | *.lib | *.LIB | *.Lib | *.OBJ | *.obj | *.[oO]) + func_file_conv "$1" mingw + set x "$@" "$file" + shift + ;; + *) + set x "$@" "$1" + shift + ;; + esac + fi + shift + done + if test -n "$linker_opts"; then + linker_opts="-link$linker_opts" + fi + exec "$@" $linker_opts + exit 1 +} + +eat= + +case $1 in + '') + echo "$0: No command. Try '$0 --help' for more information." 1>&2 + exit 1; + ;; + -h | --h*) + cat <<\EOF +Usage: compile [--help] [--version] PROGRAM [ARGS] + +Wrapper for compilers which do not understand '-c -o'. +Remove '-o dest.o' from ARGS, run PROGRAM with the remaining +arguments, and rename the output as expected. + +If you are trying to build a whole package this is not the +right script to run: please start by reading the file 'INSTALL'. + +Report bugs to . +EOF + exit $? + ;; + -v | --v*) + echo "compile $scriptversion" + exit $? + ;; + cl | *[/\\]cl | cl.exe | *[/\\]cl.exe | \ + icl | *[/\\]icl | icl.exe | *[/\\]icl.exe ) + func_cl_wrapper "$@" # Doesn't return... + ;; +esac + +ofile= +cfile= + +for arg +do + if test -n "$eat"; then + eat= + else + case $1 in + -o) + # configure might choose to run compile as 'compile cc -o foo foo.c'. + # So we strip '-o arg' only if arg is an object. + eat=1 + case $2 in + *.o | *.obj) + ofile=$2 + ;; + *) + set x "$@" -o "$2" + shift + ;; + esac + ;; + *.c) + cfile=$1 + set x "$@" "$1" + shift + ;; + *) + set x "$@" "$1" + shift + ;; + esac + fi + shift +done + +if test -z "$ofile" || test -z "$cfile"; then + # If no '-o' option was seen then we might have been invoked from a + # pattern rule where we don't need one. That is ok -- this is a + # normal compilation that the losing compiler can handle. If no + # '.c' file was seen then we are probably linking. That is also + # ok. + exec "$@" +fi + +# Name of file we expect compiler to create. +cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'` + +# Create the lock directory. +# Note: use '[/\\:.-]' here to ensure that we don't use the same name +# that we are using for the .o file. Also, base the name on the expected +# object file name, since that is what matters with a parallel build. +lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d +while true; do + if mkdir "$lockdir" >/dev/null 2>&1; then + break + fi + sleep 1 +done +# FIXME: race condition here if user kills between mkdir and trap. +trap "rmdir '$lockdir'; exit 1" 1 2 15 + +# Run the compile. +"$@" +ret=$? 
+ +if test -f "$cofile"; then + test "$cofile" = "$ofile" || mv "$cofile" "$ofile" +elif test -f "${cofile}bj"; then + test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile" +fi + +rmdir "$lockdir" +exit $ret + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC0" +# time-stamp-end: "; # UTC" +# End: diff --git a/ext/libpqxx-7.7.3/config/config.guess b/ext/libpqxx-7.7.3/config/config.guess new file mode 100755 index 000000000..2e9ad7fe8 --- /dev/null +++ b/ext/libpqxx-7.7.3/config/config.guess @@ -0,0 +1,1462 @@ +#! /bin/sh +# Attempt to guess a canonical system name. +# Copyright 1992-2016 Free Software Foundation, Inc. + +timestamp='2016-10-02' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). +# +# Originally written by Per Bothner; maintained since 2000 by Ben Elliston. +# +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess +# +# Please send patches to . + + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] + +Output the configuration name of the system \`$me' is run on. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.guess ($timestamp) + +Originally written by Per Bothner. +Copyright 1992-2016 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + * ) + break ;; + esac +done + +if test $# != 0; then + echo "$me: too many arguments$help" >&2 + exit 1 +fi + +trap 'exit 1' 1 2 15 + +# CC_FOR_BUILD -- compiler used by this script. Note that the use of a +# compiler to aid in system detection is discouraged as it requires +# temporary files to be created and, as you can see below, it is a +# headache to deal with in a portable fashion. 
+ +# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still +# use `HOST_CC' if defined, but it is deprecated. + +# Portable tmp directory creation inspired by the Autoconf team. + +set_cc_for_build=' +trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; +trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; +: ${TMPDIR=/tmp} ; + { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; +dummy=$tmp/dummy ; +tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; +case $CC_FOR_BUILD,$HOST_CC,$CC in + ,,) echo "int x;" > $dummy.c ; + for c in cc gcc c89 c99 ; do + if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then + CC_FOR_BUILD="$c"; break ; + fi ; + done ; + if test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found ; + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; +esac ; set_cc_for_build= ;' + +# This is needed to find uname on a Pyramid OSx when run in the BSD universe. +# (ghazi@noc.rutgers.edu 1994-08-24) +if (test -f /.attbin/uname) >/dev/null 2>&1 ; then + PATH=$PATH:/.attbin ; export PATH +fi + +UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown +UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown +UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown +UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown + +case "${UNAME_SYSTEM}" in +Linux|GNU|GNU/*) + # If the system lacks a compiler, then just pick glibc. + # We could probably try harder. + LIBC=gnu + + eval $set_cc_for_build + cat <<-EOF > $dummy.c + #include + #if defined(__UCLIBC__) + LIBC=uclibc + #elif defined(__dietlibc__) + LIBC=dietlibc + #else + LIBC=gnu + #endif + EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'` + ;; +esac + +# Note: order is significant - the case branches are not exclusive. + +case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in + *:NetBSD:*:*) + # NetBSD (nbsd) targets should (where applicable) match one or + # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, + # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently + # switched to ELF, *-*-netbsd* would select the old + # object file format. This provides both forward + # compatibility and a consistent mechanism for selecting the + # object file format. + # + # Note: NetBSD doesn't particularly care about the vendor + # portion of the name. We always set it to "unknown". + sysctl="sysctl -n hw.machine_arch" + UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \ + /sbin/$sysctl 2>/dev/null || \ + /usr/sbin/$sysctl 2>/dev/null || \ + echo unknown)` + case "${UNAME_MACHINE_ARCH}" in + armeb) machine=armeb-unknown ;; + arm*) machine=arm-unknown ;; + sh3el) machine=shl-unknown ;; + sh3eb) machine=sh-unknown ;; + sh5el) machine=sh5le-unknown ;; + earmv*) + arch=`echo ${UNAME_MACHINE_ARCH} | sed -e 's,^e\(armv[0-9]\).*$,\1,'` + endian=`echo ${UNAME_MACHINE_ARCH} | sed -ne 's,^.*\(eb\)$,\1,p'` + machine=${arch}${endian}-unknown + ;; + *) machine=${UNAME_MACHINE_ARCH}-unknown ;; + esac + # The Operating System including object format, if it has switched + # to ELF recently (or will in the future) and ABI. 
+ case "${UNAME_MACHINE_ARCH}" in + earm*) + os=netbsdelf + ;; + arm*|i386|m68k|ns32k|sh3*|sparc|vax) + eval $set_cc_for_build + if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ELF__ + then + # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). + # Return netbsd for either. FIX? + os=netbsd + else + os=netbsdelf + fi + ;; + *) + os=netbsd + ;; + esac + # Determine ABI tags. + case "${UNAME_MACHINE_ARCH}" in + earm*) + expr='s/^earmv[0-9]/-eabi/;s/eb$//' + abi=`echo ${UNAME_MACHINE_ARCH} | sed -e "$expr"` + ;; + esac + # The OS release + # Debian GNU/NetBSD machines have a different userland, and + # thus, need a distinct triplet. However, they do not need + # kernel version information, so it can be replaced with a + # suitable tag, in the style of linux-gnu. + case "${UNAME_VERSION}" in + Debian*) + release='-gnu' + ;; + *) + release=`echo ${UNAME_RELEASE} | sed -e 's/[-_].*//' | cut -d. -f1,2` + ;; + esac + # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: + # contains redundant information, the shorter form: + # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. + echo "${machine}-${os}${release}${abi}" + exit ;; + *:Bitrig:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} + exit ;; + *:OpenBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} + exit ;; + *:LibertyBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-libertybsd${UNAME_RELEASE} + exit ;; + *:ekkoBSD:*:*) + echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} + exit ;; + *:SolidBSD:*:*) + echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} + exit ;; + macppc:MirBSD:*:*) + echo powerpc-unknown-mirbsd${UNAME_RELEASE} + exit ;; + *:MirBSD:*:*) + echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} + exit ;; + *:Sortix:*:*) + echo ${UNAME_MACHINE}-unknown-sortix + exit ;; + alpha:OSF1:*:*) + case $UNAME_RELEASE in + *4.0) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` + ;; + *5.*) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` + ;; + esac + # According to Compaq, /usr/sbin/psrinfo has been available on + # OSF/1 and Tru64 systems produced since 1995. I hope that + # covers most systems running today. This code pipes the CPU + # types through head -n 1, so we only detect the type of CPU 0. + ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` + case "$ALPHA_CPU_TYPE" in + "EV4 (21064)") + UNAME_MACHINE=alpha ;; + "EV4.5 (21064)") + UNAME_MACHINE=alpha ;; + "LCA4 (21066/21068)") + UNAME_MACHINE=alpha ;; + "EV5 (21164)") + UNAME_MACHINE=alphaev5 ;; + "EV5.6 (21164A)") + UNAME_MACHINE=alphaev56 ;; + "EV5.6 (21164PC)") + UNAME_MACHINE=alphapca56 ;; + "EV5.7 (21164PC)") + UNAME_MACHINE=alphapca57 ;; + "EV6 (21264)") + UNAME_MACHINE=alphaev6 ;; + "EV6.7 (21264A)") + UNAME_MACHINE=alphaev67 ;; + "EV6.8CB (21264C)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8AL (21264B)") + UNAME_MACHINE=alphaev68 ;; + "EV6.8CX (21264D)") + UNAME_MACHINE=alphaev68 ;; + "EV6.9A (21264/EV69A)") + UNAME_MACHINE=alphaev69 ;; + "EV7 (21364)") + UNAME_MACHINE=alphaev7 ;; + "EV7.9 (21364A)") + UNAME_MACHINE=alphaev79 ;; + esac + # A Pn.n version is a patched version. + # A Vn.n version is a released version. + # A Tn.n version is a released field test version. + # A Xn.n version is an unreleased experimental baselevel. + # 1.2 uses "1.2" for uname -r. 
+ echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + # Reset EXIT trap before exiting to avoid spurious non-zero exit code. + exitcode=$? + trap '' 0 + exit $exitcode ;; + Alpha\ *:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # Should we change UNAME_MACHINE based on the output of uname instead + # of the specific Alpha model? + echo alpha-pc-interix + exit ;; + 21064:Windows_NT:50:3) + echo alpha-dec-winnt3.5 + exit ;; + Amiga*:UNIX_System_V:4.0:*) + echo m68k-unknown-sysv4 + exit ;; + *:[Aa]miga[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-amigaos + exit ;; + *:[Mm]orph[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-morphos + exit ;; + *:OS/390:*:*) + echo i370-ibm-openedition + exit ;; + *:z/VM:*:*) + echo s390-ibm-zvmoe + exit ;; + *:OS400:*:*) + echo powerpc-ibm-os400 + exit ;; + arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) + echo arm-acorn-riscix${UNAME_RELEASE} + exit ;; + arm*:riscos:*:*|arm*:RISCOS:*:*) + echo arm-unknown-riscos + exit ;; + SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) + echo hppa1.1-hitachi-hiuxmpp + exit ;; + Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) + # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. + if test "`(/bin/universe) 2>/dev/null`" = att ; then + echo pyramid-pyramid-sysv3 + else + echo pyramid-pyramid-bsd + fi + exit ;; + NILE*:*:*:dcosx) + echo pyramid-pyramid-svr4 + exit ;; + DRS?6000:unix:4.0:6*) + echo sparc-icl-nx6 + exit ;; + DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) + case `/usr/bin/uname -p` in + sparc) echo sparc-icl-nx7; exit ;; + esac ;; + s390x:SunOS:*:*) + echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4H:SunOS:5.*:*) + echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) + echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) + echo i386-pc-auroraux${UNAME_RELEASE} + exit ;; + i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) + eval $set_cc_for_build + SUN_ARCH=i386 + # If there is a compiler, see if it is configured for 64-bit objects. + # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. + # This test works for both compilers. + if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + SUN_ARCH=x86_64 + fi + fi + echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:6*:*) + # According to config.sub, this is the proper way to canonicalize + # SunOS6. Hard to guess exactly what SunOS6 will be like, but + # it's likely to be more like Solaris than SunOS4. + echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:*:*) + case "`/usr/bin/arch -k`" in + Series*|S4*) + UNAME_RELEASE=`uname -v` + ;; + esac + # Japanese Language versions have a version number like `4.1.3-JL'. 
+ echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` + exit ;; + sun3*:SunOS:*:*) + echo m68k-sun-sunos${UNAME_RELEASE} + exit ;; + sun*:*:4.2BSD:*) + UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` + test "x${UNAME_RELEASE}" = x && UNAME_RELEASE=3 + case "`/bin/arch`" in + sun3) + echo m68k-sun-sunos${UNAME_RELEASE} + ;; + sun4) + echo sparc-sun-sunos${UNAME_RELEASE} + ;; + esac + exit ;; + aushp:SunOS:*:*) + echo sparc-auspex-sunos${UNAME_RELEASE} + exit ;; + # The situation for MiNT is a little confusing. The machine name + # can be virtually everything (everything which is not + # "atarist" or "atariste" at least should have a processor + # > m68000). The system name ranges from "MiNT" over "FreeMiNT" + # to the lowercase version "mint" (or "freemint"). Finally + # the system name "TOS" denotes a system which is actually not + # MiNT. But MiNT is downward compatible to TOS, so this should + # be no problem. + atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) + echo m68k-milan-mint${UNAME_RELEASE} + exit ;; + hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) + echo m68k-hades-mint${UNAME_RELEASE} + exit ;; + *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) + echo m68k-unknown-mint${UNAME_RELEASE} + exit ;; + m68k:machten:*:*) + echo m68k-apple-machten${UNAME_RELEASE} + exit ;; + powerpc:machten:*:*) + echo powerpc-apple-machten${UNAME_RELEASE} + exit ;; + RISC*:Mach:*:*) + echo mips-dec-mach_bsd4.3 + exit ;; + RISC*:ULTRIX:*:*) + echo mips-dec-ultrix${UNAME_RELEASE} + exit ;; + VAX*:ULTRIX*:*:*) + echo vax-dec-ultrix${UNAME_RELEASE} + exit ;; + 2020:CLIX:*:* | 2430:CLIX:*:*) + echo clipper-intergraph-clix${UNAME_RELEASE} + exit ;; + mips:*:*:UMIPS | mips:*:*:RISCos) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c +#ifdef __cplusplus +#include /* for printf() prototype */ + int main (int argc, char *argv[]) { +#else + int main (argc, argv) int argc; char *argv[]; { +#endif + #if defined (host_mips) && defined (MIPSEB) + #if defined (SYSTYPE_SYSV) + printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_SVR4) + printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) + printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); + #endif + #endif + exit (-1); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && + dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && + SYSTEM_NAME=`$dummy $dummyarg` && + { echo "$SYSTEM_NAME"; exit; } + echo mips-mips-riscos${UNAME_RELEASE} + exit ;; + Motorola:PowerMAX_OS:*:*) + echo powerpc-motorola-powermax + exit ;; + Motorola:*:4.3:PL8-*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:Power_UNIX:*:*) + echo powerpc-harris-powerunix + exit ;; + m88k:CX/UX:7*:*) + echo m88k-harris-cxux7 + exit ;; + m88k:*:4*:R4*) + echo m88k-motorola-sysv4 + exit ;; + m88k:*:3*:R3*) + echo m88k-motorola-sysv3 + exit ;; + AViiON:dgux:*:*) + # DG/UX returns AViiON for all architectures + UNAME_PROCESSOR=`/usr/bin/uname -p` + if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 
] + then + if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ + [ ${TARGET_BINARY_INTERFACE}x = x ] + then + echo m88k-dg-dgux${UNAME_RELEASE} + else + echo m88k-dg-dguxbcs${UNAME_RELEASE} + fi + else + echo i586-dg-dgux${UNAME_RELEASE} + fi + exit ;; + M88*:DolphinOS:*:*) # DolphinOS (SVR3) + echo m88k-dolphin-sysv3 + exit ;; + M88*:*:R3*:*) + # Delta 88k system running SVR3 + echo m88k-motorola-sysv3 + exit ;; + XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) + echo m88k-tektronix-sysv3 + exit ;; + Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) + echo m68k-tektronix-bsd + exit ;; + *:IRIX*:*:*) + echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` + exit ;; + ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. + echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id + exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' + i*86:AIX:*:*) + echo i386-ibm-aix + exit ;; + ia64:AIX:*:*) + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} + exit ;; + *:AIX:2:3) + if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + + main() + { + if (!__power_pc()) + exit(1); + puts("powerpc-ibm-aix3.2.5"); + exit(0); + } +EOF + if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` + then + echo "$SYSTEM_NAME" + else + echo rs6000-ibm-aix3.2.5 + fi + elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then + echo rs6000-ibm-aix3.2.4 + else + echo rs6000-ibm-aix3.2 + fi + exit ;; + *:AIX:*:[4567]) + IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` + if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then + IBM_ARCH=rs6000 + else + IBM_ARCH=powerpc + fi + if [ -x /usr/bin/lslpp ] ; then + IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | + awk -F: '{ print $3 }' | sed s/[0-9]*$/0/` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${IBM_ARCH}-ibm-aix${IBM_REV} + exit ;; + *:AIX:*:*) + echo rs6000-ibm-aix + exit ;; + ibmrt:4.4BSD:*|romp-ibm:BSD:*) + echo romp-ibm-bsd4.4 + exit ;; + ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and + echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to + exit ;; # report: romp-ibm BSD 4.3 + *:BOSX:*:*) + echo rs6000-bull-bosx + exit ;; + DPX/2?00:B.O.S.:*:*) + echo m68k-bull-sysv3 + exit ;; + 9000/[34]??:4.3bsd:1.*:*) + echo m68k-hp-bsd + exit ;; + hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) + echo m68k-hp-bsd4.4 + exit ;; + 9000/[34678]??:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + case "${UNAME_MACHINE}" in + 9000/31? ) HP_ARCH=m68000 ;; + 9000/[34]?? 
) HP_ARCH=m68k ;; + 9000/[678][0-9][0-9]) + if [ -x /usr/bin/getconf ]; then + sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` + sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` + case "${sc_cpu_version}" in + 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 + 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 + 532) # CPU_PA_RISC2_0 + case "${sc_kernel_bits}" in + 32) HP_ARCH=hppa2.0n ;; + 64) HP_ARCH=hppa2.0w ;; + '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 + esac ;; + esac + fi + if [ "${HP_ARCH}" = "" ]; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + + #define _HPUX_SOURCE + #include + #include + + int main () + { + #if defined(_SC_KERNEL_BITS) + long bits = sysconf(_SC_KERNEL_BITS); + #endif + long cpu = sysconf (_SC_CPU_VERSION); + + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1"); break; + case CPU_PA_RISC2_0: + #if defined(_SC_KERNEL_BITS) + switch (bits) + { + case 64: puts ("hppa2.0w"); break; + case 32: puts ("hppa2.0n"); break; + default: puts ("hppa2.0"); break; + } break; + #else /* !defined(_SC_KERNEL_BITS) */ + puts ("hppa2.0"); break; + #endif + default: puts ("hppa1.0"); break; + } + exit (0); + } +EOF + (CCOPTS="" $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` + test -z "$HP_ARCH" && HP_ARCH=hppa + fi ;; + esac + if [ ${HP_ARCH} = hppa2.0w ] + then + eval $set_cc_for_build + + # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating + # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler + # generating 64-bit code. GNU and HP use different nomenclature: + # + # $ CC_FOR_BUILD=cc ./config.guess + # => hppa2.0w-hp-hpux11.23 + # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess + # => hppa64-hp-hpux11.23 + + if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | + grep -q __LP64__ + then + HP_ARCH=hppa2.0w + else + HP_ARCH=hppa64 + fi + fi + echo ${HP_ARCH}-hp-hpux${HPUX_REV} + exit ;; + ia64:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + echo ia64-hp-hpux${HPUX_REV} + exit ;; + 3050*:HI-UX:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include + int + main () + { + long cpu = sysconf (_SC_CPU_VERSION); + /* The order matters, because CPU_IS_HP_MC68K erroneously returns + true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct + results, however. 
*/ + if (CPU_IS_PA_RISC (cpu)) + { + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; + case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; + default: puts ("hppa-hitachi-hiuxwe2"); break; + } + } + else if (CPU_IS_HP_MC68K (cpu)) + puts ("m68k-hitachi-hiuxwe2"); + else puts ("unknown-hitachi-hiuxwe2"); + exit (0); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && + { echo "$SYSTEM_NAME"; exit; } + echo unknown-hitachi-hiuxwe2 + exit ;; + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) + echo hppa1.1-hp-bsd + exit ;; + 9000/8??:4.3bsd:*:*) + echo hppa1.0-hp-bsd + exit ;; + *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) + echo hppa1.0-hp-mpeix + exit ;; + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) + echo hppa1.1-hp-osf + exit ;; + hp8??:OSF1:*:*) + echo hppa1.0-hp-osf + exit ;; + i*86:OSF1:*:*) + if [ -x /usr/sbin/sysversion ] ; then + echo ${UNAME_MACHINE}-unknown-osf1mk + else + echo ${UNAME_MACHINE}-unknown-osf1 + fi + exit ;; + parisc*:Lites*:*:*) + echo hppa1.1-hp-lites + exit ;; + C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) + echo c1-convex-bsd + exit ;; + C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) + echo c34-convex-bsd + exit ;; + C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) + echo c38-convex-bsd + exit ;; + C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) + echo c4-convex-bsd + exit ;; + CRAY*Y-MP:*:*:*) + echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*[A-Z]90:*:*:*) + echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ + | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ + -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ + -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*TS:*:*:*) + echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*T3E:*:*:*) + echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*SV1:*:*:*) + echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + *:UNICOS/mp:*:*) + echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) + FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` + echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'` + echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) + echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} + exit ;; + sparc*:BSD/OS:*:*) + echo sparc-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:BSD/OS:*:*) + echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:FreeBSD:*:*) + UNAME_PROCESSOR=`/usr/bin/uname -p` + case ${UNAME_PROCESSOR} in + amd64) + echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + *) + echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + esac + exit ;; + i*:CYGWIN*:*) + echo ${UNAME_MACHINE}-pc-cygwin + exit ;; + *:MINGW64*:*) 
+ echo ${UNAME_MACHINE}-pc-mingw64 + exit ;; + *:MINGW*:*) + echo ${UNAME_MACHINE}-pc-mingw32 + exit ;; + *:MSYS*:*) + echo ${UNAME_MACHINE}-pc-msys + exit ;; + i*:windows32*:*) + # uname -m includes "-pc" on this system. + echo ${UNAME_MACHINE}-mingw32 + exit ;; + i*:PW*:*) + echo ${UNAME_MACHINE}-pc-pw32 + exit ;; + *:Interix*:*) + case ${UNAME_MACHINE} in + x86) + echo i586-pc-interix${UNAME_RELEASE} + exit ;; + authenticamd | genuineintel | EM64T) + echo x86_64-unknown-interix${UNAME_RELEASE} + exit ;; + IA64) + echo ia64-unknown-interix${UNAME_RELEASE} + exit ;; + esac ;; + [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) + echo i${UNAME_MACHINE}-pc-mks + exit ;; + 8664:Windows_NT:*) + echo x86_64-pc-mks + exit ;; + i*:Windows_NT*:* | Pentium*:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we + # UNAME_MACHINE based on the output of uname instead of i386? + echo i586-pc-interix + exit ;; + i*:UWIN*:*) + echo ${UNAME_MACHINE}-pc-uwin + exit ;; + amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) + echo x86_64-unknown-cygwin + exit ;; + p*:CYGWIN*:*) + echo powerpcle-unknown-cygwin + exit ;; + prep*:SunOS:5.*:*) + echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + *:GNU:*:*) + # the GNU system + echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` + exit ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} + exit ;; + i*86:Minix:*:*) + echo ${UNAME_MACHINE}-pc-minix + exit ;; + aarch64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + aarch64_be:Linux:*:*) + UNAME_MACHINE=aarch64_be + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + alpha:Linux:*:*) + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in + EV5) UNAME_MACHINE=alphaev5 ;; + EV56) UNAME_MACHINE=alphaev56 ;; + PCA56) UNAME_MACHINE=alphapca56 ;; + PCA57) UNAME_MACHINE=alphapca56 ;; + EV6) UNAME_MACHINE=alphaev6 ;; + EV67) UNAME_MACHINE=alphaev67 ;; + EV68*) UNAME_MACHINE=alphaev68 ;; + esac + objdump --private-headers /bin/sh | grep -q ld.so.1 + if test "$?" 
= 0 ; then LIBC=gnulibc1 ; fi + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + arc:Linux:*:* | arceb:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + arm*:Linux:*:*) + eval $set_cc_for_build + if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_EABI__ + then + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + else + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi + else + echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf + fi + fi + exit ;; + avr32*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + cris:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-${LIBC} + exit ;; + crisv32:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-${LIBC} + exit ;; + e2k:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + frv:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + hexagon:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + i*86:Linux:*:*) + echo ${UNAME_MACHINE}-pc-linux-${LIBC} + exit ;; + ia64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + k1om:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + m32r*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + m68*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + mips:Linux:*:* | mips64:Linux:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #undef CPU + #undef ${UNAME_MACHINE} + #undef ${UNAME_MACHINE}el + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + CPU=${UNAME_MACHINE}el + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + CPU=${UNAME_MACHINE} + #else + CPU= + #endif + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` + test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } + ;; + mips64el:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + openrisc*:Linux:*:*) + echo or1k-unknown-linux-${LIBC} + exit ;; + or32:Linux:*:* | or1k*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + padre:Linux:*:*) + echo sparc-unknown-linux-${LIBC} + exit ;; + parisc64:Linux:*:* | hppa64:Linux:*:*) + echo hppa64-unknown-linux-${LIBC} + exit ;; + parisc:Linux:*:* | hppa:Linux:*:*) + # Look for CPU level + case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in + PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; + PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; + *) echo hppa-unknown-linux-${LIBC} ;; + esac + exit ;; + ppc64:Linux:*:*) + echo powerpc64-unknown-linux-${LIBC} + exit ;; + ppc:Linux:*:*) + echo powerpc-unknown-linux-${LIBC} + exit ;; + ppc64le:Linux:*:*) + echo powerpc64le-unknown-linux-${LIBC} + exit ;; + ppcle:Linux:*:*) + echo powerpcle-unknown-linux-${LIBC} + exit ;; + riscv32:Linux:*:* | riscv64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + s390:Linux:*:* | s390x:Linux:*:*) + echo ${UNAME_MACHINE}-ibm-linux-${LIBC} + exit ;; + sh64*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + sh*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + sparc:Linux:*:* | sparc64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + tile*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + vax:Linux:*:*) + echo ${UNAME_MACHINE}-dec-linux-${LIBC} + exit ;; + x86_64:Linux:*:*) + echo ${UNAME_MACHINE}-pc-linux-${LIBC} + exit ;; + 
xtensa*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-${LIBC} + exit ;; + i*86:DYNIX/ptx:4*:*) + # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. + # earlier versions are messed up and put the nodename in both + # sysname and nodename. + echo i386-sequent-sysv4 + exit ;; + i*86:UNIX_SV:4.2MP:2.*) + # Unixware is an offshoot of SVR4, but it has its own version + # number series starting with 2... + # I am not positive that other SVR4 systems won't match this, + # I just have to hope. -- rms. + # Use sysv4.2uw... so that sysv4* matches it. + echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} + exit ;; + i*86:OS/2:*:*) + # If we were able to find `uname', then EMX Unix compatibility + # is probably installed. + echo ${UNAME_MACHINE}-pc-os2-emx + exit ;; + i*86:XTS-300:*:STOP) + echo ${UNAME_MACHINE}-unknown-stop + exit ;; + i*86:atheos:*:*) + echo ${UNAME_MACHINE}-unknown-atheos + exit ;; + i*86:syllable:*:*) + echo ${UNAME_MACHINE}-pc-syllable + exit ;; + i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) + echo i386-unknown-lynxos${UNAME_RELEASE} + exit ;; + i*86:*DOS:*:*) + echo ${UNAME_MACHINE}-pc-msdosdjgpp + exit ;; + i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) + UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` + if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then + echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} + else + echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} + fi + exit ;; + i*86:*:5:[678]*) + # UnixWare 7.x, OpenUNIX and OpenServer 6. + case `/bin/uname -X | grep "^Machine"` in + *486*) UNAME_MACHINE=i486 ;; + *Pentium) UNAME_MACHINE=i586 ;; + *Pent*|*Celeron) UNAME_MACHINE=i686 ;; + esac + echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} + exit ;; + i*86:*:3.2:*) + if test -f /usr/options/cb.name; then + UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then + UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` + (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 + (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ + && UNAME_MACHINE=i586 + (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ + && UNAME_MACHINE=i686 + (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ + && UNAME_MACHINE=i686 + echo ${UNAME_MACHINE}-pc-sco$UNAME_REL + else + echo ${UNAME_MACHINE}-pc-sysv32 + fi + exit ;; + pc:*:*:*) + # Left here for compatibility: + # uname -m prints for DJGPP always 'pc', but it prints nothing about + # the processor, so we play safe by assuming i586. + # Note: whatever this is, it MUST be the same as what config.sub + # prints for the "djgpp" host, or else GDB configure will decide that + # this is a cross-build. + echo i586-pc-msdosdjgpp + exit ;; + Intel:Mach:3*:*) + echo i386-pc-mach3 + exit ;; + paragon:*:*:*) + echo i860-intel-osf1 + exit ;; + i860:*:4.*:*) # i860-SVR4 + if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then + echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 + else # Add other i860-SVR4 vendors below as they are discovered. 
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 + fi + exit ;; + mini*:CTIX:SYS*5:*) + # "miniframe" + echo m68010-convergent-sysv + exit ;; + mc68k:UNIX:SYSTEM5:3.51m) + echo m68k-convergent-sysv + exit ;; + M680?0:D-NIX:5.3:*) + echo m68k-diab-dnix + exit ;; + M68*:*:R3V[5678]*:*) + test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) + OS_REL='' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4; exit; } ;; + NCR*:*:4.2:* | MPRAS*:*:4.2:*) + OS_REL='.3' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) + echo m68k-unknown-lynxos${UNAME_RELEASE} + exit ;; + mc68030:UNIX_System_V:4.*:*) + echo m68k-atari-sysv4 + exit ;; + TSUNAMI:LynxOS:2.*:*) + echo sparc-unknown-lynxos${UNAME_RELEASE} + exit ;; + rs6000:LynxOS:2.*:*) + echo rs6000-unknown-lynxos${UNAME_RELEASE} + exit ;; + PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) + echo powerpc-unknown-lynxos${UNAME_RELEASE} + exit ;; + SM[BE]S:UNIX_SV:*:*) + echo mips-dde-sysv${UNAME_RELEASE} + exit ;; + RM*:ReliantUNIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + RM*:SINIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + *:SINIX-*:*:*) + if uname -p 2>/dev/null >/dev/null ; then + UNAME_MACHINE=`(uname -p) 2>/dev/null` + echo ${UNAME_MACHINE}-sni-sysv4 + else + echo ns32k-sni-sysv + fi + exit ;; + PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + # says + echo i586-unisys-sysv4 + exit ;; + *:UNIX_System_V:4*:FTX*) + # From Gerald Hewes . + # How about differentiating between stratus architectures? -djm + echo hppa1.1-stratus-sysv4 + exit ;; + *:*:*:FTX*) + # From seanf@swdc.stratus.com. + echo i860-stratus-sysv4 + exit ;; + i*86:VOS:*:*) + # From Paul.Green@stratus.com. + echo ${UNAME_MACHINE}-stratus-vos + exit ;; + *:VOS:*:*) + # From Paul.Green@stratus.com. + echo hppa1.1-stratus-vos + exit ;; + mc68*:A/UX:*:*) + echo m68k-apple-aux${UNAME_RELEASE} + exit ;; + news*:NEWS-OS:6*:*) + echo mips-sony-newsos6 + exit ;; + R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) + if [ -d /usr/nec ]; then + echo mips-nec-sysv${UNAME_RELEASE} + else + echo mips-unknown-sysv${UNAME_RELEASE} + fi + exit ;; + BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. + echo powerpc-be-beos + exit ;; + BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. + echo powerpc-apple-beos + exit ;; + BePC:BeOS:*:*) # BeOS running on Intel PC compatible. + echo i586-pc-beos + exit ;; + BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
+ echo i586-pc-haiku + exit ;; + x86_64:Haiku:*:*) + echo x86_64-unknown-haiku + exit ;; + SX-4:SUPER-UX:*:*) + echo sx4-nec-superux${UNAME_RELEASE} + exit ;; + SX-5:SUPER-UX:*:*) + echo sx5-nec-superux${UNAME_RELEASE} + exit ;; + SX-6:SUPER-UX:*:*) + echo sx6-nec-superux${UNAME_RELEASE} + exit ;; + SX-7:SUPER-UX:*:*) + echo sx7-nec-superux${UNAME_RELEASE} + exit ;; + SX-8:SUPER-UX:*:*) + echo sx8-nec-superux${UNAME_RELEASE} + exit ;; + SX-8R:SUPER-UX:*:*) + echo sx8r-nec-superux${UNAME_RELEASE} + exit ;; + SX-ACE:SUPER-UX:*:*) + echo sxace-nec-superux${UNAME_RELEASE} + exit ;; + Power*:Rhapsody:*:*) + echo powerpc-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Rhapsody:*:*) + echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Darwin:*:*) + UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown + eval $set_cc_for_build + if test "$UNAME_PROCESSOR" = unknown ; then + UNAME_PROCESSOR=powerpc + fi + if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then + if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + case $UNAME_PROCESSOR in + i386) UNAME_PROCESSOR=x86_64 ;; + powerpc) UNAME_PROCESSOR=powerpc64 ;; + esac + fi + fi + elif test "$UNAME_PROCESSOR" = i386 ; then + # Avoid executing cc on OS X 10.9, as it ships with a stub + # that puts up a graphical alert prompting to install + # developer tools. Any system running Mac OS X 10.7 or + # later (Darwin 11 and later) is required to have a 64-bit + # processor. This is not true of the ARM version of Darwin + # that Apple uses in portable devices. + UNAME_PROCESSOR=x86_64 + fi + echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} + exit ;; + *:procnto*:*:* | *:QNX:[0123456789]*:*) + UNAME_PROCESSOR=`uname -p` + if test "$UNAME_PROCESSOR" = x86; then + UNAME_PROCESSOR=i386 + UNAME_MACHINE=pc + fi + echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} + exit ;; + *:QNX:*:4*) + echo i386-pc-qnx + exit ;; + NEO-?:NONSTOP_KERNEL:*:*) + echo neo-tandem-nsk${UNAME_RELEASE} + exit ;; + NSE-*:NONSTOP_KERNEL:*:*) + echo nse-tandem-nsk${UNAME_RELEASE} + exit ;; + NSR-?:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk${UNAME_RELEASE} + exit ;; + *:NonStop-UX:*:*) + echo mips-compaq-nonstopux + exit ;; + BS2000:POSIX*:*:*) + echo bs2000-siemens-sysv + exit ;; + DS/*:UNIX_System_V:*:*) + echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} + exit ;; + *:Plan9:*:*) + # "uname -m" is not consistent, so use $cputype instead. 386 + # is converted to i386 for consistency with other x86 + # operating systems. 
+ if test "$cputype" = 386; then + UNAME_MACHINE=i386 + else + UNAME_MACHINE="$cputype" + fi + echo ${UNAME_MACHINE}-unknown-plan9 + exit ;; + *:TOPS-10:*:*) + echo pdp10-unknown-tops10 + exit ;; + *:TENEX:*:*) + echo pdp10-unknown-tenex + exit ;; + KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) + echo pdp10-dec-tops20 + exit ;; + XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) + echo pdp10-xkl-tops20 + exit ;; + *:TOPS-20:*:*) + echo pdp10-unknown-tops20 + exit ;; + *:ITS:*:*) + echo pdp10-unknown-its + exit ;; + SEI:*:*:SEIUX) + echo mips-sei-seiux${UNAME_RELEASE} + exit ;; + *:DragonFly:*:*) + echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit ;; + *:*VMS:*:*) + UNAME_MACHINE=`(uname -p) 2>/dev/null` + case "${UNAME_MACHINE}" in + A*) echo alpha-dec-vms ; exit ;; + I*) echo ia64-dec-vms ; exit ;; + V*) echo vax-dec-vms ; exit ;; + esac ;; + *:XENIX:*:SysV) + echo i386-pc-xenix + exit ;; + i*86:skyos:*:*) + echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE} | sed -e 's/ .*$//'` + exit ;; + i*86:rdos:*:*) + echo ${UNAME_MACHINE}-pc-rdos + exit ;; + i*86:AROS:*:*) + echo ${UNAME_MACHINE}-pc-aros + exit ;; + x86_64:VMkernel:*:*) + echo ${UNAME_MACHINE}-unknown-esx + exit ;; + amd64:Isilon\ OneFS:*:*) + echo x86_64-unknown-onefs + exit ;; +esac + +cat >&2 </dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null` + +hostinfo = `(hostinfo) 2>/dev/null` +/bin/universe = `(/bin/universe) 2>/dev/null` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` +/bin/arch = `(/bin/arch) 2>/dev/null` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` + +UNAME_MACHINE = ${UNAME_MACHINE} +UNAME_RELEASE = ${UNAME_RELEASE} +UNAME_SYSTEM = ${UNAME_SYSTEM} +UNAME_VERSION = ${UNAME_VERSION} +EOF + +exit 1 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/ext/libpqxx-7.7.3/config/config.sub b/ext/libpqxx-7.7.3/config/config.sub new file mode 100755 index 000000000..dd2ca93c6 --- /dev/null +++ b/ext/libpqxx-7.7.3/config/config.sub @@ -0,0 +1,1825 @@ +#! /bin/sh +# Configuration validation subroutine script. +# Copyright 1992-2016 Free Software Foundation, Inc. + +timestamp='2016-11-04' + +# This file is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see . +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that +# program. 
This Exception is an additional permission under section 7 +# of the GNU General Public License, version 3 ("GPLv3"). + + +# Please send patches to . +# +# Configuration subroutine to validate and canonicalize a configuration type. +# Supply the specified configuration type as an argument. +# If it is invalid, we print an error message on stderr and exit with code 1. +# Otherwise, we print the canonical config type on stdout and succeed. + +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub + +# This file is supposed to be the same for all GNU packages +# and recognize all the CPU types, system types and aliases +# that are meaningful with *any* GNU software. +# Each package is responsible for reporting which valid configurations +# it does not support. The user should be able to distinguish +# a failure to support a valid configuration from a meaningless +# configuration. + +# The goal of this file is to map all the various variations of a given +# machine specification into a single specification in the form: +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or in some cases, the newer four-part form: +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# It is wrong to echo any other type of specification. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS + +Canonicalize a configuration name. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.sub ($timestamp) + +Copyright 1992-2016 Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" + exit 1 ;; + + *local*) + # First pass through any local machine types. + echo $1 + exit ;; + + * ) + break ;; + esac +done + +case $# in + 0) echo "$me: missing argument$help" >&2 + exit 1;; + 1) ;; + *) echo "$me: too many arguments$help" >&2 + exit 1;; +esac + +# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). +# Here we must recognize all the valid KERNEL-OS combinations. +maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` +case $maybe_os in + nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ + linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ + knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \ + kopensolaris*-gnu* | cloudabi*-eabi* | \ + storm-chaos* | os2-emx* | rtmk-nova*) + os=-$maybe_os + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` + ;; + android-linux) + os=-linux-android + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown + ;; + *) + basic_machine=`echo $1 | sed 's/-[^-]*$//'` + if [ $basic_machine != $1 ] + then os=`echo $1 | sed 's/.*-/-/'` + else os=; fi + ;; +esac + +### Let's recognize common machines as not being operating systems so +### that things like config.sub decstation-3100 work. 
We also +### recognize some manufacturers as not being operating systems, so we +### can provide default operating systems below. +case $os in + -sun*os*) + # Prevent following clause from handling this invalid input. + ;; + -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ + -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ + -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ + -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ + -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ + -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ + -apple | -axis | -knuth | -cray | -microblaze*) + os= + basic_machine=$1 + ;; + -bluegene*) + os=-cnk + ;; + -sim | -cisco | -oki | -wec | -winbond) + os= + basic_machine=$1 + ;; + -scout) + ;; + -wrs) + os=-vxworks + basic_machine=$1 + ;; + -chorusos*) + os=-chorusos + basic_machine=$1 + ;; + -chorusrdb) + os=-chorusrdb + basic_machine=$1 + ;; + -hiux*) + os=-hiuxwe2 + ;; + -sco6) + os=-sco5v6 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5) + os=-sco3.2v5 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco4) + os=-sco3.2v4 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2.[4-9]*) + os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2v[4-9]*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5v6*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco*) + os=-sco3.2v2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -udk*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -isc) + os=-isc2.2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -clix*) + basic_machine=clipper-intergraph + ;; + -isc*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -lynx*178) + os=-lynxos178 + ;; + -lynx*5) + os=-lynxos5 + ;; + -lynx*) + os=-lynxos + ;; + -ptx*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` + ;; + -windowsnt*) + os=`echo $os | sed -e 's/windowsnt/winnt/'` + ;; + -psos*) + os=-psos + ;; + -mint | -mint[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; +esac + +# Decode aliases for certain CPU-COMPANY combinations. +case $basic_machine in + # Recognize the basic CPU types without company name. + # Some are omitted here because they have special meanings below. 
+ 1750a | 580 \ + | a29k \ + | aarch64 | aarch64_be \ + | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ + | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ + | am33_2.0 \ + | arc | arceb \ + | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \ + | avr | avr32 \ + | ba \ + | be32 | be64 \ + | bfin \ + | c4x | c8051 | clipper \ + | d10v | d30v | dlx | dsp16xx \ + | e2k | epiphany \ + | fido | fr30 | frv | ft32 \ + | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ + | hexagon \ + | i370 | i860 | i960 | ia64 \ + | ip2k | iq2000 \ + | k1om \ + | le32 | le64 \ + | lm32 \ + | m32c | m32r | m32rle | m68000 | m68k | m88k \ + | maxq | mb | microblaze | microblazeel | mcore | mep | metag \ + | mips | mipsbe | mipseb | mipsel | mipsle \ + | mips16 \ + | mips64 | mips64el \ + | mips64octeon | mips64octeonel \ + | mips64orion | mips64orionel \ + | mips64r5900 | mips64r5900el \ + | mips64vr | mips64vrel \ + | mips64vr4100 | mips64vr4100el \ + | mips64vr4300 | mips64vr4300el \ + | mips64vr5000 | mips64vr5000el \ + | mips64vr5900 | mips64vr5900el \ + | mipsisa32 | mipsisa32el \ + | mipsisa32r2 | mipsisa32r2el \ + | mipsisa32r6 | mipsisa32r6el \ + | mipsisa64 | mipsisa64el \ + | mipsisa64r2 | mipsisa64r2el \ + | mipsisa64r6 | mipsisa64r6el \ + | mipsisa64sb1 | mipsisa64sb1el \ + | mipsisa64sr71k | mipsisa64sr71kel \ + | mipsr5900 | mipsr5900el \ + | mipstx39 | mipstx39el \ + | mn10200 | mn10300 \ + | moxie \ + | mt \ + | msp430 \ + | nds32 | nds32le | nds32be \ + | nios | nios2 | nios2eb | nios2el \ + | ns16k | ns32k \ + | open8 | or1k | or1knd | or32 \ + | pdp10 | pdp11 | pj | pjl \ + | powerpc | powerpc64 | powerpc64le | powerpcle \ + | pru \ + | pyramid \ + | riscv32 | riscv64 \ + | rl78 | rx \ + | score \ + | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ + | sh64 | sh64le \ + | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ + | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ + | spu \ + | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ + | ubicom32 \ + | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ + | visium \ + | we32k \ + | x86 | xc16x | xstormy16 | xtensa \ + | z8k | z80) + basic_machine=$basic_machine-unknown + ;; + c54x) + basic_machine=tic54x-unknown + ;; + c55x) + basic_machine=tic55x-unknown + ;; + c6x) + basic_machine=tic6x-unknown + ;; + leon|leon[3-9]) + basic_machine=sparc-$basic_machine + ;; + m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip) + basic_machine=$basic_machine-unknown + os=-none + ;; + m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) + ;; + ms1) + basic_machine=mt-unknown + ;; + + strongarm | thumb | xscale) + basic_machine=arm-unknown + ;; + xgate) + basic_machine=$basic_machine-unknown + os=-none + ;; + xscaleeb) + basic_machine=armeb-unknown + ;; + + xscaleel) + basic_machine=armel-unknown + ;; + + # We use `pc' rather than `unknown' + # because (1) that's what they normally are, and + # (2) the word "unknown" tends to confuse beginning users. + i*86 | x86_64) + basic_machine=$basic_machine-pc + ;; + # Object if more than one company name word. + *-*-*) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; + # Recognize the basic CPU types with company name. 
+ 580-* \ + | a29k-* \ + | aarch64-* | aarch64_be-* \ + | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ + | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ + | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \ + | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ + | avr-* | avr32-* \ + | ba-* \ + | be32-* | be64-* \ + | bfin-* | bs2000-* \ + | c[123]* | c30-* | [cjt]90-* | c4x-* \ + | c8051-* | clipper-* | craynv-* | cydra-* \ + | d10v-* | d30v-* | dlx-* \ + | e2k-* | elxsi-* \ + | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ + | h8300-* | h8500-* \ + | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ + | hexagon-* \ + | i*86-* | i860-* | i960-* | ia64-* \ + | ip2k-* | iq2000-* \ + | k1om-* \ + | le32-* | le64-* \ + | lm32-* \ + | m32c-* | m32r-* | m32rle-* \ + | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ + | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \ + | microblaze-* | microblazeel-* \ + | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ + | mips16-* \ + | mips64-* | mips64el-* \ + | mips64octeon-* | mips64octeonel-* \ + | mips64orion-* | mips64orionel-* \ + | mips64r5900-* | mips64r5900el-* \ + | mips64vr-* | mips64vrel-* \ + | mips64vr4100-* | mips64vr4100el-* \ + | mips64vr4300-* | mips64vr4300el-* \ + | mips64vr5000-* | mips64vr5000el-* \ + | mips64vr5900-* | mips64vr5900el-* \ + | mipsisa32-* | mipsisa32el-* \ + | mipsisa32r2-* | mipsisa32r2el-* \ + | mipsisa32r6-* | mipsisa32r6el-* \ + | mipsisa64-* | mipsisa64el-* \ + | mipsisa64r2-* | mipsisa64r2el-* \ + | mipsisa64r6-* | mipsisa64r6el-* \ + | mipsisa64sb1-* | mipsisa64sb1el-* \ + | mipsisa64sr71k-* | mipsisa64sr71kel-* \ + | mipsr5900-* | mipsr5900el-* \ + | mipstx39-* | mipstx39el-* \ + | mmix-* \ + | mt-* \ + | msp430-* \ + | nds32-* | nds32le-* | nds32be-* \ + | nios-* | nios2-* | nios2eb-* | nios2el-* \ + | none-* | np1-* | ns16k-* | ns32k-* \ + | open8-* \ + | or1k*-* \ + | orion-* \ + | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ + | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ + | pru-* \ + | pyramid-* \ + | riscv32-* | riscv64-* \ + | rl78-* | romp-* | rs6000-* | rx-* \ + | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ + | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ + | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ + | sparclite-* \ + | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \ + | tahoe-* \ + | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ + | tile*-* \ + | tron-* \ + | ubicom32-* \ + | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ + | vax-* \ + | visium-* \ + | we32k-* \ + | x86-* | x86_64-* | xc16x-* | xps100-* \ + | xstormy16-* | xtensa*-* \ + | ymp-* \ + | z8k-* | z80-*) + ;; + # Recognize the basic CPU types without company name, with glob match. + xtensa*) + basic_machine=$basic_machine-unknown + ;; + # Recognize the various machine names and aliases which stand + # for a CPU type and a company and sometimes even an OS. 
+ 386bsd) + basic_machine=i386-unknown + os=-bsd + ;; + 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) + basic_machine=m68000-att + ;; + 3b*) + basic_machine=we32k-att + ;; + a29khif) + basic_machine=a29k-amd + os=-udi + ;; + abacus) + basic_machine=abacus-unknown + ;; + adobe68k) + basic_machine=m68010-adobe + os=-scout + ;; + alliant | fx80) + basic_machine=fx80-alliant + ;; + altos | altos3068) + basic_machine=m68k-altos + ;; + am29k) + basic_machine=a29k-none + os=-bsd + ;; + amd64) + basic_machine=x86_64-pc + ;; + amd64-*) + basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + amdahl) + basic_machine=580-amdahl + os=-sysv + ;; + amiga | amiga-*) + basic_machine=m68k-unknown + ;; + amigaos | amigados) + basic_machine=m68k-unknown + os=-amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + os=-sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + os=-sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + os=-bsd + ;; + aros) + basic_machine=i386-pc + os=-aros + ;; + asmjs) + basic_machine=asmjs-unknown + ;; + aux) + basic_machine=m68k-apple + os=-aux + ;; + balance) + basic_machine=ns32k-sequent + os=-dynix + ;; + blackfin) + basic_machine=bfin-unknown + os=-linux + ;; + blackfin-*) + basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + bluegene*) + basic_machine=powerpc-ibm + os=-cnk + ;; + c54x-*) + basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c55x-*) + basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c6x-*) + basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c90) + basic_machine=c90-cray + os=-unicos + ;; + cegcc) + basic_machine=arm-unknown + os=-cegcc + ;; + convex-c1) + basic_machine=c1-convex + os=-bsd + ;; + convex-c2) + basic_machine=c2-convex + os=-bsd + ;; + convex-c32) + basic_machine=c32-convex + os=-bsd + ;; + convex-c34) + basic_machine=c34-convex + os=-bsd + ;; + convex-c38) + basic_machine=c38-convex + os=-bsd + ;; + cray | j90) + basic_machine=j90-cray + os=-unicos + ;; + craynv) + basic_machine=craynv-cray + os=-unicosmp + ;; + cr16 | cr16-*) + basic_machine=cr16-unknown + os=-elf + ;; + crds | unos) + basic_machine=m68k-crds + ;; + crisv32 | crisv32-* | etraxfs*) + basic_machine=crisv32-axis + ;; + cris | cris-* | etrax*) + basic_machine=cris-axis + ;; + crx) + basic_machine=crx-unknown + os=-elf + ;; + da30 | da30-*) + basic_machine=m68k-da30 + ;; + decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) + basic_machine=mips-dec + ;; + decsystem10* | dec10*) + basic_machine=pdp10-dec + os=-tops10 + ;; + decsystem20* | dec20*) + basic_machine=pdp10-dec + os=-tops20 + ;; + delta | 3300 | motorola-3300 | motorola-delta \ + | 3300-motorola | delta-motorola) + basic_machine=m68k-motorola + ;; + delta88) + basic_machine=m88k-motorola + os=-sysv3 + ;; + dicos) + basic_machine=i686-pc + os=-dicos + ;; + djgpp) + basic_machine=i586-pc + os=-msdosdjgpp + ;; + dpx20 | dpx20-*) + basic_machine=rs6000-bull + os=-bosx + ;; + dpx2* | dpx2*-bull) + basic_machine=m68k-bull + os=-sysv3 + ;; + e500v[12]) + basic_machine=powerpc-unknown + os=$os"spe" + ;; + e500v[12]-*) + basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + os=$os"spe" + ;; + ebmon29k) + basic_machine=a29k-amd + os=-ebmon + ;; + elxsi) + basic_machine=elxsi-elxsi + os=-bsd + ;; + encore | umax | mmax) + basic_machine=ns32k-encore + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + os=-ose + ;; + fx2800) + basic_machine=i860-alliant + 
;; + genix) + basic_machine=ns32k-ns + ;; + gmicro) + basic_machine=tron-gmicro + os=-sysv + ;; + go32) + basic_machine=i386-pc + os=-go32 + ;; + h3050r* | hiux*) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + h8300hms) + basic_machine=h8300-hitachi + os=-hms + ;; + h8300xray) + basic_machine=h8300-hitachi + os=-xray + ;; + h8500hms) + basic_machine=h8500-hitachi + os=-hms + ;; + harris) + basic_machine=m88k-harris + os=-sysv3 + ;; + hp300-*) + basic_machine=m68k-hp + ;; + hp300bsd) + basic_machine=m68k-hp + os=-bsd + ;; + hp300hpux) + basic_machine=m68k-hp + os=-hpux + ;; + hp3k9[0-9][0-9] | hp9[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k2[0-9][0-9] | hp9k31[0-9]) + basic_machine=m68000-hp + ;; + hp9k3[2-9][0-9]) + basic_machine=m68k-hp + ;; + hp9k6[0-9][0-9] | hp6[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k7[0-79][0-9] | hp7[0-79][0-9]) + basic_machine=hppa1.1-hp + ;; + hp9k78[0-9] | hp78[0-9]) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][13679] | hp8[0-9][13679]) + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][0-9] | hp8[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hppa-next) + os=-nextstep3 + ;; + hppaosf) + basic_machine=hppa1.1-hp + os=-osf + ;; + hppro) + basic_machine=hppa1.1-hp + os=-proelf + ;; + i370-ibm* | ibm*) + basic_machine=i370-ibm + ;; + i*86v32) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv32 + ;; + i*86v4*) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv4 + ;; + i*86v) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv + ;; + i*86sol2) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-solaris2 + ;; + i386mach) + basic_machine=i386-mach + os=-mach + ;; + i386-vsta | vsta) + basic_machine=i386-unknown + os=-vsta + ;; + iris | iris4d) + basic_machine=mips-sgi + case $os in + -irix*) + ;; + *) + os=-irix4 + ;; + esac + ;; + isi68 | isi) + basic_machine=m68k-isi + os=-sysv + ;; + leon-*|leon[3-9]-*) + basic_machine=sparc-`echo $basic_machine | sed 's/-.*//'` + ;; + m68knommu) + basic_machine=m68k-unknown + os=-linux + ;; + m68knommu-*) + basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + m88k-omron*) + basic_machine=m88k-omron + ;; + magnum | m3230) + basic_machine=mips-mips + os=-sysv + ;; + merlin) + basic_machine=ns32k-utek + os=-sysv + ;; + microblaze*) + basic_machine=microblaze-xilinx + ;; + mingw64) + basic_machine=x86_64-pc + os=-mingw64 + ;; + mingw32) + basic_machine=i686-pc + os=-mingw32 + ;; + mingw32ce) + basic_machine=arm-unknown + os=-mingw32ce + ;; + miniframe) + basic_machine=m68000-convergent + ;; + *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; + mips3*-*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` + ;; + mips3*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown + ;; + monitor) + basic_machine=m68k-rom68k + os=-coff + ;; + morphos) + basic_machine=powerpc-unknown + os=-morphos + ;; + moxiebox) + basic_machine=moxie-unknown + os=-moxiebox + ;; + msdos) + basic_machine=i386-pc + os=-msdos + ;; + ms1-*) + basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` + ;; + msys) + basic_machine=i686-pc + os=-msys + ;; + mvs) + basic_machine=i370-ibm + os=-mvs + ;; + nacl) + basic_machine=le32-unknown + os=-nacl + ;; + ncr3000) + basic_machine=i486-ncr + os=-sysv4 + ;; + netbsd386) + basic_machine=i386-unknown + 
os=-netbsd + ;; + netwinder) + basic_machine=armv4l-rebel + os=-linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + os=-newsos + ;; + news1000) + basic_machine=m68030-sony + os=-newsos + ;; + news-3600 | risc-news) + basic_machine=mips-sony + os=-newsos + ;; + necv70) + basic_machine=v70-nec + os=-sysv + ;; + next | m*-next ) + basic_machine=m68k-next + case $os in + -nextstep* ) + ;; + -ns2*) + os=-nextstep2 + ;; + *) + os=-nextstep3 + ;; + esac + ;; + nh3000) + basic_machine=m68k-harris + os=-cxux + ;; + nh[45]000) + basic_machine=m88k-harris + os=-cxux + ;; + nindy960) + basic_machine=i960-intel + os=-nindy + ;; + mon960) + basic_machine=i960-intel + os=-mon960 + ;; + nonstopux) + basic_machine=mips-compaq + os=-nonstopux + ;; + np1) + basic_machine=np1-gould + ;; + neo-tandem) + basic_machine=neo-tandem + ;; + nse-tandem) + basic_machine=nse-tandem + ;; + nsr-tandem) + basic_machine=nsr-tandem + ;; + op50n-* | op60c-*) + basic_machine=hppa1.1-oki + os=-proelf + ;; + openrisc | openrisc-*) + basic_machine=or32-unknown + ;; + os400) + basic_machine=powerpc-ibm + os=-os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + os=-ose + ;; + os68k) + basic_machine=m68k-none + os=-os68k + ;; + pa-hitachi) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + paragon) + basic_machine=i860-intel + os=-osf + ;; + parisc) + basic_machine=hppa-unknown + os=-linux + ;; + parisc-*) + basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + pbd) + basic_machine=sparc-tti + ;; + pbb) + basic_machine=m68k-tti + ;; + pc532 | pc532-*) + basic_machine=ns32k-pc532 + ;; + pc98) + basic_machine=i386-pc + ;; + pc98-*) + basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium | p5 | k5 | k6 | nexgen | viac3) + basic_machine=i586-pc + ;; + pentiumpro | p6 | 6x86 | athlon | athlon_*) + basic_machine=i686-pc + ;; + pentiumii | pentium2 | pentiumiii | pentium3) + basic_machine=i686-pc + ;; + pentium4) + basic_machine=i786-pc + ;; + pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) + basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumpro-* | p6-* | 6x86-* | athlon-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium4-*) + basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pn) + basic_machine=pn-gould + ;; + power) basic_machine=power-ibm + ;; + ppc | ppcbe) basic_machine=powerpc-unknown + ;; + ppc-* | ppcbe-*) + basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppcle | powerpclittle) + basic_machine=powerpcle-unknown + ;; + ppcle-* | powerpclittle-*) + basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64) basic_machine=powerpc64-unknown + ;; + ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64le | powerpc64little) + basic_machine=powerpc64le-unknown + ;; + ppc64le-* | powerpc64little-*) + basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ps2) + basic_machine=i386-ibm + ;; + pw32) + basic_machine=i586-unknown + os=-pw32 + ;; + rdos | rdos64) + basic_machine=x86_64-pc + os=-rdos + ;; + rdos32) + basic_machine=i386-pc + os=-rdos + ;; + rom68k) + basic_machine=m68k-rom68k + os=-coff + ;; + rm[46]00) + basic_machine=mips-siemens + ;; + rtpc | rtpc-*) + basic_machine=romp-ibm + ;; + s390 | s390-*) + basic_machine=s390-ibm + ;; + s390x | s390x-*) 
+ basic_machine=s390x-ibm + ;; + sa29200) + basic_machine=a29k-amd + os=-udi + ;; + sb1) + basic_machine=mipsisa64sb1-unknown + ;; + sb1el) + basic_machine=mipsisa64sb1el-unknown + ;; + sde) + basic_machine=mipsisa32-sde + os=-elf + ;; + sei) + basic_machine=mips-sei + os=-seiux + ;; + sequent) + basic_machine=i386-sequent + ;; + sh) + basic_machine=sh-hitachi + os=-hms + ;; + sh5el) + basic_machine=sh5le-unknown + ;; + sh64) + basic_machine=sh64-unknown + ;; + sparclite-wrs | simso-wrs) + basic_machine=sparclite-wrs + os=-vxworks + ;; + sps7) + basic_machine=m68k-bull + os=-sysv2 + ;; + spur) + basic_machine=spur-unknown + ;; + st2000) + basic_machine=m68k-tandem + ;; + stratus) + basic_machine=i860-stratus + os=-sysv4 + ;; + strongarm-* | thumb-*) + basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + sun2) + basic_machine=m68000-sun + ;; + sun2os3) + basic_machine=m68000-sun + os=-sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + os=-sunos4 + ;; + sun3os3) + basic_machine=m68k-sun + os=-sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + os=-sunos4 + ;; + sun4os3) + basic_machine=sparc-sun + os=-sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + os=-sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + os=-solaris2 + ;; + sun3 | sun3-*) + basic_machine=m68k-sun + ;; + sun4) + basic_machine=sparc-sun + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + ;; + sv1) + basic_machine=sv1-cray + os=-unicos + ;; + symmetry) + basic_machine=i386-sequent + os=-dynix + ;; + t3e) + basic_machine=alphaev5-cray + os=-unicos + ;; + t90) + basic_machine=t90-cray + os=-unicos + ;; + tile*) + basic_machine=$basic_machine-unknown + os=-linux-gnu + ;; + tx39) + basic_machine=mipstx39-unknown + ;; + tx39el) + basic_machine=mipstx39el-unknown + ;; + toad1) + basic_machine=pdp10-xkl + os=-tops20 + ;; + tower | tower-32) + basic_machine=m68k-ncr + ;; + tpf) + basic_machine=s390x-ibm + os=-tpf + ;; + udi29k) + basic_machine=a29k-amd + os=-udi + ;; + ultra3) + basic_machine=a29k-nyu + os=-sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + os=-none + ;; + vaxv) + basic_machine=vax-dec + os=-sysv + ;; + vms) + basic_machine=vax-dec + os=-vms + ;; + vpp*|vx|vx-*) + basic_machine=f301-fujitsu + ;; + vxworks960) + basic_machine=i960-wrs + os=-vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + os=-vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + os=-vxworks + ;; + w65*) + basic_machine=w65-wdc + os=-none + ;; + w89k-*) + basic_machine=hppa1.1-winbond + os=-proelf + ;; + xbox) + basic_machine=i686-pc + os=-mingw32 + ;; + xps | xps100) + basic_machine=xps100-honeywell + ;; + xscale-* | xscalee[bl]-*) + basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` + ;; + ymp) + basic_machine=ymp-cray + os=-unicos + ;; + z8k-*-coff) + basic_machine=z8k-unknown + os=-sim + ;; + z80-*-coff) + basic_machine=z80-unknown + os=-sim + ;; + none) + basic_machine=none-none + os=-none + ;; + +# Here we handle the default manufacturer of certain CPU types. It is in +# some cases the only manufacturer, in others, it is the most popular. 
+ w89k) + basic_machine=hppa1.1-winbond + ;; + op50n) + basic_machine=hppa1.1-oki + ;; + op60c) + basic_machine=hppa1.1-oki + ;; + romp) + basic_machine=romp-ibm + ;; + mmix) + basic_machine=mmix-knuth + ;; + rs6000) + basic_machine=rs6000-ibm + ;; + vax) + basic_machine=vax-dec + ;; + pdp10) + # there are many clones, so DEC is not a safe bet + basic_machine=pdp10-unknown + ;; + pdp11) + basic_machine=pdp11-dec + ;; + we32k) + basic_machine=we32k-att + ;; + sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) + basic_machine=sh-unknown + ;; + sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) + basic_machine=sparc-sun + ;; + cydra) + basic_machine=cydra-cydrome + ;; + orion) + basic_machine=orion-highlevel + ;; + orion105) + basic_machine=clipper-highlevel + ;; + mac | mpw | mac-mpw) + basic_machine=m68k-apple + ;; + pmac | pmac-mpw) + basic_machine=powerpc-apple + ;; + *-unknown) + # Make sure to match an already-canonicalized machine name. + ;; + *) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; +esac + +# Here we canonicalize certain aliases for manufacturers. +case $basic_machine in + *-digital*) + basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` + ;; + *-commodore*) + basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` + ;; + *) + ;; +esac + +# Decode manufacturer-specific aliases for certain operating systems. + +if [ x"$os" != x"" ] +then +case $os in + # First match some system type aliases + # that might get confused with valid system types. + # -solaris* is a basic system type, with this one exception. + -auroraux) + os=-auroraux + ;; + -solaris1 | -solaris1.*) + os=`echo $os | sed -e 's|solaris1|sunos4|'` + ;; + -solaris) + os=-solaris2 + ;; + -svr4*) + os=-sysv4 + ;; + -unixware*) + os=-sysv4.2uw + ;; + -gnu/linux*) + os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` + ;; + # First accept the basic system types. + # The portable systems comes first. + # Each alternative MUST END IN A *, to match a version number. + # -sysv* is not here because it comes later, after sysvr4. 
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ + | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ + | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ + | -sym* | -kopensolaris* | -plan9* \ + | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ + | -aos* | -aros* | -cloudabi* | -sortix* \ + | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ + | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ + | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ + | -bitrig* | -openbsd* | -solidbsd* | -libertybsd* \ + | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ + | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ + | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ + | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ + | -chorusos* | -chorusrdb* | -cegcc* \ + | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ + | -midipix* | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \ + | -linux-newlib* | -linux-musl* | -linux-uclibc* \ + | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \ + | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ + | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ + | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ + | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ + | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ + | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ + | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* \ + | -onefs* | -tirtos* | -phoenix* | -fuchsia*) + # Remember, each alternative MUST END IN *, to match a version number. + ;; + -qnx*) + case $basic_machine in + x86-* | i*86-*) + ;; + *) + os=-nto$os + ;; + esac + ;; + -nto-qnx*) + ;; + -nto*) + os=`echo $os | sed -e 's|nto|nto-qnx|'` + ;; + -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ + | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ + | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) + ;; + -mac*) + os=`echo $os | sed -e 's|mac|macos|'` + ;; + -linux-dietlibc) + os=-linux-dietlibc + ;; + -linux*) + os=`echo $os | sed -e 's|linux|linux-gnu|'` + ;; + -sunos5*) + os=`echo $os | sed -e 's|sunos5|solaris2|'` + ;; + -sunos6*) + os=`echo $os | sed -e 's|sunos6|solaris3|'` + ;; + -opened*) + os=-openedition + ;; + -os400*) + os=-os400 + ;; + -wince*) + os=-wince + ;; + -osfrose*) + os=-osfrose + ;; + -osf*) + os=-osf + ;; + -utek*) + os=-bsd + ;; + -dynix*) + os=-bsd + ;; + -acis*) + os=-aos + ;; + -atheos*) + os=-atheos + ;; + -syllable*) + os=-syllable + ;; + -386bsd) + os=-bsd + ;; + -ctix* | -uts*) + os=-sysv + ;; + -nova*) + os=-rtmk-nova + ;; + -ns2 ) + os=-nextstep2 + ;; + -nsk*) + os=-nsk + ;; + # Preserve the version number of sinix5. + -sinix5.*) + os=`echo $os | sed -e 's|sinix|sysv|'` + ;; + -sinix*) + os=-sysv4 + ;; + -tpf*) + os=-tpf + ;; + -triton*) + os=-sysv3 + ;; + -oss*) + os=-sysv3 + ;; + -svr4) + os=-sysv4 + ;; + -svr3) + os=-sysv3 + ;; + -sysvr4) + os=-sysv4 + ;; + # This must come after -sysvr4. + -sysv*) + ;; + -ose*) + os=-ose + ;; + -es1800*) + os=-ose + ;; + -xenix) + os=-xenix + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + os=-mint + ;; + -aros*) + os=-aros + ;; + -zvmoe) + os=-zvmoe + ;; + -dicos*) + os=-dicos + ;; + -nacl*) + ;; + -ios) + ;; + -none) + ;; + *) + # Get rid of the `-' at the beginning of $os. 
+ os=`echo $os | sed 's/[^-]*-//'` + echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 + exit 1 + ;; +esac +else + +# Here we handle the default operating systems that come with various machines. +# The value should be what the vendor currently ships out the door with their +# machine or put another way, the most popular os provided with the machine. + +# Note that if you're going to try to match "-MANUFACTURER" here (say, +# "-sun"), then you have to tell the case statement up towards the top +# that MANUFACTURER isn't an operating system. Otherwise, code above +# will signal an error saying that MANUFACTURER isn't an operating +# system, and we'll never get to this point. + +case $basic_machine in + score-*) + os=-elf + ;; + spu-*) + os=-elf + ;; + *-acorn) + os=-riscix1.2 + ;; + arm*-rebel) + os=-linux + ;; + arm*-semi) + os=-aout + ;; + c4x-* | tic4x-*) + os=-coff + ;; + c8051-*) + os=-elf + ;; + hexagon-*) + os=-elf + ;; + tic54x-*) + os=-coff + ;; + tic55x-*) + os=-coff + ;; + tic6x-*) + os=-coff + ;; + # This must come before the *-dec entry. + pdp10-*) + os=-tops20 + ;; + pdp11-*) + os=-none + ;; + *-dec | vax-*) + os=-ultrix4.2 + ;; + m68*-apollo) + os=-domain + ;; + i386-sun) + os=-sunos4.0.2 + ;; + m68000-sun) + os=-sunos3 + ;; + m68*-cisco) + os=-aout + ;; + mep-*) + os=-elf + ;; + mips*-cisco) + os=-elf + ;; + mips*-*) + os=-elf + ;; + or32-*) + os=-coff + ;; + *-tti) # must be before sparc entry or we get the wrong os. + os=-sysv3 + ;; + sparc-* | *-sun) + os=-sunos4.1.1 + ;; + *-be) + os=-beos + ;; + *-haiku) + os=-haiku + ;; + *-ibm) + os=-aix + ;; + *-knuth) + os=-mmixware + ;; + *-wec) + os=-proelf + ;; + *-winbond) + os=-proelf + ;; + *-oki) + os=-proelf + ;; + *-hp) + os=-hpux + ;; + *-hitachi) + os=-hiux + ;; + i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) + os=-sysv + ;; + *-cbm) + os=-amigaos + ;; + *-dg) + os=-dgux + ;; + *-dolphin) + os=-sysv3 + ;; + m68k-ccur) + os=-rtu + ;; + m88k-omron*) + os=-luna + ;; + *-next ) + os=-nextstep + ;; + *-sequent) + os=-ptx + ;; + *-crds) + os=-unos + ;; + *-ns) + os=-genix + ;; + i370-*) + os=-mvs + ;; + *-next) + os=-nextstep3 + ;; + *-gould) + os=-sysv + ;; + *-highlevel) + os=-bsd + ;; + *-encore) + os=-bsd + ;; + *-sgi) + os=-irix + ;; + *-siemens) + os=-sysv4 + ;; + *-masscomp) + os=-rtu + ;; + f30[01]-fujitsu | f700-fujitsu) + os=-uxpv + ;; + *-rom68k) + os=-coff + ;; + *-*bug) + os=-coff + ;; + *-apple) + os=-macos + ;; + *-atari*) + os=-mint + ;; + *) + os=-none + ;; +esac +fi + +# Here we handle the case where we know the os, and the CPU type, but not the +# manufacturer. We pick the logical manufacturer. 
+vendor=unknown +case $basic_machine in + *-unknown) + case $os in + -riscix*) + vendor=acorn + ;; + -sunos*) + vendor=sun + ;; + -cnk*|-aix*) + vendor=ibm + ;; + -beos*) + vendor=be + ;; + -hpux*) + vendor=hp + ;; + -mpeix*) + vendor=hp + ;; + -hiux*) + vendor=hitachi + ;; + -unos*) + vendor=crds + ;; + -dgux*) + vendor=dg + ;; + -luna*) + vendor=omron + ;; + -genix*) + vendor=ns + ;; + -mvs* | -opened*) + vendor=ibm + ;; + -os400*) + vendor=ibm + ;; + -ptx*) + vendor=sequent + ;; + -tpf*) + vendor=ibm + ;; + -vxsim* | -vxworks* | -windiss*) + vendor=wrs + ;; + -aux*) + vendor=apple + ;; + -hms*) + vendor=hitachi + ;; + -mpw* | -macos*) + vendor=apple + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + vendor=atari + ;; + -vos*) + vendor=stratus + ;; + esac + basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` + ;; +esac + +echo $basic_machine$os +exit + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/ext/libpqxx-7.7.3/config/depcomp b/ext/libpqxx-7.7.3/config/depcomp new file mode 100755 index 000000000..715e34311 --- /dev/null +++ b/ext/libpqxx-7.7.3/config/depcomp @@ -0,0 +1,791 @@ +#! /bin/sh +# depcomp - compile a program generating dependencies as side-effects + +scriptversion=2018-03-07.03; # UTC + +# Copyright (C) 1999-2021 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# Originally written by Alexandre Oliva . + +case $1 in + '') + echo "$0: No command. Try '$0 --help' for more information." 1>&2 + exit 1; + ;; + -h | --h*) + cat <<\EOF +Usage: depcomp [--help] [--version] PROGRAM [ARGS] + +Run PROGRAMS ARGS to compile a file, generating dependencies +as side-effects. + +Environment variables: + depmode Dependency tracking mode. + source Source file read by 'PROGRAMS ARGS'. + object Object file output by 'PROGRAMS ARGS'. + DEPDIR directory where to store dependencies. + depfile Dependency file to output. + tmpdepfile Temporary file to use when outputting dependencies. + libtool Whether libtool is used (yes/no). + +Report bugs to . +EOF + exit $? + ;; + -v | --v*) + echo "depcomp $scriptversion" + exit $? + ;; +esac + +# Get the directory component of the given path, and save it in the +# global variables '$dir'. Note that this directory component will +# be either empty or ending with a '/' character. This is deliberate. +set_dir_from () +{ + case $1 in + */*) dir=`echo "$1" | sed -e 's|/[^/]*$|/|'`;; + *) dir=;; + esac +} + +# Get the suffix-stripped basename of the given path, and save it the +# global variable '$base'. 
+set_base_from () +{ + base=`echo "$1" | sed -e 's|^.*/||' -e 's/\.[^.]*$//'` +} + +# If no dependency file was actually created by the compiler invocation, +# we still have to create a dummy depfile, to avoid errors with the +# Makefile "include basename.Plo" scheme. +make_dummy_depfile () +{ + echo "#dummy" > "$depfile" +} + +# Factor out some common post-processing of the generated depfile. +# Requires the auxiliary global variable '$tmpdepfile' to be set. +aix_post_process_depfile () +{ + # If the compiler actually managed to produce a dependency file, + # post-process it. + if test -f "$tmpdepfile"; then + # Each line is of the form 'foo.o: dependency.h'. + # Do two passes, one to just change these to + # $object: dependency.h + # and one to simply output + # dependency.h: + # which is needed to avoid the deleted-header problem. + { sed -e "s,^.*\.[$lower]*:,$object:," < "$tmpdepfile" + sed -e "s,^.*\.[$lower]*:[$tab ]*,," -e 's,$,:,' < "$tmpdepfile" + } > "$depfile" + rm -f "$tmpdepfile" + else + make_dummy_depfile + fi +} + +# A tabulation character. +tab=' ' +# A newline character. +nl=' +' +# Character ranges might be problematic outside the C locale. +# These definitions help. +upper=ABCDEFGHIJKLMNOPQRSTUVWXYZ +lower=abcdefghijklmnopqrstuvwxyz +digits=0123456789 +alpha=${upper}${lower} + +if test -z "$depmode" || test -z "$source" || test -z "$object"; then + echo "depcomp: Variables source, object and depmode must be set" 1>&2 + exit 1 +fi + +# Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po. +depfile=${depfile-`echo "$object" | + sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`} +tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`} + +rm -f "$tmpdepfile" + +# Avoid interferences from the environment. +gccflag= dashmflag= + +# Some modes work just like other modes, but use different flags. We +# parameterize here, but still list the modes in the big case below, +# to make depend.m4 easier to write. Note that we *cannot* use a case +# here, because this file can only contain one case statement. +if test "$depmode" = hp; then + # HP compiler uses -M and no extra arg. + gccflag=-M + depmode=gcc +fi + +if test "$depmode" = dashXmstdout; then + # This is just like dashmstdout with a different argument. + dashmflag=-xM + depmode=dashmstdout +fi + +cygpath_u="cygpath -u -f -" +if test "$depmode" = msvcmsys; then + # This is just like msvisualcpp but w/o cygpath translation. + # Just convert the backslash-escaped backslashes to single forward + # slashes to satisfy depend.m4 + cygpath_u='sed s,\\\\,/,g' + depmode=msvisualcpp +fi + +if test "$depmode" = msvc7msys; then + # This is just like msvc7 but w/o cygpath translation. + # Just convert the backslash-escaped backslashes to single forward + # slashes to satisfy depend.m4 + cygpath_u='sed s,\\\\,/,g' + depmode=msvc7 +fi + +if test "$depmode" = xlc; then + # IBM C/C++ Compilers xlc/xlC can output gcc-like dependency information. + gccflag=-qmakedep=gcc,-MF + depmode=gcc +fi + +case "$depmode" in +gcc3) +## gcc 3 implements dependency tracking that does exactly what +## we want. Yay! Note: for some reason libtool 1.4 doesn't like +## it if -MD -MP comes after the -MF stuff. Hmm. +## Unfortunately, FreeBSD c89 acceptance of flags depends upon +## the command line argument order; so add the flags where they +## appear in depend2.am. Note that the slowdown incurred here +## affects only configure: in makefiles, %FASTDEP% shortcuts this. 
+ for arg + do + case $arg in + -c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;; + *) set fnord "$@" "$arg" ;; + esac + shift # fnord + shift # $arg + done + "$@" + stat=$? + if test $stat -ne 0; then + rm -f "$tmpdepfile" + exit $stat + fi + mv "$tmpdepfile" "$depfile" + ;; + +gcc) +## Note that this doesn't just cater to obsosete pre-3.x GCC compilers. +## but also to in-use compilers like IMB xlc/xlC and the HP C compiler. +## (see the conditional assignment to $gccflag above). +## There are various ways to get dependency output from gcc. Here's +## why we pick this rather obscure method: +## - Don't want to use -MD because we'd like the dependencies to end +## up in a subdir. Having to rename by hand is ugly. +## (We might end up doing this anyway to support other compilers.) +## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like +## -MM, not -M (despite what the docs say). Also, it might not be +## supported by the other compilers which use the 'gcc' depmode. +## - Using -M directly means running the compiler twice (even worse +## than renaming). + if test -z "$gccflag"; then + gccflag=-MD, + fi + "$@" -Wp,"$gccflag$tmpdepfile" + stat=$? + if test $stat -ne 0; then + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + echo "$object : \\" > "$depfile" + # The second -e expression handles DOS-style file names with drive + # letters. + sed -e 's/^[^:]*: / /' \ + -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile" +## This next piece of magic avoids the "deleted header file" problem. +## The problem is that when a header file which appears in a .P file +## is deleted, the dependency causes make to die (because there is +## typically no way to rebuild the header). We avoid this by adding +## dummy dependencies for each header file. Too bad gcc doesn't do +## this for us directly. +## Some versions of gcc put a space before the ':'. On the theory +## that the space means something, we add a space to the output as +## well. hp depmode also adds that space, but also prefixes the VPATH +## to the object. Take care to not repeat it in the output. +## Some versions of the HPUX 10.20 sed can't process this invocation +## correctly. Breaking it into two sed invocations is a workaround. + tr ' ' "$nl" < "$tmpdepfile" \ + | sed -e 's/^\\$//' -e '/^$/d' -e "s|.*$object$||" -e '/:$/d' \ + | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +hp) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. + exit 1 + ;; + +sgi) + if test "$libtool" = yes; then + "$@" "-Wp,-MDupdate,$tmpdepfile" + else + "$@" -MDupdate "$tmpdepfile" + fi + stat=$? + if test $stat -ne 0; then + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + + if test -f "$tmpdepfile"; then # yes, the sourcefile depend on other files + echo "$object : \\" > "$depfile" + # Clip off the initial element (the dependent). Don't try to be + # clever and replace this with sed code, as IRIX sed won't handle + # lines with more than a fixed number of characters (4096 in + # IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines; + # the IRIX cc adds comments like '#:fec' to the end of the + # dependency line. + tr ' ' "$nl" < "$tmpdepfile" \ + | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' \ + | tr "$nl" ' ' >> "$depfile" + echo >> "$depfile" + # The second pass generates a dummy entry for each header file. 
+ tr ' ' "$nl" < "$tmpdepfile" \ + | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \ + >> "$depfile" + else + make_dummy_depfile + fi + rm -f "$tmpdepfile" + ;; + +xlc) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. + exit 1 + ;; + +aix) + # The C for AIX Compiler uses -M and outputs the dependencies + # in a .u file. In older versions, this file always lives in the + # current directory. Also, the AIX compiler puts '$object:' at the + # start of each line; $object doesn't have directory information. + # Version 6 uses the directory in both cases. + set_dir_from "$object" + set_base_from "$object" + if test "$libtool" = yes; then + tmpdepfile1=$dir$base.u + tmpdepfile2=$base.u + tmpdepfile3=$dir.libs/$base.u + "$@" -Wc,-M + else + tmpdepfile1=$dir$base.u + tmpdepfile2=$dir$base.u + tmpdepfile3=$dir$base.u + "$@" -M + fi + stat=$? + if test $stat -ne 0; then + rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" + exit $stat + fi + + for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" + do + test -f "$tmpdepfile" && break + done + aix_post_process_depfile + ;; + +tcc) + # tcc (Tiny C Compiler) understand '-MD -MF file' since version 0.9.26 + # FIXME: That version still under development at the moment of writing. + # Make that this statement remains true also for stable, released + # versions. + # It will wrap lines (doesn't matter whether long or short) with a + # trailing '\', as in: + # + # foo.o : \ + # foo.c \ + # foo.h \ + # + # It will put a trailing '\' even on the last line, and will use leading + # spaces rather than leading tabs (at least since its commit 0394caf7 + # "Emit spaces for -MD"). + "$@" -MD -MF "$tmpdepfile" + stat=$? + if test $stat -ne 0; then + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + # Each non-empty line is of the form 'foo.o : \' or ' dep.h \'. + # We have to change lines of the first kind to '$object: \'. + sed -e "s|.*:|$object :|" < "$tmpdepfile" > "$depfile" + # And for each line of the second kind, we have to emit a 'dep.h:' + # dummy dependency, to avoid the deleted-header problem. + sed -n -e 's|^ *\(.*\) *\\$|\1:|p' < "$tmpdepfile" >> "$depfile" + rm -f "$tmpdepfile" + ;; + +## The order of this option in the case statement is important, since the +## shell code in configure will try each of these formats in the order +## listed in this file. A plain '-MD' option would be understood by many +## compilers, so we must ensure this comes after the gcc and icc options. +pgcc) + # Portland's C compiler understands '-MD'. + # Will always output deps to 'file.d' where file is the root name of the + # source file under compilation, even if file resides in a subdirectory. + # The object file name does not affect the name of the '.d' file. + # pgcc 10.2 will output + # foo.o: sub/foo.c sub/foo.h + # and will wrap long lines using '\' : + # foo.o: sub/foo.c ... \ + # sub/foo.h ... \ + # ... + set_dir_from "$object" + # Use the source, not the object, to determine the base name, since + # that's sadly what pgcc will do too. + set_base_from "$source" + tmpdepfile=$base.d + + # For projects that build the same source file twice into different object + # files, the pgcc approach of using the *source* file root name can cause + # problems in parallel builds. Use a locking strategy to avoid stomping on + # the same $tmpdepfile. + lockdir=$base.d-lock + trap " + echo '$0: caught signal, cleaning up...' 
>&2 + rmdir '$lockdir' + exit 1 + " 1 2 13 15 + numtries=100 + i=$numtries + while test $i -gt 0; do + # mkdir is a portable test-and-set. + if mkdir "$lockdir" 2>/dev/null; then + # This process acquired the lock. + "$@" -MD + stat=$? + # Release the lock. + rmdir "$lockdir" + break + else + # If the lock is being held by a different process, wait + # until the winning process is done or we timeout. + while test -d "$lockdir" && test $i -gt 0; do + sleep 1 + i=`expr $i - 1` + done + fi + i=`expr $i - 1` + done + trap - 1 2 13 15 + if test $i -le 0; then + echo "$0: failed to acquire lock after $numtries attempts" >&2 + echo "$0: check lockdir '$lockdir'" >&2 + exit 1 + fi + + if test $stat -ne 0; then + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + # Each line is of the form `foo.o: dependent.h', + # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'. + # Do two passes, one to just change these to + # `$object: dependent.h' and one to simply `dependent.h:'. + sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile" + # Some versions of the HPUX 10.20 sed can't process this invocation + # correctly. Breaking it into two sed invocations is a workaround. + sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" \ + | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +hp2) + # The "hp" stanza above does not work with aCC (C++) and HP's ia64 + # compilers, which have integrated preprocessors. The correct option + # to use with these is +Maked; it writes dependencies to a file named + # 'foo.d', which lands next to the object file, wherever that + # happens to be. + # Much of this is similar to the tru64 case; see comments there. + set_dir_from "$object" + set_base_from "$object" + if test "$libtool" = yes; then + tmpdepfile1=$dir$base.d + tmpdepfile2=$dir.libs/$base.d + "$@" -Wc,+Maked + else + tmpdepfile1=$dir$base.d + tmpdepfile2=$dir$base.d + "$@" +Maked + fi + stat=$? + if test $stat -ne 0; then + rm -f "$tmpdepfile1" "$tmpdepfile2" + exit $stat + fi + + for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" + do + test -f "$tmpdepfile" && break + done + if test -f "$tmpdepfile"; then + sed -e "s,^.*\.[$lower]*:,$object:," "$tmpdepfile" > "$depfile" + # Add 'dependent.h:' lines. + sed -ne '2,${ + s/^ *// + s/ \\*$// + s/$/:/ + p + }' "$tmpdepfile" >> "$depfile" + else + make_dummy_depfile + fi + rm -f "$tmpdepfile" "$tmpdepfile2" + ;; + +tru64) + # The Tru64 compiler uses -MD to generate dependencies as a side + # effect. 'cc -MD -o foo.o ...' puts the dependencies into 'foo.o.d'. + # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put + # dependencies in 'foo.d' instead, so we check for that too. + # Subdirectories are respected. + set_dir_from "$object" + set_base_from "$object" + + if test "$libtool" = yes; then + # Libtool generates 2 separate objects for the 2 libraries. These + # two compilations output dependencies in $dir.libs/$base.o.d and + # in $dir$base.o.d. We have to check for both files, because + # one of the two compilations can be disabled. We should prefer + # $dir$base.o.d over $dir.libs/$base.o.d because the latter is + # automatically cleaned when .libs/ is deleted, while ignoring + # the former would cause a distcleancheck panic. + tmpdepfile1=$dir$base.o.d # libtool 1.5 + tmpdepfile2=$dir.libs/$base.o.d # Likewise. + tmpdepfile3=$dir.libs/$base.d # Compaq CCC V6.2-504 + "$@" -Wc,-MD + else + tmpdepfile1=$dir$base.d + tmpdepfile2=$dir$base.d + tmpdepfile3=$dir$base.d + "$@" -MD + fi + + stat=$? 
+ if test $stat -ne 0; then + rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" + exit $stat + fi + + for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" + do + test -f "$tmpdepfile" && break + done + # Same post-processing that is required for AIX mode. + aix_post_process_depfile + ;; + +msvc7) + if test "$libtool" = yes; then + showIncludes=-Wc,-showIncludes + else + showIncludes=-showIncludes + fi + "$@" $showIncludes > "$tmpdepfile" + stat=$? + grep -v '^Note: including file: ' "$tmpdepfile" + if test $stat -ne 0; then + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + echo "$object : \\" > "$depfile" + # The first sed program below extracts the file names and escapes + # backslashes for cygpath. The second sed program outputs the file + # name when reading, but also accumulates all include files in the + # hold buffer in order to output them again at the end. This only + # works with sed implementations that can handle large buffers. + sed < "$tmpdepfile" -n ' +/^Note: including file: *\(.*\)/ { + s//\1/ + s/\\/\\\\/g + p +}' | $cygpath_u | sort -u | sed -n ' +s/ /\\ /g +s/\(.*\)/'"$tab"'\1 \\/p +s/.\(.*\) \\/\1:/ +H +$ { + s/.*/'"$tab"'/ + G + p +}' >> "$depfile" + echo >> "$depfile" # make sure the fragment doesn't end with a backslash + rm -f "$tmpdepfile" + ;; + +msvc7msys) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. + exit 1 + ;; + +#nosideeffect) + # This comment above is used by automake to tell side-effect + # dependency tracking mechanisms from slower ones. + +dashmstdout) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout, regardless of -o. + "$@" || exit $? + + # Remove the call to Libtool. + if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + + # Remove '-o $object'. + IFS=" " + for arg + do + case $arg in + -o) + shift + ;; + $object) + shift + ;; + *) + set fnord "$@" "$arg" + shift # fnord + shift # $arg + ;; + esac + done + + test -z "$dashmflag" && dashmflag=-M + # Require at least two characters before searching for ':' + # in the target name. This is to cope with DOS-style filenames: + # a dependency such as 'c:/foo/bar' could be seen as target 'c' otherwise. + "$@" $dashmflag | + sed "s|^[$tab ]*[^:$tab ][^:][^:]*:[$tab ]*|$object: |" > "$tmpdepfile" + rm -f "$depfile" + cat < "$tmpdepfile" > "$depfile" + # Some versions of the HPUX 10.20 sed can't process this sed invocation + # correctly. Breaking it into two sed invocations is a workaround. + tr ' ' "$nl" < "$tmpdepfile" \ + | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \ + | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +dashXmstdout) + # This case only exists to satisfy depend.m4. It is never actually + # run, as this mode is specially recognized in the preamble. + exit 1 + ;; + +makedepend) + "$@" || exit $? + # Remove any Libtool call + if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + # X makedepend + shift + cleared=no eat=no + for arg + do + case $cleared in + no) + set ""; shift + cleared=yes ;; + esac + if test $eat = yes; then + eat=no + continue + fi + case "$arg" in + -D*|-I*) + set fnord "$@" "$arg"; shift ;; + # Strip any option that makedepend may not understand. Remove + # the object too, otherwise makedepend will parse it as a source file. 
+ -arch) + eat=yes ;; + -*|$object) + ;; + *) + set fnord "$@" "$arg"; shift ;; + esac + done + obj_suffix=`echo "$object" | sed 's/^.*\././'` + touch "$tmpdepfile" + ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@" + rm -f "$depfile" + # makedepend may prepend the VPATH from the source file name to the object. + # No need to regex-escape $object, excess matching of '.' is harmless. + sed "s|^.*\($object *:\)|\1|" "$tmpdepfile" > "$depfile" + # Some versions of the HPUX 10.20 sed can't process the last invocation + # correctly. Breaking it into two sed invocations is a workaround. + sed '1,2d' "$tmpdepfile" \ + | tr ' ' "$nl" \ + | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \ + | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" "$tmpdepfile".bak + ;; + +cpp) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout. + "$@" || exit $? + + # Remove the call to Libtool. + if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + + # Remove '-o $object'. + IFS=" " + for arg + do + case $arg in + -o) + shift + ;; + $object) + shift + ;; + *) + set fnord "$@" "$arg" + shift # fnord + shift # $arg + ;; + esac + done + + "$@" -E \ + | sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ + -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ + | sed '$ s: \\$::' > "$tmpdepfile" + rm -f "$depfile" + echo "$object : \\" > "$depfile" + cat < "$tmpdepfile" >> "$depfile" + sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +msvisualcpp) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout. + "$@" || exit $? + + # Remove the call to Libtool. + if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + + IFS=" " + for arg + do + case "$arg" in + -o) + shift + ;; + $object) + shift + ;; + "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI") + set fnord "$@" + shift + shift + ;; + *) + set fnord "$@" "$arg" + shift + shift + ;; + esac + done + "$@" -E 2>/dev/null | + sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile" + rm -f "$depfile" + echo "$object : \\" > "$depfile" + sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::'"$tab"'\1 \\:p' >> "$depfile" + echo "$tab" >> "$depfile" + sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +msvcmsys) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. 
+ exit 1 + ;; + +none) + exec "$@" + ;; + +*) + echo "Unknown depmode $depmode" 1>&2 + exit 1 + ;; +esac + +exit 0 + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC0" +# time-stamp-end: "; # UTC" +# End: diff --git a/ext/libpqxx-7.7.3/config/install-sh b/ext/libpqxx-7.7.3/config/install-sh new file mode 100755 index 000000000..6781b987b --- /dev/null +++ b/ext/libpqxx-7.7.3/config/install-sh @@ -0,0 +1,520 @@ +#!/bin/sh +# install - install a program, script, or datafile + +scriptversion=2009-04-28.21; # UTC + +# This originates from X11R5 (mit/util/scripts/install.sh), which was +# later released in X11R6 (xc/config/util/install.sh) with the +# following copyright and license. +# +# Copyright (C) 1994 X Consortium +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN +# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- +# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# +# Except as contained in this notice, the name of the X Consortium shall not +# be used in advertising or otherwise to promote the sale, use or other deal- +# ings in this Software without prior written authorization from the X Consor- +# tium. +# +# +# FSF changes to this file are in the public domain. +# +# Calling this script install-sh is preferred over install.sh, to prevent +# `make' implicit rules from creating a file called install from it +# when there is no Makefile. +# +# This script is compatible with the BSD install script, but was written +# from scratch. + +nl=' +' +IFS=" "" $nl" + +# set DOITPROG to echo to test this script + +# Don't use :- since 4.3BSD and earlier shells don't like it. +doit=${DOITPROG-} +if test -z "$doit"; then + doit_exec=exec +else + doit_exec=$doit +fi + +# Put in absolute file names if you don't have them in your path; +# or use environment vars. + +chgrpprog=${CHGRPPROG-chgrp} +chmodprog=${CHMODPROG-chmod} +chownprog=${CHOWNPROG-chown} +cmpprog=${CMPPROG-cmp} +cpprog=${CPPROG-cp} +mkdirprog=${MKDIRPROG-mkdir} +mvprog=${MVPROG-mv} +rmprog=${RMPROG-rm} +stripprog=${STRIPPROG-strip} + +posix_glob='?' +initialize_posix_glob=' + test "$posix_glob" != "?" || { + if (set -f) 2>/dev/null; then + posix_glob= + else + posix_glob=: + fi + } +' + +posix_mkdir= + +# Desired mode of installed file. +mode=0755 + +chgrpcmd= +chmodcmd=$chmodprog +chowncmd= +mvcmd=$mvprog +rmcmd="$rmprog -f" +stripcmd= + +src= +dst= +dir_arg= +dst_arg= + +copy_on_change=false +no_target_directory= + +usage="\ +Usage: $0 [OPTION]... 
[-T] SRCFILE DSTFILE + or: $0 [OPTION]... SRCFILES... DIRECTORY + or: $0 [OPTION]... -t DIRECTORY SRCFILES... + or: $0 [OPTION]... -d DIRECTORIES... + +In the 1st form, copy SRCFILE to DSTFILE. +In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. +In the 4th, create DIRECTORIES. + +Options: + --help display this help and exit. + --version display version info and exit. + + -c (ignored) + -C install only if different (preserve the last data modification time) + -d create directories instead of installing files. + -g GROUP $chgrpprog installed files to GROUP. + -m MODE $chmodprog installed files to MODE. + -o USER $chownprog installed files to USER. + -s $stripprog installed files. + -t DIRECTORY install into DIRECTORY. + -T report an error if DSTFILE is a directory. + +Environment variables override the default commands: + CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG + RMPROG STRIPPROG +" + +while test $# -ne 0; do + case $1 in + -c) ;; + + -C) copy_on_change=true;; + + -d) dir_arg=true;; + + -g) chgrpcmd="$chgrpprog $2" + shift;; + + --help) echo "$usage"; exit $?;; + + -m) mode=$2 + case $mode in + *' '* | *' '* | *' +'* | *'*'* | *'?'* | *'['*) + echo "$0: invalid mode: $mode" >&2 + exit 1;; + esac + shift;; + + -o) chowncmd="$chownprog $2" + shift;; + + -s) stripcmd=$stripprog;; + + -t) dst_arg=$2 + shift;; + + -T) no_target_directory=true;; + + --version) echo "$0 $scriptversion"; exit $?;; + + --) shift + break;; + + -*) echo "$0: invalid option: $1" >&2 + exit 1;; + + *) break;; + esac + shift +done + +if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then + # When -d is used, all remaining arguments are directories to create. + # When -t is used, the destination is already specified. + # Otherwise, the last argument is the destination. Remove it from $@. + for arg + do + if test -n "$dst_arg"; then + # $@ is not empty: it contains at least $arg. + set fnord "$@" "$dst_arg" + shift # fnord + fi + shift # arg + dst_arg=$arg + done +fi + +if test $# -eq 0; then + if test -z "$dir_arg"; then + echo "$0: no input file specified." >&2 + exit 1 + fi + # It's OK to call `install-sh -d' without argument. + # This can happen when creating conditional directories. + exit 0 +fi + +if test -z "$dir_arg"; then + trap '(exit $?); exit' 1 2 13 15 + + # Set umask so as not to create temps with too-generous modes. + # However, 'strip' requires both read and write access to temps. + case $mode in + # Optimize common cases. + *644) cp_umask=133;; + *755) cp_umask=22;; + + *[0-7]) + if test -z "$stripcmd"; then + u_plus_rw= + else + u_plus_rw='% 200' + fi + cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; + *) + if test -z "$stripcmd"; then + u_plus_rw= + else + u_plus_rw=,u+rw + fi + cp_umask=$mode$u_plus_rw;; + esac +fi + +for src +do + # Protect names starting with `-'. + case $src in + -*) src=./$src;; + esac + + if test -n "$dir_arg"; then + dst=$src + dstdir=$dst + test -d "$dstdir" + dstdir_status=$? + else + + # Waiting for this to be detected by the "$cpprog $src $dsttmp" command + # might cause directories to be created, which would be especially bad + # if $src (and thus $dsttmp) contains '*'. + if test ! -f "$src" && test ! -d "$src"; then + echo "$0: $src does not exist." >&2 + exit 1 + fi + + if test -z "$dst_arg"; then + echo "$0: no destination specified." >&2 + exit 1 + fi + + dst=$dst_arg + # Protect names starting with `-'. 
+ case $dst in + -*) dst=./$dst;; + esac + + # If destination is a directory, append the input filename; won't work + # if double slashes aren't ignored. + if test -d "$dst"; then + if test -n "$no_target_directory"; then + echo "$0: $dst_arg: Is a directory" >&2 + exit 1 + fi + dstdir=$dst + dst=$dstdir/`basename "$src"` + dstdir_status=0 + else + # Prefer dirname, but fall back on a substitute if dirname fails. + dstdir=` + (dirname "$dst") 2>/dev/null || + expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$dst" : 'X\(//\)[^/]' \| \ + X"$dst" : 'X\(//\)$' \| \ + X"$dst" : 'X\(/\)' \| . 2>/dev/null || + echo X"$dst" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q' + ` + + test -d "$dstdir" + dstdir_status=$? + fi + fi + + obsolete_mkdir_used=false + + if test $dstdir_status != 0; then + case $posix_mkdir in + '') + # Create intermediate dirs using mode 755 as modified by the umask. + # This is like FreeBSD 'install' as of 1997-10-28. + umask=`umask` + case $stripcmd.$umask in + # Optimize common cases. + *[2367][2367]) mkdir_umask=$umask;; + .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; + + *[0-7]) + mkdir_umask=`expr $umask + 22 \ + - $umask % 100 % 40 + $umask % 20 \ + - $umask % 10 % 4 + $umask % 2 + `;; + *) mkdir_umask=$umask,go-w;; + esac + + # With -d, create the new directory with the user-specified mode. + # Otherwise, rely on $mkdir_umask. + if test -n "$dir_arg"; then + mkdir_mode=-m$mode + else + mkdir_mode= + fi + + posix_mkdir=false + case $umask in + *[123567][0-7][0-7]) + # POSIX mkdir -p sets u+wx bits regardless of umask, which + # is incompatible with FreeBSD 'install' when (umask & 300) != 0. + ;; + *) + tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ + trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 + + if (umask $mkdir_umask && + exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 + then + if test -z "$dir_arg" || { + # Check for POSIX incompatibilities with -m. + # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or + # other-writeable bit of parent directory when it shouldn't. + # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. + ls_ld_tmpdir=`ls -ld "$tmpdir"` + case $ls_ld_tmpdir in + d????-?r-*) different_mode=700;; + d????-?--*) different_mode=755;; + *) false;; + esac && + $mkdirprog -m$different_mode -p -- "$tmpdir" && { + ls_ld_tmpdir_1=`ls -ld "$tmpdir"` + test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" + } + } + then posix_mkdir=: + fi + rmdir "$tmpdir/d" "$tmpdir" + else + # Remove any dirs left behind by ancient mkdir implementations. + rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null + fi + trap '' 0;; + esac;; + esac + + if + $posix_mkdir && ( + umask $mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" + ) + then : + else + + # The umask is ridiculous, or mkdir does not conform to POSIX, + # or it failed possibly due to a race condition. Create the + # directory the slow way, step by step, checking for races as we go. 
+ + case $dstdir in + /*) prefix='/';; + -*) prefix='./';; + *) prefix='';; + esac + + eval "$initialize_posix_glob" + + oIFS=$IFS + IFS=/ + $posix_glob set -f + set fnord $dstdir + shift + $posix_glob set +f + IFS=$oIFS + + prefixes= + + for d + do + test -z "$d" && continue + + prefix=$prefix$d + if test -d "$prefix"; then + prefixes= + else + if $posix_mkdir; then + (umask=$mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break + # Don't fail if two instances are running concurrently. + test -d "$prefix" || exit 1 + else + case $prefix in + *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; + *) qprefix=$prefix;; + esac + prefixes="$prefixes '$qprefix'" + fi + fi + prefix=$prefix/ + done + + if test -n "$prefixes"; then + # Don't fail if two instances are running concurrently. + (umask $mkdir_umask && + eval "\$doit_exec \$mkdirprog $prefixes") || + test -d "$dstdir" || exit 1 + obsolete_mkdir_used=true + fi + fi + fi + + if test -n "$dir_arg"; then + { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && + { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && + { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || + test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 + else + + # Make a couple of temp file names in the proper directory. + dsttmp=$dstdir/_inst.$$_ + rmtmp=$dstdir/_rm.$$_ + + # Trap to clean up those temp files at exit. + trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 + + # Copy the file name to the temp name. + (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && + + # and set any options; do chmod last to preserve setuid bits. + # + # If any of these fail, we abort the whole thing. If we want to + # ignore errors from any of these, just make sure not to ignore + # errors from the above "$doit $cpprog $src $dsttmp" command. + # + { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && + { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && + { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && + { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && + + # If -C, don't bother to copy if it wouldn't change the file. + if $copy_on_change && + old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && + new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && + + eval "$initialize_posix_glob" && + $posix_glob set -f && + set X $old && old=:$2:$4:$5:$6 && + set X $new && new=:$2:$4:$5:$6 && + $posix_glob set +f && + + test "$old" = "$new" && + $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 + then + rm -f "$dsttmp" + else + # Rename the file to the real destination. + $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || + + # The rename failed, perhaps because mv can't rename something else + # to itself, or perhaps because mv is so ancient that it does not + # support -f. + { + # Now remove or move aside any old file at destination location. + # We try this two ways since rm can't unlink itself on some + # systems and the destination file might be busy for other + # reasons. In this case, the final cleanup might fail but the new + # file should still install successfully. + { + test ! -f "$dst" || + $doit $rmcmd -f "$dst" 2>/dev/null || + { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && + { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } + } || + { echo "$0: cannot unlink or rename $dst" >&2 + (exit 1); exit 1 + } + } && + + # Now rename the file to the real destination. 
+ $doit $mvcmd "$dsttmp" "$dst" + } + fi || exit 1 + + trap '' 0 + fi +done + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/ext/libpqxx-7.7.3/config/ltmain.sh b/ext/libpqxx-7.7.3/config/ltmain.sh new file mode 100755 index 000000000..21e5e0784 --- /dev/null +++ b/ext/libpqxx-7.7.3/config/ltmain.sh @@ -0,0 +1,11251 @@ +#! /bin/sh +## DO NOT EDIT - This file generated from ./build-aux/ltmain.in +## by inline-source v2014-01-03.01 + +# libtool (GNU libtool) 2.4.6 +# Provide generalized library-building support services. +# Written by Gordon Matzigkeit , 1996 + +# Copyright (C) 1996-2015 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +PROGRAM=libtool +PACKAGE=libtool +VERSION="2.4.6 Debian-2.4.6-15" +package_revision=2.4.6 + + +## ------ ## +## Usage. ## +## ------ ## + +# Run './libtool --help' for help with using this script from the +# command line. + + +## ------------------------------- ## +## User overridable command paths. ## +## ------------------------------- ## + +# After configure completes, it has a better idea of some of the +# shell tools we need than the defaults used by the functions shared +# with bootstrap, so set those here where they can still be over- +# ridden by the user, but otherwise take precedence. + +: ${AUTOCONF="autoconf"} +: ${AUTOMAKE="automake"} + + +## -------------------------- ## +## Source external libraries. ## +## -------------------------- ## + +# Much of our low-level functionality needs to be sourced from external +# libraries, which are installed to $pkgauxdir. + +# Set a version string for this script. +scriptversion=2015-01-20.17; # UTC + +# General shell script boiler plate, and helper functions. +# Written by Gary V. Vaughan, 2004 + +# Copyright (C) 2004-2015 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 3 of the License, or +# (at your option) any later version. 
+ +# As a special exception to the GNU General Public License, if you distribute +# this file as part of a program or library that is built using GNU Libtool, +# you may include this file under the same distribution terms that you use +# for the rest of that program. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNES FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# Please report bugs or propose patches to gary@gnu.org. + + +## ------ ## +## Usage. ## +## ------ ## + +# Evaluate this file near the top of your script to gain access to +# the functions and variables defined here: +# +# . `echo "$0" | ${SED-sed} 's|[^/]*$||'`/build-aux/funclib.sh +# +# If you need to override any of the default environment variable +# settings, do that before evaluating this file. + + +## -------------------- ## +## Shell normalisation. ## +## -------------------- ## + +# Some shells need a little help to be as Bourne compatible as possible. +# Before doing anything else, make sure all that help has been provided! + +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac +fi + +# NLS nuisances: We save the old values in case they are required later. +_G_user_locale= +_G_safe_locale= +for _G_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES +do + eval "if test set = \"\${$_G_var+set}\"; then + save_$_G_var=\$$_G_var + $_G_var=C + export $_G_var + _G_user_locale=\"$_G_var=\\\$save_\$_G_var; \$_G_user_locale\" + _G_safe_locale=\"$_G_var=C; \$_G_safe_locale\" + fi" +done + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +# Make sure IFS has a sensible default +sp=' ' +nl=' +' +IFS="$sp $nl" + +# There are apparently some retarded systems that use ';' as a PATH separator! +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + + +## ------------------------- ## +## Locate command utilities. ## +## ------------------------- ## + + +# func_executable_p FILE +# ---------------------- +# Check that FILE is an executable regular file. +func_executable_p () +{ + test -f "$1" && test -x "$1" +} + + +# func_path_progs PROGS_LIST CHECK_FUNC [PATH] +# -------------------------------------------- +# Search for either a program that responds to --version with output +# containing "GNU", or else returned by CHECK_FUNC otherwise, by +# trying all the directories in PATH with each of the elements of +# PROGS_LIST. +# +# CHECK_FUNC should accept the path to a candidate program, and +# set $func_check_prog_result if it truncates its output less than +# $_G_path_prog_max characters. +func_path_progs () +{ + _G_progs_list=$1 + _G_check_func=$2 + _G_PATH=${3-"$PATH"} + + _G_path_prog_max=0 + _G_path_prog_found=false + _G_save_IFS=$IFS; IFS=${PATH_SEPARATOR-:} + for _G_dir in $_G_PATH; do + IFS=$_G_save_IFS + test -z "$_G_dir" && _G_dir=. 
+ for _G_prog_name in $_G_progs_list; do + for _exeext in '' .EXE; do + _G_path_prog=$_G_dir/$_G_prog_name$_exeext + func_executable_p "$_G_path_prog" || continue + case `"$_G_path_prog" --version 2>&1` in + *GNU*) func_path_progs_result=$_G_path_prog _G_path_prog_found=: ;; + *) $_G_check_func $_G_path_prog + func_path_progs_result=$func_check_prog_result + ;; + esac + $_G_path_prog_found && break 3 + done + done + done + IFS=$_G_save_IFS + test -z "$func_path_progs_result" && { + echo "no acceptable sed could be found in \$PATH" >&2 + exit 1 + } +} + + +# We want to be able to use the functions in this file before configure +# has figured out where the best binaries are kept, which means we have +# to search for them ourselves - except when the results are already set +# where we skip the searches. + +# Unless the user overrides by setting SED, search the path for either GNU +# sed, or the sed that truncates its output the least. +test -z "$SED" && { + _G_sed_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ + for _G_i in 1 2 3 4 5 6 7; do + _G_sed_script=$_G_sed_script$nl$_G_sed_script + done + echo "$_G_sed_script" 2>/dev/null | sed 99q >conftest.sed + _G_sed_script= + + func_check_prog_sed () + { + _G_path_prog=$1 + + _G_count=0 + printf 0123456789 >conftest.in + while : + do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo '' >> conftest.nl + "$_G_path_prog" -f conftest.sed conftest.out 2>/dev/null || break + diff conftest.out conftest.nl >/dev/null 2>&1 || break + _G_count=`expr $_G_count + 1` + if test "$_G_count" -gt "$_G_path_prog_max"; then + # Best one so far, save it but keep looking for a better one + func_check_prog_result=$_G_path_prog + _G_path_prog_max=$_G_count + fi + # 10*(2^10) chars as input seems more than enough + test 10 -lt "$_G_count" && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out + } + + func_path_progs "sed gsed" func_check_prog_sed $PATH:/usr/xpg4/bin + rm -f conftest.sed + SED=$func_path_progs_result +} + + +# Unless the user overrides by setting GREP, search the path for either GNU +# grep, or the grep that truncates its output the least. +test -z "$GREP" && { + func_check_prog_grep () + { + _G_path_prog=$1 + + _G_count=0 + _G_path_prog_max=0 + printf 0123456789 >conftest.in + while : + do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo 'GREP' >> conftest.nl + "$_G_path_prog" -e 'GREP$' -e '-(cannot match)-' conftest.out 2>/dev/null || break + diff conftest.out conftest.nl >/dev/null 2>&1 || break + _G_count=`expr $_G_count + 1` + if test "$_G_count" -gt "$_G_path_prog_max"; then + # Best one so far, save it but keep looking for a better one + func_check_prog_result=$_G_path_prog + _G_path_prog_max=$_G_count + fi + # 10*(2^10) chars as input seems more than enough + test 10 -lt "$_G_count" && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out + } + + func_path_progs "grep ggrep" func_check_prog_grep $PATH:/usr/xpg4/bin + GREP=$func_path_progs_result +} + + +## ------------------------------- ## +## User overridable command paths. ## +## ------------------------------- ## + +# All uppercase variable names are used for environment variables. These +# variables can be overridden by the user before calling a script that +# uses them if a suitable command of that name is not already available +# in the command search PATH. 
+ +: ${CP="cp -f"} +: ${ECHO="printf %s\n"} +: ${EGREP="$GREP -E"} +: ${FGREP="$GREP -F"} +: ${LN_S="ln -s"} +: ${MAKE="make"} +: ${MKDIR="mkdir"} +: ${MV="mv -f"} +: ${RM="rm -f"} +: ${SHELL="${CONFIG_SHELL-/bin/sh}"} + + +## -------------------- ## +## Useful sed snippets. ## +## -------------------- ## + +sed_dirname='s|/[^/]*$||' +sed_basename='s|^.*/||' + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +sed_quote_subst='s|\([`"$\\]\)|\\\1|g' + +# Same as above, but do not quote variable references. +sed_double_quote_subst='s/\(["`\\]\)/\\\1/g' + +# Sed substitution that turns a string into a regex matching for the +# string literally. +sed_make_literal_regex='s|[].[^$\\*\/]|\\&|g' + +# Sed substitution that converts a w32 file name or path +# that contains forward slashes, into one that contains +# (escaped) backslashes. A very naive implementation. +sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' + +# Re-'\' parameter expansions in output of sed_double_quote_subst that +# were '\'-ed in input to the same. If an odd number of '\' preceded a +# '$' in input to sed_double_quote_subst, that '$' was protected from +# expansion. Since each input '\' is now two '\'s, look for any number +# of runs of four '\'s followed by two '\'s and then a '$'. '\' that '$'. +_G_bs='\\' +_G_bs2='\\\\' +_G_bs4='\\\\\\\\' +_G_dollar='\$' +sed_double_backslash="\ + s/$_G_bs4/&\\ +/g + s/^$_G_bs2$_G_dollar/$_G_bs&/ + s/\\([^$_G_bs]\\)$_G_bs2$_G_dollar/\\1$_G_bs2$_G_bs$_G_dollar/g + s/\n//g" + + +## ----------------- ## +## Global variables. ## +## ----------------- ## + +# Except for the global variables explicitly listed below, the following +# functions in the '^func_' namespace, and the '^require_' namespace +# variables initialised in the 'Resource management' section, sourcing +# this file will not pollute your global namespace with anything +# else. There's no portable way to scope variables in Bourne shell +# though, so actually running these functions will sometimes place +# results into a variable named after the function, and often use +# temporary variables in the '^_G_' namespace. If you are careful to +# avoid using those namespaces casually in your sourcing script, things +# should continue to work as you expect. And, of course, you can freely +# overwrite any of the functions or variables defined here before +# calling anything to customize them. + +EXIT_SUCCESS=0 +EXIT_FAILURE=1 +EXIT_MISMATCH=63 # $? = 63 is used to indicate version mismatch to missing. +EXIT_SKIP=77 # $? = 77 is used to indicate a skipped test to automake. + +# Allow overriding, eg assuming that you follow the convention of +# putting '$debug_cmd' at the start of all your functions, you can get +# bash to show function call trace with: +# +# debug_cmd='echo "${FUNCNAME[0]} $*" >&2' bash your-script-name +debug_cmd=${debug_cmd-":"} +exit_cmd=: + +# By convention, finish your script with: +# +# exit $exit_status +# +# so that you can set exit_status to non-zero if you want to indicate +# something went wrong during execution without actually bailing out at +# the point of failure. +exit_status=$EXIT_SUCCESS + +# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh +# is ksh but when the shell is invoked as "sh" and the current value of +# the _XPG environment variable is not equal to 1 (one), the special +# positional parameter $0, within a function call, is the name of the +# function. 
+progpath=$0 + +# The name of this program. +progname=`$ECHO "$progpath" |$SED "$sed_basename"` + +# Make sure we have an absolute progpath for reexecution: +case $progpath in + [\\/]*|[A-Za-z]:\\*) ;; + *[\\/]*) + progdir=`$ECHO "$progpath" |$SED "$sed_dirname"` + progdir=`cd "$progdir" && pwd` + progpath=$progdir/$progname + ;; + *) + _G_IFS=$IFS + IFS=${PATH_SEPARATOR-:} + for progdir in $PATH; do + IFS=$_G_IFS + test -x "$progdir/$progname" && break + done + IFS=$_G_IFS + test -n "$progdir" || progdir=`pwd` + progpath=$progdir/$progname + ;; +esac + + +## ----------------- ## +## Standard options. ## +## ----------------- ## + +# The following options affect the operation of the functions defined +# below, and should be set appropriately depending on run-time para- +# meters passed on the command line. + +opt_dry_run=false +opt_quiet=false +opt_verbose=false + +# Categories 'all' and 'none' are always available. Append any others +# you will pass as the first argument to func_warning from your own +# code. +warning_categories= + +# By default, display warnings according to 'opt_warning_types'. Set +# 'warning_func' to ':' to elide all warnings, or func_fatal_error to +# treat the next displayed warning as a fatal error. +warning_func=func_warn_and_continue + +# Set to 'all' to display all warnings, 'none' to suppress all +# warnings, or a space delimited list of some subset of +# 'warning_categories' to display only the listed warnings. +opt_warning_types=all + + +## -------------------- ## +## Resource management. ## +## -------------------- ## + +# This section contains definitions for functions that each ensure a +# particular resource (a file, or a non-empty configuration variable for +# example) is available, and if appropriate to extract default values +# from pertinent package files. Call them using their associated +# 'require_*' variable to ensure that they are executed, at most, once. +# +# It's entirely deliberate that calling these functions can set +# variables that don't obey the namespace limitations obeyed by the rest +# of this file, in order that that they be as useful as possible to +# callers. + + +# require_term_colors +# ------------------- +# Allow display of bold text on terminals that support it. +require_term_colors=func_require_term_colors +func_require_term_colors () +{ + $debug_cmd + + test -t 1 && { + # COLORTERM and USE_ANSI_COLORS environment variables take + # precedence, because most terminfo databases neglect to describe + # whether color sequences are supported. + test -n "${COLORTERM+set}" && : ${USE_ANSI_COLORS="1"} + + if test 1 = "$USE_ANSI_COLORS"; then + # Standard ANSI escape sequences + tc_reset='' + tc_bold=''; tc_standout='' + tc_red=''; tc_green='' + tc_blue=''; tc_cyan='' + else + # Otherwise trust the terminfo database after all. + test -n "`tput sgr0 2>/dev/null`" && { + tc_reset=`tput sgr0` + test -n "`tput bold 2>/dev/null`" && tc_bold=`tput bold` + tc_standout=$tc_bold + test -n "`tput smso 2>/dev/null`" && tc_standout=`tput smso` + test -n "`tput setaf 1 2>/dev/null`" && tc_red=`tput setaf 1` + test -n "`tput setaf 2 2>/dev/null`" && tc_green=`tput setaf 2` + test -n "`tput setaf 4 2>/dev/null`" && tc_blue=`tput setaf 4` + test -n "`tput setaf 5 2>/dev/null`" && tc_cyan=`tput setaf 5` + } + fi + } + + require_term_colors=: +} + + +## ----------------- ## +## Function library. ## +## ----------------- ## + +# This section contains a variety of useful functions to call in your +# scripts. 
Take note of the portable wrappers for features provided by +# some modern shells, which will fall back to slower equivalents on +# less featureful shells. + + +# func_append VAR VALUE +# --------------------- +# Append VALUE onto the existing contents of VAR. + + # We should try to minimise forks, especially on Windows where they are + # unreasonably slow, so skip the feature probes when bash or zsh are + # being used: + if test set = "${BASH_VERSION+set}${ZSH_VERSION+set}"; then + : ${_G_HAVE_ARITH_OP="yes"} + : ${_G_HAVE_XSI_OPS="yes"} + # The += operator was introduced in bash 3.1 + case $BASH_VERSION in + [12].* | 3.0 | 3.0*) ;; + *) + : ${_G_HAVE_PLUSEQ_OP="yes"} + ;; + esac + fi + + # _G_HAVE_PLUSEQ_OP + # Can be empty, in which case the shell is probed, "yes" if += is + # useable or anything else if it does not work. + test -z "$_G_HAVE_PLUSEQ_OP" \ + && (eval 'x=a; x+=" b"; test "a b" = "$x"') 2>/dev/null \ + && _G_HAVE_PLUSEQ_OP=yes + +if test yes = "$_G_HAVE_PLUSEQ_OP" +then + # This is an XSI compatible shell, allowing a faster implementation... + eval 'func_append () + { + $debug_cmd + + eval "$1+=\$2" + }' +else + # ...otherwise fall back to using expr, which is often a shell builtin. + func_append () + { + $debug_cmd + + eval "$1=\$$1\$2" + } +fi + + +# func_append_quoted VAR VALUE +# ---------------------------- +# Quote VALUE and append to the end of shell variable VAR, separated +# by a space. +if test yes = "$_G_HAVE_PLUSEQ_OP"; then + eval 'func_append_quoted () + { + $debug_cmd + + func_quote_for_eval "$2" + eval "$1+=\\ \$func_quote_for_eval_result" + }' +else + func_append_quoted () + { + $debug_cmd + + func_quote_for_eval "$2" + eval "$1=\$$1\\ \$func_quote_for_eval_result" + } +fi + + +# func_append_uniq VAR VALUE +# -------------------------- +# Append unique VALUE onto the existing contents of VAR, assuming +# entries are delimited by the first character of VALUE. For example: +# +# func_append_uniq options " --another-option option-argument" +# +# will only append to $options if " --another-option option-argument " +# is not already present somewhere in $options already (note spaces at +# each end implied by leading space in second argument). +func_append_uniq () +{ + $debug_cmd + + eval _G_current_value='`$ECHO $'$1'`' + _G_delim=`expr "$2" : '\(.\)'` + + case $_G_delim$_G_current_value$_G_delim in + *"$2$_G_delim"*) ;; + *) func_append "$@" ;; + esac +} + + +# func_arith TERM... +# ------------------ +# Set func_arith_result to the result of evaluating TERMs. + test -z "$_G_HAVE_ARITH_OP" \ + && (eval 'test 2 = $(( 1 + 1 ))') 2>/dev/null \ + && _G_HAVE_ARITH_OP=yes + +if test yes = "$_G_HAVE_ARITH_OP"; then + eval 'func_arith () + { + $debug_cmd + + func_arith_result=$(( $* )) + }' +else + func_arith () + { + $debug_cmd + + func_arith_result=`expr "$@"` + } +fi + + +# func_basename FILE +# ------------------ +# Set func_basename_result to FILE with everything up to and including +# the last / stripped. +if test yes = "$_G_HAVE_XSI_OPS"; then + # If this shell supports suffix pattern removal, then use it to avoid + # forking. Hide the definitions single quotes in case the shell chokes + # on unsupported syntax... + _b='func_basename_result=${1##*/}' + _d='case $1 in + */*) func_dirname_result=${1%/*}$2 ;; + * ) func_dirname_result=$3 ;; + esac' + +else + # ...otherwise fall back to using sed. 
+ _b='func_basename_result=`$ECHO "$1" |$SED "$sed_basename"`' + _d='func_dirname_result=`$ECHO "$1" |$SED "$sed_dirname"` + if test "X$func_dirname_result" = "X$1"; then + func_dirname_result=$3 + else + func_append func_dirname_result "$2" + fi' +fi + +eval 'func_basename () +{ + $debug_cmd + + '"$_b"' +}' + + +# func_dirname FILE APPEND NONDIR_REPLACEMENT +# ------------------------------------------- +# Compute the dirname of FILE. If nonempty, add APPEND to the result, +# otherwise set result to NONDIR_REPLACEMENT. +eval 'func_dirname () +{ + $debug_cmd + + '"$_d"' +}' + + +# func_dirname_and_basename FILE APPEND NONDIR_REPLACEMENT +# -------------------------------------------------------- +# Perform func_basename and func_dirname in a single function +# call: +# dirname: Compute the dirname of FILE. If nonempty, +# add APPEND to the result, otherwise set result +# to NONDIR_REPLACEMENT. +# value returned in "$func_dirname_result" +# basename: Compute filename of FILE. +# value retuned in "$func_basename_result" +# For efficiency, we do not delegate to the functions above but instead +# duplicate the functionality here. +eval 'func_dirname_and_basename () +{ + $debug_cmd + + '"$_b"' + '"$_d"' +}' + + +# func_echo ARG... +# ---------------- +# Echo program name prefixed message. +func_echo () +{ + $debug_cmd + + _G_message=$* + + func_echo_IFS=$IFS + IFS=$nl + for _G_line in $_G_message; do + IFS=$func_echo_IFS + $ECHO "$progname: $_G_line" + done + IFS=$func_echo_IFS +} + + +# func_echo_all ARG... +# -------------------- +# Invoke $ECHO with all args, space-separated. +func_echo_all () +{ + $ECHO "$*" +} + + +# func_echo_infix_1 INFIX ARG... +# ------------------------------ +# Echo program name, followed by INFIX on the first line, with any +# additional lines not showing INFIX. +func_echo_infix_1 () +{ + $debug_cmd + + $require_term_colors + + _G_infix=$1; shift + _G_indent=$_G_infix + _G_prefix="$progname: $_G_infix: " + _G_message=$* + + # Strip color escape sequences before counting printable length + for _G_tc in "$tc_reset" "$tc_bold" "$tc_standout" "$tc_red" "$tc_green" "$tc_blue" "$tc_cyan" + do + test -n "$_G_tc" && { + _G_esc_tc=`$ECHO "$_G_tc" | $SED "$sed_make_literal_regex"` + _G_indent=`$ECHO "$_G_indent" | $SED "s|$_G_esc_tc||g"` + } + done + _G_indent="$progname: "`echo "$_G_indent" | $SED 's|.| |g'`" " ## exclude from sc_prohibit_nested_quotes + + func_echo_infix_1_IFS=$IFS + IFS=$nl + for _G_line in $_G_message; do + IFS=$func_echo_infix_1_IFS + $ECHO "$_G_prefix$tc_bold$_G_line$tc_reset" >&2 + _G_prefix=$_G_indent + done + IFS=$func_echo_infix_1_IFS +} + + +# func_error ARG... +# ----------------- +# Echo program name prefixed message to standard error. +func_error () +{ + $debug_cmd + + $require_term_colors + + func_echo_infix_1 " $tc_standout${tc_red}error$tc_reset" "$*" >&2 +} + + +# func_fatal_error ARG... +# ----------------------- +# Echo program name prefixed message to standard error, and exit. +func_fatal_error () +{ + $debug_cmd + + func_error "$*" + exit $EXIT_FAILURE +} + + +# func_grep EXPRESSION FILENAME +# ----------------------------- +# Check whether EXPRESSION matches any line of FILENAME, without output. +func_grep () +{ + $debug_cmd + + $GREP "$1" "$2" >/dev/null 2>&1 +} + + +# func_len STRING +# --------------- +# Set func_len_result to the length of STRING. STRING may not +# start with a hyphen. 
+ test -z "$_G_HAVE_XSI_OPS" \ + && (eval 'x=a/b/c; + test 5aa/bb/cc = "${#x}${x%%/*}${x%/*}${x#*/}${x##*/}"') 2>/dev/null \ + && _G_HAVE_XSI_OPS=yes + +if test yes = "$_G_HAVE_XSI_OPS"; then + eval 'func_len () + { + $debug_cmd + + func_len_result=${#1} + }' +else + func_len () + { + $debug_cmd + + func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` + } +fi + + +# func_mkdir_p DIRECTORY-PATH +# --------------------------- +# Make sure the entire path to DIRECTORY-PATH is available. +func_mkdir_p () +{ + $debug_cmd + + _G_directory_path=$1 + _G_dir_list= + + if test -n "$_G_directory_path" && test : != "$opt_dry_run"; then + + # Protect directory names starting with '-' + case $_G_directory_path in + -*) _G_directory_path=./$_G_directory_path ;; + esac + + # While some portion of DIR does not yet exist... + while test ! -d "$_G_directory_path"; do + # ...make a list in topmost first order. Use a colon delimited + # list incase some portion of path contains whitespace. + _G_dir_list=$_G_directory_path:$_G_dir_list + + # If the last portion added has no slash in it, the list is done + case $_G_directory_path in */*) ;; *) break ;; esac + + # ...otherwise throw away the child directory and loop + _G_directory_path=`$ECHO "$_G_directory_path" | $SED -e "$sed_dirname"` + done + _G_dir_list=`$ECHO "$_G_dir_list" | $SED 's|:*$||'` + + func_mkdir_p_IFS=$IFS; IFS=: + for _G_dir in $_G_dir_list; do + IFS=$func_mkdir_p_IFS + # mkdir can fail with a 'File exist' error if two processes + # try to create one of the directories concurrently. Don't + # stop in that case! + $MKDIR "$_G_dir" 2>/dev/null || : + done + IFS=$func_mkdir_p_IFS + + # Bail out if we (or some other process) failed to create a directory. + test -d "$_G_directory_path" || \ + func_fatal_error "Failed to create '$1'" + fi +} + + +# func_mktempdir [BASENAME] +# ------------------------- +# Make a temporary directory that won't clash with other running +# libtool processes, and avoids race conditions if possible. If +# given, BASENAME is the basename for that directory. +func_mktempdir () +{ + $debug_cmd + + _G_template=${TMPDIR-/tmp}/${1-$progname} + + if test : = "$opt_dry_run"; then + # Return a directory name, but don't create it in dry-run mode + _G_tmpdir=$_G_template-$$ + else + + # If mktemp works, use that first and foremost + _G_tmpdir=`mktemp -d "$_G_template-XXXXXXXX" 2>/dev/null` + + if test ! -d "$_G_tmpdir"; then + # Failing that, at least try and use $RANDOM to avoid a race + _G_tmpdir=$_G_template-${RANDOM-0}$$ + + func_mktempdir_umask=`umask` + umask 0077 + $MKDIR "$_G_tmpdir" + umask $func_mktempdir_umask + fi + + # If we're not in dry-run mode, bomb out on failure + test -d "$_G_tmpdir" || \ + func_fatal_error "cannot create temporary directory '$_G_tmpdir'" + fi + + $ECHO "$_G_tmpdir" +} + + +# func_normal_abspath PATH +# ------------------------ +# Remove doubled-up and trailing slashes, "." path components, +# and cancel out any ".." path components in PATH after making +# it an absolute path. +func_normal_abspath () +{ + $debug_cmd + + # These SED scripts presuppose an absolute path with a trailing slash. + _G_pathcar='s|^/\([^/]*\).*$|\1|' + _G_pathcdr='s|^/[^/]*||' + _G_removedotparts=':dotsl + s|/\./|/|g + t dotsl + s|/\.$|/|' + _G_collapseslashes='s|/\{1,\}|/|g' + _G_finalslash='s|/*$|/|' + + # Start from root dir and reassemble the path. 
+ func_normal_abspath_result= + func_normal_abspath_tpath=$1 + func_normal_abspath_altnamespace= + case $func_normal_abspath_tpath in + "") + # Empty path, that just means $cwd. + func_stripname '' '/' "`pwd`" + func_normal_abspath_result=$func_stripname_result + return + ;; + # The next three entries are used to spot a run of precisely + # two leading slashes without using negated character classes; + # we take advantage of case's first-match behaviour. + ///*) + # Unusual form of absolute path, do nothing. + ;; + //*) + # Not necessarily an ordinary path; POSIX reserves leading '//' + # and for example Cygwin uses it to access remote file shares + # over CIFS/SMB, so we conserve a leading double slash if found. + func_normal_abspath_altnamespace=/ + ;; + /*) + # Absolute path, do nothing. + ;; + *) + # Relative path, prepend $cwd. + func_normal_abspath_tpath=`pwd`/$func_normal_abspath_tpath + ;; + esac + + # Cancel out all the simple stuff to save iterations. We also want + # the path to end with a slash for ease of parsing, so make sure + # there is one (and only one) here. + func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$_G_removedotparts" -e "$_G_collapseslashes" -e "$_G_finalslash"` + while :; do + # Processed it all yet? + if test / = "$func_normal_abspath_tpath"; then + # If we ascended to the root using ".." the result may be empty now. + if test -z "$func_normal_abspath_result"; then + func_normal_abspath_result=/ + fi + break + fi + func_normal_abspath_tcomponent=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$_G_pathcar"` + func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$_G_pathcdr"` + # Figure out what to do with it + case $func_normal_abspath_tcomponent in + "") + # Trailing empty path component, ignore it. + ;; + ..) + # Parent dir; strip last assembled component from result. + func_dirname "$func_normal_abspath_result" + func_normal_abspath_result=$func_dirname_result + ;; + *) + # Actual path component, append it. + func_append func_normal_abspath_result "/$func_normal_abspath_tcomponent" + ;; + esac + done + # Restore leading double-slash if one was found on entry. + func_normal_abspath_result=$func_normal_abspath_altnamespace$func_normal_abspath_result +} + + +# func_notquiet ARG... +# -------------------- +# Echo program name prefixed message only when not in quiet mode. +func_notquiet () +{ + $debug_cmd + + $opt_quiet || func_echo ${1+"$@"} + + # A bug in bash halts the script if the last line of a function + # fails when set -e is in force, so we need another command to + # work around that: + : +} + + +# func_relative_path SRCDIR DSTDIR +# -------------------------------- +# Set func_relative_path_result to the relative path from SRCDIR to DSTDIR. +func_relative_path () +{ + $debug_cmd + + func_relative_path_result= + func_normal_abspath "$1" + func_relative_path_tlibdir=$func_normal_abspath_result + func_normal_abspath "$2" + func_relative_path_tbindir=$func_normal_abspath_result + + # Ascend the tree starting from libdir + while :; do + # check if we have found a prefix of bindir + case $func_relative_path_tbindir in + $func_relative_path_tlibdir) + # found an exact match + func_relative_path_tcancelled= + break + ;; + $func_relative_path_tlibdir*) + # found a matching prefix + func_stripname "$func_relative_path_tlibdir" '' "$func_relative_path_tbindir" + func_relative_path_tcancelled=$func_stripname_result + if test -z "$func_relative_path_result"; then + func_relative_path_result=. 
+ fi + break + ;; + *) + func_dirname $func_relative_path_tlibdir + func_relative_path_tlibdir=$func_dirname_result + if test -z "$func_relative_path_tlibdir"; then + # Have to descend all the way to the root! + func_relative_path_result=../$func_relative_path_result + func_relative_path_tcancelled=$func_relative_path_tbindir + break + fi + func_relative_path_result=../$func_relative_path_result + ;; + esac + done + + # Now calculate path; take care to avoid doubling-up slashes. + func_stripname '' '/' "$func_relative_path_result" + func_relative_path_result=$func_stripname_result + func_stripname '/' '/' "$func_relative_path_tcancelled" + if test -n "$func_stripname_result"; then + func_append func_relative_path_result "/$func_stripname_result" + fi + + # Normalisation. If bindir is libdir, return '.' else relative path. + if test -n "$func_relative_path_result"; then + func_stripname './' '' "$func_relative_path_result" + func_relative_path_result=$func_stripname_result + fi + + test -n "$func_relative_path_result" || func_relative_path_result=. + + : +} + + +# func_quote_for_eval ARG... +# -------------------------- +# Aesthetically quote ARGs to be evaled later. +# This function returns two values: +# i) func_quote_for_eval_result +# double-quoted, suitable for a subsequent eval +# ii) func_quote_for_eval_unquoted_result +# has all characters that are still active within double +# quotes backslashified. +func_quote_for_eval () +{ + $debug_cmd + + func_quote_for_eval_unquoted_result= + func_quote_for_eval_result= + while test 0 -lt $#; do + case $1 in + *[\\\`\"\$]*) + _G_unquoted_arg=`printf '%s\n' "$1" |$SED "$sed_quote_subst"` ;; + *) + _G_unquoted_arg=$1 ;; + esac + if test -n "$func_quote_for_eval_unquoted_result"; then + func_append func_quote_for_eval_unquoted_result " $_G_unquoted_arg" + else + func_append func_quote_for_eval_unquoted_result "$_G_unquoted_arg" + fi + + case $_G_unquoted_arg in + # Double-quote args containing shell metacharacters to delay + # word splitting, command substitution and variable expansion + # for a subsequent eval. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + _G_quoted_arg=\"$_G_unquoted_arg\" + ;; + *) + _G_quoted_arg=$_G_unquoted_arg + ;; + esac + + if test -n "$func_quote_for_eval_result"; then + func_append func_quote_for_eval_result " $_G_quoted_arg" + else + func_append func_quote_for_eval_result "$_G_quoted_arg" + fi + shift + done +} + + +# func_quote_for_expand ARG +# ------------------------- +# Aesthetically quote ARG to be evaled later; same as above, +# but do not quote variable references. +func_quote_for_expand () +{ + $debug_cmd + + case $1 in + *[\\\`\"]*) + _G_arg=`$ECHO "$1" | $SED \ + -e "$sed_double_quote_subst" -e "$sed_double_backslash"` ;; + *) + _G_arg=$1 ;; + esac + + case $_G_arg in + # Double-quote args containing shell metacharacters to delay + # word splitting and command substitution for a subsequent eval. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + _G_arg=\"$_G_arg\" + ;; + esac + + func_quote_for_expand_result=$_G_arg +} + + +# func_stripname PREFIX SUFFIX NAME +# --------------------------------- +# strip PREFIX and SUFFIX from NAME, and store in func_stripname_result. 
+# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +if test yes = "$_G_HAVE_XSI_OPS"; then + eval 'func_stripname () + { + $debug_cmd + + # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are + # positional parameters, so assign one to ordinary variable first. + func_stripname_result=$3 + func_stripname_result=${func_stripname_result#"$1"} + func_stripname_result=${func_stripname_result%"$2"} + }' +else + func_stripname () + { + $debug_cmd + + case $2 in + .*) func_stripname_result=`$ECHO "$3" | $SED -e "s%^$1%%" -e "s%\\\\$2\$%%"`;; + *) func_stripname_result=`$ECHO "$3" | $SED -e "s%^$1%%" -e "s%$2\$%%"`;; + esac + } +fi + + +# func_show_eval CMD [FAIL_EXP] +# ----------------------------- +# Unless opt_quiet is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. +func_show_eval () +{ + $debug_cmd + + _G_cmd=$1 + _G_fail_exp=${2-':'} + + func_quote_for_expand "$_G_cmd" + eval "func_notquiet $func_quote_for_expand_result" + + $opt_dry_run || { + eval "$_G_cmd" + _G_status=$? + if test 0 -ne "$_G_status"; then + eval "(exit $_G_status); $_G_fail_exp" + fi + } +} + + +# func_show_eval_locale CMD [FAIL_EXP] +# ------------------------------------ +# Unless opt_quiet is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. Use the saved locale for evaluation. +func_show_eval_locale () +{ + $debug_cmd + + _G_cmd=$1 + _G_fail_exp=${2-':'} + + $opt_quiet || { + func_quote_for_expand "$_G_cmd" + eval "func_echo $func_quote_for_expand_result" + } + + $opt_dry_run || { + eval "$_G_user_locale + $_G_cmd" + _G_status=$? + eval "$_G_safe_locale" + if test 0 -ne "$_G_status"; then + eval "(exit $_G_status); $_G_fail_exp" + fi + } +} + + +# func_tr_sh +# ---------- +# Turn $1 into a string suitable for a shell variable name. +# Result is stored in $func_tr_sh_result. All characters +# not in the set a-zA-Z0-9_ are replaced with '_'. Further, +# if $1 begins with a digit, a '_' is prepended as well. +func_tr_sh () +{ + $debug_cmd + + case $1 in + [0-9]* | *[!a-zA-Z0-9_]*) + func_tr_sh_result=`$ECHO "$1" | $SED -e 's/^\([0-9]\)/_\1/' -e 's/[^a-zA-Z0-9_]/_/g'` + ;; + * ) + func_tr_sh_result=$1 + ;; + esac +} + + +# func_verbose ARG... +# ------------------- +# Echo program name prefixed message in verbose mode only. +func_verbose () +{ + $debug_cmd + + $opt_verbose && func_echo "$*" + + : +} + + +# func_warn_and_continue ARG... +# ----------------------------- +# Echo program name prefixed warning message to standard error. +func_warn_and_continue () +{ + $debug_cmd + + $require_term_colors + + func_echo_infix_1 "${tc_red}warning$tc_reset" "$*" >&2 +} + + +# func_warning CATEGORY ARG... +# ---------------------------- +# Echo program name prefixed warning message to standard error. Warning +# messages can be filtered according to CATEGORY, where this function +# elides messages where CATEGORY is not listed in the global variable +# 'opt_warning_types'. +func_warning () +{ + $debug_cmd + + # CATEGORY must be in the warning_categories list! 
+ case " $warning_categories " in + *" $1 "*) ;; + *) func_internal_error "invalid warning category '$1'" ;; + esac + + _G_category=$1 + shift + + case " $opt_warning_types " in + *" $_G_category "*) $warning_func ${1+"$@"} ;; + esac +} + + +# func_sort_ver VER1 VER2 +# ----------------------- +# 'sort -V' is not generally available. +# Note this deviates from the version comparison in automake +# in that it treats 1.5 < 1.5.0, and treats 1.4.4a < 1.4-p3a +# but this should suffice as we won't be specifying old +# version formats or redundant trailing .0 in bootstrap.conf. +# If we did want full compatibility then we should probably +# use m4_version_compare from autoconf. +func_sort_ver () +{ + $debug_cmd + + printf '%s\n%s\n' "$1" "$2" \ + | sort -t. -k 1,1n -k 2,2n -k 3,3n -k 4,4n -k 5,5n -k 6,6n -k 7,7n -k 8,8n -k 9,9n +} + +# func_lt_ver PREV CURR +# --------------------- +# Return true if PREV and CURR are in the correct order according to +# func_sort_ver, otherwise false. Use it like this: +# +# func_lt_ver "$prev_ver" "$proposed_ver" || func_fatal_error "..." +func_lt_ver () +{ + $debug_cmd + + test "x$1" = x`func_sort_ver "$1" "$2" | $SED 1q` +} + + +# Local variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-pattern: "10/scriptversion=%:y-%02m-%02d.%02H; # UTC" +# time-stamp-time-zone: "UTC" +# End: +#! /bin/sh + +# Set a version string for this script. +scriptversion=2015-10-07.11; # UTC + +# A portable, pluggable option parser for Bourne shell. +# Written by Gary V. Vaughan, 2010 + +# Copyright (C) 2010-2015 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# Please report bugs or propose patches to gary@gnu.org. + + +## ------ ## +## Usage. ## +## ------ ## + +# This file is a library for parsing options in your shell scripts along +# with assorted other useful supporting features that you can make use +# of too. +# +# For the simplest scripts you might need only: +# +# #!/bin/sh +# . relative/path/to/funclib.sh +# . relative/path/to/options-parser +# scriptversion=1.0 +# func_options ${1+"$@"} +# eval set dummy "$func_options_result"; shift +# ...rest of your script... +# +# In order for the '--version' option to work, you will need to have a +# suitably formatted comment like the one at the top of this file +# starting with '# Written by ' and ending with '# warranty; '. +# +# For '-h' and '--help' to work, you will also need a one line +# description of your script's purpose in a comment directly above the +# '# Written by ' line, like the one at the top of this file. 
+# +# The default options also support '--debug', which will turn on shell +# execution tracing (see the comment above debug_cmd below for another +# use), and '--verbose' and the func_verbose function to allow your script +# to display verbose messages only when your user has specified +# '--verbose'. +# +# After sourcing this file, you can plug processing for additional +# options by amending the variables from the 'Configuration' section +# below, and following the instructions in the 'Option parsing' +# section further down. + +## -------------- ## +## Configuration. ## +## -------------- ## + +# You should override these variables in your script after sourcing this +# file so that they reflect the customisations you have added to the +# option parser. + +# The usage line for option parsing errors and the start of '-h' and +# '--help' output messages. You can embed shell variables for delayed +# expansion at the time the message is displayed, but you will need to +# quote other shell meta-characters carefully to prevent them being +# expanded when the contents are evaled. +usage='$progpath [OPTION]...' + +# Short help message in response to '-h' and '--help'. Add to this or +# override it after sourcing this library to reflect the full set of +# options your script accepts. +usage_message="\ + --debug enable verbose shell tracing + -W, --warnings=CATEGORY + report the warnings falling in CATEGORY [all] + -v, --verbose verbosely report processing + --version print version information and exit + -h, --help print short or long help message and exit +" + +# Additional text appended to 'usage_message' in response to '--help'. +long_help_message=" +Warning categories include: + 'all' show all warnings + 'none' turn off all the warnings + 'error' warnings are treated as fatal errors" + +# Help message printed before fatal option parsing errors. +fatal_help="Try '\$progname --help' for more information." + + + +## ------------------------- ## +## Hook function management. ## +## ------------------------- ## + +# This section contains functions for adding, removing, and running hooks +# to the main code. A hook is just a named list of of function, that can +# be run in order later on. + +# func_hookable FUNC_NAME +# ----------------------- +# Declare that FUNC_NAME will run hooks added with +# 'func_add_hook FUNC_NAME ...'. +func_hookable () +{ + $debug_cmd + + func_append hookable_fns " $1" +} + + +# func_add_hook FUNC_NAME HOOK_FUNC +# --------------------------------- +# Request that FUNC_NAME call HOOK_FUNC before it returns. FUNC_NAME must +# first have been declared "hookable" by a call to 'func_hookable'. +func_add_hook () +{ + $debug_cmd + + case " $hookable_fns " in + *" $1 "*) ;; + *) func_fatal_error "'$1' does not accept hook functions." ;; + esac + + eval func_append ${1}_hooks '" $2"' +} + + +# func_remove_hook FUNC_NAME HOOK_FUNC +# ------------------------------------ +# Remove HOOK_FUNC from the list of functions called by FUNC_NAME. +func_remove_hook () +{ + $debug_cmd + + eval ${1}_hooks='`$ECHO "\$'$1'_hooks" |$SED "s| '$2'||"`' +} + + +# func_run_hooks FUNC_NAME [ARG]... +# --------------------------------- +# Run all hook functions registered to FUNC_NAME. +# It is assumed that the list of hook functions contains nothing more +# than a whitespace-delimited list of legal shell function names, and +# no effort is wasted trying to catch shell meta-characters or preserve +# whitespace. 
+func_run_hooks () +{ + $debug_cmd + + _G_rc_run_hooks=false + + case " $hookable_fns " in + *" $1 "*) ;; + *) func_fatal_error "'$1' does not support hook funcions.n" ;; + esac + + eval _G_hook_fns=\$$1_hooks; shift + + for _G_hook in $_G_hook_fns; do + if eval $_G_hook '"$@"'; then + # store returned options list back into positional + # parameters for next 'cmd' execution. + eval _G_hook_result=\$${_G_hook}_result + eval set dummy "$_G_hook_result"; shift + _G_rc_run_hooks=: + fi + done + + $_G_rc_run_hooks && func_run_hooks_result=$_G_hook_result +} + + + +## --------------- ## +## Option parsing. ## +## --------------- ## + +# In order to add your own option parsing hooks, you must accept the +# full positional parameter list in your hook function, you may remove/edit +# any options that you action, and then pass back the remaining unprocessed +# options in '_result', escaped suitably for +# 'eval'. In this case you also must return $EXIT_SUCCESS to let the +# hook's caller know that it should pay attention to +# '_result'. Returning $EXIT_FAILURE signalizes that +# arguments are left untouched by the hook and therefore caller will ignore the +# result variable. +# +# Like this: +# +# my_options_prep () +# { +# $debug_cmd +# +# # Extend the existing usage message. +# usage_message=$usage_message' +# -s, --silent don'\''t print informational messages +# ' +# # No change in '$@' (ignored completely by this hook). There is +# # no need to do the equivalent (but slower) action: +# # func_quote_for_eval ${1+"$@"} +# # my_options_prep_result=$func_quote_for_eval_result +# false +# } +# func_add_hook func_options_prep my_options_prep +# +# +# my_silent_option () +# { +# $debug_cmd +# +# args_changed=false +# +# # Note that for efficiency, we parse as many options as we can +# # recognise in a loop before passing the remainder back to the +# # caller on the first unrecognised argument we encounter. +# while test $# -gt 0; do +# opt=$1; shift +# case $opt in +# --silent|-s) opt_silent=: +# args_changed=: +# ;; +# # Separate non-argument short options: +# -s*) func_split_short_opt "$_G_opt" +# set dummy "$func_split_short_opt_name" \ +# "-$func_split_short_opt_arg" ${1+"$@"} +# shift +# args_changed=: +# ;; +# *) # Make sure the first unrecognised option "$_G_opt" +# # is added back to "$@", we could need that later +# # if $args_changed is true. +# set dummy "$_G_opt" ${1+"$@"}; shift; break ;; +# esac +# done +# +# if $args_changed; then +# func_quote_for_eval ${1+"$@"} +# my_silent_option_result=$func_quote_for_eval_result +# fi +# +# $args_changed +# } +# func_add_hook func_parse_options my_silent_option +# +# +# my_option_validation () +# { +# $debug_cmd +# +# $opt_silent && $opt_verbose && func_fatal_help "\ +# '--silent' and '--verbose' options are mutually exclusive." +# +# false +# } +# func_add_hook func_validate_options my_option_validation +# +# You'll also need to manually amend $usage_message to reflect the extra +# options you parse. It's preferable to append if you can, so that +# multiple option parsing hooks can be added safely. + + +# func_options_finish [ARG]... +# ---------------------------- +# Finishing the option parse loop (call 'func_options' hooks ATM). +func_options_finish () +{ + $debug_cmd + + _G_func_options_finish_exit=false + if func_run_hooks func_options ${1+"$@"}; then + func_options_finish_result=$func_run_hooks_result + _G_func_options_finish_exit=: + fi + + $_G_func_options_finish_exit +} + + +# func_options [ARG]... 
+# --------------------- +# All the functions called inside func_options are hookable. See the +# individual implementations for details. +func_hookable func_options +func_options () +{ + $debug_cmd + + _G_rc_options=false + + for my_func in options_prep parse_options validate_options options_finish + do + if eval func_$my_func '${1+"$@"}'; then + eval _G_res_var='$'"func_${my_func}_result" + eval set dummy "$_G_res_var" ; shift + _G_rc_options=: + fi + done + + # Save modified positional parameters for caller. As a top-level + # options-parser function we always need to set the 'func_options_result' + # variable (regardless the $_G_rc_options value). + if $_G_rc_options; then + func_options_result=$_G_res_var + else + func_quote_for_eval ${1+"$@"} + func_options_result=$func_quote_for_eval_result + fi + + $_G_rc_options +} + + +# func_options_prep [ARG]... +# -------------------------- +# All initialisations required before starting the option parse loop. +# Note that when calling hook functions, we pass through the list of +# positional parameters. If a hook function modifies that list, and +# needs to propagate that back to rest of this script, then the complete +# modified list must be put in 'func_run_hooks_result' before +# returning $EXIT_SUCCESS (otherwise $EXIT_FAILURE is returned). +func_hookable func_options_prep +func_options_prep () +{ + $debug_cmd + + # Option defaults: + opt_verbose=false + opt_warning_types= + + _G_rc_options_prep=false + if func_run_hooks func_options_prep ${1+"$@"}; then + _G_rc_options_prep=: + # save modified positional parameters for caller + func_options_prep_result=$func_run_hooks_result + fi + + $_G_rc_options_prep +} + + +# func_parse_options [ARG]... +# --------------------------- +# The main option parsing loop. +func_hookable func_parse_options +func_parse_options () +{ + $debug_cmd + + func_parse_options_result= + + _G_rc_parse_options=false + # this just eases exit handling + while test $# -gt 0; do + # Defer to hook functions for initial option parsing, so they + # get priority in the event of reusing an option name. + if func_run_hooks func_parse_options ${1+"$@"}; then + eval set dummy "$func_run_hooks_result"; shift + _G_rc_parse_options=: + fi + + # Break out of the loop if we already parsed every option. 
+ test $# -gt 0 || break + + _G_match_parse_options=: + _G_opt=$1 + shift + case $_G_opt in + --debug|-x) debug_cmd='set -x' + func_echo "enabling shell trace mode" + $debug_cmd + ;; + + --no-warnings|--no-warning|--no-warn) + set dummy --warnings none ${1+"$@"} + shift + ;; + + --warnings|--warning|-W) + if test $# = 0 && func_missing_arg $_G_opt; then + _G_rc_parse_options=: + break + fi + case " $warning_categories $1" in + *" $1 "*) + # trailing space prevents matching last $1 above + func_append_uniq opt_warning_types " $1" + ;; + *all) + opt_warning_types=$warning_categories + ;; + *none) + opt_warning_types=none + warning_func=: + ;; + *error) + opt_warning_types=$warning_categories + warning_func=func_fatal_error + ;; + *) + func_fatal_error \ + "unsupported warning category: '$1'" + ;; + esac + shift + ;; + + --verbose|-v) opt_verbose=: ;; + --version) func_version ;; + -\?|-h) func_usage ;; + --help) func_help ;; + + # Separate optargs to long options (plugins may need this): + --*=*) func_split_equals "$_G_opt" + set dummy "$func_split_equals_lhs" \ + "$func_split_equals_rhs" ${1+"$@"} + shift + ;; + + # Separate optargs to short options: + -W*) + func_split_short_opt "$_G_opt" + set dummy "$func_split_short_opt_name" \ + "$func_split_short_opt_arg" ${1+"$@"} + shift + ;; + + # Separate non-argument short options: + -\?*|-h*|-v*|-x*) + func_split_short_opt "$_G_opt" + set dummy "$func_split_short_opt_name" \ + "-$func_split_short_opt_arg" ${1+"$@"} + shift + ;; + + --) _G_rc_parse_options=: ; break ;; + -*) func_fatal_help "unrecognised option: '$_G_opt'" ;; + *) set dummy "$_G_opt" ${1+"$@"}; shift + _G_match_parse_options=false + break + ;; + esac + + $_G_match_parse_options && _G_rc_parse_options=: + done + + + if $_G_rc_parse_options; then + # save modified positional parameters for caller + func_quote_for_eval ${1+"$@"} + func_parse_options_result=$func_quote_for_eval_result + fi + + $_G_rc_parse_options +} + + +# func_validate_options [ARG]... +# ------------------------------ +# Perform any sanity checks on option settings and/or unconsumed +# arguments. +func_hookable func_validate_options +func_validate_options () +{ + $debug_cmd + + _G_rc_validate_options=false + + # Display all warnings if -W was not given. + test -n "$opt_warning_types" || opt_warning_types=" $warning_categories" + + if func_run_hooks func_validate_options ${1+"$@"}; then + # save modified positional parameters for caller + func_validate_options_result=$func_run_hooks_result + _G_rc_validate_options=: + fi + + # Bail if the options were screwed! + $exit_cmd $EXIT_FAILURE + + $_G_rc_validate_options +} + + + +## ----------------- ## +## Helper functions. ## +## ----------------- ## + +# This section contains the helper functions used by the rest of the +# hookable option parser framework in ascii-betical order. + + +# func_fatal_help ARG... +# ---------------------- +# Echo program name prefixed message to standard error, followed by +# a help hint, and exit. +func_fatal_help () +{ + $debug_cmd + + eval \$ECHO \""Usage: $usage"\" + eval \$ECHO \""$fatal_help"\" + func_error ${1+"$@"} + exit $EXIT_FAILURE +} + + +# func_help +# --------- +# Echo long help message to standard output and exit. +func_help () +{ + $debug_cmd + + func_usage_message + $ECHO "$long_help_message" + exit 0 +} + + +# func_missing_arg ARGNAME +# ------------------------ +# Echo program name prefixed message to standard error and set global +# exit_cmd. 
+func_missing_arg () +{ + $debug_cmd + + func_error "Missing argument for '$1'." + exit_cmd=exit +} + + +# func_split_equals STRING +# ------------------------ +# Set func_split_equals_lhs and func_split_equals_rhs shell variables after +# splitting STRING at the '=' sign. +test -z "$_G_HAVE_XSI_OPS" \ + && (eval 'x=a/b/c; + test 5aa/bb/cc = "${#x}${x%%/*}${x%/*}${x#*/}${x##*/}"') 2>/dev/null \ + && _G_HAVE_XSI_OPS=yes + +if test yes = "$_G_HAVE_XSI_OPS" +then + # This is an XSI compatible shell, allowing a faster implementation... + eval 'func_split_equals () + { + $debug_cmd + + func_split_equals_lhs=${1%%=*} + func_split_equals_rhs=${1#*=} + test "x$func_split_equals_lhs" = "x$1" \ + && func_split_equals_rhs= + }' +else + # ...otherwise fall back to using expr, which is often a shell builtin. + func_split_equals () + { + $debug_cmd + + func_split_equals_lhs=`expr "x$1" : 'x\([^=]*\)'` + func_split_equals_rhs= + test "x$func_split_equals_lhs" = "x$1" \ + || func_split_equals_rhs=`expr "x$1" : 'x[^=]*=\(.*\)$'` + } +fi #func_split_equals + + +# func_split_short_opt SHORTOPT +# ----------------------------- +# Set func_split_short_opt_name and func_split_short_opt_arg shell +# variables after splitting SHORTOPT after the 2nd character. +if test yes = "$_G_HAVE_XSI_OPS" +then + # This is an XSI compatible shell, allowing a faster implementation... + eval 'func_split_short_opt () + { + $debug_cmd + + func_split_short_opt_arg=${1#??} + func_split_short_opt_name=${1%"$func_split_short_opt_arg"} + }' +else + # ...otherwise fall back to using expr, which is often a shell builtin. + func_split_short_opt () + { + $debug_cmd + + func_split_short_opt_name=`expr "x$1" : 'x-\(.\)'` + func_split_short_opt_arg=`expr "x$1" : 'x-.\(.*\)$'` + } +fi #func_split_short_opt + + +# func_usage +# ---------- +# Echo short help message to standard output and exit. +func_usage () +{ + $debug_cmd + + func_usage_message + $ECHO "Run '$progname --help |${PAGER-more}' for full usage" + exit 0 +} + + +# func_usage_message +# ------------------ +# Echo short help message to standard output. +func_usage_message () +{ + $debug_cmd + + eval \$ECHO \""Usage: $usage"\" + echo + $SED -n 's|^# || + /^Written by/{ + x;p;x + } + h + /^Written by/q' < "$progpath" + echo + eval \$ECHO \""$usage_message"\" +} + + +# func_version +# ------------ +# Echo version message to standard output and exit. +func_version () +{ + $debug_cmd + + printf '%s\n' "$progname $scriptversion" + $SED -n ' + /(C)/!b go + :more + /\./!{ + N + s|\n# | | + b more + } + :go + /^# Written by /,/# warranty; / { + s|^# || + s|^# *$|| + s|\((C)\)[ 0-9,-]*[ ,-]\([1-9][0-9]* \)|\1 \2| + p + } + /^# Written by / { + s|^# || + p + } + /^warranty; /q' < "$progpath" + + exit $? +} + + +# Local variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-pattern: "10/scriptversion=%:y-%02m-%02d.%02H; # UTC" +# time-stamp-time-zone: "UTC" +# End: + +# Set a version string. +scriptversion='(GNU libtool) 2.4.6' + + +# func_echo ARG... +# ---------------- +# Libtool also displays the current mode in messages, so override +# funclib.sh func_echo with this custom definition. +func_echo () +{ + $debug_cmd + + _G_message=$* + + func_echo_IFS=$IFS + IFS=$nl + for _G_line in $_G_message; do + IFS=$func_echo_IFS + $ECHO "$progname${opt_mode+: $opt_mode}: $_G_line" + done + IFS=$func_echo_IFS +} + + +# func_warning ARG... 
+# -------------------
+# Libtool warnings are not categorized, so override funclib.sh
+# func_warning with this simpler definition.
+func_warning ()
+{
+ $debug_cmd
+
+ $warning_func ${1+"$@"}
+}
+
+
+## ---------------- ##
+## Options parsing. ##
+## ---------------- ##
+
+# Hook in the functions to make sure our own options are parsed during
+# the option parsing loop.
+
+usage='$progpath [OPTION]... [MODE-ARG]...'
+
+# Short help message in response to '-h'.
+usage_message="Options:
+ --config show all configuration variables
+ --debug enable verbose shell tracing
+ -n, --dry-run display commands without modifying any files
+ --features display basic configuration information and exit
+ --mode=MODE use operation mode MODE
+ --no-warnings equivalent to '-Wnone'
+ --preserve-dup-deps don't remove duplicate dependency libraries
+ --quiet, --silent don't print informational messages
+ --tag=TAG use configuration variables from tag TAG
+ -v, --verbose print more informational messages than default
+ --version print version information
+ -W, --warnings=CATEGORY report the warnings falling in CATEGORY [all]
+ -h, --help, --help-all print short, long, or detailed help message
+"
+
+# Additional text appended to 'usage_message' in response to '--help'.
+func_help ()
+{
+ $debug_cmd
+
+ func_usage_message
+ $ECHO "$long_help_message
+
+MODE must be one of the following:
+
+ clean remove files from the build directory
+ compile compile a source file into a libtool object
+ execute automatically set library path, then run a program
+ finish complete the installation of libtool libraries
+ install install libraries or executables
+ link create a library or an executable
+ uninstall remove libraries from an installed directory
+
+MODE-ARGS vary depending on the MODE. When passed as first option,
+'--mode=MODE' may be abbreviated as 'MODE' or a unique abbreviation of that.
+Try '$progname --help --mode=MODE' for a more detailed description of MODE.
+
+When reporting a bug, please describe a test case to reproduce it and
+include the following information:
+
+ host-triplet: $host
+ shell: $SHELL
+ compiler: $LTCC
+ compiler flags: $LTCFLAGS
+ linker: $LD (gnu? $with_gnu_ld)
+ version: $progname $scriptversion Debian-2.4.6-15
+ automake: `($AUTOMAKE --version) 2>/dev/null |$SED 1q`
+ autoconf: `($AUTOCONF --version) 2>/dev/null |$SED 1q`
+
+Report bugs to <bug-libtool@gnu.org>.
+GNU libtool home page: <http://www.gnu.org/software/libtool/>.
+General help using GNU software: <http://www.gnu.org/gethelp/>."
+ exit 0
+}
+
+
+# func_lo2o OBJECT-NAME
+# ---------------------
+# Transform OBJECT-NAME from a '.lo' suffix to the platform specific
+# object suffix.
+
+lo2o=s/\\.lo\$/.$objext/
+o2lo=s/\\.$objext\$/.lo/
+
+if test yes = "$_G_HAVE_XSI_OPS"; then
+ eval 'func_lo2o ()
+ {
+ case $1 in
+ *.lo) func_lo2o_result=${1%.lo}.$objext ;;
+ * ) func_lo2o_result=$1 ;;
+ esac
+ }'
+
+ # func_xform LIBOBJ-OR-SOURCE
+ # ---------------------------
+ # Transform LIBOBJ-OR-SOURCE from a '.o' or '.c' (or otherwise)
+ # suffix to a '.lo' libtool-object suffix.
+ eval 'func_xform ()
+ {
+ func_xform_result=${1%.*}.lo
+ }'
+else
+ # ...otherwise fall back to using sed.
+ func_lo2o ()
+ {
+ func_lo2o_result=`$ECHO "$1" | $SED "$lo2o"`
+ }
+
+ func_xform ()
+ {
+ func_xform_result=`$ECHO "$1" | $SED 's|\.[^.]*$|.lo|'`
+ }
+fi
+
+
+# func_fatal_configuration ARG...
+# -------------------------------
+# Echo program name prefixed message to standard error, followed by
+# a configuration failure hint, and exit.
+func_fatal_configuration ()
+{
+ func_fatal_error ${1+"$@"} \
+ "See the $PACKAGE documentation for more information." \
+ "Fatal configuration error."
+}
+
+
+# func_config
+# -----------
+# Display the configuration for all the tags in this script.
+func_config ()
+{
+ re_begincf='^# ### BEGIN LIBTOOL'
+ re_endcf='^# ### END LIBTOOL'
+
+ # Default configuration.
+ $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath"
+
+ # Now print the configurations for the tags.
+ for tagname in $taglist; do
+ $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath"
+ done
+
+ exit $?
+}
+
+
+# func_features
+# -------------
+# Display the features supported by this script.
+func_features ()
+{
+ echo "host: $host"
+ if test yes = "$build_libtool_libs"; then
+ echo "enable shared libraries"
+ else
+ echo "disable shared libraries"
+ fi
+ if test yes = "$build_old_libs"; then
+ echo "enable static libraries"
+ else
+ echo "disable static libraries"
+ fi
+
+ exit $?
+}
+
+
+# func_enable_tag TAGNAME
+# -----------------------
+# Verify that TAGNAME is valid, and either flag an error and exit, or
+# enable the TAGNAME tag. We also add TAGNAME to the global $taglist
+# variable here.
+func_enable_tag ()
+{
+ # Global variable:
+ tagname=$1
+
+ re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$"
+ re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$"
+ sed_extractcf=/$re_begincf/,/$re_endcf/p
+
+ # Validate tagname.
+ case $tagname in
+ *[!-_A-Za-z0-9,/]*)
+ func_fatal_error "invalid tag name: $tagname"
+ ;;
+ esac
+
+ # Don't test for the "default" C tag, as we know it's
+ # there but not specially marked.
+ case $tagname in
+ CC) ;;
+ *)
+ if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then
+ taglist="$taglist $tagname"
+
+ # Evaluate the configuration. Be careful to quote the path
+ # and the sed script, to avoid splitting on whitespace, but
+ # also don't use non-portable quotes within backquotes within
+ # quotes we have to do it in 2 steps:
+ extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"`
+ eval "$extractedcf"
+ else
+ func_error "ignoring unknown tag $tagname"
+ fi
+ ;;
+ esac
+}
+
+
+# func_check_version_match
+# ------------------------
+# Ensure that we are using m4 macros, and libtool script from the same
+# release of libtool.
+func_check_version_match ()
+{
+ if test "$package_revision" != "$macro_revision"; then
+ if test "$VERSION" != "$macro_version"; then
+ if test -z "$macro_version"; then
+ cat >&2 <<_LT_EOF
+$progname: Version mismatch error. This is $PACKAGE $VERSION, but the
+$progname: definition of this LT_INIT comes from an older release.
+$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION
+$progname: and run autoconf again.
+_LT_EOF
+ else
+ cat >&2 <<_LT_EOF
+$progname: Version mismatch error. This is $PACKAGE $VERSION, but the
+$progname: definition of this LT_INIT comes from $PACKAGE $macro_version.
+$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION
+$progname: and run autoconf again.
+_LT_EOF
+ fi
+ else
+ cat >&2 <<_LT_EOF
+$progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision,
+$progname: but the definition of this LT_INIT comes from revision $macro_revision.
+$progname: You should recreate aclocal.m4 with macros from revision $package_revision
+$progname: of $PACKAGE $VERSION and run autoconf again.
+_LT_EOF
+ fi
+
+ exit $EXIT_MISMATCH
+ fi
+}
+
+
+# libtool_options_prep [ARG]...
+# ----------------------------- +# Preparation for options parsed by libtool. +libtool_options_prep () +{ + $debug_mode + + # Option defaults: + opt_config=false + opt_dlopen= + opt_dry_run=false + opt_help=false + opt_mode= + opt_preserve_dup_deps=false + opt_quiet=false + + nonopt= + preserve_args= + + _G_rc_lt_options_prep=: + + # Shorthand for --mode=foo, only valid as the first argument + case $1 in + clean|clea|cle|cl) + shift; set dummy --mode clean ${1+"$@"}; shift + ;; + compile|compil|compi|comp|com|co|c) + shift; set dummy --mode compile ${1+"$@"}; shift + ;; + execute|execut|execu|exec|exe|ex|e) + shift; set dummy --mode execute ${1+"$@"}; shift + ;; + finish|finis|fini|fin|fi|f) + shift; set dummy --mode finish ${1+"$@"}; shift + ;; + install|instal|insta|inst|ins|in|i) + shift; set dummy --mode install ${1+"$@"}; shift + ;; + link|lin|li|l) + shift; set dummy --mode link ${1+"$@"}; shift + ;; + uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) + shift; set dummy --mode uninstall ${1+"$@"}; shift + ;; + *) + _G_rc_lt_options_prep=false + ;; + esac + + if $_G_rc_lt_options_prep; then + # Pass back the list of options. + func_quote_for_eval ${1+"$@"} + libtool_options_prep_result=$func_quote_for_eval_result + fi + + $_G_rc_lt_options_prep +} +func_add_hook func_options_prep libtool_options_prep + + +# libtool_parse_options [ARG]... +# --------------------------------- +# Provide handling for libtool specific options. +libtool_parse_options () +{ + $debug_cmd + + _G_rc_lt_parse_options=false + + # Perform our own loop to consume as many options as possible in + # each iteration. + while test $# -gt 0; do + _G_match_lt_parse_options=: + _G_opt=$1 + shift + case $_G_opt in + --dry-run|--dryrun|-n) + opt_dry_run=: + ;; + + --config) func_config ;; + + --dlopen|-dlopen) + opt_dlopen="${opt_dlopen+$opt_dlopen +}$1" + shift + ;; + + --preserve-dup-deps) + opt_preserve_dup_deps=: ;; + + --features) func_features ;; + + --finish) set dummy --mode finish ${1+"$@"}; shift ;; + + --help) opt_help=: ;; + + --help-all) opt_help=': help-all' ;; + + --mode) test $# = 0 && func_missing_arg $_G_opt && break + opt_mode=$1 + case $1 in + # Valid mode arguments: + clean|compile|execute|finish|install|link|relink|uninstall) ;; + + # Catch anything else as an error + *) func_error "invalid argument for $_G_opt" + exit_cmd=exit + break + ;; + esac + shift + ;; + + --no-silent|--no-quiet) + opt_quiet=false + func_append preserve_args " $_G_opt" + ;; + + --no-warnings|--no-warning|--no-warn) + opt_warning=false + func_append preserve_args " $_G_opt" + ;; + + --no-verbose) + opt_verbose=false + func_append preserve_args " $_G_opt" + ;; + + --silent|--quiet) + opt_quiet=: + opt_verbose=false + func_append preserve_args " $_G_opt" + ;; + + --tag) test $# = 0 && func_missing_arg $_G_opt && break + opt_tag=$1 + func_append preserve_args " $_G_opt $1" + func_enable_tag "$1" + shift + ;; + + --verbose|-v) opt_quiet=false + opt_verbose=: + func_append preserve_args " $_G_opt" + ;; + + # An option not handled by this hook function: + *) set dummy "$_G_opt" ${1+"$@"} ; shift + _G_match_lt_parse_options=false + break + ;; + esac + $_G_match_lt_parse_options && _G_rc_lt_parse_options=: + done + + if $_G_rc_lt_parse_options; then + # save modified positional parameters for caller + func_quote_for_eval ${1+"$@"} + libtool_parse_options_result=$func_quote_for_eval_result + fi + + $_G_rc_lt_parse_options +} +func_add_hook func_parse_options libtool_parse_options + + + +# libtool_validate_options [ARG]... 
+# --------------------------------- +# Perform any sanity checks on option settings and/or unconsumed +# arguments. +libtool_validate_options () +{ + # save first non-option argument + if test 0 -lt $#; then + nonopt=$1 + shift + fi + + # preserve --debug + test : = "$debug_cmd" || func_append preserve_args " --debug" + + case $host in + # Solaris2 added to fix http://debbugs.gnu.org/cgi/bugreport.cgi?bug=16452 + # see also: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=59788 + *cygwin* | *mingw* | *pw32* | *cegcc* | *solaris2* | *os2*) + # don't eliminate duplications in $postdeps and $predeps + opt_duplicate_compiler_generated_deps=: + ;; + *) + opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps + ;; + esac + + $opt_help || { + # Sanity checks first: + func_check_version_match + + test yes != "$build_libtool_libs" \ + && test yes != "$build_old_libs" \ + && func_fatal_configuration "not configured to build any kind of library" + + # Darwin sucks + eval std_shrext=\"$shrext_cmds\" + + # Only execute mode is allowed to have -dlopen flags. + if test -n "$opt_dlopen" && test execute != "$opt_mode"; then + func_error "unrecognized option '-dlopen'" + $ECHO "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Change the help message to a mode-specific one. + generic_help=$help + help="Try '$progname --help --mode=$opt_mode' for more information." + } + + # Pass back the unparsed argument list + func_quote_for_eval ${1+"$@"} + libtool_validate_options_result=$func_quote_for_eval_result +} +func_add_hook func_validate_options libtool_validate_options + + +# Process options as early as possible so that --help and --version +# can return quickly. +func_options ${1+"$@"} +eval set dummy "$func_options_result"; shift + + + +## ----------- ## +## Main. ## +## ----------- ## + +magic='%%%MAGIC variable%%%' +magic_exe='%%%MAGIC EXE variable%%%' + +# Global variables. +extracted_archives= +extracted_serial=0 + +# If this variable is set in any of the actions, the command in it +# will be execed at the end. This prevents here-documents from being +# left over by shells. +exec_cmd= + + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' +} + +# func_generated_by_libtool +# True iff stdin has been generated by Libtool. This function is only +# a basic sanity check; it will hardly flush out determined imposters. +func_generated_by_libtool_p () +{ + $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1 +} + +# func_lalib_p file +# True iff FILE is a libtool '.la' library or '.lo' object file. +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_lalib_p () +{ + test -f "$1" && + $SED -e 4q "$1" 2>/dev/null | func_generated_by_libtool_p +} + +# func_lalib_unsafe_p file +# True iff FILE is a libtool '.la' library or '.lo' object file. +# This function implements the same check as func_lalib_p without +# resorting to external programs. To this end, it redirects stdin and +# closes it afterwards, without saving the original file descriptor. +# As a safety measure, use it only where a negative result would be +# fatal anyway. Works if 'file' does not exist. 
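+#
+# For illustration only (the variable name '$file' below is hypothetical),
+# callers in this script use it as a guard of the form:
+#
+#    func_lalib_unsafe_p "$file" \
+#      || func_fatal_help "'$file' is not a valid libtool archive"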
+func_lalib_unsafe_p () +{ + lalib_p=no + if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then + for lalib_p_l in 1 2 3 4 + do + read lalib_p_line + case $lalib_p_line in + \#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;; + esac + done + exec 0<&5 5<&- + fi + test yes = "$lalib_p" +} + +# func_ltwrapper_script_p file +# True iff FILE is a libtool wrapper script +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_script_p () +{ + test -f "$1" && + $lt_truncate_bin < "$1" 2>/dev/null | func_generated_by_libtool_p +} + +# func_ltwrapper_executable_p file +# True iff FILE is a libtool wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_executable_p () +{ + func_ltwrapper_exec_suffix= + case $1 in + *.exe) ;; + *) func_ltwrapper_exec_suffix=.exe ;; + esac + $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1 +} + +# func_ltwrapper_scriptname file +# Assumes file is an ltwrapper_executable +# uses $file to determine the appropriate filename for a +# temporary ltwrapper_script. +func_ltwrapper_scriptname () +{ + func_dirname_and_basename "$1" "" "." + func_stripname '' '.exe' "$func_basename_result" + func_ltwrapper_scriptname_result=$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper +} + +# func_ltwrapper_p file +# True iff FILE is a libtool wrapper script or wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_p () +{ + func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1" +} + + +# func_execute_cmds commands fail_cmd +# Execute tilde-delimited COMMANDS. +# If FAIL_CMD is given, eval that upon failure. +# FAIL_CMD may read-access the current command in variable CMD! +func_execute_cmds () +{ + $debug_cmd + + save_ifs=$IFS; IFS='~' + for cmd in $1; do + IFS=$sp$nl + eval cmd=\"$cmd\" + IFS=$save_ifs + func_show_eval "$cmd" "${2-:}" + done + IFS=$save_ifs +} + + +# func_source file +# Source FILE, adding directory component if necessary. +# Note that it is not necessary on cygwin/mingw to append a dot to +# FILE even if both FILE and FILE.exe exist: automatic-append-.exe +# behavior happens only for exec(3), not for open(2)! Also, sourcing +# 'FILE.' does not work on cygwin managed mounts. +func_source () +{ + $debug_cmd + + case $1 in + */* | *\\*) . "$1" ;; + *) . "./$1" ;; + esac +} + + +# func_resolve_sysroot PATH +# Replace a leading = in PATH with a sysroot. Store the result into +# func_resolve_sysroot_result +func_resolve_sysroot () +{ + func_resolve_sysroot_result=$1 + case $func_resolve_sysroot_result in + =*) + func_stripname '=' '' "$func_resolve_sysroot_result" + func_resolve_sysroot_result=$lt_sysroot$func_stripname_result + ;; + esac +} + +# func_replace_sysroot PATH +# If PATH begins with the sysroot, replace it with = and +# store the result into func_replace_sysroot_result. +func_replace_sysroot () +{ + case $lt_sysroot:$1 in + ?*:"$lt_sysroot"*) + func_stripname "$lt_sysroot" '' "$1" + func_replace_sysroot_result='='$func_stripname_result + ;; + *) + # Including no sysroot. + func_replace_sysroot_result=$1 + ;; + esac +} + +# func_infer_tag arg +# Infer tagged configuration to use if any are available and +# if one wasn't chosen via the "--tag" command line option. +# Only attempt this if the compiler in the base compile +# command doesn't match the default compiler. +# arg is usually of the form 'gcc ...' 
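+#
+# For example (an illustrative sketch, not output from a real run): with
+# CC='gcc', an invocation such as
+#
+#    libtool --mode=compile g++ -c foo.cpp
+#
+# has a base compile command that does not start with $CC, so the tagged
+# configurations recorded at the end of this script (e.g. CXX) are searched
+# for one whose CC matches 'g++', and that tag is selected.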
+func_infer_tag () +{ + $debug_cmd + + if test -n "$available_tags" && test -z "$tagname"; then + CC_quoted= + for arg in $CC; do + func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` + case $@ in + # Blanks in the command may have been stripped by the calling shell, + # but not from the CC environment variable when configure was run. + " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ + " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) ;; + # Blanks at the start of $base_compile will cause this to fail + # if we don't check for them as well. + *) + for z in $available_tags; do + if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then + # Evaluate the configuration. + eval "`$SED -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" + CC_quoted= + for arg in $CC; do + # Double-quote args containing other shell metacharacters. + func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` + case "$@ " in + " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ + " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) + # The compiler in the base compile command matches + # the one in the tagged configuration. + # Assume this is the tagged configuration we want. + tagname=$z + break + ;; + esac + fi + done + # If $tagname still isn't set, then no tagged configuration + # was found and let the user know that the "--tag" command + # line option must be used. + if test -z "$tagname"; then + func_echo "unable to infer tagged configuration" + func_fatal_error "specify a tag with '--tag'" +# else +# func_verbose "using $tagname tagged configuration" + fi + ;; + esac + fi +} + + + +# func_write_libtool_object output_name pic_name nonpic_name +# Create a libtool object file (analogous to a ".la" file), +# but don't create it if we're doing a dry run. +func_write_libtool_object () +{ + write_libobj=$1 + if test yes = "$build_libtool_libs"; then + write_lobj=\'$2\' + else + write_lobj=none + fi + + if test yes = "$build_old_libs"; then + write_oldobj=\'$3\' + else + write_oldobj=none + fi + + $opt_dry_run || { + cat >${write_libobj}T </dev/null` + if test "$?" -eq 0 && test -n "$func_convert_core_file_wine_to_w32_tmp"; then + func_convert_core_file_wine_to_w32_result=`$ECHO "$func_convert_core_file_wine_to_w32_tmp" | + $SED -e "$sed_naive_backslashify"` + else + func_convert_core_file_wine_to_w32_result= + fi + fi +} +# end: func_convert_core_file_wine_to_w32 + + +# func_convert_core_path_wine_to_w32 ARG +# Helper function used by path conversion functions when $build is *nix, and +# $host is mingw, cygwin, or some other w32 environment. Relies on a correctly +# configured wine environment available, with the winepath program in $build's +# $PATH. Assumes ARG has no leading or trailing path separator characters. +# +# ARG is path to be converted from $build format to win32. +# Result is available in $func_convert_core_path_wine_to_w32_result. +# Unconvertible file (directory) names in ARG are skipped; if no directory names +# are convertible, then the result may be empty. 
+func_convert_core_path_wine_to_w32 () +{ + $debug_cmd + + # unfortunately, winepath doesn't convert paths, only file names + func_convert_core_path_wine_to_w32_result= + if test -n "$1"; then + oldIFS=$IFS + IFS=: + for func_convert_core_path_wine_to_w32_f in $1; do + IFS=$oldIFS + func_convert_core_file_wine_to_w32 "$func_convert_core_path_wine_to_w32_f" + if test -n "$func_convert_core_file_wine_to_w32_result"; then + if test -z "$func_convert_core_path_wine_to_w32_result"; then + func_convert_core_path_wine_to_w32_result=$func_convert_core_file_wine_to_w32_result + else + func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result" + fi + fi + done + IFS=$oldIFS + fi +} +# end: func_convert_core_path_wine_to_w32 + + +# func_cygpath ARGS... +# Wrapper around calling the cygpath program via LT_CYGPATH. This is used when +# when (1) $build is *nix and Cygwin is hosted via a wine environment; or (2) +# $build is MSYS and $host is Cygwin, or (3) $build is Cygwin. In case (1) or +# (2), returns the Cygwin file name or path in func_cygpath_result (input +# file name or path is assumed to be in w32 format, as previously converted +# from $build's *nix or MSYS format). In case (3), returns the w32 file name +# or path in func_cygpath_result (input file name or path is assumed to be in +# Cygwin format). Returns an empty string on error. +# +# ARGS are passed to cygpath, with the last one being the file name or path to +# be converted. +# +# Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH +# environment variable; do not put it in $PATH. +func_cygpath () +{ + $debug_cmd + + if test -n "$LT_CYGPATH" && test -f "$LT_CYGPATH"; then + func_cygpath_result=`$LT_CYGPATH "$@" 2>/dev/null` + if test "$?" -ne 0; then + # on failure, ensure result is empty + func_cygpath_result= + fi + else + func_cygpath_result= + func_error "LT_CYGPATH is empty or specifies non-existent file: '$LT_CYGPATH'" + fi +} +#end: func_cygpath + + +# func_convert_core_msys_to_w32 ARG +# Convert file name or path ARG from MSYS format to w32 format. Return +# result in func_convert_core_msys_to_w32_result. +func_convert_core_msys_to_w32 () +{ + $debug_cmd + + # awkward: cmd appends spaces to result + func_convert_core_msys_to_w32_result=`( cmd //c echo "$1" ) 2>/dev/null | + $SED -e 's/[ ]*$//' -e "$sed_naive_backslashify"` +} +#end: func_convert_core_msys_to_w32 + + +# func_convert_file_check ARG1 ARG2 +# Verify that ARG1 (a file name in $build format) was converted to $host +# format in ARG2. Otherwise, emit an error message, but continue (resetting +# func_to_host_file_result to ARG1). +func_convert_file_check () +{ + $debug_cmd + + if test -z "$2" && test -n "$1"; then + func_error "Could not determine host file name corresponding to" + func_error " '$1'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback: + func_to_host_file_result=$1 + fi +} +# end func_convert_file_check + + +# func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH +# Verify that FROM_PATH (a path in $build format) was converted to $host +# format in TO_PATH. Otherwise, emit an error message, but continue, resetting +# func_to_host_file_result to a simplistic fallback value (see below). +func_convert_path_check () +{ + $debug_cmd + + if test -z "$4" && test -n "$3"; then + func_error "Could not determine the host path corresponding to" + func_error " '$3'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback. 
This is a deliberately simplistic "conversion" and + # should not be "improved". See libtool.info. + if test "x$1" != "x$2"; then + lt_replace_pathsep_chars="s|$1|$2|g" + func_to_host_path_result=`echo "$3" | + $SED -e "$lt_replace_pathsep_chars"` + else + func_to_host_path_result=$3 + fi + fi +} +# end func_convert_path_check + + +# func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG +# Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT +# and appending REPL if ORIG matches BACKPAT. +func_convert_path_front_back_pathsep () +{ + $debug_cmd + + case $4 in + $1 ) func_to_host_path_result=$3$func_to_host_path_result + ;; + esac + case $4 in + $2 ) func_append func_to_host_path_result "$3" + ;; + esac +} +# end func_convert_path_front_back_pathsep + + +################################################## +# $build to $host FILE NAME CONVERSION FUNCTIONS # +################################################## +# invoked via '$to_host_file_cmd ARG' +# +# In each case, ARG is the path to be converted from $build to $host format. +# Result will be available in $func_to_host_file_result. + + +# func_to_host_file ARG +# Converts the file name ARG from $build format to $host format. Return result +# in func_to_host_file_result. +func_to_host_file () +{ + $debug_cmd + + $to_host_file_cmd "$1" +} +# end func_to_host_file + + +# func_to_tool_file ARG LAZY +# converts the file name ARG from $build format to toolchain format. Return +# result in func_to_tool_file_result. If the conversion in use is listed +# in (the comma separated) LAZY, no conversion takes place. +func_to_tool_file () +{ + $debug_cmd + + case ,$2, in + *,"$to_tool_file_cmd",*) + func_to_tool_file_result=$1 + ;; + *) + $to_tool_file_cmd "$1" + func_to_tool_file_result=$func_to_host_file_result + ;; + esac +} +# end func_to_tool_file + + +# func_convert_file_noop ARG +# Copy ARG to func_to_host_file_result. +func_convert_file_noop () +{ + func_to_host_file_result=$1 +} +# end func_convert_file_noop + + +# func_convert_file_msys_to_w32 ARG +# Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic +# conversion to w32 is not available inside the cwrapper. Returns result in +# func_to_host_file_result. +func_convert_file_msys_to_w32 () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + func_convert_core_msys_to_w32 "$1" + func_to_host_file_result=$func_convert_core_msys_to_w32_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_msys_to_w32 + + +# func_convert_file_cygwin_to_w32 ARG +# Convert file name ARG from Cygwin to w32 format. Returns result in +# func_to_host_file_result. +func_convert_file_cygwin_to_w32 () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + # because $build is cygwin, we call "the" cygpath in $PATH; no need to use + # LT_CYGPATH in this case. + func_to_host_file_result=`cygpath -m "$1"` + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_cygwin_to_w32 + + +# func_convert_file_nix_to_w32 ARG +# Convert file name ARG from *nix to w32 format. Requires a wine environment +# and a working winepath. Returns result in func_to_host_file_result. 
+func_convert_file_nix_to_w32 () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + func_convert_core_file_wine_to_w32 "$1" + func_to_host_file_result=$func_convert_core_file_wine_to_w32_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_nix_to_w32 + + +# func_convert_file_msys_to_cygwin ARG +# Convert file name ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. +# Returns result in func_to_host_file_result. +func_convert_file_msys_to_cygwin () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + func_convert_core_msys_to_w32 "$1" + func_cygpath -u "$func_convert_core_msys_to_w32_result" + func_to_host_file_result=$func_cygpath_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_msys_to_cygwin + + +# func_convert_file_nix_to_cygwin ARG +# Convert file name ARG from *nix to Cygwin format. Requires Cygwin installed +# in a wine environment, working winepath, and LT_CYGPATH set. Returns result +# in func_to_host_file_result. +func_convert_file_nix_to_cygwin () +{ + $debug_cmd + + func_to_host_file_result=$1 + if test -n "$1"; then + # convert from *nix to w32, then use cygpath to convert from w32 to cygwin. + func_convert_core_file_wine_to_w32 "$1" + func_cygpath -u "$func_convert_core_file_wine_to_w32_result" + func_to_host_file_result=$func_cygpath_result + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_nix_to_cygwin + + +############################################# +# $build to $host PATH CONVERSION FUNCTIONS # +############################################# +# invoked via '$to_host_path_cmd ARG' +# +# In each case, ARG is the path to be converted from $build to $host format. +# The result will be available in $func_to_host_path_result. +# +# Path separators are also converted from $build format to $host format. If +# ARG begins or ends with a path separator character, it is preserved (but +# converted to $host format) on output. +# +# All path conversion functions are named using the following convention: +# file name conversion function : func_convert_file_X_to_Y () +# path conversion function : func_convert_path_X_to_Y () +# where, for any given $build/$host combination the 'X_to_Y' value is the +# same. If conversion functions are added for new $build/$host combinations, +# the two new functions must follow this pattern, or func_init_to_host_path_cmd +# will break. + + +# func_init_to_host_path_cmd +# Ensures that function "pointer" variable $to_host_path_cmd is set to the +# appropriate value, based on the value of $to_host_file_cmd. +to_host_path_cmd= +func_init_to_host_path_cmd () +{ + $debug_cmd + + if test -z "$to_host_path_cmd"; then + func_stripname 'func_convert_file_' '' "$to_host_file_cmd" + to_host_path_cmd=func_convert_path_$func_stripname_result + fi +} + + +# func_to_host_path ARG +# Converts the path ARG from $build format to $host format. Return result +# in func_to_host_path_result. +func_to_host_path () +{ + $debug_cmd + + func_init_to_host_path_cmd + $to_host_path_cmd "$1" +} +# end func_to_host_path + + +# func_convert_path_noop ARG +# Copy ARG to func_to_host_path_result. +func_convert_path_noop () +{ + func_to_host_path_result=$1 +} +# end func_convert_path_noop + + +# func_convert_path_msys_to_w32 ARG +# Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic +# conversion to w32 is not available inside the cwrapper. Returns result in +# func_to_host_path_result. 
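+#
+# As an illustrative sketch of the dispatch convention described above
+# (the cygwin values shown are hypothetical, not taken from a real run):
+# if configure selected
+#
+#    to_host_file_cmd=func_convert_file_cygwin_to_w32
+#
+# then func_init_to_host_path_cmd strips the 'func_convert_file_' prefix
+# and derives
+#
+#    to_host_path_cmd=func_convert_path_cygwin_to_w32
+#
+# so that func_to_host_path "/usr/lib:/usr/local/lib" leaves a w32-style,
+# ';'-separated path in $func_to_host_path_result.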
+func_convert_path_msys_to_w32 () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # Remove leading and trailing path separator characters from ARG. MSYS + # behavior is inconsistent here; cygpath turns them into '.;' and ';.'; + # and winepath ignores them completely. + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" + func_to_host_path_result=$func_convert_core_msys_to_w32_result + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_msys_to_w32 + + +# func_convert_path_cygwin_to_w32 ARG +# Convert path ARG from Cygwin to w32 format. Returns result in +# func_to_host_file_result. +func_convert_path_cygwin_to_w32 () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_to_host_path_result=`cygpath -m -p "$func_to_host_path_tmp1"` + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_cygwin_to_w32 + + +# func_convert_path_nix_to_w32 ARG +# Convert path ARG from *nix to w32 format. Requires a wine environment and +# a working winepath. Returns result in func_to_host_file_result. +func_convert_path_nix_to_w32 () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" + func_to_host_path_result=$func_convert_core_path_wine_to_w32_result + func_convert_path_check : ";" \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" + fi +} +# end func_convert_path_nix_to_w32 + + +# func_convert_path_msys_to_cygwin ARG +# Convert path ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. +# Returns result in func_to_host_file_result. +func_convert_path_msys_to_cygwin () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # See func_convert_path_msys_to_w32: + func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" + func_cygpath -u -p "$func_convert_core_msys_to_w32_result" + func_to_host_path_result=$func_cygpath_result + func_convert_path_check : : \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" : "$1" + fi +} +# end func_convert_path_msys_to_cygwin + + +# func_convert_path_nix_to_cygwin ARG +# Convert path ARG from *nix to Cygwin format. Requires Cygwin installed in a +# a wine environment, working winepath, and LT_CYGPATH set. Returns result in +# func_to_host_file_result. +func_convert_path_nix_to_cygwin () +{ + $debug_cmd + + func_to_host_path_result=$1 + if test -n "$1"; then + # Remove leading and trailing path separator characters from + # ARG. msys behavior is inconsistent here, cygpath turns them + # into '.;' and ';.', and winepath ignores them completely. 
+ func_stripname : : "$1" + func_to_host_path_tmp1=$func_stripname_result + func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" + func_cygpath -u -p "$func_convert_core_path_wine_to_w32_result" + func_to_host_path_result=$func_cygpath_result + func_convert_path_check : : \ + "$func_to_host_path_tmp1" "$func_to_host_path_result" + func_convert_path_front_back_pathsep ":*" "*:" : "$1" + fi +} +# end func_convert_path_nix_to_cygwin + + +# func_dll_def_p FILE +# True iff FILE is a Windows DLL '.def' file. +# Keep in sync with _LT_DLL_DEF_P in libtool.m4 +func_dll_def_p () +{ + $debug_cmd + + func_dll_def_p_tmp=`$SED -n \ + -e 's/^[ ]*//' \ + -e '/^\(;.*\)*$/d' \ + -e 's/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p' \ + -e q \ + "$1"` + test DEF = "$func_dll_def_p_tmp" +} + + +# func_mode_compile arg... +func_mode_compile () +{ + $debug_cmd + + # Get the compilation command and the source file. + base_compile= + srcfile=$nonopt # always keep a non-empty value in "srcfile" + suppress_opt=yes + suppress_output= + arg_mode=normal + libobj= + later= + pie_flag= + + for arg + do + case $arg_mode in + arg ) + # do not "continue". Instead, add this to base_compile + lastarg=$arg + arg_mode=normal + ;; + + target ) + libobj=$arg + arg_mode=normal + continue + ;; + + normal ) + # Accept any command-line options. + case $arg in + -o) + test -n "$libobj" && \ + func_fatal_error "you cannot specify '-o' more than once" + arg_mode=target + continue + ;; + + -pie | -fpie | -fPIE) + func_append pie_flag " $arg" + continue + ;; + + -shared | -static | -prefer-pic | -prefer-non-pic) + func_append later " $arg" + continue + ;; + + -no-suppress) + suppress_opt=no + continue + ;; + + -Xcompiler) + arg_mode=arg # the next one goes into the "base_compile" arg list + continue # The current "srcfile" will either be retained or + ;; # replaced later. I would guess that would be a bug. + + -Wc,*) + func_stripname '-Wc,' '' "$arg" + args=$func_stripname_result + lastarg= + save_ifs=$IFS; IFS=, + for arg in $args; do + IFS=$save_ifs + func_append_quoted lastarg "$arg" + done + IFS=$save_ifs + func_stripname ' ' '' "$lastarg" + lastarg=$func_stripname_result + + # Add the arguments to base_compile. + func_append base_compile " $lastarg" + continue + ;; + + *) + # Accept the current argument as the source file. + # The previous "srcfile" becomes the current argument. + # + lastarg=$srcfile + srcfile=$arg + ;; + esac # case $arg + ;; + esac # case $arg_mode + + # Aesthetically quote the previous argument. + func_append_quoted base_compile "$lastarg" + done # for arg + + case $arg_mode in + arg) + func_fatal_error "you must specify an argument for -Xcompile" + ;; + target) + func_fatal_error "you must specify a target with '-o'" + ;; + *) + # Get the name of the library object. + test -z "$libobj" && { + func_basename "$srcfile" + libobj=$func_basename_result + } + ;; + esac + + # Recognize several different file suffixes. + # If the user specifies -o file.o, it is replaced with file.lo + case $libobj in + *.[cCFSifmso] | \ + *.ada | *.adb | *.ads | *.asm | \ + *.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \ + *.[fF][09]? 
| *.for | *.java | *.go | *.obj | *.sx | *.cu | *.cup) + func_xform "$libobj" + libobj=$func_xform_result + ;; + esac + + case $libobj in + *.lo) func_lo2o "$libobj"; obj=$func_lo2o_result ;; + *) + func_fatal_error "cannot determine name of library object from '$libobj'" + ;; + esac + + func_infer_tag $base_compile + + for arg in $later; do + case $arg in + -shared) + test yes = "$build_libtool_libs" \ + || func_fatal_configuration "cannot build a shared library" + build_old_libs=no + continue + ;; + + -static) + build_libtool_libs=no + build_old_libs=yes + continue + ;; + + -prefer-pic) + pic_mode=yes + continue + ;; + + -prefer-non-pic) + pic_mode=no + continue + ;; + esac + done + + func_quote_for_eval "$libobj" + test "X$libobj" != "X$func_quote_for_eval_result" \ + && $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"' &()|`$[]' \ + && func_warning "libobj name '$libobj' may not contain shell special characters." + func_dirname_and_basename "$obj" "/" "" + objname=$func_basename_result + xdir=$func_dirname_result + lobj=$xdir$objdir/$objname + + test -z "$base_compile" && \ + func_fatal_help "you must specify a compilation command" + + # Delete any leftover library objects. + if test yes = "$build_old_libs"; then + removelist="$obj $lobj $libobj ${libobj}T" + else + removelist="$lobj $libobj ${libobj}T" + fi + + # On Cygwin there's no "real" PIC flag so we must build both object types + case $host_os in + cygwin* | mingw* | pw32* | os2* | cegcc*) + pic_mode=default + ;; + esac + if test no = "$pic_mode" && test pass_all != "$deplibs_check_method"; then + # non-PIC code in shared libraries is not supported + pic_mode=default + fi + + # Calculate the filename of the output object if compiler does + # not support -o with -c + if test no = "$compiler_c_o"; then + output_obj=`$ECHO "$srcfile" | $SED 's%^.*/%%; s%\.[^.]*$%%'`.$objext + lockfile=$output_obj.lock + else + output_obj= + need_locks=no + lockfile= + fi + + # Lock this critical section if it is needed + # We use this script file to make the link, it avoids creating a new file + if test yes = "$need_locks"; then + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" + sleep 2 + done + elif test warn = "$need_locks"; then + if test -f "$lockfile"; then + $ECHO "\ +*** ERROR, $lockfile exists and contains: +`cat $lockfile 2>/dev/null` + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support '-c' and '-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + func_append removelist " $output_obj" + $ECHO "$srcfile" > "$lockfile" + fi + + $opt_dry_run || $RM $removelist + func_append removelist " $lockfile" + trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15 + + func_to_tool_file "$srcfile" func_convert_file_msys_to_w32 + srcfile=$func_to_tool_file_result + func_quote_for_eval "$srcfile" + qsrcfile=$func_quote_for_eval_result + + # Only build a PIC object if we are building libtool libraries. + if test yes = "$build_libtool_libs"; then + # Without this assignment, base_compile gets emptied. 
+ fbsd_hideous_sh_bug=$base_compile + + if test no != "$pic_mode"; then + command="$base_compile $qsrcfile $pic_flag" + else + # Don't build PIC code + command="$base_compile $qsrcfile" + fi + + func_mkdir_p "$xdir$objdir" + + if test -z "$output_obj"; then + # Place PIC objects in $objdir + func_append command " -o $lobj" + fi + + func_show_eval_locale "$command" \ + 'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE' + + if test warn = "$need_locks" && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support '-c' and '-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed, then go on to compile the next one + if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then + func_show_eval '$MV "$output_obj" "$lobj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + + # Allow error messages only from the first compilation. + if test yes = "$suppress_opt"; then + suppress_output=' >/dev/null 2>&1' + fi + fi + + # Only build a position-dependent object if we build old libraries. + if test yes = "$build_old_libs"; then + if test yes != "$pic_mode"; then + # Don't build PIC code + command="$base_compile $qsrcfile$pie_flag" + else + command="$base_compile $qsrcfile $pic_flag" + fi + if test yes = "$compiler_c_o"; then + func_append command " -o $obj" + fi + + # Suppress compiler output if we already did a PIC compilation. + func_append command "$suppress_output" + func_show_eval_locale "$command" \ + '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' + + if test warn = "$need_locks" && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support '-c' and '-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed + if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then + func_show_eval '$MV "$output_obj" "$obj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + fi + + $opt_dry_run || { + func_write_libtool_object "$libobj" "$objdir/$objname" "$objname" + + # Unlock the critical section if it was locked + if test no != "$need_locks"; then + removelist=$lockfile + $RM "$lockfile" + fi + } + + exit $EXIT_SUCCESS +} + +$opt_help || { + test compile = "$opt_mode" && func_mode_compile ${1+"$@"} +} + +func_mode_help () +{ + # We need to display help for each of the modes. + case $opt_mode in + "") + # Generic help is extracted from the usage comments + # at the start of this file. + func_help + ;; + + clean) + $ECHO \ +"Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE... + +Remove files from the build directory. 
+ +RM is the name of the program to use to delete files associated with each FILE +(typically '/bin/rm'). RM-OPTIONS are options (such as '-f') to be passed +to RM. + +If FILE is a libtool library, object or program, all the files associated +with it are deleted. Otherwise, only FILE itself is deleted using RM." + ;; + + compile) + $ECHO \ +"Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE + +Compile a source file into a libtool library object. + +This mode accepts the following additional options: + + -o OUTPUT-FILE set the output file name to OUTPUT-FILE + -no-suppress do not suppress compiler output for multiple passes + -prefer-pic try to build PIC objects only + -prefer-non-pic try to build non-PIC objects only + -shared do not build a '.o' file suitable for static linking + -static only build a '.o' file suitable for static linking + -Wc,FLAG pass FLAG directly to the compiler + +COMPILE-COMMAND is a command to be used in creating a 'standard' object file +from the given SOURCEFILE. + +The output file name is determined by removing the directory component from +SOURCEFILE, then substituting the C source code suffix '.c' with the +library object suffix, '.lo'." + ;; + + execute) + $ECHO \ +"Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]... + +Automatically set library path, then run a program. + +This mode accepts the following additional options: + + -dlopen FILE add the directory containing FILE to the library path + +This mode sets the library path environment variable according to '-dlopen' +flags. + +If any of the ARGS are libtool executable wrappers, then they are translated +into their corresponding uninstalled binary, and any of their required library +directories are added to the library path. + +Then, COMMAND is executed, with ARGS as arguments." + ;; + + finish) + $ECHO \ +"Usage: $progname [OPTION]... --mode=finish [LIBDIR]... + +Complete the installation of libtool libraries. + +Each LIBDIR is a directory that contains libtool libraries. + +The commands that this mode executes may require superuser privileges. Use +the '--dry-run' option if you just want to see what would be executed." + ;; + + install) + $ECHO \ +"Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND... + +Install executables or libraries. + +INSTALL-COMMAND is the installation command. The first component should be +either the 'install' or 'cp' program. + +The following components of INSTALL-COMMAND are treated specially: + + -inst-prefix-dir PREFIX-DIR Use PREFIX-DIR as a staging area for installation + +The rest of the components are interpreted as arguments to that command (only +BSD-compatible install options are recognized)." + ;; + + link) + $ECHO \ +"Usage: $progname [OPTION]... --mode=link LINK-COMMAND... + +Link object files or libraries together to form another library, or to +create an executable program. + +LINK-COMMAND is a command using the C compiler that you would use to create +a program from several object files. 
+ +The following components of LINK-COMMAND are treated specially: + + -all-static do not do any dynamic linking at all + -avoid-version do not add a version suffix if possible + -bindir BINDIR specify path to binaries directory (for systems where + libraries must be found in the PATH setting at runtime) + -dlopen FILE '-dlpreopen' FILE if it cannot be dlopened at runtime + -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols + -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) + -export-symbols SYMFILE + try to export only the symbols listed in SYMFILE + -export-symbols-regex REGEX + try to export only the symbols matching REGEX + -LLIBDIR search LIBDIR for required installed libraries + -lNAME OUTPUT-FILE requires the installed library libNAME + -module build a library that can dlopened + -no-fast-install disable the fast-install mode + -no-install link a not-installable executable + -no-undefined declare that a library does not refer to external symbols + -o OUTPUT-FILE create OUTPUT-FILE from the specified objects + -objectlist FILE use a list of object files found in FILE to specify objects + -os2dllname NAME force a short DLL name on OS/2 (no effect on other OSes) + -precious-files-regex REGEX + don't remove output files matching REGEX + -release RELEASE specify package release information + -rpath LIBDIR the created library will eventually be installed in LIBDIR + -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries + -shared only do dynamic linking of libtool libraries + -shrext SUFFIX override the standard shared library file extension + -static do not do any dynamic linking of uninstalled libtool libraries + -static-libtool-libs + do not do any dynamic linking of libtool libraries + -version-info CURRENT[:REVISION[:AGE]] + specify library version info [each variable defaults to 0] + -weak LIBNAME declare that the target provides the LIBNAME interface + -Wc,FLAG + -Xcompiler FLAG pass linker-specific FLAG directly to the compiler + -Wl,FLAG + -Xlinker FLAG pass linker-specific FLAG directly to the linker + -XCClinker FLAG pass link-specific FLAG to the compiler driver (CC) + +All other options (arguments beginning with '-') are ignored. + +Every other argument is treated as a filename. Files ending in '.la' are +treated as uninstalled libtool libraries, other files are standard or library +object files. + +If the OUTPUT-FILE ends in '.la', then a libtool library is created, +only library objects ('.lo' files) may be specified, and '-rpath' is +required, except when creating a convenience library. + +If OUTPUT-FILE ends in '.a' or '.lib', then a standard library is created +using 'ar' and 'ranlib', or on Windows using 'lib'. + +If OUTPUT-FILE ends in '.lo' or '.$objext', then a reloadable object file +is created, otherwise an executable program is created." + ;; + + uninstall) + $ECHO \ +"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... + +Remove libraries from an installation directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically '/bin/rm'). RM-OPTIONS are options (such as '-f') to be passed +to RM. + +If FILE is a libtool library, all the files associated with it are deleted. +Otherwise, only FILE itself is deleted using RM." + ;; + + *) + func_fatal_help "invalid operation mode '$opt_mode'" + ;; + esac + + echo + $ECHO "Try '$progname --help' for more information about other modes." 
+} + +# Now that we've collected a possible --mode arg, show help if necessary +if $opt_help; then + if test : = "$opt_help"; then + func_mode_help + else + { + func_help noexit + for opt_mode in compile link execute install finish uninstall clean; do + func_mode_help + done + } | $SED -n '1p; 2,$s/^Usage:/ or: /p' + { + func_help noexit + for opt_mode in compile link execute install finish uninstall clean; do + echo + func_mode_help + done + } | + $SED '1d + /^When reporting/,/^Report/{ + H + d + } + $x + /information about other modes/d + /more detailed .*MODE/d + s/^Usage:.*--mode=\([^ ]*\) .*/Description of \1 mode:/' + fi + exit $? +fi + + +# func_mode_execute arg... +func_mode_execute () +{ + $debug_cmd + + # The first argument is the command name. + cmd=$nonopt + test -z "$cmd" && \ + func_fatal_help "you must specify a COMMAND" + + # Handle -dlopen flags immediately. + for file in $opt_dlopen; do + test -f "$file" \ + || func_fatal_help "'$file' is not a file" + + dir= + case $file in + *.la) + func_resolve_sysroot "$file" + file=$func_resolve_sysroot_result + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "'$lib' is not a valid libtool archive" + + # Read the libtool library. + dlname= + library_names= + func_source "$file" + + # Skip this library if it cannot be dlopened. + if test -z "$dlname"; then + # Warn if it was a shared library. + test -n "$library_names" && \ + func_warning "'$file' was not linked with '-export-dynamic'" + continue + fi + + func_dirname "$file" "" "." + dir=$func_dirname_result + + if test -f "$dir/$objdir/$dlname"; then + func_append dir "/$objdir" + else + if test ! -f "$dir/$dlname"; then + func_fatal_error "cannot find '$dlname' in '$dir' or '$dir/$objdir'" + fi + fi + ;; + + *.lo) + # Just add the directory containing the .lo file. + func_dirname "$file" "" "." + dir=$func_dirname_result + ;; + + *) + func_warning "'-dlopen' is ignored for non-libtool libraries and objects" + continue + ;; + esac + + # Get the absolute pathname. + absdir=`cd "$dir" && pwd` + test -n "$absdir" && dir=$absdir + + # Now add the directory to shlibpath_var. + if eval "test -z \"\$$shlibpath_var\""; then + eval "$shlibpath_var=\"\$dir\"" + else + eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" + fi + done + + # This variable tells wrapper scripts just to set shlibpath_var + # rather than running their programs. + libtool_execute_magic=$magic + + # Check if any of the arguments is a wrapper script. + args= + for file + do + case $file in + -* | *.la | *.lo ) ;; + *) + # Do a test to see if this is really a libtool program. + if func_ltwrapper_script_p "$file"; then + func_source "$file" + # Transform arg to wrapped name. + file=$progdir/$program + elif func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + func_source "$func_ltwrapper_scriptname_result" + # Transform arg to wrapped name. + file=$progdir/$program + fi + ;; + esac + # Quote arguments (to preserve shell metacharacters). + func_append_quoted args "$file" + done + + if $opt_dry_run; then + # Display what would be done. + if test -n "$shlibpath_var"; then + eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\"" + echo "export $shlibpath_var" + fi + $ECHO "$cmd$args" + exit $EXIT_SUCCESS + else + if test -n "$shlibpath_var"; then + # Export the shlibpath_var. 
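+      # (Comment added for clarity, not upstream text.)  $shlibpath_var is the
+      # platform's run-time library search path variable -- typically
+      # LD_LIBRARY_PATH on ELF systems, DYLD_LIBRARY_PATH on Mac OS X and
+      # PATH on Windows -- so the directories collected from the -dlopen
+      # arguments above become visible to the dynamic loader when the
+      # command is finally exec'd.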
+ eval "export $shlibpath_var" + fi + + # Restore saved environment variables + for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES + do + eval "if test \"\${save_$lt_var+set}\" = set; then + $lt_var=\$save_$lt_var; export $lt_var + else + $lt_unset $lt_var + fi" + done + + # Now prepare to actually exec the command. + exec_cmd=\$cmd$args + fi +} + +test execute = "$opt_mode" && func_mode_execute ${1+"$@"} + + +# func_mode_finish arg... +func_mode_finish () +{ + $debug_cmd + + libs= + libdirs= + admincmds= + + for opt in "$nonopt" ${1+"$@"} + do + if test -d "$opt"; then + func_append libdirs " $opt" + + elif test -f "$opt"; then + if func_lalib_unsafe_p "$opt"; then + func_append libs " $opt" + else + func_warning "'$opt' is not a valid libtool archive" + fi + + else + func_fatal_error "invalid argument '$opt'" + fi + done + + if test -n "$libs"; then + if test -n "$lt_sysroot"; then + sysroot_regex=`$ECHO "$lt_sysroot" | $SED "$sed_make_literal_regex"` + sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;" + else + sysroot_cmd= + fi + + # Remove sysroot references + if $opt_dry_run; then + for lib in $libs; do + echo "removing references to $lt_sysroot and '=' prefixes from $lib" + done + else + tmpdir=`func_mktempdir` + for lib in $libs; do + $SED -e "$sysroot_cmd s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \ + > $tmpdir/tmp-la + mv -f $tmpdir/tmp-la $lib + done + ${RM}r "$tmpdir" + fi + fi + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + for libdir in $libdirs; do + if test -n "$finish_cmds"; then + # Do each command in the finish commands. + func_execute_cmds "$finish_cmds" 'admincmds="$admincmds +'"$cmd"'"' + fi + if test -n "$finish_eval"; then + # Do the single finish_eval. + eval cmds=\"$finish_eval\" + $opt_dry_run || eval "$cmds" || func_append admincmds " + $cmds" + fi + done + fi + + # Exit here if they wanted silent mode. + $opt_quiet && exit $EXIT_SUCCESS + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + echo "----------------------------------------------------------------------" + echo "Libraries have been installed in:" + for libdir in $libdirs; do + $ECHO " $libdir" + done + echo + echo "If you ever happen to want to link against installed libraries" + echo "in a given directory, LIBDIR, you must either use libtool, and" + echo "specify the full pathname of the library, or use the '-LLIBDIR'" + echo "flag during linking and do at least one of the following:" + if test -n "$shlibpath_var"; then + echo " - add LIBDIR to the '$shlibpath_var' environment variable" + echo " during execution" + fi + if test -n "$runpath_var"; then + echo " - add LIBDIR to the '$runpath_var' environment variable" + echo " during linking" + fi + if test -n "$hardcode_libdir_flag_spec"; then + libdir=LIBDIR + eval flag=\"$hardcode_libdir_flag_spec\" + + $ECHO " - use the '$flag' linker flag" + fi + if test -n "$admincmds"; then + $ECHO " - have your system administrator run these commands:$admincmds" + fi + if test -f /etc/ld.so.conf; then + echo " - have your system administrator add LIBDIR to '/etc/ld.so.conf'" + fi + echo + + echo "See any operating system documentation about shared libraries for" + case $host in + solaris2.[6789]|solaris2.1[0-9]) + echo "more information, such as the ld(1), crle(1) and ld.so(8) manual" + echo "pages." + ;; + *) + echo "more information, such as the ld(1) and ld.so(8) manual pages." 
+ ;; + esac + echo "----------------------------------------------------------------------" + fi + exit $EXIT_SUCCESS +} + +test finish = "$opt_mode" && func_mode_finish ${1+"$@"} + + +# func_mode_install arg... +func_mode_install () +{ + $debug_cmd + + # There may be an optional sh(1) argument at the beginning of + # install_prog (especially on Windows NT). + if test "$SHELL" = "$nonopt" || test /bin/sh = "$nonopt" || + # Allow the use of GNU shtool's install command. + case $nonopt in *shtool*) :;; *) false;; esac + then + # Aesthetically quote it. + func_quote_for_eval "$nonopt" + install_prog="$func_quote_for_eval_result " + arg=$1 + shift + else + install_prog= + arg=$nonopt + fi + + # The real first argument should be the name of the installation program. + # Aesthetically quote it. + func_quote_for_eval "$arg" + func_append install_prog "$func_quote_for_eval_result" + install_shared_prog=$install_prog + case " $install_prog " in + *[\\\ /]cp\ *) install_cp=: ;; + *) install_cp=false ;; + esac + + # We need to accept at least all the BSD install flags. + dest= + files= + opts= + prev= + install_type= + isdir=false + stripme= + no_mode=: + for arg + do + arg2= + if test -n "$dest"; then + func_append files " $dest" + dest=$arg + continue + fi + + case $arg in + -d) isdir=: ;; + -f) + if $install_cp; then :; else + prev=$arg + fi + ;; + -g | -m | -o) + prev=$arg + ;; + -s) + stripme=" -s" + continue + ;; + -*) + ;; + *) + # If the previous option needed an argument, then skip it. + if test -n "$prev"; then + if test X-m = "X$prev" && test -n "$install_override_mode"; then + arg2=$install_override_mode + no_mode=false + fi + prev= + else + dest=$arg + continue + fi + ;; + esac + + # Aesthetically quote the argument. + func_quote_for_eval "$arg" + func_append install_prog " $func_quote_for_eval_result" + if test -n "$arg2"; then + func_quote_for_eval "$arg2" + fi + func_append install_shared_prog " $func_quote_for_eval_result" + done + + test -z "$install_prog" && \ + func_fatal_help "you must specify an install program" + + test -n "$prev" && \ + func_fatal_help "the '$prev' option requires an argument" + + if test -n "$install_override_mode" && $no_mode; then + if $install_cp; then :; else + func_quote_for_eval "$install_override_mode" + func_append install_shared_prog " -m $func_quote_for_eval_result" + fi + fi + + if test -z "$files"; then + if test -z "$dest"; then + func_fatal_help "no file or destination specified" + else + func_fatal_help "you must specify a destination" + fi + fi + + # Strip any trailing slash from the destination. + func_stripname '' '/' "$dest" + dest=$func_stripname_result + + # Check to see that the destination is a directory. + test -d "$dest" && isdir=: + if $isdir; then + destdir=$dest + destname= + else + func_dirname_and_basename "$dest" "" "." + destdir=$func_dirname_result + destname=$func_basename_result + + # Not a directory, so check to see that there is only one file specified. + set dummy $files; shift + test "$#" -gt 1 && \ + func_fatal_help "'$dest' is not a directory" + fi + case $destdir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + for file in $files; do + case $file in + *.lo) ;; + *) + func_fatal_help "'$destdir' must be an absolute directory name" + ;; + esac + done + ;; + esac + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic=$magic + + staticlibs= + future_libdirs= + current_libdirs= + for file in $files; do + + # Do each installation. 
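+    # (Comment added for clarity, not upstream text.)  For a libtool archive,
+    # the func_source call in the '*.la' branch below reads values such as,
+    # for example,
+    #   library_names='libfoo.so.1.2.3 libfoo.so.1 libfoo.so'
+    #   old_library='libfoo.a'
+    #   libdir='/usr/local/lib'
+    # and those values drive the per-file installation steps that follow;
+    # the names shown here are placeholders.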
+ case $file in + *.$libext) + # Do the static libraries later. + func_append staticlibs " $file" + ;; + + *.la) + func_resolve_sysroot "$file" + file=$func_resolve_sysroot_result + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "'$file' is not a valid libtool archive" + + library_names= + old_library= + relink_command= + func_source "$file" + + # Add the libdir to current_libdirs if it is the destination. + if test "X$destdir" = "X$libdir"; then + case "$current_libdirs " in + *" $libdir "*) ;; + *) func_append current_libdirs " $libdir" ;; + esac + else + # Note the libdir as a future libdir. + case "$future_libdirs " in + *" $libdir "*) ;; + *) func_append future_libdirs " $libdir" ;; + esac + fi + + func_dirname "$file" "/" "" + dir=$func_dirname_result + func_append dir "$objdir" + + if test -n "$relink_command"; then + # Determine the prefix the user has applied to our future dir. + inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"` + + # Don't allow the user to place us outside of our expected + # location b/c this prevents finding dependent libraries that + # are installed to the same prefix. + # At present, this check doesn't affect windows .dll's that + # are installed into $libdir/../bin (currently, that works fine) + # but it's something to keep an eye on. + test "$inst_prefix_dir" = "$destdir" && \ + func_fatal_error "error: cannot install '$file' to a directory not ending in $libdir" + + if test -n "$inst_prefix_dir"; then + # Stick the inst_prefix_dir data into the link command. + relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` + else + relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%%"` + fi + + func_warning "relinking '$file'" + func_show_eval "$relink_command" \ + 'func_fatal_error "error: relink '\''$file'\'' with the above command before installing it"' + fi + + # See the names of the shared library. + set dummy $library_names; shift + if test -n "$1"; then + realname=$1 + shift + + srcname=$realname + test -n "$relink_command" && srcname=${realname}T + + # Install the shared library and build the symlinks. + func_show_eval "$install_shared_prog $dir/$srcname $destdir/$realname" \ + 'exit $?' + tstripme=$stripme + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + case $realname in + *.dll.a) + tstripme= + ;; + esac + ;; + os2*) + case $realname in + *_dll.a) + tstripme= + ;; + esac + ;; + esac + if test -n "$tstripme" && test -n "$striplib"; then + func_show_eval "$striplib $destdir/$realname" 'exit $?' + fi + + if test "$#" -gt 0; then + # Delete the old symlinks, and create new ones. + # Try 'ln -sf' first, because the 'ln' binary might depend on + # the symlink we replace! Solaris /bin/ln does not understand -f, + # so we also need to try rm && ln -s. + for linkname + do + test "$linkname" != "$realname" \ + && func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })" + done + fi + + # Do each command in the postinstall commands. + lib=$destdir/$realname + func_execute_cmds "$postinstall_cmds" 'exit $?' + fi + + # Install the pseudo-library for information purposes. + func_basename "$file" + name=$func_basename_result + instname=$dir/${name}i + func_show_eval "$install_prog $instname $destdir/$name" 'exit $?' + + # Maybe install the static library, too. 
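+	  # (Comment added for clarity, not upstream text.)  The static archive
+	  # named by old_library in the .la file (for example libfoo.a) is only
+	  # queued on the next line; it is copied in the $staticlibs pass near
+	  # the end of this function, where $old_postinstall_cmds (typically a
+	  # ranlib invocation) is run on the installed copy.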
+ test -n "$old_library" && func_append staticlibs " $dir/$old_library" + ;; + + *.lo) + # Install (i.e. copy) a libtool object. + + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile=$destdir/$destname + else + func_basename "$file" + destfile=$func_basename_result + destfile=$destdir/$destfile + fi + + # Deduce the name of the destination old-style object file. + case $destfile in + *.lo) + func_lo2o "$destfile" + staticdest=$func_lo2o_result + ;; + *.$objext) + staticdest=$destfile + destfile= + ;; + *) + func_fatal_help "cannot copy a libtool object to '$destfile'" + ;; + esac + + # Install the libtool object if requested. + test -n "$destfile" && \ + func_show_eval "$install_prog $file $destfile" 'exit $?' + + # Install the old object if enabled. + if test yes = "$build_old_libs"; then + # Deduce the name of the old-style object file. + func_lo2o "$file" + staticobj=$func_lo2o_result + func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?' + fi + exit $EXIT_SUCCESS + ;; + + *) + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile=$destdir/$destname + else + func_basename "$file" + destfile=$func_basename_result + destfile=$destdir/$destfile + fi + + # If the file is missing, and there is a .exe on the end, strip it + # because it is most likely a libtool script we actually want to + # install + stripped_ext= + case $file in + *.exe) + if test ! -f "$file"; then + func_stripname '' '.exe' "$file" + file=$func_stripname_result + stripped_ext=.exe + fi + ;; + esac + + # Do a test to see if this is really a libtool program. + case $host in + *cygwin* | *mingw*) + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + wrapper=$func_ltwrapper_scriptname_result + else + func_stripname '' '.exe' "$file" + wrapper=$func_stripname_result + fi + ;; + *) + wrapper=$file + ;; + esac + if func_ltwrapper_script_p "$wrapper"; then + notinst_deplibs= + relink_command= + + func_source "$wrapper" + + # Check the variables that should have been set. + test -z "$generated_by_libtool_version" && \ + func_fatal_error "invalid libtool wrapper script '$wrapper'" + + finalize=: + for lib in $notinst_deplibs; do + # Check to see that each library is installed. + libdir= + if test -f "$lib"; then + func_source "$lib" + fi + libfile=$libdir/`$ECHO "$lib" | $SED 's%^.*/%%g'` + if test -n "$libdir" && test ! -f "$libfile"; then + func_warning "'$lib' has not been installed in '$libdir'" + finalize=false + fi + done + + relink_command= + func_source "$wrapper" + + outputname= + if test no = "$fast_install" && test -n "$relink_command"; then + $opt_dry_run || { + if $finalize; then + tmpdir=`func_mktempdir` + func_basename "$file$stripped_ext" + file=$func_basename_result + outputname=$tmpdir/$file + # Replace the output file specification. + relink_command=`$ECHO "$relink_command" | $SED 's%@OUTPUT@%'"$outputname"'%g'` + + $opt_quiet || { + func_quote_for_expand "$relink_command" + eval "func_echo $func_quote_for_expand_result" + } + if eval "$relink_command"; then : + else + func_error "error: relink '$file' with the above command before installing it" + $opt_dry_run || ${RM}r "$tmpdir" + continue + fi + file=$outputname + else + func_warning "cannot relink '$file'" + fi + } + else + # Install the binary that we compiled earlier. 
+ file=`$ECHO "$file$stripped_ext" | $SED "s%\([^/]*\)$%$objdir/\1%"` + fi + fi + + # remove .exe since cygwin /usr/bin/install will append another + # one anyway + case $install_prog,$host in + */usr/bin/install*,*cygwin*) + case $file:$destfile in + *.exe:*.exe) + # this is ok + ;; + *.exe:*) + destfile=$destfile.exe + ;; + *:*.exe) + func_stripname '' '.exe' "$destfile" + destfile=$func_stripname_result + ;; + esac + ;; + esac + func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?' + $opt_dry_run || if test -n "$outputname"; then + ${RM}r "$tmpdir" + fi + ;; + esac + done + + for file in $staticlibs; do + func_basename "$file" + name=$func_basename_result + + # Set up the ranlib parameters. + oldlib=$destdir/$name + func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 + tool_oldlib=$func_to_tool_file_result + + func_show_eval "$install_prog \$file \$oldlib" 'exit $?' + + if test -n "$stripme" && test -n "$old_striplib"; then + func_show_eval "$old_striplib $tool_oldlib" 'exit $?' + fi + + # Do each command in the postinstall commands. + func_execute_cmds "$old_postinstall_cmds" 'exit $?' + done + + test -n "$future_libdirs" && \ + func_warning "remember to run '$progname --finish$future_libdirs'" + + if test -n "$current_libdirs"; then + # Maybe just do a dry run. + $opt_dry_run && current_libdirs=" -n$current_libdirs" + exec_cmd='$SHELL "$progpath" $preserve_args --finish$current_libdirs' + else + exit $EXIT_SUCCESS + fi +} + +test install = "$opt_mode" && func_mode_install ${1+"$@"} + + +# func_generate_dlsyms outputname originator pic_p +# Extract symbols from dlprefiles and create ${outputname}S.o with +# a dlpreopen symbol table. +func_generate_dlsyms () +{ + $debug_cmd + + my_outputname=$1 + my_originator=$2 + my_pic_p=${3-false} + my_prefix=`$ECHO "$my_originator" | $SED 's%[^a-zA-Z0-9]%_%g'` + my_dlsyms= + + if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then + if test -n "$NM" && test -n "$global_symbol_pipe"; then + my_dlsyms=${my_outputname}S.c + else + func_error "not configured to extract global symbols from dlpreopened files" + fi + fi + + if test -n "$my_dlsyms"; then + case $my_dlsyms in + "") ;; + *.c) + # Discover the nlist of each of the dlfiles. + nlist=$output_objdir/$my_outputname.nm + + func_show_eval "$RM $nlist ${nlist}S ${nlist}T" + + # Parse the name list into a source file. + func_verbose "creating $output_objdir/$my_dlsyms" + + $opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\ +/* $my_dlsyms - symbol resolution table for '$my_outputname' dlsym emulation. */ +/* Generated by $PROGRAM (GNU $PACKAGE) $VERSION */ + +#ifdef __cplusplus +extern \"C\" { +#endif + +#if defined __GNUC__ && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4)) +#pragma GCC diagnostic ignored \"-Wstrict-prototypes\" +#endif + +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE +/* DATA imports from DLLs on WIN32 can't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT_DLSYM_CONST +#elif defined __osf__ +/* This system does not cope well with relocations in const data. */ +# define LT_DLSYM_CONST +#else +# define LT_DLSYM_CONST const +#endif + +#define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0) + +/* External symbol declarations for the compiler. 
*/\ +" + + if test yes = "$dlself"; then + func_verbose "generating symbol list for '$output'" + + $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist" + + # Add our own program objects to the symbol list. + progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP` + for progfile in $progfiles; do + func_to_tool_file "$progfile" func_convert_file_msys_to_w32 + func_verbose "extracting global C symbols from '$func_to_tool_file_result'" + $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'" + done + + if test -n "$exclude_expsyms"; then + $opt_dry_run || { + eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } + fi + + if test -n "$export_symbols_regex"; then + $opt_dry_run || { + eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } + fi + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + export_symbols=$output_objdir/$outputname.exp + $opt_dry_run || { + $RM $export_symbols + eval "$SED -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' + case $host in + *cygwin* | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"' + ;; + esac + } + else + $opt_dry_run || { + eval "$SED -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' + eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + case $host in + *cygwin* | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$nlist" >> "$output_objdir/$outputname.def"' + ;; + esac + } + fi + fi + + for dlprefile in $dlprefiles; do + func_verbose "extracting global C symbols from '$dlprefile'" + func_basename "$dlprefile" + name=$func_basename_result + case $host in + *cygwin* | *mingw* | *cegcc* ) + # if an import library, we need to obtain dlname + if func_win32_import_lib_p "$dlprefile"; then + func_tr_sh "$dlprefile" + eval "curr_lafile=\$libfile_$func_tr_sh_result" + dlprefile_dlbasename= + if test -n "$curr_lafile" && func_lalib_p "$curr_lafile"; then + # Use subshell, to avoid clobbering current variable values + dlprefile_dlname=`source "$curr_lafile" && echo "$dlname"` + if test -n "$dlprefile_dlname"; then + func_basename "$dlprefile_dlname" + dlprefile_dlbasename=$func_basename_result + else + # no lafile. user explicitly requested -dlpreopen . 
+ $sharedlib_from_linklib_cmd "$dlprefile" + dlprefile_dlbasename=$sharedlib_from_linklib_result + fi + fi + $opt_dry_run || { + if test -n "$dlprefile_dlbasename"; then + eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"' + else + func_warning "Could not compute DLL name from $name" + eval '$ECHO ": $name " >> "$nlist"' + fi + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe | + $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'" + } + else # not an import lib + $opt_dry_run || { + eval '$ECHO ": $name " >> "$nlist"' + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" + } + fi + ;; + *) + $opt_dry_run || { + eval '$ECHO ": $name " >> "$nlist"' + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" + } + ;; + esac + done + + $opt_dry_run || { + # Make sure we have at least an empty file. + test -f "$nlist" || : > "$nlist" + + if test -n "$exclude_expsyms"; then + $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T + $MV "$nlist"T "$nlist" + fi + + # Try sorting and uniquifying the output. + if $GREP -v "^: " < "$nlist" | + if sort -k 3 /dev/null 2>&1; then + sort -k 3 + else + sort +2 + fi | + uniq > "$nlist"S; then + : + else + $GREP -v "^: " < "$nlist" > "$nlist"S + fi + + if test -f "$nlist"S; then + eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"' + else + echo '/* NONE */' >> "$output_objdir/$my_dlsyms" + fi + + func_show_eval '$RM "${nlist}I"' + if test -n "$global_symbol_to_import"; then + eval "$global_symbol_to_import"' < "$nlist"S > "$nlist"I' + fi + + echo >> "$output_objdir/$my_dlsyms" "\ + +/* The mapping between symbol names and symbols. */ +typedef struct { + const char *name; + void *address; +} lt_dlsymlist; +extern LT_DLSYM_CONST lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[];\ +" + + if test -s "$nlist"I; then + echo >> "$output_objdir/$my_dlsyms" "\ +static void lt_syminit(void) +{ + LT_DLSYM_CONST lt_dlsymlist *symbol = lt_${my_prefix}_LTX_preloaded_symbols; + for (; symbol->name; ++symbol) + {" + $SED 's/.*/ if (STREQ (symbol->name, \"&\")) symbol->address = (void *) \&&;/' < "$nlist"I >> "$output_objdir/$my_dlsyms" + echo >> "$output_objdir/$my_dlsyms" "\ + } +}" + fi + echo >> "$output_objdir/$my_dlsyms" "\ +LT_DLSYM_CONST lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[] = +{ {\"$my_originator\", (void *) 0}," + + if test -s "$nlist"I; then + echo >> "$output_objdir/$my_dlsyms" "\ + {\"@INIT@\", (void *) <_syminit}," + fi + + case $need_lib_prefix in + no) + eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + *) + eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + esac + echo >> "$output_objdir/$my_dlsyms" "\ + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt_${my_prefix}_LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif\ +" + } # !$opt_dry_run + + pic_flag_for_symtable= + case "$compile_command " in + *" -static "*) ;; + *) + case $host in + # compiling the symbol table file with pic_flag works around + # a FreeBSD bug that causes programs to crash when -lm is + # linked before any other PIC object. 
But we must not use + # pic_flag when linking with -static. The problem exists in + # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. + *-*-freebsd2.*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) + pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;; + *-*-hpux*) + pic_flag_for_symtable=" $pic_flag" ;; + *) + $my_pic_p && pic_flag_for_symtable=" $pic_flag" + ;; + esac + ;; + esac + symtab_cflags= + for arg in $LTCFLAGS; do + case $arg in + -pie | -fpie | -fPIE) ;; + *) func_append symtab_cflags " $arg" ;; + esac + done + + # Now compile the dynamic symbol file. + func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?' + + # Clean up the generated files. + func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T" "${nlist}I"' + + # Transform the symbol file into the correct name. + symfileobj=$output_objdir/${my_outputname}S.$objext + case $host in + *cygwin* | *mingw* | *cegcc* ) + if test -f "$output_objdir/$my_outputname.def"; then + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + else + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` + fi + ;; + *) + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` + ;; + esac + ;; + *) + func_fatal_error "unknown suffix for '$my_dlsyms'" + ;; + esac + else + # We keep going just in case the user didn't refer to + # lt_preloaded_symbols. The linker will fail if global_symbol_pipe + # really was required. + + # Nullify the symbol file. + compile_command=`$ECHO "$compile_command" | $SED "s% @SYMFILE@%%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s% @SYMFILE@%%"` + fi +} + +# func_cygming_gnu_implib_p ARG +# This predicate returns with zero status (TRUE) if +# ARG is a GNU/binutils-style import library. Returns +# with nonzero status (FALSE) otherwise. +func_cygming_gnu_implib_p () +{ + $debug_cmd + + func_to_tool_file "$1" func_convert_file_msys_to_w32 + func_cygming_gnu_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$'` + test -n "$func_cygming_gnu_implib_tmp" +} + +# func_cygming_ms_implib_p ARG +# This predicate returns with zero status (TRUE) if +# ARG is an MS-style import library. Returns +# with nonzero status (FALSE) otherwise. +func_cygming_ms_implib_p () +{ + $debug_cmd + + func_to_tool_file "$1" func_convert_file_msys_to_w32 + func_cygming_ms_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $GREP '_NULL_IMPORT_DESCRIPTOR'` + test -n "$func_cygming_ms_implib_tmp" +} + +# func_win32_libid arg +# return the library type of file 'arg' +# +# Need a lot of goo to handle *both* DLLs and import libs +# Has to be a shell function in order to 'eat' the argument +# that is supplied when $file_magic_command is called. +# Despite the name, also deal with 64 bit binaries. 
+func_win32_libid () +{ + $debug_cmd + + win32_libid_type=unknown + win32_fileres=`file -L $1 2>/dev/null` + case $win32_fileres in + *ar\ archive\ import\ library*) # definitely import + win32_libid_type="x86 archive import" + ;; + *ar\ archive*) # could be an import, or static + # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD. + if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | + $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then + case $nm_interface in + "MS dumpbin") + if func_cygming_ms_implib_p "$1" || + func_cygming_gnu_implib_p "$1" + then + win32_nmres=import + else + win32_nmres= + fi + ;; + *) + func_to_tool_file "$1" func_convert_file_msys_to_w32 + win32_nmres=`eval $NM -f posix -A \"$func_to_tool_file_result\" | + $SED -n -e ' + 1,100{ + / I /{ + s|.*|import| + p + q + } + }'` + ;; + esac + case $win32_nmres in + import*) win32_libid_type="x86 archive import";; + *) win32_libid_type="x86 archive static";; + esac + fi + ;; + *DLL*) + win32_libid_type="x86 DLL" + ;; + *executable*) # but shell scripts are "executable" too... + case $win32_fileres in + *MS\ Windows\ PE\ Intel*) + win32_libid_type="x86 DLL" + ;; + esac + ;; + esac + $ECHO "$win32_libid_type" +} + +# func_cygming_dll_for_implib ARG +# +# Platform-specific function to extract the +# name of the DLL associated with the specified +# import library ARG. +# Invoked by eval'ing the libtool variable +# $sharedlib_from_linklib_cmd +# Result is available in the variable +# $sharedlib_from_linklib_result +func_cygming_dll_for_implib () +{ + $debug_cmd + + sharedlib_from_linklib_result=`$DLLTOOL --identify-strict --identify "$1"` +} + +# func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs +# +# The is the core of a fallback implementation of a +# platform-specific function to extract the name of the +# DLL associated with the specified import library LIBNAME. +# +# SECTION_NAME is either .idata$6 or .idata$7, depending +# on the platform and compiler that created the implib. +# +# Echos the name of the DLL associated with the +# specified import library. +func_cygming_dll_for_implib_fallback_core () +{ + $debug_cmd + + match_literal=`$ECHO "$1" | $SED "$sed_make_literal_regex"` + $OBJDUMP -s --section "$1" "$2" 2>/dev/null | + $SED '/^Contents of section '"$match_literal"':/{ + # Place marker at beginning of archive member dllname section + s/.*/====MARK====/ + p + d + } + # These lines can sometimes be longer than 43 characters, but + # are always uninteresting + /:[ ]*file format pe[i]\{,1\}-/d + /^In archive [^:]*:/d + # Ensure marker is printed + /^====MARK====/p + # Remove all lines with less than 43 characters + /^.\{43\}/!d + # From remaining lines, remove first 43 characters + s/^.\{43\}//' | + $SED -n ' + # Join marker and all lines until next marker into a single line + /^====MARK====/ b para + H + $ b para + b + :para + x + s/\n//g + # Remove the marker + s/^====MARK====// + # Remove trailing dots and whitespace + s/[\. \t]*$// + # Print + /./p' | + # we now have a list, one entry per line, of the stringified + # contents of the appropriate section of all members of the + # archive that possess that section. Heuristic: eliminate + # all those that have a first or second character that is + # a '.' (that is, objdump's representation of an unprintable + # character.) This should work for all archives with less than + # 0x302f exports -- but will fail for DLLs whose name actually + # begins with a literal '.' 
or a single character followed by + # a '.'. + # + # Of those that remain, print the first one. + $SED -e '/^\./d;/^.\./d;q' +} + +# func_cygming_dll_for_implib_fallback ARG +# Platform-specific function to extract the +# name of the DLL associated with the specified +# import library ARG. +# +# This fallback implementation is for use when $DLLTOOL +# does not support the --identify-strict option. +# Invoked by eval'ing the libtool variable +# $sharedlib_from_linklib_cmd +# Result is available in the variable +# $sharedlib_from_linklib_result +func_cygming_dll_for_implib_fallback () +{ + $debug_cmd + + if func_cygming_gnu_implib_p "$1"; then + # binutils import library + sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$7' "$1"` + elif func_cygming_ms_implib_p "$1"; then + # ms-generated import library + sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$6' "$1"` + else + # unknown + sharedlib_from_linklib_result= + fi +} + + +# func_extract_an_archive dir oldlib +func_extract_an_archive () +{ + $debug_cmd + + f_ex_an_ar_dir=$1; shift + f_ex_an_ar_oldlib=$1 + if test yes = "$lock_old_archive_extraction"; then + lockfile=$f_ex_an_ar_oldlib.lock + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" + sleep 2 + done + fi + func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" \ + 'stat=$?; rm -f "$lockfile"; exit $stat' + if test yes = "$lock_old_archive_extraction"; then + $opt_dry_run || rm -f "$lockfile" + fi + if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then + : + else + func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" + fi +} + + +# func_extract_archives gentop oldlib ... +func_extract_archives () +{ + $debug_cmd + + my_gentop=$1; shift + my_oldlibs=${1+"$@"} + my_oldobjs= + my_xlib= + my_xabs= + my_xdir= + + for my_xlib in $my_oldlibs; do + # Extract the objects. + case $my_xlib in + [\\/]* | [A-Za-z]:[\\/]*) my_xabs=$my_xlib ;; + *) my_xabs=`pwd`"/$my_xlib" ;; + esac + func_basename "$my_xlib" + my_xlib=$func_basename_result + my_xlib_u=$my_xlib + while :; do + case " $extracted_archives " in + *" $my_xlib_u "*) + func_arith $extracted_serial + 1 + extracted_serial=$func_arith_result + my_xlib_u=lt$extracted_serial-$my_xlib ;; + *) break ;; + esac + done + extracted_archives="$extracted_archives $my_xlib_u" + my_xdir=$my_gentop/$my_xlib_u + + func_mkdir_p "$my_xdir" + + case $host in + *-darwin*) + func_verbose "Extracting $my_xabs" + # Do not bother doing anything if just a dry run + $opt_dry_run || { + darwin_orig_dir=`pwd` + cd $my_xdir || exit $? 
+ darwin_archive=$my_xabs + darwin_curdir=`pwd` + func_basename "$darwin_archive" + darwin_base_archive=$func_basename_result + darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true` + if test -n "$darwin_arches"; then + darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'` + darwin_arch= + func_verbose "$darwin_base_archive has multiple architectures $darwin_arches" + for darwin_arch in $darwin_arches; do + func_mkdir_p "unfat-$$/$darwin_base_archive-$darwin_arch" + $LIPO -thin $darwin_arch -output "unfat-$$/$darwin_base_archive-$darwin_arch/$darwin_base_archive" "$darwin_archive" + cd "unfat-$$/$darwin_base_archive-$darwin_arch" + func_extract_an_archive "`pwd`" "$darwin_base_archive" + cd "$darwin_curdir" + $RM "unfat-$$/$darwin_base_archive-$darwin_arch/$darwin_base_archive" + done # $darwin_arches + ## Okay now we've a bunch of thin objects, gotta fatten them up :) + darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$sed_basename" | sort -u` + darwin_file= + darwin_files= + for darwin_file in $darwin_filelist; do + darwin_files=`find unfat-$$ -name $darwin_file -print | sort | $NL2SP` + $LIPO -create -output "$darwin_file" $darwin_files + done # $darwin_filelist + $RM -rf unfat-$$ + cd "$darwin_orig_dir" + else + cd $darwin_orig_dir + func_extract_an_archive "$my_xdir" "$my_xabs" + fi # $darwin_arches + } # !$opt_dry_run + ;; + *) + func_extract_an_archive "$my_xdir" "$my_xabs" + ;; + esac + my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | sort | $NL2SP` + done + + func_extract_archives_result=$my_oldobjs +} + + +# func_emit_wrapper [arg=no] +# +# Emit a libtool wrapper script on stdout. +# Don't directly open a file because we may want to +# incorporate the script contents within a cygwin/mingw +# wrapper executable. Must ONLY be called from within +# func_mode_link because it depends on a number of variables +# set therein. +# +# ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR +# variable will take. If 'yes', then the emitted script +# will assume that the directory where it is stored is +# the $objdir directory. This is a cygwin/mingw-specific +# behavior. +func_emit_wrapper () +{ + func_emit_wrapper_arg1=${1-no} + + $ECHO "\ +#! $SHELL + +# $output - temporary wrapper script for $objdir/$outputname +# Generated by $PROGRAM (GNU $PACKAGE) $VERSION +# +# The $output program cannot be directly executed until all the libtool +# libraries that it depends on are installed. +# +# This wrapper script should never be moved out of the build directory. +# If it is, it will not operate correctly. + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +sed_quote_subst='$sed_quote_subst' + +# Be Bourne compatible +if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. + alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac +fi +BIN_SH=xpg4; export BIN_SH # for Tru64 +DUALCASE=1; export DUALCASE # for MKS sh + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. 
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +relink_command=\"$relink_command\" + +# This environment variable determines our operation mode. +if test \"\$libtool_install_magic\" = \"$magic\"; then + # install mode needs the following variables: + generated_by_libtool_version='$macro_version' + notinst_deplibs='$notinst_deplibs' +else + # When we are sourced in execute mode, \$file and \$ECHO are already set. + if test \"\$libtool_execute_magic\" != \"$magic\"; then + file=\"\$0\"" + + qECHO=`$ECHO "$ECHO" | $SED "$sed_quote_subst"` + $ECHO "\ + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +\$1 +_LTECHO_EOF' +} + ECHO=\"$qECHO\" + fi + +# Very basic option parsing. These options are (a) specific to +# the libtool wrapper, (b) are identical between the wrapper +# /script/ and the wrapper /executable/ that is used only on +# windows platforms, and (c) all begin with the string "--lt-" +# (application programs are unlikely to have options that match +# this pattern). +# +# There are only two supported options: --lt-debug and +# --lt-dump-script. There is, deliberately, no --lt-help. +# +# The first argument to this parsing function should be the +# script's $0 value, followed by "$@". +lt_option_debug= +func_parse_lt_options () +{ + lt_script_arg0=\$0 + shift + for lt_opt + do + case \"\$lt_opt\" in + --lt-debug) lt_option_debug=1 ;; + --lt-dump-script) + lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\` + test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=. + lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\` + cat \"\$lt_dump_D/\$lt_dump_F\" + exit 0 + ;; + --lt-*) + \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2 + exit 1 + ;; + esac + done + + # Print the debug banner immediately: + if test -n \"\$lt_option_debug\"; then + echo \"$outputname:$output:\$LINENO: libtool wrapper (GNU $PACKAGE) $VERSION\" 1>&2 + fi +} + +# Used when --lt-debug. Prints its arguments to stdout +# (redirection is the responsibility of the caller) +func_lt_dump_args () +{ + lt_dump_args_N=1; + for lt_arg + do + \$ECHO \"$outputname:$output:\$LINENO: newargv[\$lt_dump_args_N]: \$lt_arg\" + lt_dump_args_N=\`expr \$lt_dump_args_N + 1\` + done +} + +# Core function for launching the target application +func_exec_program_core () +{ +" + case $host in + # Backslashes separate directories on plain windows + *-*-mingw | *-*-os2* | *-cegcc*) + $ECHO "\ + if test -n \"\$lt_option_debug\"; then + \$ECHO \"$outputname:$output:\$LINENO: newargv[0]: \$progdir\\\\\$program\" 1>&2 + func_lt_dump_args \${1+\"\$@\"} 1>&2 + fi + exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} +" + ;; + + *) + $ECHO "\ + if test -n \"\$lt_option_debug\"; then + \$ECHO \"$outputname:$output:\$LINENO: newargv[0]: \$progdir/\$program\" 1>&2 + func_lt_dump_args \${1+\"\$@\"} 1>&2 + fi + exec \"\$progdir/\$program\" \${1+\"\$@\"} +" + ;; + esac + $ECHO "\ + \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 + exit 1 +} + +# A function to encapsulate launching the target application +# Strips options in the --lt-* namespace from \$@ and +# launches target application with the remaining arguments. 
+func_exec_program () +{ + case \" \$* \" in + *\\ --lt-*) + for lt_wr_arg + do + case \$lt_wr_arg in + --lt-*) ;; + *) set x \"\$@\" \"\$lt_wr_arg\"; shift;; + esac + shift + done ;; + esac + func_exec_program_core \${1+\"\$@\"} +} + + # Parse options + func_parse_lt_options \"\$0\" \${1+\"\$@\"} + + # Find the directory that this script lives in. + thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\` + test \"x\$thisdir\" = \"x\$file\" && thisdir=. + + # Follow symbolic links until we get to the real thisdir. + file=\`ls -ld \"\$file\" | $SED -n 's/.*-> //p'\` + while test -n \"\$file\"; do + destdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*\$%%'\` + + # If there was a directory component, then change thisdir. + if test \"x\$destdir\" != \"x\$file\"; then + case \"\$destdir\" in + [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; + *) thisdir=\"\$thisdir/\$destdir\" ;; + esac + fi + + file=\`\$ECHO \"\$file\" | $SED 's%^.*/%%'\` + file=\`ls -ld \"\$thisdir/\$file\" | $SED -n 's/.*-> //p'\` + done + + # Usually 'no', except on cygwin/mingw when embedded into + # the cwrapper. + WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1 + if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then + # special case for '.' + if test \"\$thisdir\" = \".\"; then + thisdir=\`pwd\` + fi + # remove .libs from thisdir + case \"\$thisdir\" in + *[\\\\/]$objdir ) thisdir=\`\$ECHO \"\$thisdir\" | $SED 's%[\\\\/][^\\\\/]*$%%'\` ;; + $objdir ) thisdir=. ;; + esac + fi + + # Try to get the absolute directory name. + absdir=\`cd \"\$thisdir\" && pwd\` + test -n \"\$absdir\" && thisdir=\"\$absdir\" +" + + if test yes = "$fast_install"; then + $ECHO "\ + program=lt-'$outputname'$exeext + progdir=\"\$thisdir/$objdir\" + + if test ! -f \"\$progdir/\$program\" || + { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | $SED 1q\`; \\ + test \"X\$file\" != \"X\$progdir/\$program\"; }; then + + file=\"\$\$-\$program\" + + if test ! -d \"\$progdir\"; then + $MKDIR \"\$progdir\" + else + $RM \"\$progdir/\$file\" + fi" + + $ECHO "\ + + # relink executable if necessary + if test -n \"\$relink_command\"; then + if relink_command_output=\`eval \$relink_command 2>&1\`; then : + else + \$ECHO \"\$relink_command_output\" >&2 + $RM \"\$progdir/\$file\" + exit 1 + fi + fi + + $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || + { $RM \"\$progdir/\$program\"; + $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; } + $RM \"\$progdir/\$file\" + fi" + else + $ECHO "\ + program='$outputname' + progdir=\"\$thisdir/$objdir\" +" + fi + + $ECHO "\ + + if test -f \"\$progdir/\$program\"; then" + + # fixup the dll searchpath if we need to. + # + # Fix the DLL searchpath if we need to. Do this before prepending + # to shlibpath, because on Windows, both are PATH and uninstalled + # libraries must come first. + if test -n "$dllsearchpath"; then + $ECHO "\ + # Add the dll search path components to the executable PATH + PATH=$dllsearchpath:\$PATH +" + fi + + # Export our shlibpath_var if we have one. 
+ if test yes = "$shlibpath_overrides_runpath" && test -n "$shlibpath_var" && test -n "$temp_rpath"; then + $ECHO "\ + # Add our own library path to $shlibpath_var + $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" + + # Some systems cannot cope with colon-terminated $shlibpath_var + # The second colon is a workaround for a bug in BeOS R4 sed + $shlibpath_var=\`\$ECHO \"\$$shlibpath_var\" | $SED 's/::*\$//'\` + + export $shlibpath_var +" + fi + + $ECHO "\ + if test \"\$libtool_execute_magic\" != \"$magic\"; then + # Run the actual program with our arguments. + func_exec_program \${1+\"\$@\"} + fi + else + # The program doesn't exist. + \$ECHO \"\$0: error: '\$progdir/\$program' does not exist\" 1>&2 + \$ECHO \"This script is just a wrapper for \$program.\" 1>&2 + \$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2 + exit 1 + fi +fi\ +" +} + + +# func_emit_cwrapperexe_src +# emit the source code for a wrapper executable on stdout +# Must ONLY be called from within func_mode_link because +# it depends on a number of variable set therein. +func_emit_cwrapperexe_src () +{ + cat < +#include +#ifdef _MSC_VER +# include +# include +# include +#else +# include +# include +# ifdef __CYGWIN__ +# include +# endif +#endif +#include +#include +#include +#include +#include +#include +#include +#include + +#define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0) + +/* declarations of non-ANSI functions */ +#if defined __MINGW32__ +# ifdef __STRICT_ANSI__ +int _putenv (const char *); +# endif +#elif defined __CYGWIN__ +# ifdef __STRICT_ANSI__ +char *realpath (const char *, char *); +int putenv (char *); +int setenv (const char *, const char *, int); +# endif +/* #elif defined other_platform || defined ... */ +#endif + +/* portability defines, excluding path handling macros */ +#if defined _MSC_VER +# define setmode _setmode +# define stat _stat +# define chmod _chmod +# define getcwd _getcwd +# define putenv _putenv +# define S_IXUSR _S_IEXEC +#elif defined __MINGW32__ +# define setmode _setmode +# define stat _stat +# define chmod _chmod +# define getcwd _getcwd +# define putenv _putenv +#elif defined __CYGWIN__ +# define HAVE_SETENV +# define FOPEN_WB "wb" +/* #elif defined other platforms ... 
*/ +#endif + +#if defined PATH_MAX +# define LT_PATHMAX PATH_MAX +#elif defined MAXPATHLEN +# define LT_PATHMAX MAXPATHLEN +#else +# define LT_PATHMAX 1024 +#endif + +#ifndef S_IXOTH +# define S_IXOTH 0 +#endif +#ifndef S_IXGRP +# define S_IXGRP 0 +#endif + +/* path handling portability macros */ +#ifndef DIR_SEPARATOR +# define DIR_SEPARATOR '/' +# define PATH_SEPARATOR ':' +#endif + +#if defined _WIN32 || defined __MSDOS__ || defined __DJGPP__ || \ + defined __OS2__ +# define HAVE_DOS_BASED_FILE_SYSTEM +# define FOPEN_WB "wb" +# ifndef DIR_SEPARATOR_2 +# define DIR_SEPARATOR_2 '\\' +# endif +# ifndef PATH_SEPARATOR_2 +# define PATH_SEPARATOR_2 ';' +# endif +#endif + +#ifndef DIR_SEPARATOR_2 +# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) +#else /* DIR_SEPARATOR_2 */ +# define IS_DIR_SEPARATOR(ch) \ + (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) +#endif /* DIR_SEPARATOR_2 */ + +#ifndef PATH_SEPARATOR_2 +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR) +#else /* PATH_SEPARATOR_2 */ +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2) +#endif /* PATH_SEPARATOR_2 */ + +#ifndef FOPEN_WB +# define FOPEN_WB "w" +#endif +#ifndef _O_BINARY +# define _O_BINARY 0 +#endif + +#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) +#define XFREE(stale) do { \ + if (stale) { free (stale); stale = 0; } \ +} while (0) + +#if defined LT_DEBUGWRAPPER +static int lt_debug = 1; +#else +static int lt_debug = 0; +#endif + +const char *program_name = "libtool-wrapper"; /* in case xstrdup fails */ + +void *xmalloc (size_t num); +char *xstrdup (const char *string); +const char *base_name (const char *name); +char *find_executable (const char *wrapper); +char *chase_symlinks (const char *pathspec); +int make_executable (const char *path); +int check_executable (const char *path); +char *strendzap (char *str, const char *pat); +void lt_debugprintf (const char *file, int line, const char *fmt, ...); +void lt_fatal (const char *file, int line, const char *message, ...); +static const char *nonnull (const char *s); +static const char *nonempty (const char *s); +void lt_setenv (const char *name, const char *value); +char *lt_extend_str (const char *orig_value, const char *add, int to_end); +void lt_update_exe_path (const char *name, const char *value); +void lt_update_lib_path (const char *name, const char *value); +char **prepare_spawn (char **argv); +void lt_dump_script (FILE *f); +EOF + + cat <= 0) + && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) + return 1; + else + return 0; +} + +int +make_executable (const char *path) +{ + int rval = 0; + struct stat st; + + lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n", + nonempty (path)); + if ((!path) || (!*path)) + return 0; + + if (stat (path, &st) >= 0) + { + rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR); + } + return rval; +} + +/* Searches for the full path of the wrapper. Returns + newly allocated full path name if found, NULL otherwise + Does not chase symlinks, even on platforms that support them. +*/ +char * +find_executable (const char *wrapper) +{ + int has_slash = 0; + const char *p; + const char *p_next; + /* static buffer for getcwd */ + char tmp[LT_PATHMAX + 1]; + size_t tmp_len; + char *concat_name; + + lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n", + nonempty (wrapper)); + + if ((wrapper == NULL) || (*wrapper == '\0')) + return NULL; + + /* Absolute path? 
*/ +#if defined HAVE_DOS_BASED_FILE_SYSTEM + if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':') + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + else + { +#endif + if (IS_DIR_SEPARATOR (wrapper[0])) + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } +#if defined HAVE_DOS_BASED_FILE_SYSTEM + } +#endif + + for (p = wrapper; *p; p++) + if (*p == '/') + { + has_slash = 1; + break; + } + if (!has_slash) + { + /* no slashes; search PATH */ + const char *path = getenv ("PATH"); + if (path != NULL) + { + for (p = path; *p; p = p_next) + { + const char *q; + size_t p_len; + for (q = p; *q; q++) + if (IS_PATH_SEPARATOR (*q)) + break; + p_len = (size_t) (q - p); + p_next = (*q == '\0' ? q : q + 1); + if (p_len == 0) + { + /* empty path: current directory */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", + nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = + XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + } + else + { + concat_name = + XMALLOC (char, p_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, p, p_len); + concat_name[p_len] = '/'; + strcpy (concat_name + p_len + 1, wrapper); + } + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + } + /* not found in PATH; assume curdir */ + } + /* Relative path | not found in path: prepend cwd */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", + nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + return NULL; +} + +char * +chase_symlinks (const char *pathspec) +{ +#ifndef S_ISLNK + return xstrdup (pathspec); +#else + char buf[LT_PATHMAX]; + struct stat s; + char *tmp_pathspec = xstrdup (pathspec); + char *p; + int has_symlinks = 0; + while (strlen (tmp_pathspec) && !has_symlinks) + { + lt_debugprintf (__FILE__, __LINE__, + "checking path component for symlinks: %s\n", + tmp_pathspec); + if (lstat (tmp_pathspec, &s) == 0) + { + if (S_ISLNK (s.st_mode) != 0) + { + has_symlinks = 1; + break; + } + + /* search backwards for last DIR_SEPARATOR */ + p = tmp_pathspec + strlen (tmp_pathspec) - 1; + while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + p--; + if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + { + /* no more DIR_SEPARATORS left */ + break; + } + *p = '\0'; + } + else + { + lt_fatal (__FILE__, __LINE__, + "error accessing file \"%s\": %s", + tmp_pathspec, nonnull (strerror (errno))); + } + } + XFREE (tmp_pathspec); + + if (!has_symlinks) + { + return xstrdup (pathspec); + } + + tmp_pathspec = realpath (pathspec, buf); + if (tmp_pathspec == 0) + { + lt_fatal (__FILE__, __LINE__, + "could not follow symlinks for %s", pathspec); + } + return xstrdup (tmp_pathspec); +#endif +} + +char * +strendzap (char *str, const char *pat) +{ + size_t len, patlen; + + assert (str != NULL); + assert (pat != NULL); + + len = strlen (str); + patlen = strlen (pat); + + if (patlen <= len) + { + str += len - patlen; + if (STREQ (str, pat)) + *str = '\0'; + } + return 
str; +} + +void +lt_debugprintf (const char *file, int line, const char *fmt, ...) +{ + va_list args; + if (lt_debug) + { + (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line); + va_start (args, fmt); + (void) vfprintf (stderr, fmt, args); + va_end (args); + } +} + +static void +lt_error_core (int exit_status, const char *file, + int line, const char *mode, + const char *message, va_list ap) +{ + fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode); + vfprintf (stderr, message, ap); + fprintf (stderr, ".\n"); + + if (exit_status >= 0) + exit (exit_status); +} + +void +lt_fatal (const char *file, int line, const char *message, ...) +{ + va_list ap; + va_start (ap, message); + lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap); + va_end (ap); +} + +static const char * +nonnull (const char *s) +{ + return s ? s : "(null)"; +} + +static const char * +nonempty (const char *s) +{ + return (s && !*s) ? "(empty)" : nonnull (s); +} + +void +lt_setenv (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_setenv) setting '%s' to '%s'\n", + nonnull (name), nonnull (value)); + { +#ifdef HAVE_SETENV + /* always make a copy, for consistency with !HAVE_SETENV */ + char *str = xstrdup (value); + setenv (name, str, 1); +#else + size_t len = strlen (name) + 1 + strlen (value) + 1; + char *str = XMALLOC (char, len); + sprintf (str, "%s=%s", name, value); + if (putenv (str) != EXIT_SUCCESS) + { + XFREE (str); + } +#endif + } +} + +char * +lt_extend_str (const char *orig_value, const char *add, int to_end) +{ + char *new_value; + if (orig_value && *orig_value) + { + size_t orig_value_len = strlen (orig_value); + size_t add_len = strlen (add); + new_value = XMALLOC (char, add_len + orig_value_len + 1); + if (to_end) + { + strcpy (new_value, orig_value); + strcpy (new_value + orig_value_len, add); + } + else + { + strcpy (new_value, add); + strcpy (new_value + add_len, orig_value); + } + } + else + { + new_value = xstrdup (add); + } + return new_value; +} + +void +lt_update_exe_path (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_update_exe_path) modifying '%s' by prepending '%s'\n", + nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + /* some systems can't cope with a ':'-terminated path #' */ + size_t len = strlen (new_value); + while ((len > 0) && IS_PATH_SEPARATOR (new_value[len-1])) + { + new_value[--len] = '\0'; + } + lt_setenv (name, new_value); + XFREE (new_value); + } +} + +void +lt_update_lib_path (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_update_lib_path) modifying '%s' by prepending '%s'\n", + nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + lt_setenv (name, new_value); + XFREE (new_value); + } +} + +EOF + case $host_os in + mingw*) + cat <<"EOF" + +/* Prepares an argument vector before calling spawn(). + Note that spawn() does not by itself call the command interpreter + (getenv ("COMSPEC") != NULL ? getenv ("COMSPEC") : + ({ OSVERSIONINFO v; v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); + GetVersionEx(&v); + v.dwPlatformId == VER_PLATFORM_WIN32_NT; + }) ? "cmd.exe" : "command.com"). + Instead it simply concatenates the arguments, separated by ' ', and calls + CreateProcess(). 
We must quote the arguments since Win32 CreateProcess() + interprets characters like ' ', '\t', '\\', '"' (but not '<' and '>') in a + special way: + - Space and tab are interpreted as delimiters. They are not treated as + delimiters if they are surrounded by double quotes: "...". + - Unescaped double quotes are removed from the input. Their only effect is + that within double quotes, space and tab are treated like normal + characters. + - Backslashes not followed by double quotes are not special. + - But 2*n+1 backslashes followed by a double quote become + n backslashes followed by a double quote (n >= 0): + \" -> " + \\\" -> \" + \\\\\" -> \\" + */ +#define SHELL_SPECIAL_CHARS "\"\\ \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" +#define SHELL_SPACE_CHARS " \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" +char ** +prepare_spawn (char **argv) +{ + size_t argc; + char **new_argv; + size_t i; + + /* Count number of arguments. */ + for (argc = 0; argv[argc] != NULL; argc++) + ; + + /* Allocate new argument vector. */ + new_argv = XMALLOC (char *, argc + 1); + + /* Put quoted arguments into the new argument vector. */ + for (i = 0; i < argc; i++) + { + const char *string = argv[i]; + + if (string[0] == '\0') + new_argv[i] = xstrdup ("\"\""); + else if (strpbrk (string, SHELL_SPECIAL_CHARS) != NULL) + { + int quote_around = (strpbrk (string, SHELL_SPACE_CHARS) != NULL); + size_t length; + unsigned int backslashes; + const char *s; + char *quoted_string; + char *p; + + length = 0; + backslashes = 0; + if (quote_around) + length++; + for (s = string; *s != '\0'; s++) + { + char c = *s; + if (c == '"') + length += backslashes + 1; + length++; + if (c == '\\') + backslashes++; + else + backslashes = 0; + } + if (quote_around) + length += backslashes + 1; + + quoted_string = XMALLOC (char, length + 1); + + p = quoted_string; + backslashes = 0; + if (quote_around) + *p++ = '"'; + for (s = string; *s != '\0'; s++) + { + char c = *s; + if (c == '"') + { + unsigned int j; + for (j = backslashes + 1; j > 0; j--) + *p++ = '\\'; + } + *p++ = c; + if (c == '\\') + backslashes++; + else + backslashes = 0; + } + if (quote_around) + { + unsigned int j; + for (j = backslashes; j > 0; j--) + *p++ = '\\'; + *p++ = '"'; + } + *p = '\0'; + + new_argv[i] = quoted_string; + } + else + new_argv[i] = (char *) string; + } + new_argv[argc] = NULL; + + return new_argv; +} +EOF + ;; + esac + + cat <<"EOF" +void lt_dump_script (FILE* f) +{ +EOF + func_emit_wrapper yes | + $SED -n -e ' +s/^\(.\{79\}\)\(..*\)/\1\ +\2/ +h +s/\([\\"]\)/\\\1/g +s/$/\\n/ +s/\([^\n]*\).*/ fputs ("\1", f);/p +g +D' + cat <<"EOF" +} +EOF +} +# end: func_emit_cwrapperexe_src + +# func_win32_import_lib_p ARG +# True if ARG is an import lib, as indicated by $file_magic_cmd +func_win32_import_lib_p () +{ + $debug_cmd + + case `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q` in + *import*) : ;; + *) false ;; + esac +} + +# func_suncc_cstd_abi +# !!ONLY CALL THIS FOR SUN CC AFTER $compile_command IS FULLY EXPANDED!! +# Several compiler flags select an ABI that is incompatible with the +# Cstd library. Avoid specifying it if any are in CXXFLAGS. 
+func_suncc_cstd_abi () +{ + $debug_cmd + + case " $compile_command " in + *" -compat=g "*|*\ -std=c++[0-9][0-9]\ *|*" -library=stdcxx4 "*|*" -library=stlport4 "*) + suncc_use_cstd_abi=no + ;; + *) + suncc_use_cstd_abi=yes + ;; + esac +} + +# func_mode_link arg... +func_mode_link () +{ + $debug_cmd + + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + # It is impossible to link a dll without this setting, and + # we shouldn't force the makefile maintainer to figure out + # what system we are compiling for in order to pass an extra + # flag for every libtool invocation. + # allow_undefined=no + + # FIXME: Unfortunately, there are problems with the above when trying + # to make a dll that has undefined symbols, in which case not + # even a static library is built. For now, we need to specify + # -no-undefined on the libtool link line when we can be certain + # that all symbols are satisfied, otherwise we get a static library. + allow_undefined=yes + ;; + *) + allow_undefined=yes + ;; + esac + libtool_args=$nonopt + base_compile="$nonopt $@" + compile_command=$nonopt + finalize_command=$nonopt + + compile_rpath= + finalize_rpath= + compile_shlibpath= + finalize_shlibpath= + convenience= + old_convenience= + deplibs= + old_deplibs= + compiler_flags= + linker_flags= + dllsearchpath= + lib_search_path=`pwd` + inst_prefix_dir= + new_inherited_linker_flags= + + avoid_version=no + bindir= + dlfiles= + dlprefiles= + dlself=no + export_dynamic=no + export_symbols= + export_symbols_regex= + generated= + libobjs= + ltlibs= + module=no + no_install=no + objs= + os2dllname= + non_pic_objects= + precious_files_regex= + prefer_static_libs=no + preload=false + prev= + prevarg= + release= + rpath= + xrpath= + perm_rpath= + temp_rpath= + thread_safe=no + vinfo= + vinfo_number=no + weak_libs= + single_module=$wl-single_module + func_infer_tag $base_compile + + # We need to know -static, to get the right output filenames. + for arg + do + case $arg in + -shared) + test yes != "$build_libtool_libs" \ + && func_fatal_configuration "cannot build a shared library" + build_old_libs=no + break + ;; + -all-static | -static | -static-libtool-libs) + case $arg in + -all-static) + if test yes = "$build_libtool_libs" && test -z "$link_static_flag"; then + func_warning "complete static linking is impossible in this configuration" + fi + if test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + -static) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=built + ;; + -static-libtool-libs) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + esac + build_libtool_libs=no + build_old_libs=yes + break + ;; + esac + done + + # See if our shared archives depend on static archives. + test -n "$old_archive_from_new_cmds" && build_old_libs=yes + + # Go through the arguments, transforming them on the way. + while test "$#" -gt 0; do + arg=$1 + shift + func_quote_for_eval "$arg" + qarg=$func_quote_for_eval_unquoted_result + func_append libtool_args " $func_quote_for_eval_result" + + # If the previous option needs an argument, assign it. 
+ if test -n "$prev"; then + case $prev in + output) + func_append compile_command " @OUTPUT@" + func_append finalize_command " @OUTPUT@" + ;; + esac + + case $prev in + bindir) + bindir=$arg + prev= + continue + ;; + dlfiles|dlprefiles) + $preload || { + # Add the symbol object into the linking commands. + func_append compile_command " @SYMFILE@" + func_append finalize_command " @SYMFILE@" + preload=: + } + case $arg in + *.la | *.lo) ;; # We handle these cases below. + force) + if test no = "$dlself"; then + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + self) + if test dlprefiles = "$prev"; then + dlself=yes + elif test dlfiles = "$prev" && test yes != "$dlopen_self"; then + dlself=yes + else + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + *) + if test dlfiles = "$prev"; then + func_append dlfiles " $arg" + else + func_append dlprefiles " $arg" + fi + prev= + continue + ;; + esac + ;; + expsyms) + export_symbols=$arg + test -f "$arg" \ + || func_fatal_error "symbol file '$arg' does not exist" + prev= + continue + ;; + expsyms_regex) + export_symbols_regex=$arg + prev= + continue + ;; + framework) + case $host in + *-*-darwin*) + case "$deplibs " in + *" $qarg.ltframework "*) ;; + *) func_append deplibs " $qarg.ltframework" # this is fixed later + ;; + esac + ;; + esac + prev= + continue + ;; + inst_prefix) + inst_prefix_dir=$arg + prev= + continue + ;; + mllvm) + # Clang does not use LLVM to link, so we can simply discard any + # '-mllvm $arg' options when doing the link step. + prev= + continue + ;; + objectlist) + if test -f "$arg"; then + save_arg=$arg + moreargs= + for fil in `cat "$save_arg"` + do +# func_append moreargs " $fil" + arg=$fil + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test none = "$pic_object" && + test none = "$non_pic_object"; then + func_fatal_error "cannot find name of object for '$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + if test none != "$pic_object"; then + # Prepend the subdirectory the object is found in. + pic_object=$xdir$pic_object + + if test dlfiles = "$prev"; then + if test yes = "$build_libtool_libs" && test yes = "$dlopen_support"; then + func_append dlfiles " $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test dlprefiles = "$prev"; then + # Preload the old-style object. + func_append dlprefiles " $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg=$pic_object + fi + + # Non-PIC object. + if test none != "$non_pic_object"; then + # Prepend the subdirectory the object is found in. + non_pic_object=$xdir$non_pic_object + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test none = "$pic_object"; then + arg=$non_pic_object + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object=$pic_object + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. 
+ func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "'$arg' is not a valid libtool object" + fi + fi + done + else + func_fatal_error "link input file '$arg' does not exist" + fi + arg=$save_arg + prev= + continue + ;; + os2dllname) + os2dllname=$arg + prev= + continue + ;; + precious_regex) + precious_files_regex=$arg + prev= + continue + ;; + release) + release=-$arg + prev= + continue + ;; + rpath | xrpath) + # We need an absolute path. + case $arg in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + if test rpath = "$prev"; then + case "$rpath " in + *" $arg "*) ;; + *) func_append rpath " $arg" ;; + esac + else + case "$xrpath " in + *" $arg "*) ;; + *) func_append xrpath " $arg" ;; + esac + fi + prev= + continue + ;; + shrext) + shrext_cmds=$arg + prev= + continue + ;; + weak) + func_append weak_libs " $arg" + prev= + continue + ;; + xcclinker) + func_append linker_flags " $qarg" + func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xcompiler) + func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xlinker) + func_append linker_flags " $qarg" + func_append compiler_flags " $wl$qarg" + prev= + func_append compile_command " $wl$qarg" + func_append finalize_command " $wl$qarg" + continue + ;; + *) + eval "$prev=\"\$arg\"" + prev= + continue + ;; + esac + fi # test -n "$prev" + + prevarg=$arg + + case $arg in + -all-static) + if test -n "$link_static_flag"; then + # See comment for -static flag below, for more details. + func_append compile_command " $link_static_flag" + func_append finalize_command " $link_static_flag" + fi + continue + ;; + + -allow-undefined) + # FIXME: remove this flag sometime in the future. + func_fatal_error "'-allow-undefined' must not be used because it is the default" + ;; + + -avoid-version) + avoid_version=yes + continue + ;; + + -bindir) + prev=bindir + continue + ;; + + -dlopen) + prev=dlfiles + continue + ;; + + -dlpreopen) + prev=dlprefiles + continue + ;; + + -export-dynamic) + export_dynamic=yes + continue + ;; + + -export-symbols | -export-symbols-regex) + if test -n "$export_symbols" || test -n "$export_symbols_regex"; then + func_fatal_error "more than one -exported-symbols argument is not allowed" + fi + if test X-export-symbols = "X$arg"; then + prev=expsyms + else + prev=expsyms_regex + fi + continue + ;; + + -framework) + prev=framework + continue + ;; + + -inst-prefix-dir) + prev=inst_prefix + continue + ;; + + # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* + # so, if we see these flags be careful not to treat them like -L + -L[A-Z][A-Z]*:*) + case $with_gcc/$host in + no/*-*-irix* | /*-*-irix*) + func_append compile_command " $arg" + func_append finalize_command " $arg" + ;; + esac + continue + ;; + + -L*) + func_stripname "-L" '' "$arg" + if test -z "$func_stripname_result"; then + if test "$#" -gt 0; then + func_fatal_error "require no space between '-L' and '$1'" + else + func_fatal_error "need path for '-L' option" + fi + fi + func_resolve_sysroot "$func_stripname_result" + dir=$func_resolve_sysroot_result + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + absdir=`cd "$dir" && pwd` + test -z "$absdir" && \ + func_fatal_error "cannot determine absolute directory name of '$dir'" + dir=$absdir + ;; + esac + case "$deplibs " in + *" -L$dir "* | *" $arg "*) + # Will only happen for absolute or sysroot arguments + ;; + *) + # Preserve sysroot, but never include relative directories + case $dir in + [\\/]* | [A-Za-z]:[\\/]* | =*) func_append deplibs " $arg" ;; + *) func_append deplibs " -L$dir" ;; + esac + func_append lib_search_path " $dir" + ;; + esac + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`$ECHO "$dir" | $SED 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$dir:"*) ;; + ::) dllsearchpath=$dir;; + *) func_append dllsearchpath ":$dir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac + continue + ;; + + -l*) + if test X-lc = "X$arg" || test X-lm = "X$arg"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc* | *-*-haiku*) + # These systems don't actually have a C or math library (as such) + continue + ;; + *-*-os2*) + # These systems don't actually have a C library (as such) + test X-lc = "X$arg" && continue + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-bitrig*) + # Do not include libc due to us having libc/libc_r. + test X-lc = "X$arg" && continue + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C and math libraries are in the System framework + func_append deplibs " System.ltframework" + continue + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + test X-lc = "X$arg" && continue + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + test X-lc = "X$arg" && continue + ;; + esac + elif test X-lc_r = "X$arg"; then + case $host in + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-bitrig*) + # Do not include libc_r directly, use -pthread flag. + continue + ;; + esac + fi + func_append deplibs " $arg" + continue + ;; + + -mllvm) + prev=mllvm + continue + ;; + + -module) + module=yes + continue + ;; + + # Tru64 UNIX uses -model [arg] to determine the layout of C++ + # classes, name mangling, and exception handling. + # Darwin uses the -arch flag to determine output architecture. + -model|-arch|-isysroot|--sysroot) + func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + prev=xcompiler + continue + ;; + + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ + |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) + func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + case "$new_inherited_linker_flags " in + *" $arg "*) ;; + * ) func_append new_inherited_linker_flags " $arg" ;; + esac + continue + ;; + + -multi_module) + single_module=$wl-multi_module + continue + ;; + + -no-fast-install) + fast_install=no + continue + ;; + + -no-install) + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*) + # The PATH hackery in wrapper scripts is required on Windows + # and Darwin in order for the loader to find any dlls it needs. 
+ func_warning "'-no-install' is ignored for $host" + func_warning "assuming '-no-fast-install' instead" + fast_install=no + ;; + *) no_install=yes ;; + esac + continue + ;; + + -no-undefined) + allow_undefined=no + continue + ;; + + -objectlist) + prev=objectlist + continue + ;; + + -os2dllname) + prev=os2dllname + continue + ;; + + -o) prev=output ;; + + -precious-files-regex) + prev=precious_regex + continue + ;; + + -release) + prev=release + continue + ;; + + -rpath) + prev=rpath + continue + ;; + + -R) + prev=xrpath + continue + ;; + + -R*) + func_stripname '-R' '' "$arg" + dir=$func_stripname_result + # We need an absolute path. + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + =*) + func_stripname '=' '' "$dir" + dir=$lt_sysroot$func_stripname_result + ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + case "$xrpath " in + *" $dir "*) ;; + *) func_append xrpath " $dir" ;; + esac + continue + ;; + + -shared) + # The effects of -shared are defined in a previous loop. + continue + ;; + + -shrext) + prev=shrext + continue + ;; + + -static | -static-libtool-libs) + # The effects of -static are defined in a previous loop. + # We used to do the same as -all-static on platforms that + # didn't have a PIC flag, but the assumption that the effects + # would be equivalent was wrong. It would break on at least + # Digital Unix and AIX. + continue + ;; + + -thread-safe) + thread_safe=yes + continue + ;; + + -version-info) + prev=vinfo + continue + ;; + + -version-number) + prev=vinfo + vinfo_number=yes + continue + ;; + + -weak) + prev=weak + continue + ;; + + -Wc,*) + func_stripname '-Wc,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs=$IFS; IFS=, + for flag in $args; do + IFS=$save_ifs + func_quote_for_eval "$flag" + func_append arg " $func_quote_for_eval_result" + func_append compiler_flags " $func_quote_for_eval_result" + done + IFS=$save_ifs + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Wl,*) + func_stripname '-Wl,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs=$IFS; IFS=, + for flag in $args; do + IFS=$save_ifs + func_quote_for_eval "$flag" + func_append arg " $wl$func_quote_for_eval_result" + func_append compiler_flags " $wl$func_quote_for_eval_result" + func_append linker_flags " $func_quote_for_eval_result" + done + IFS=$save_ifs + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Xcompiler) + prev=xcompiler + continue + ;; + + -Xlinker) + prev=xlinker + continue + ;; + + -XCClinker) + prev=xcclinker + continue + ;; + + # -msg_* for osf cc + -msg_*) + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + ;; + + # Flags to be passed through unchanged, with rationale: + # -64, -mips[0-9] enable 64-bit mode for the SGI compiler + # -r[0-9][0-9]* specify processor for the SGI compiler + # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler + # +DA*, +DD* enable 64-bit mode for the HP compiler + # -q* compiler args for the IBM compiler + # -m*, -t[45]*, -txscale* architecture-specific flags for GCC + # -F/path path to uninstalled frameworks, gcc on darwin + # -p, -pg, --coverage, -fprofile-* profiling flags for GCC + # -fstack-protector* stack protector flags for GCC + # @file GCC response files + # -tp=* Portland pgcc target processor selection + # --sysroot=* for sysroot support + # -O*, -g*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time optimization + # -specs=* GCC specs files + # -stdlib=* select c++ std lib with clang + # -fsanitize=* Clang/GCC memory and address 
sanitizer + # -fuse-ld=* Linker select flags for GCC + # -static-* direct GCC to link specific libraries statically + # -fcilkplus Cilk Plus language extension features for C/C++ + -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ + -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \ + -O*|-g*|-flto*|-fwhopr*|-fuse-linker-plugin|-fstack-protector*|-stdlib=*| \ + -specs=*|-fsanitize=*|-fuse-ld=*|-static-*|-fcilkplus) + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + func_append compile_command " $arg" + func_append finalize_command " $arg" + func_append compiler_flags " $arg" + continue + ;; + + -Z*) + if test os2 = "`expr $host : '.*\(os2\)'`"; then + # OS/2 uses -Zxxx to specify OS/2-specific options + compiler_flags="$compiler_flags $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + case $arg in + -Zlinker | -Zstack) + prev=xcompiler + ;; + esac + continue + else + # Otherwise treat like 'Some other compiler flag' below + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + fi + ;; + + # Some other compiler flag. + -* | +*) + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + ;; + + *.$objext) + # A standard object. + func_append objs " $arg" + ;; + + *.lo) + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test none = "$pic_object" && + test none = "$non_pic_object"; then + func_fatal_error "cannot find name of object for '$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + test none = "$pic_object" || { + # Prepend the subdirectory the object is found in. + pic_object=$xdir$pic_object + + if test dlfiles = "$prev"; then + if test yes = "$build_libtool_libs" && test yes = "$dlopen_support"; then + func_append dlfiles " $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test dlprefiles = "$prev"; then + # Preload the old-style object. + func_append dlprefiles " $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg=$pic_object + } + + # Non-PIC object. + if test none != "$non_pic_object"; then + # Prepend the subdirectory the object is found in. + non_pic_object=$xdir$non_pic_object + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test none = "$pic_object"; then + arg=$non_pic_object + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object=$pic_object + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir=$func_dirname_result + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "'$arg' is not a valid libtool object" + fi + fi + ;; + + *.$libext) + # An archive. + func_append deplibs " $arg" + func_append old_deplibs " $arg" + continue + ;; + + *.la) + # A libtool-controlled library. 
+ + func_resolve_sysroot "$arg" + if test dlfiles = "$prev"; then + # This library was specified with -dlopen. + func_append dlfiles " $func_resolve_sysroot_result" + prev= + elif test dlprefiles = "$prev"; then + # The library was specified with -dlpreopen. + func_append dlprefiles " $func_resolve_sysroot_result" + prev= + else + func_append deplibs " $func_resolve_sysroot_result" + fi + continue + ;; + + # Some other compiler argument. + *) + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. + func_quote_for_eval "$arg" + arg=$func_quote_for_eval_result + ;; + esac # arg + + # Now actually substitute the argument into the commands. + if test -n "$arg"; then + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + done # argument parsing loop + + test -n "$prev" && \ + func_fatal_help "the '$prevarg' option requires an argument" + + if test yes = "$export_dynamic" && test -n "$export_dynamic_flag_spec"; then + eval arg=\"$export_dynamic_flag_spec\" + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + + oldlibs= + # calculate the name of the file, without its directory + func_basename "$output" + outputname=$func_basename_result + libobjs_save=$libobjs + + if test -n "$shlibpath_var"; then + # get the directories listed in $shlibpath_var + eval shlib_search_path=\`\$ECHO \"\$$shlibpath_var\" \| \$SED \'s/:/ /g\'\` + else + shlib_search_path= + fi + eval sys_lib_search_path=\"$sys_lib_search_path_spec\" + eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" + + # Definition is injected by LT_CONFIG during libtool generation. + func_munge_path_list sys_lib_dlsearch_path "$LT_SYS_LIBRARY_PATH" + + func_dirname "$output" "/" "" + output_objdir=$func_dirname_result$objdir + func_to_tool_file "$output_objdir/" + tool_output_objdir=$func_to_tool_file_result + # Create the object directory. + func_mkdir_p "$output_objdir" + + # Determine the type of output + case $output in + "") + func_fatal_help "you must specify an output file" + ;; + *.$libext) linkmode=oldlib ;; + *.lo | *.$objext) linkmode=obj ;; + *.la) linkmode=lib ;; + *) linkmode=prog ;; # Anything else should be a program. + esac + + specialdeplibs= + + libs= + # Find all interdependent deplibs by searching for libraries + # that are linked more than once (e.g. -la -lb -la) + for deplib in $deplibs; do + if $opt_preserve_dup_deps; then + case "$libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append libs " $deplib" + done + + if test lib = "$linkmode"; then + libs="$predeps $libs $compiler_lib_search_path $postdeps" + + # Compute libraries that are listed more than once in $predeps + # $postdeps and mark them as special (i.e., whose duplicates are + # not to be eliminated). 
+ pre_post_deps= + if $opt_duplicate_compiler_generated_deps; then + for pre_post_dep in $predeps $postdeps; do + case "$pre_post_deps " in + *" $pre_post_dep "*) func_append specialdeplibs " $pre_post_deps" ;; + esac + func_append pre_post_deps " $pre_post_dep" + done + fi + pre_post_deps= + fi + + deplibs= + newdependency_libs= + newlib_search_path= + need_relink=no # whether we're linking any uninstalled libtool libraries + notinst_deplibs= # not-installed libtool libraries + notinst_path= # paths that contain not-installed libtool libraries + + case $linkmode in + lib) + passes="conv dlpreopen link" + for file in $dlfiles $dlprefiles; do + case $file in + *.la) ;; + *) + func_fatal_help "libraries can '-dlopen' only libtool libraries: $file" + ;; + esac + done + ;; + prog) + compile_deplibs= + finalize_deplibs= + alldeplibs=false + newdlfiles= + newdlprefiles= + passes="conv scan dlopen dlpreopen link" + ;; + *) passes="conv" + ;; + esac + + for pass in $passes; do + # The preopen pass in lib mode reverses $deplibs; put it back here + # so that -L comes before libs that need it for instance... + if test lib,link = "$linkmode,$pass"; then + ## FIXME: Find the place where the list is rebuilt in the wrong + ## order, and fix it there properly + tmp_deplibs= + for deplib in $deplibs; do + tmp_deplibs="$deplib $tmp_deplibs" + done + deplibs=$tmp_deplibs + fi + + if test lib,link = "$linkmode,$pass" || + test prog,scan = "$linkmode,$pass"; then + libs=$deplibs + deplibs= + fi + if test prog = "$linkmode"; then + case $pass in + dlopen) libs=$dlfiles ;; + dlpreopen) libs=$dlprefiles ;; + link) + libs="$deplibs %DEPLIBS%" + test "X$link_all_deplibs" != Xno && libs="$libs $dependency_libs" + ;; + esac + fi + if test lib,dlpreopen = "$linkmode,$pass"; then + # Collect and forward deplibs of preopened libtool libs + for lib in $dlprefiles; do + # Ignore non-libtool-libs + dependency_libs= + func_resolve_sysroot "$lib" + case $lib in + *.la) func_source "$func_resolve_sysroot_result" ;; + esac + + # Collect preopened libtool deplibs, except any this library + # has declared as weak libs + for deplib in $dependency_libs; do + func_basename "$deplib" + deplib_base=$func_basename_result + case " $weak_libs " in + *" $deplib_base "*) ;; + *) func_append deplibs " $deplib" ;; + esac + done + done + libs=$dlprefiles + fi + if test dlopen = "$pass"; then + # Collect dlpreopened libraries + save_deplibs=$deplibs + deplibs= + fi + + for deplib in $libs; do + lib= + found=false + case $deplib in + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ + |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + func_append compiler_flags " $deplib" + if test lib = "$linkmode"; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi + continue + ;; + -l*) + if test lib != "$linkmode" && test prog != "$linkmode"; then + func_warning "'-l' is ignored for archives/objects" + continue + fi + func_stripname '-l' '' "$deplib" + name=$func_stripname_result + if test lib = "$linkmode"; then + searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path" + else + searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path" + fi + for searchdir in $searchdirs; do + for search_ext in .la $std_shrext 
.so .a; do + # Search the libtool library + lib=$searchdir/lib$name$search_ext + if test -f "$lib"; then + if test .la = "$search_ext"; then + found=: + else + found=false + fi + break 2 + fi + done + done + if $found; then + # deplib is a libtool library + # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, + # We need to do some special things here, and not later. + if test yes = "$allow_libtool_libs_with_static_runtimes"; then + case " $predeps $postdeps " in + *" $deplib "*) + if func_lalib_p "$lib"; then + library_names= + old_library= + func_source "$lib" + for l in $old_library $library_names; do + ll=$l + done + if test "X$ll" = "X$old_library"; then # only static version available + found=false + func_dirname "$lib" "" "." + ladir=$func_dirname_result + lib=$ladir/$old_library + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test lib = "$linkmode" && newdependency_libs="$deplib $newdependency_libs" + fi + continue + fi + fi + ;; + *) ;; + esac + fi + else + # deplib doesn't seem to be a libtool library + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test lib = "$linkmode" && newdependency_libs="$deplib $newdependency_libs" + fi + continue + fi + ;; # -l + *.ltframework) + if test prog,link = "$linkmode,$pass"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + if test lib = "$linkmode"; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi + continue + ;; + -L*) + case $linkmode in + lib) + deplibs="$deplib $deplibs" + test conv = "$pass" && continue + newdependency_libs="$deplib $newdependency_libs" + func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + prog) + if test conv = "$pass"; then + deplibs="$deplib $deplibs" + continue + fi + if test scan = "$pass"; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + *) + func_warning "'-L' is ignored for archives/objects" + ;; + esac # linkmode + continue + ;; # -L + -R*) + if test link = "$pass"; then + func_stripname '-R' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + dir=$func_resolve_sysroot_result + # Make sure the xrpath contains only unique directories. + case "$xrpath " in + *" $dir "*) ;; + *) func_append xrpath " $dir" ;; + esac + fi + deplibs="$deplib $deplibs" + continue + ;; + *.la) + func_resolve_sysroot "$deplib" + lib=$func_resolve_sysroot_result + ;; + *.$libext) + if test conv = "$pass"; then + deplibs="$deplib $deplibs" + continue + fi + case $linkmode in + lib) + # Linking convenience modules into shared libraries is allowed, + # but linking other static libraries is non-portable. 
+ case " $dlpreconveniencelibs " in + *" $deplib "*) ;; + *) + valid_a_lib=false + case $deplibs_check_method in + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + if eval "\$ECHO \"$deplib\"" 2>/dev/null | $SED 10q \ + | $EGREP "$match_pattern_regex" > /dev/null; then + valid_a_lib=: + fi + ;; + pass_all) + valid_a_lib=: + ;; + esac + if $valid_a_lib; then + echo + $ECHO "*** Warning: Linking the shared library $output against the" + $ECHO "*** static library $deplib is not portable!" + deplibs="$deplib $deplibs" + else + echo + $ECHO "*** Warning: Trying to link with static lib archive $deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because the file extensions .$libext of this argument makes me believe" + echo "*** that it is just a static archive that I should not use here." + fi + ;; + esac + continue + ;; + prog) + if test link != "$pass"; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + continue + ;; + esac # linkmode + ;; # *.$libext + *.lo | *.$objext) + if test conv = "$pass"; then + deplibs="$deplib $deplibs" + elif test prog = "$linkmode"; then + if test dlpreopen = "$pass" || test yes != "$dlopen_support" || test no = "$build_libtool_libs"; then + # If there is no dlopen support or we're linking statically, + # we need to preload. + func_append newdlprefiles " $deplib" + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + func_append newdlfiles " $deplib" + fi + fi + continue + ;; + %DEPLIBS%) + alldeplibs=: + continue + ;; + esac # case $deplib + + $found || test -f "$lib" \ + || func_fatal_error "cannot find the library '$lib' or unhandled argument '$deplib'" + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$lib" \ + || func_fatal_error "'$lib' is not a valid libtool archive" + + func_dirname "$lib" "" "." 
+ ladir=$func_dirname_result + + dlname= + dlopen= + dlpreopen= + libdir= + library_names= + old_library= + inherited_linker_flags= + # If the library was installed with an old release of libtool, + # it will not redefine variables installed, or shouldnotlink + installed=yes + shouldnotlink=no + avoidtemprpath= + + + # Read the .la file + func_source "$lib" + + # Convert "-framework foo" to "foo.ltframework" + if test -n "$inherited_linker_flags"; then + tmp_inherited_linker_flags=`$ECHO "$inherited_linker_flags" | $SED 's/-framework \([^ $]*\)/\1.ltframework/g'` + for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do + case " $new_inherited_linker_flags " in + *" $tmp_inherited_linker_flag "*) ;; + *) func_append new_inherited_linker_flags " $tmp_inherited_linker_flag";; + esac + done + fi + dependency_libs=`$ECHO " $dependency_libs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + if test lib,link = "$linkmode,$pass" || + test prog,scan = "$linkmode,$pass" || + { test prog != "$linkmode" && test lib != "$linkmode"; }; then + test -n "$dlopen" && func_append dlfiles " $dlopen" + test -n "$dlpreopen" && func_append dlprefiles " $dlpreopen" + fi + + if test conv = "$pass"; then + # Only check for convenience libraries + deplibs="$lib $deplibs" + if test -z "$libdir"; then + if test -z "$old_library"; then + func_fatal_error "cannot find name of link library for '$lib'" + fi + # It is a libtool convenience library, so add in its objects. + func_append convenience " $ladir/$objdir/$old_library" + func_append old_convenience " $ladir/$objdir/$old_library" + tmp_libs= + for deplib in $dependency_libs; do + deplibs="$deplib $deplibs" + if $opt_preserve_dup_deps; then + case "$tmp_libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append tmp_libs " $deplib" + done + elif test prog != "$linkmode" && test lib != "$linkmode"; then + func_fatal_error "'$lib' is not a convenience library" + fi + continue + fi # $pass = conv + + + # Get the name of the library we link against. + linklib= + if test -n "$old_library" && + { test yes = "$prefer_static_libs" || + test built,no = "$prefer_static_libs,$installed"; }; then + linklib=$old_library + else + for l in $old_library $library_names; do + linklib=$l + done + fi + if test -z "$linklib"; then + func_fatal_error "cannot find name of link library for '$lib'" + fi + + # This library was specified with -dlopen. + if test dlopen = "$pass"; then + test -z "$libdir" \ + && func_fatal_error "cannot -dlopen a convenience library: '$lib'" + if test -z "$dlname" || + test yes != "$dlopen_support" || + test no = "$build_libtool_libs" + then + # If there is no dlname, no dlopen support or we're linking + # statically, we need to preload. We also need to preload any + # dependent libraries so libltdl's deplib preloader doesn't + # bomb out in the load deplibs phase. + func_append dlprefiles " $lib $dependency_libs" + else + func_append newdlfiles " $lib" + fi + continue + fi # $pass = dlopen + + # We need an absolute path. + case $ladir in + [\\/]* | [A-Za-z]:[\\/]*) abs_ladir=$ladir ;; + *) + abs_ladir=`cd "$ladir" && pwd` + if test -z "$abs_ladir"; then + func_warning "cannot determine absolute directory name of '$ladir'" + func_warning "passing it literally to the linker, although it might fail" + abs_ladir=$ladir + fi + ;; + esac + func_basename "$lib" + laname=$func_basename_result + + # Find the relevant object directory and library name. + if test yes = "$installed"; then + if test ! 
-f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then + func_warning "library '$lib' was moved." + dir=$ladir + absdir=$abs_ladir + libdir=$abs_ladir + else + dir=$lt_sysroot$libdir + absdir=$lt_sysroot$libdir + fi + test yes = "$hardcode_automatic" && avoidtemprpath=yes + else + if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then + dir=$ladir + absdir=$abs_ladir + # Remove this search path later + func_append notinst_path " $abs_ladir" + else + dir=$ladir/$objdir + absdir=$abs_ladir/$objdir + # Remove this search path later + func_append notinst_path " $abs_ladir" + fi + fi # $installed = yes + func_stripname 'lib' '.la' "$laname" + name=$func_stripname_result + + # This library was specified with -dlpreopen. + if test dlpreopen = "$pass"; then + if test -z "$libdir" && test prog = "$linkmode"; then + func_fatal_error "only libraries may -dlpreopen a convenience library: '$lib'" + fi + case $host in + # special handling for platforms with PE-DLLs. + *cygwin* | *mingw* | *cegcc* ) + # Linker will automatically link against shared library if both + # static and shared are present. Therefore, ensure we extract + # symbols from the import library if a shared library is present + # (otherwise, the dlopen module name will be incorrect). We do + # this by putting the import library name into $newdlprefiles. + # We recover the dlopen module name by 'saving' the la file + # name in a special purpose variable, and (later) extracting the + # dlname from the la file. + if test -n "$dlname"; then + func_tr_sh "$dir/$linklib" + eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname" + func_append newdlprefiles " $dir/$linklib" + else + func_append newdlprefiles " $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. + test -z "$libdir" && \ + func_append dlpreconveniencelibs " $dir/$old_library" + fi + ;; + * ) + # Prefer using a static library (so that no silly _DYNAMIC symbols + # are required to link). + if test -n "$old_library"; then + func_append newdlprefiles " $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. + test -z "$libdir" && \ + func_append dlpreconveniencelibs " $dir/$old_library" + # Otherwise, use the dlname, so that lt_dlopen finds it. + elif test -n "$dlname"; then + func_append newdlprefiles " $dir/$dlname" + else + func_append newdlprefiles " $dir/$linklib" + fi + ;; + esac + fi # $pass = dlpreopen + + if test -z "$libdir"; then + # Link the convenience library + if test lib = "$linkmode"; then + deplibs="$dir/$old_library $deplibs" + elif test prog,link = "$linkmode,$pass"; then + compile_deplibs="$dir/$old_library $compile_deplibs" + finalize_deplibs="$dir/$old_library $finalize_deplibs" + else + deplibs="$lib $deplibs" # used for prog,scan pass + fi + continue + fi + + + if test prog = "$linkmode" && test link != "$pass"; then + func_append newlib_search_path " $ladir" + deplibs="$lib $deplibs" + + linkalldeplibs=false + if test no != "$link_all_deplibs" || test -z "$library_names" || + test no = "$build_libtool_libs"; then + linkalldeplibs=: + fi + + tmp_libs= + for deplib in $dependency_libs; do + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + esac + # Need to link against all dependency_libs? 
+ if $linkalldeplibs; then + deplibs="$deplib $deplibs" + else + # Need to hardcode shared library paths + # or/and link against static libraries + newdependency_libs="$deplib $newdependency_libs" + fi + if $opt_preserve_dup_deps; then + case "$tmp_libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append tmp_libs " $deplib" + done # for deplib + continue + fi # $linkmode = prog... + + if test prog,link = "$linkmode,$pass"; then + if test -n "$library_names" && + { { test no = "$prefer_static_libs" || + test built,yes = "$prefer_static_libs,$installed"; } || + test -z "$old_library"; }; then + # We need to hardcode the library path + if test -n "$shlibpath_var" && test -z "$avoidtemprpath"; then + # Make sure the rpath contains only unique directories. + case $temp_rpath: in + *"$absdir:"*) ;; + *) func_append temp_rpath "$absdir:" ;; + esac + fi + + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac + fi # $linkmode,$pass = prog,link... + + if $alldeplibs && + { test pass_all = "$deplibs_check_method" || + { test yes = "$build_libtool_libs" && + test -n "$library_names"; }; }; then + # We only need to search for static libraries + continue + fi + fi + + link_static=no # Whether the deplib will be linked statically + use_static_libs=$prefer_static_libs + if test built = "$use_static_libs" && test yes = "$installed"; then + use_static_libs=no + fi + if test -n "$library_names" && + { test no = "$use_static_libs" || test -z "$old_library"; }; then + case $host in + *cygwin* | *mingw* | *cegcc* | *os2*) + # No point in relinking DLLs because paths are not encoded + func_append notinst_deplibs " $lib" + need_relink=no + ;; + *) + if test no = "$installed"; then + func_append notinst_deplibs " $lib" + need_relink=yes + fi + ;; + esac + # This is a shared library + + # Warn about portability, can't link against -module's on some + # systems (darwin). Don't bleat about dlopened modules though! + dlopenmodule= + for dlpremoduletest in $dlprefiles; do + if test "X$dlpremoduletest" = "X$lib"; then + dlopenmodule=$dlpremoduletest + break + fi + done + if test -z "$dlopenmodule" && test yes = "$shouldnotlink" && test link = "$pass"; then + echo + if test prog = "$linkmode"; then + $ECHO "*** Warning: Linking the executable $output against the loadable module" + else + $ECHO "*** Warning: Linking the shared library $output against the loadable module" + fi + $ECHO "*** $linklib is not portable!" + fi + if test lib = "$linkmode" && + test yes = "$hardcode_into_libs"; then + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. 
+ case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac + fi + + if test -n "$old_archive_from_expsyms_cmds"; then + # figure out the soname + set dummy $library_names + shift + realname=$1 + shift + libname=`eval "\\$ECHO \"$libname_spec\""` + # use dlname if we got it. it's perfectly good, no? + if test -n "$dlname"; then + soname=$dlname + elif test -n "$soname_spec"; then + # bleh windows + case $host in + *cygwin* | mingw* | *cegcc* | *os2*) + func_arith $current - $age + major=$func_arith_result + versuffix=-$major + ;; + esac + eval soname=\"$soname_spec\" + else + soname=$realname + fi + + # Make a new name for the extract_expsyms_cmds to use + soroot=$soname + func_basename "$soroot" + soname=$func_basename_result + func_stripname 'lib' '.dll' "$soname" + newlib=libimp-$func_stripname_result.a + + # If the library has no export list, then create one now + if test -f "$output_objdir/$soname-def"; then : + else + func_verbose "extracting exported symbol list from '$soname'" + func_execute_cmds "$extract_expsyms_cmds" 'exit $?' + fi + + # Create $newlib + if test -f "$output_objdir/$newlib"; then :; else + func_verbose "generating import library for '$soname'" + func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?' + fi + # make sure the library variables are pointing to the new library + dir=$output_objdir + linklib=$newlib + fi # test -n "$old_archive_from_expsyms_cmds" + + if test prog = "$linkmode" || test relink != "$opt_mode"; then + add_shlibpath= + add_dir= + add= + lib_linked=yes + case $hardcode_action in + immediate | unsupported) + if test no = "$hardcode_direct"; then + add=$dir/$linklib + case $host in + *-*-sco3.2v5.0.[024]*) add_dir=-L$dir ;; + *-*-sysv4*uw2*) add_dir=-L$dir ;; + *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ + *-*-unixware7*) add_dir=-L$dir ;; + *-*-darwin* ) + # if the lib is a (non-dlopened) module then we cannot + # link against it, someone is ignoring the earlier warnings + if /usr/bin/file -L $add 2> /dev/null | + $GREP ": [^:]* bundle" >/dev/null; then + if test "X$dlopenmodule" != "X$lib"; then + $ECHO "*** Warning: lib $linklib is a module, not a shared library" + if test -z "$old_library"; then + echo + echo "*** And there doesn't seem to be a static archive available" + echo "*** The link will probably fail, sorry" + else + add=$dir/$old_library + fi + elif test -n "$old_library"; then + add=$dir/$old_library + fi + fi + esac + elif test no = "$hardcode_minus_L"; then + case $host in + *-*-sunos*) add_shlibpath=$dir ;; + esac + add_dir=-L$dir + add=-l$name + elif test no = "$hardcode_shlibpath_var"; then + add_shlibpath=$dir + add=-l$name + else + lib_linked=no + fi + ;; + relink) + if test yes = "$hardcode_direct" && + test no = "$hardcode_direct_absolute"; then + add=$dir/$linklib + elif test yes = "$hardcode_minus_L"; then + add_dir=-L$absdir + # Try looking first in the location we're being installed to. 
+ if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi + add=-l$name + elif test yes = "$hardcode_shlibpath_var"; then + add_shlibpath=$dir + add=-l$name + else + lib_linked=no + fi + ;; + *) lib_linked=no ;; + esac + + if test yes != "$lib_linked"; then + func_fatal_configuration "unsupported hardcode properties" + fi + + if test -n "$add_shlibpath"; then + case :$compile_shlibpath: in + *":$add_shlibpath:"*) ;; + *) func_append compile_shlibpath "$add_shlibpath:" ;; + esac + fi + if test prog = "$linkmode"; then + test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" + test -n "$add" && compile_deplibs="$add $compile_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + if test yes != "$hardcode_direct" && + test yes != "$hardcode_minus_L" && + test yes = "$hardcode_shlibpath_var"; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) func_append finalize_shlibpath "$libdir:" ;; + esac + fi + fi + fi + + if test prog = "$linkmode" || test relink = "$opt_mode"; then + add_shlibpath= + add_dir= + add= + # Finalize command for both is simple: just hardcode it. + if test yes = "$hardcode_direct" && + test no = "$hardcode_direct_absolute"; then + add=$libdir/$linklib + elif test yes = "$hardcode_minus_L"; then + add_dir=-L$libdir + add=-l$name + elif test yes = "$hardcode_shlibpath_var"; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) func_append finalize_shlibpath "$libdir:" ;; + esac + add=-l$name + elif test yes = "$hardcode_automatic"; then + if test -n "$inst_prefix_dir" && + test -f "$inst_prefix_dir$libdir/$linklib"; then + add=$inst_prefix_dir$libdir/$linklib + else + add=$libdir/$linklib + fi + else + # We cannot seem to hardcode it, guess we'll fake it. + add_dir=-L$libdir + # Try looking first in the location we're being installed to. + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi + add=-l$name + fi + + if test prog = "$linkmode"; then + test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" + test -n "$add" && finalize_deplibs="$add $finalize_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + fi + fi + elif test prog = "$linkmode"; then + # Here we assume that one of hardcode_direct or hardcode_minus_L + # is not unsupported. This is valid on all known static and + # shared platforms. + if test unsupported != "$hardcode_direct"; then + test -n "$old_library" && linklib=$old_library + compile_deplibs="$dir/$linklib $compile_deplibs" + finalize_deplibs="$dir/$linklib $finalize_deplibs" + else + compile_deplibs="-l$name -L$dir $compile_deplibs" + finalize_deplibs="-l$name -L$dir $finalize_deplibs" + fi + elif test yes = "$build_libtool_libs"; then + # Not a shared library + if test pass_all != "$deplibs_check_method"; then + # We're trying link a shared library against a static one + # but the system doesn't support it. + + # Just print a warning and add the library to dependency_libs so + # that the program can be linked against the static library. + echo + $ECHO "*** Warning: This system cannot link to static lib archive $lib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. 
But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have." + if test yes = "$module"; then + echo "*** But as you try to build a module library, libtool will still create " + echo "*** a static module, that should work as long as the dlopening application" + echo "*** is linked with the -dlopen flag to resolve symbols at runtime." + if test -z "$global_symbol_pipe"; then + echo + echo "*** However, this would only work if libtool was able to extract symbol" + echo "*** lists from a program, using 'nm' or equivalent, but libtool could" + echo "*** not find such a program. So, this module is probably useless." + echo "*** 'nm' from GNU binutils and a full rebuild may help." + fi + if test no = "$build_old_libs"; then + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + else + deplibs="$dir/$old_library $deplibs" + link_static=yes + fi + fi # link shared/static library? + + if test lib = "$linkmode"; then + if test -n "$dependency_libs" && + { test yes != "$hardcode_into_libs" || + test yes = "$build_old_libs" || + test yes = "$link_static"; }; then + # Extract -R from dependency_libs + temp_deplibs= + for libdir in $dependency_libs; do + case $libdir in + -R*) func_stripname '-R' '' "$libdir" + temp_xrpath=$func_stripname_result + case " $xrpath " in + *" $temp_xrpath "*) ;; + *) func_append xrpath " $temp_xrpath";; + esac;; + *) func_append temp_deplibs " $libdir";; + esac + done + dependency_libs=$temp_deplibs + fi + + func_append newlib_search_path " $absdir" + # Link against this library + test no = "$link_static" && newdependency_libs="$abs_ladir/$laname $newdependency_libs" + # ... and its dependency_libs + tmp_libs= + for deplib in $dependency_libs; do + newdependency_libs="$deplib $newdependency_libs" + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result";; + *) func_resolve_sysroot "$deplib" ;; + esac + if $opt_preserve_dup_deps; then + case "$tmp_libs " in + *" $func_resolve_sysroot_result "*) + func_append specialdeplibs " $func_resolve_sysroot_result" ;; + esac + fi + func_append tmp_libs " $func_resolve_sysroot_result" + done + + if test no != "$link_all_deplibs"; then + # Add the search paths of all dependency libraries + for deplib in $dependency_libs; do + path= + case $deplib in + -L*) path=$deplib ;; + *.la) + func_resolve_sysroot "$deplib" + deplib=$func_resolve_sysroot_result + func_dirname "$deplib" "" "." + dir=$func_dirname_result + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) absdir=$dir ;; + *) + absdir=`cd "$dir" && pwd` + if test -z "$absdir"; then + func_warning "cannot determine absolute directory name of '$dir'" + absdir=$dir + fi + ;; + esac + if $GREP "^installed=no" $deplib > /dev/null; then + case $host in + *-*-darwin*) + depdepl= + eval deplibrary_names=`$SED -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` + if test -n "$deplibrary_names"; then + for tmp in $deplibrary_names; do + depdepl=$tmp + done + if test -f "$absdir/$objdir/$depdepl"; then + depdepl=$absdir/$objdir/$depdepl + darwin_install_name=`$OTOOL -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + if test -z "$darwin_install_name"; then + darwin_install_name=`$OTOOL64 -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + fi + func_append compiler_flags " $wl-dylib_file $wl$darwin_install_name:$depdepl" + func_append linker_flags " -dylib_file $darwin_install_name:$depdepl" + path= + fi + fi + ;; + *) + path=-L$absdir/$objdir + ;; + esac + else + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + test -z "$libdir" && \ + func_fatal_error "'$deplib' is not a valid libtool archive" + test "$absdir" != "$libdir" && \ + func_warning "'$deplib' seems to be moved" + + path=-L$absdir + fi + ;; + esac + case " $deplibs " in + *" $path "*) ;; + *) deplibs="$path $deplibs" ;; + esac + done + fi # link_all_deplibs != no + fi # linkmode = lib + done # for deplib in $libs + if test link = "$pass"; then + if test prog = "$linkmode"; then + compile_deplibs="$new_inherited_linker_flags $compile_deplibs" + finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs" + else + compiler_flags="$compiler_flags "`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + fi + fi + dependency_libs=$newdependency_libs + if test dlpreopen = "$pass"; then + # Link the dlpreopened libraries before other libraries + for deplib in $save_deplibs; do + deplibs="$deplib $deplibs" + done + fi + if test dlopen != "$pass"; then + test conv = "$pass" || { + # Make sure lib_search_path contains only unique directories. + lib_search_path= + for dir in $newlib_search_path; do + case "$lib_search_path " in + *" $dir "*) ;; + *) func_append lib_search_path " $dir" ;; + esac + done + newlib_search_path= + } + + if test prog,link = "$linkmode,$pass"; then + vars="compile_deplibs finalize_deplibs" + else + vars=deplibs + fi + for var in $vars dependency_libs; do + # Add libraries to $var in reverse order + eval tmp_libs=\"\$$var\" + new_libs= + for deplib in $tmp_libs; do + # FIXME: Pedantically, this is the right thing to do, so + # that some nasty dependency loop isn't accidentally + # broken: + #new_libs="$deplib $new_libs" + # Pragmatically, this seems to cause very few problems in + # practice: + case $deplib in + -L*) new_libs="$deplib $new_libs" ;; + -R*) ;; + *) + # And here is the reason: when a library appears more + # than once as an explicit dependence of a library, or + # is implicitly linked in more than once by the + # compiler, it is considered special, and multiple + # occurrences thereof are not removed. Compare this + # with having the same library being listed as a + # dependency of multiple other libraries: in this case, + # we know (pedantically, we assume) the library does not + # need to be listed more than once, so we keep only the + # last copy. 
This is not always right, but it is rare + # enough that we require users that really mean to play + # such unportable linking tricks to link the library + # using -Wl,-lname, so that libtool does not consider it + # for duplicate removal. + case " $specialdeplibs " in + *" $deplib "*) new_libs="$deplib $new_libs" ;; + *) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$deplib $new_libs" ;; + esac + ;; + esac + ;; + esac + done + tmp_libs= + for deplib in $new_libs; do + case $deplib in + -L*) + case " $tmp_libs " in + *" $deplib "*) ;; + *) func_append tmp_libs " $deplib" ;; + esac + ;; + *) func_append tmp_libs " $deplib" ;; + esac + done + eval $var=\"$tmp_libs\" + done # for var + fi + + # Add Sun CC postdeps if required: + test CXX = "$tagname" && { + case $host_os in + linux*) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) # Sun C++ 5.9 + func_suncc_cstd_abi + + if test no != "$suncc_use_cstd_abi"; then + func_append postdeps ' -library=Cstd -library=Crun' + fi + ;; + esac + ;; + + solaris*) + func_cc_basename "$CC" + case $func_cc_basename_result in + CC* | sunCC*) + func_suncc_cstd_abi + + if test no != "$suncc_use_cstd_abi"; then + func_append postdeps ' -library=Cstd -library=Crun' + fi + ;; + esac + ;; + esac + } + + # Last step: remove runtime libs from dependency_libs + # (they stay in deplibs) + tmp_libs= + for i in $dependency_libs; do + case " $predeps $postdeps $compiler_lib_search_path " in + *" $i "*) + i= + ;; + esac + if test -n "$i"; then + func_append tmp_libs " $i" + fi + done + dependency_libs=$tmp_libs + done # for pass + if test prog = "$linkmode"; then + dlfiles=$newdlfiles + fi + if test prog = "$linkmode" || test lib = "$linkmode"; then + dlprefiles=$newdlprefiles + fi + + case $linkmode in + oldlib) + if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then + func_warning "'-dlopen' is ignored for archives" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "'-l' and '-L' are ignored for archives" ;; + esac + + test -n "$rpath" && \ + func_warning "'-rpath' is ignored for archives" + + test -n "$xrpath" && \ + func_warning "'-R' is ignored for archives" + + test -n "$vinfo" && \ + func_warning "'-version-info/-version-number' is ignored for archives" + + test -n "$release" && \ + func_warning "'-release' is ignored for archives" + + test -n "$export_symbols$export_symbols_regex" && \ + func_warning "'-export-symbols' is ignored for archives" + + # Now set the variables for building old libraries. + build_libtool_libs=no + oldlibs=$output + func_append objs "$old_deplibs" + ;; + + lib) + # Make sure we only generate libraries of the form 'libNAME.la'. 
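+	# Editorial note (illustrative, not part of upstream libtool): for
+	# outputname=libfoo.la the branch below strips the prefix/suffix so
+	# name=foo, then evaluates shrext_cmds and libname_spec; on a typical
+	# ELF configuration that yields shared_ext=.so and libname=libfoo.
+	# An output name without the 'lib' prefix is only accepted when
+	# -module was given.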
+ case $outputname in + lib*) + func_stripname 'lib' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + ;; + *) + test no = "$module" \ + && func_fatal_help "libtool library '$output' must begin with 'lib'" + + if test no != "$need_lib_prefix"; then + # Add the "lib" prefix for modules if required + func_stripname '' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + else + func_stripname '' '.la' "$outputname" + libname=$func_stripname_result + fi + ;; + esac + + if test -n "$objs"; then + if test pass_all != "$deplibs_check_method"; then + func_fatal_error "cannot build libtool library '$output' from non-libtool objects on this host:$objs" + else + echo + $ECHO "*** Warning: Linking the shared library $output against the non-libtool" + $ECHO "*** objects $objs is not portable!" + func_append libobjs " $objs" + fi + fi + + test no = "$dlself" \ + || func_warning "'-dlopen self' is ignored for libtool libraries" + + set dummy $rpath + shift + test 1 -lt "$#" \ + && func_warning "ignoring multiple '-rpath's for a libtool library" + + install_libdir=$1 + + oldlibs= + if test -z "$rpath"; then + if test yes = "$build_libtool_libs"; then + # Building a libtool convenience library. + # Some compilers have problems with a '.al' extension so + # convenience libraries should have the same extension an + # archive normally would. + oldlibs="$output_objdir/$libname.$libext $oldlibs" + build_libtool_libs=convenience + build_old_libs=yes + fi + + test -n "$vinfo" && \ + func_warning "'-version-info/-version-number' is ignored for convenience libraries" + + test -n "$release" && \ + func_warning "'-release' is ignored for convenience libraries" + else + + # Parse the version information argument. + save_ifs=$IFS; IFS=: + set dummy $vinfo 0 0 0 + shift + IFS=$save_ifs + + test -n "$7" && \ + func_fatal_help "too many parameters to '-version-info'" + + # convert absolute version numbers to libtool ages + # this retains compatibility with .la files and attempts + # to make the code below a bit more comprehensible + + case $vinfo_number in + yes) + number_major=$1 + number_minor=$2 + number_revision=$3 + # + # There are really only two kinds -- those that + # use the current revision as the major version + # and those that subtract age and use age as + # a minor version. But, then there is irix + # that has an extra 1 added just for fun + # + case $version_type in + # correct linux to gnu/linux during the next big refactor + darwin|freebsd-elf|linux|osf|windows|none) + func_arith $number_major + $number_minor + current=$func_arith_result + age=$number_minor + revision=$number_revision + ;; + freebsd-aout|qnx|sunos) + current=$number_major + revision=$number_minor + age=0 + ;; + irix|nonstopux) + func_arith $number_major + $number_minor + current=$func_arith_result + age=$number_minor + revision=$number_minor + lt_irix_increment=no + ;; + *) + func_fatal_configuration "$modename: unknown library version type '$version_type'" + ;; + esac + ;; + no) + current=$1 + revision=$2 + age=$3 + ;; + esac + + # Check that each of the things are valid numbers. 
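+	# Editorial note (worked example, not part of upstream libtool): with
+	# '-version-info 5:1:2' (vinfo_number=no) the fields map directly to
+	# current=5 revision=1 age=2; with '-version-number 3:2:1'
+	# (vinfo_number=yes) on linux/gnu the arithmetic above gives
+	# current=3+2=5, age=2, revision=1, so both spellings describe the
+	# same supported interface range [current-age, current] = [3, 5].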
+ case $current in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "CURRENT '$current' must be a nonnegative integer" + func_fatal_error "'$vinfo' is not valid version information" + ;; + esac + + case $revision in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "REVISION '$revision' must be a nonnegative integer" + func_fatal_error "'$vinfo' is not valid version information" + ;; + esac + + case $age in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "AGE '$age' must be a nonnegative integer" + func_fatal_error "'$vinfo' is not valid version information" + ;; + esac + + if test "$age" -gt "$current"; then + func_error "AGE '$age' is greater than the current interface number '$current'" + func_fatal_error "'$vinfo' is not valid version information" + fi + + # Calculate the version variables. + major= + versuffix= + verstring= + case $version_type in + none) ;; + + darwin) + # Like Linux, but with the current version available in + # verstring for coding it into the library header + func_arith $current - $age + major=.$func_arith_result + versuffix=$major.$age.$revision + # Darwin ld doesn't like 0 for these options... + func_arith $current + 1 + minor_current=$func_arith_result + xlcverstring="$wl-compatibility_version $wl$minor_current $wl-current_version $wl$minor_current.$revision" + verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" + # On Darwin other compilers + case $CC in + nagfor*) + verstring="$wl-compatibility_version $wl$minor_current $wl-current_version $wl$minor_current.$revision" + ;; + *) + verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" + ;; + esac + ;; + + freebsd-aout) + major=.$current + versuffix=.$current.$revision + ;; + + freebsd-elf) + func_arith $current - $age + major=.$func_arith_result + versuffix=$major.$age.$revision + ;; + + irix | nonstopux) + if test no = "$lt_irix_increment"; then + func_arith $current - $age + else + func_arith $current - $age + 1 + fi + major=$func_arith_result + + case $version_type in + nonstopux) verstring_prefix=nonstopux ;; + *) verstring_prefix=sgi ;; + esac + verstring=$verstring_prefix$major.$revision + + # Add in all the interfaces that we are compatible with. + loop=$revision + while test 0 -ne "$loop"; do + func_arith $revision - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring=$verstring_prefix$major.$iface:$verstring + done + + # Before this point, $major must not contain '.'. + major=.$major + versuffix=$major.$revision + ;; + + linux) # correct to gnu/linux during the next big refactor + func_arith $current - $age + major=.$func_arith_result + versuffix=$major.$age.$revision + ;; + + osf) + func_arith $current - $age + major=.$func_arith_result + versuffix=.$current.$age.$revision + verstring=$current.$age.$revision + + # Add in all the interfaces that we are compatible with. + loop=$age + while test 0 -ne "$loop"; do + func_arith $current - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring=$verstring:$iface.0 + done + + # Make executables depend on our current version. 
+ func_append verstring ":$current.0" + ;; + + qnx) + major=.$current + versuffix=.$current + ;; + + sco) + major=.$current + versuffix=.$current + ;; + + sunos) + major=.$current + versuffix=.$current.$revision + ;; + + windows) + # Use '-' rather than '.', since we only want one + # extension on DOS 8.3 file systems. + func_arith $current - $age + major=$func_arith_result + versuffix=-$major + ;; + + *) + func_fatal_configuration "unknown library version type '$version_type'" + ;; + esac + + # Clear the version info if we defaulted, and they specified a release. + if test -z "$vinfo" && test -n "$release"; then + major= + case $version_type in + darwin) + # we can't check for "0.0" in archive_cmds due to quoting + # problems, so we reset it completely + verstring= + ;; + *) + verstring=0.0 + ;; + esac + if test no = "$need_version"; then + versuffix= + else + versuffix=.0.0 + fi + fi + + # Remove version info from name if versioning should be avoided + if test yes,no = "$avoid_version,$need_version"; then + major= + versuffix= + verstring= + fi + + # Check to see if the archive will have undefined symbols. + if test yes = "$allow_undefined"; then + if test unsupported = "$allow_undefined_flag"; then + if test yes = "$build_old_libs"; then + func_warning "undefined symbols not allowed in $host shared libraries; building static only" + build_libtool_libs=no + else + func_fatal_error "can't build $host shared library unless -no-undefined is specified" + fi + fi + else + # Don't allow undefined symbols. + allow_undefined_flag=$no_undefined_flag + fi + + fi + + func_generate_dlsyms "$libname" "$libname" : + func_append libobjs " $symfileobj" + test " " = "$libobjs" && libobjs= + + if test relink != "$opt_mode"; then + # Remove our outputs, but don't remove object files since they + # may have been created when compiling PIC objects. + removelist= + tempremovelist=`$ECHO "$output_objdir/*"` + for p in $tempremovelist; do + case $p in + *.$objext | *.gcno) + ;; + $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/$libname$release.*) + if test -n "$precious_files_regex"; then + if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 + then + continue + fi + fi + func_append removelist " $p" + ;; + *) ;; + esac + done + test -n "$removelist" && \ + func_show_eval "${RM}r \$removelist" + fi + + # Now set the variables for building old libraries. + if test yes = "$build_old_libs" && test convenience != "$build_libtool_libs"; then + func_append oldlibs " $output_objdir/$libname.$libext" + + # Transform .lo files to .o files. + oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.$libext$/d; $lo2o" | $NL2SP` + fi + + # Eliminate all temporary directories. + #for path in $notinst_path; do + # lib_search_path=`$ECHO "$lib_search_path " | $SED "s% $path % %g"` + # deplibs=`$ECHO "$deplibs " | $SED "s% -L$path % %g"` + # dependency_libs=`$ECHO "$dependency_libs " | $SED "s% -L$path % %g"` + #done + + if test -n "$xrpath"; then + # If the user specified any rpath flags, then add them. 
+ temp_xrpath= + for libdir in $xrpath; do + func_replace_sysroot "$libdir" + func_append temp_xrpath " -R$func_replace_sysroot_result" + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + done + if test yes != "$hardcode_into_libs" || test yes = "$build_old_libs"; then + dependency_libs="$temp_xrpath $dependency_libs" + fi + fi + + # Make sure dlfiles contains only unique files that won't be dlpreopened + old_dlfiles=$dlfiles + dlfiles= + for lib in $old_dlfiles; do + case " $dlprefiles $dlfiles " in + *" $lib "*) ;; + *) func_append dlfiles " $lib" ;; + esac + done + + # Make sure dlprefiles contains only unique files + old_dlprefiles=$dlprefiles + dlprefiles= + for lib in $old_dlprefiles; do + case "$dlprefiles " in + *" $lib "*) ;; + *) func_append dlprefiles " $lib" ;; + esac + done + + if test yes = "$build_libtool_libs"; then + if test -n "$rpath"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc* | *-*-haiku*) + # these systems don't actually have a c library (as such)! + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C library is in the System framework + func_append deplibs " System.ltframework" + ;; + *-*-netbsd*) + # Don't link with libc until the a.out ld.so is fixed. + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + ;; + *) + # Add libc to deplibs on all other systems if necessary. + if test yes = "$build_libtool_need_lc"; then + func_append deplibs " -lc" + fi + ;; + esac + fi + + # Transform deplibs into only deplibs that can be linked in shared. + name_save=$name + libname_save=$libname + release_save=$release + versuffix_save=$versuffix + major_save=$major + # I'm not sure if I'm treating the release correctly. I think + # release should show up in the -l (ie -lgmp5) so we don't want to + # add it in twice. Is that correct? + release= + versuffix= + major= + newdeplibs= + droppeddeps=no + case $deplibs_check_method in + pass_all) + # Don't check for shared/static. Everything works. + # This might be a little naive. We might want to check + # whether the library exists or not. But this is on + # osf3 & osf4 and I'm not really sure... Just + # implementing what was already the behavior. + newdeplibs=$deplibs + ;; + test_compile) + # This code stresses the "libraries are programs" paradigm to its + # limits. Maybe even breaks it. We compile a program, linking it + # against the deplibs as a proxy for the library. Then we can check + # whether they linked in statically or dynamically with ldd. + $opt_dry_run || $RM conftest.c + cat > conftest.c </dev/null` + $nocaseglob + else + potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null` + fi + for potent_lib in $potential_libs; do + # Follow soft links. + if ls -lLd "$potent_lib" 2>/dev/null | + $GREP " -> " >/dev/null; then + continue + fi + # The statement above tries to avoid entering an + # endless loop below, in case of cyclic links. + # We might still enter an endless loop, since a link + # loop can be closed while we follow links, + # but so what? 
+ potlib=$potent_lib + while test -h "$potlib" 2>/dev/null; do + potliblink=`ls -ld $potlib | $SED 's/.* -> //'` + case $potliblink in + [\\/]* | [A-Za-z]:[\\/]*) potlib=$potliblink;; + *) potlib=`$ECHO "$potlib" | $SED 's|[^/]*$||'`"$potliblink";; + esac + done + if eval $file_magic_cmd \"\$potlib\" 2>/dev/null | + $SED -e 10q | + $EGREP "$file_magic_regex" > /dev/null; then + func_append newdeplibs " $a_deplib" + a_deplib= + break 2 + fi + done + done + fi + if test -n "$a_deplib"; then + droppeddeps=yes + echo + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib"; then + $ECHO "*** with $libname but no candidates were found. (...for file magic test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a file magic. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. + ;; + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + for a_deplib in $deplibs; do + case $a_deplib in + -l*) + func_stripname -l '' "$a_deplib" + name=$func_stripname_result + if test yes = "$allow_libtool_libs_with_static_runtimes"; then + case " $predeps $postdeps " in + *" $a_deplib "*) + func_append newdeplibs " $a_deplib" + a_deplib= + ;; + esac + fi + if test -n "$a_deplib"; then + libname=`eval "\\$ECHO \"$libname_spec\""` + for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do + potential_libs=`ls $i/$libname[.-]* 2>/dev/null` + for potent_lib in $potential_libs; do + potlib=$potent_lib # see symlink-check above in file_magic test + if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \ + $EGREP "$match_pattern_regex" > /dev/null; then + func_append newdeplibs " $a_deplib" + a_deplib= + break 2 + fi + done + done + fi + if test -n "$a_deplib"; then + droppeddeps=yes + echo + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib"; then + $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a regex pattern. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. 
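+      # Editorial note (illustrative, hypothetical pattern): with a
+      # deplibs_check_method such as 'match_pattern /lib[^/]+\.so$', each
+      # candidate file name found on the search path is matched against
+      # that regex instead of the file's contents being run through
+      # $file_magic_cmd; -l deplibs whose candidates all fail are dropped,
+      # just as in the file_magic branch above.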
+ ;; + none | unknown | *) + newdeplibs= + tmp_deplibs=`$ECHO " $deplibs" | $SED 's/ -lc$//; s/ -[LR][^ ]*//g'` + if test yes = "$allow_libtool_libs_with_static_runtimes"; then + for i in $predeps $postdeps; do + # can't use Xsed below, because $i might contain '/' + tmp_deplibs=`$ECHO " $tmp_deplibs" | $SED "s|$i||"` + done + fi + case $tmp_deplibs in + *[!\ \ ]*) + echo + if test none = "$deplibs_check_method"; then + echo "*** Warning: inter-library dependencies are not supported in this platform." + else + echo "*** Warning: inter-library dependencies are not known to be supported." + fi + echo "*** All declared inter-library dependencies are being dropped." + droppeddeps=yes + ;; + esac + ;; + esac + versuffix=$versuffix_save + major=$major_save + release=$release_save + libname=$libname_save + name=$name_save + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library with the System framework + newdeplibs=`$ECHO " $newdeplibs" | $SED 's/ -lc / System.ltframework /'` + ;; + esac + + if test yes = "$droppeddeps"; then + if test yes = "$module"; then + echo + echo "*** Warning: libtool could not satisfy all declared inter-library" + $ECHO "*** dependencies of module $libname. Therefore, libtool will create" + echo "*** a static module, that should work as long as the dlopening" + echo "*** application is linked with the -dlopen flag." + if test -z "$global_symbol_pipe"; then + echo + echo "*** However, this would only work if libtool was able to extract symbol" + echo "*** lists from a program, using 'nm' or equivalent, but libtool could" + echo "*** not find such a program. So, this module is probably useless." + echo "*** 'nm' from GNU binutils and a full rebuild may help." + fi + if test no = "$build_old_libs"; then + oldlibs=$output_objdir/$libname.$libext + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + else + echo "*** The inter-library dependencies that have been dropped here will be" + echo "*** automatically added whenever a program is linked with this library" + echo "*** or is declared to -dlopen it." + + if test no = "$allow_undefined"; then + echo + echo "*** Since this library must not contain undefined symbols," + echo "*** because either the platform does not support them or" + echo "*** it was explicitly requested with -no-undefined," + echo "*** libtool will only create a static version of it." + if test no = "$build_old_libs"; then + oldlibs=$output_objdir/$libname.$libext + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + fi + fi + # Done checking deplibs! 
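+      # Editorial note (illustrative, not part of upstream libtool):
+      # newdeplibs now holds only the dependencies that can be satisfied as
+      # shared libraries; on Darwin hosts the 'Foo.ltframework' placeholders
+      # introduced earlier are rewritten back to '-framework Foo' a few
+      # lines below, e.g. 'System.ltframework' -> '-framework System'.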
+ deplibs=$newdeplibs + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + case $host in + *-*-darwin*) + newdeplibs=`$ECHO " $newdeplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + deplibs=`$ECHO " $deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $deplibs " in + *" -L$path/$objdir "*) + func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) func_append new_libs " $deplib" ;; + esac + ;; + *) func_append new_libs " $deplib" ;; + esac + done + deplibs=$new_libs + + # All the library-specific variables (install_libdir is set above). + library_names= + old_library= + dlname= + + # Test again, we may have decided not to build it any more + if test yes = "$build_libtool_libs"; then + # Remove $wl instances when linking with ld. + # FIXME: should test the right _cmds variable. + case $archive_cmds in + *\$LD\ *) wl= ;; + esac + if test yes = "$hardcode_into_libs"; then + # Hardcode the library paths + hardcode_libdirs= + dep_rpath= + rpath=$finalize_rpath + test relink = "$opt_mode" || rpath=$compile_rpath$rpath + for libdir in $rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + func_replace_sysroot "$libdir" + libdir=$func_replace_sysroot_result + if test -z "$hardcode_libdirs"; then + hardcode_libdirs=$libdir + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append dep_rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) func_append perm_rpath " $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir=$hardcode_libdirs + eval "dep_rpath=\"$hardcode_libdir_flag_spec\"" + fi + if test -n "$runpath_var" && test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + func_append rpath "$dir:" + done + eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" + fi + test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" + fi + + shlibpath=$finalize_shlibpath + test relink = "$opt_mode" || shlibpath=$compile_shlibpath$shlibpath + if test -n "$shlibpath"; then + eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" + fi + + # Get the real and link names of the library. 
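+	# Editorial note (illustrative, not part of upstream libtool): on a
+	# typical GNU/Linux configuration, continuing the -version-info 5:1:2
+	# example (major=.3, versuffix=.3.2.1), library_names_spec expands to
+	#   'libfoo.so.3.2.1 libfoo.so.3 libfoo.so'
+	# so realname=libfoo.so.3.2.1, soname=libfoo.so.3 (from soname_spec),
+	# and the remaining entries become the symlinks created as linknames.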
+ eval shared_ext=\"$shrext_cmds\" + eval library_names=\"$library_names_spec\" + set dummy $library_names + shift + realname=$1 + shift + + if test -n "$soname_spec"; then + eval soname=\"$soname_spec\" + else + soname=$realname + fi + if test -z "$dlname"; then + dlname=$soname + fi + + lib=$output_objdir/$realname + linknames= + for link + do + func_append linknames " $link" + done + + # Use standard objects if they are pic + test -z "$pic_flag" && libobjs=`$ECHO "$libobjs" | $SP2NL | $SED "$lo2o" | $NL2SP` + test "X$libobjs" = "X " && libobjs= + + delfiles= + if test -n "$export_symbols" && test -n "$include_expsyms"; then + $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp" + export_symbols=$output_objdir/$libname.uexp + func_append delfiles " $export_symbols" + fi + + orig_export_symbols= + case $host_os in + cygwin* | mingw* | cegcc*) + if test -n "$export_symbols" && test -z "$export_symbols_regex"; then + # exporting using user supplied symfile + func_dll_def_p "$export_symbols" || { + # and it's NOT already a .def file. Must figure out + # which of the given symbols are data symbols and tag + # them as such. So, trigger use of export_symbols_cmds. + # export_symbols gets reassigned inside the "prepare + # the list of exported symbols" if statement, so the + # include_expsyms logic still works. + orig_export_symbols=$export_symbols + export_symbols= + always_export_symbols=yes + } + fi + ;; + esac + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + if test yes = "$always_export_symbols" || test -n "$export_symbols_regex"; then + func_verbose "generating symbol list for '$libname.la'" + export_symbols=$output_objdir/$libname.exp + $opt_dry_run || $RM $export_symbols + cmds=$export_symbols_cmds + save_ifs=$IFS; IFS='~' + for cmd1 in $cmds; do + IFS=$save_ifs + # Take the normal branch if the nm_file_list_spec branch + # doesn't work or if tool conversion is not needed. + case $nm_file_list_spec~$to_tool_file_cmd in + *~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~*) + try_normal_branch=yes + eval cmd=\"$cmd1\" + func_len " $cmd" + len=$func_len_result + ;; + *) + try_normal_branch=no + ;; + esac + if test yes = "$try_normal_branch" \ + && { test "$len" -lt "$max_cmd_len" \ + || test "$max_cmd_len" -le -1; } + then + func_show_eval "$cmd" 'exit $?' + skipped_export=false + elif test -n "$nm_file_list_spec"; then + func_basename "$output" + output_la=$func_basename_result + save_libobjs=$libobjs + save_output=$output + output=$output_objdir/$output_la.nm + func_to_tool_file "$output" + libobjs=$nm_file_list_spec$func_to_tool_file_result + func_append delfiles " $output" + func_verbose "creating $NM input file list: $output" + for obj in $save_libobjs; do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" + done > "$output" + eval cmd=\"$cmd1\" + func_show_eval "$cmd" 'exit $?' + output=$save_output + libobjs=$save_libobjs + skipped_export=false + else + # The command line is too long to execute in one step. + func_verbose "using reloadable object file for export list..." + skipped_export=: + # Break out early, otherwise skipped_export may be + # set to false by a later but shorter cmd. 
+ break + fi + done + IFS=$save_ifs + if test -n "$export_symbols_regex" && test : != "$skipped_export"; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + fi + + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols=$export_symbols + test -n "$orig_export_symbols" && tmp_export_symbols=$orig_export_symbols + $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test : != "$skipped_export" && test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for '$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands, which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + + tmp_deplibs= + for test_deplib in $deplibs; do + case " $convenience " in + *" $test_deplib "*) ;; + *) + func_append tmp_deplibs " $test_deplib" + ;; + esac + done + deplibs=$tmp_deplibs + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec" && + test yes = "$compiler_needs_object" && + test -z "$libobjs"; then + # extract the archives, so we have objects to list. + # TODO: could optimize this to just extract one archive. + whole_archive_flag_spec= + fi + if test -n "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + else + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $convenience + func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + fi + + if test yes = "$thread_safe" && test -n "$thread_safe_flag_spec"; then + eval flag=\"$thread_safe_flag_spec\" + func_append linker_flags " $flag" + fi + + # Make a backup of the uninstalled library when relinking + if test relink = "$opt_mode"; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $? + fi + + # Do each of the archive commands. + if test yes = "$module" && test -n "$module_cmds"; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + eval test_cmds=\"$module_expsym_cmds\" + cmds=$module_expsym_cmds + else + eval test_cmds=\"$module_cmds\" + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + eval test_cmds=\"$archive_expsym_cmds\" + cmds=$archive_expsym_cmds + else + eval test_cmds=\"$archive_cmds\" + cmds=$archive_cmds + fi + fi + + if test : != "$skipped_export" && + func_len " $test_cmds" && + len=$func_len_result && + test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + : + else + # The command line is too long to link in one step, link piecewise + # or, if using GNU ld and skipped_export is not :, use a linker + # script. 
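+        # Editorial note (illustrative, not part of upstream libtool): with
+        # GNU ld the fallback below writes the object list into a linker
+        # script, e.g.
+        #   INPUT (
+        #   .libs/a.o
+        #   .libs/b.o
+        #   )
+        # and passes that single file to the link command instead of every
+        # object, keeping the command line under max_cmd_len.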
+ + # Save the value of $output and $libobjs because we want to + # use them later. If we have whole_archive_flag_spec, we + # want to use save_libobjs as it was before + # whole_archive_flag_spec was expanded, because we can't + # assume the linker understands whole_archive_flag_spec. + # This may have to be revisited, in case too many + # convenience libraries get linked in and end up exceeding + # the spec. + if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + fi + save_output=$output + func_basename "$output" + output_la=$func_basename_result + + # Clear the reloadable object creation command queue and + # initialize k to one. + test_cmds= + concat_cmds= + objlist= + last_robj= + k=1 + + if test -n "$save_libobjs" && test : != "$skipped_export" && test yes = "$with_gnu_ld"; then + output=$output_objdir/$output_la.lnkscript + func_verbose "creating GNU ld script: $output" + echo 'INPUT (' > $output + for obj in $save_libobjs + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" >> $output + done + echo ')' >> $output + func_append delfiles " $output" + func_to_tool_file "$output" + output=$func_to_tool_file_result + elif test -n "$save_libobjs" && test : != "$skipped_export" && test -n "$file_list_spec"; then + output=$output_objdir/$output_la.lnk + func_verbose "creating linker input file list: $output" + : > $output + set x $save_libobjs + shift + firstobj= + if test yes = "$compiler_needs_object"; then + firstobj="$1 " + shift + fi + for obj + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" >> $output + done + func_append delfiles " $output" + func_to_tool_file "$output" + output=$firstobj\"$file_list_spec$func_to_tool_file_result\" + else + if test -n "$save_libobjs"; then + func_verbose "creating reloadable object files..." + output=$output_objdir/$output_la-$k.$objext + eval test_cmds=\"$reload_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + + # Loop over the list of objects to be linked. + for obj in $save_libobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + if test -z "$objlist" || + test "$len" -lt "$max_cmd_len"; then + func_append objlist " $obj" + else + # The command $test_cmds is almost too long, add a + # command to the queue. + if test 1 -eq "$k"; then + # The first file doesn't have a previous command to add. + reload_objs=$objlist + eval concat_cmds=\"$reload_cmds\" + else + # All subsequent reloadable object files will link in + # the last one created. + reload_objs="$objlist $last_robj" + eval concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\" + fi + last_robj=$output_objdir/$output_la-$k.$objext + func_arith $k + 1 + k=$func_arith_result + output=$output_objdir/$output_la-$k.$objext + objlist=" $obj" + func_len " $last_robj" + func_arith $len0 + $func_len_result + len=$func_arith_result + fi + done + # Handle the remaining objects by creating one last + # reloadable object file. All subsequent reloadable object + # files will link in the last one created. 
+ test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + reload_objs="$objlist $last_robj" + eval concat_cmds=\"\$concat_cmds$reload_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" + fi + func_append delfiles " $output" + + else + output= + fi + + ${skipped_export-false} && { + func_verbose "generating symbol list for '$libname.la'" + export_symbols=$output_objdir/$libname.exp + $opt_dry_run || $RM $export_symbols + libobjs=$output + # Append the command to create the export file. + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" + fi + } + + test -n "$save_libobjs" && + func_verbose "creating a temporary reloadable object file: $output" + + # Loop through the commands generated above and execute them. + save_ifs=$IFS; IFS='~' + for cmd in $concat_cmds; do + IFS=$save_ifs + $opt_quiet || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test relink = "$opt_mode"; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS=$save_ifs + + if test -n "$export_symbols_regex" && ${skipped_export-false}; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + + ${skipped_export-false} && { + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols=$export_symbols + test -n "$orig_export_symbols" && tmp_export_symbols=$orig_export_symbols + $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for '$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands, which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + } + + libobjs=$output + # Restore the value of output. + output=$save_output + + if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + fi + # Expand the library linking commands again to reset the + # value of $libobjs for piecewise linking. + + # Do each of the archive commands. + if test yes = "$module" && test -n "$module_cmds"; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + cmds=$module_expsym_cmds + else + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + cmds=$archive_expsym_cmds + else + cmds=$archive_cmds + fi + fi + fi + + if test -n "$delfiles"; then + # Append the command to remove temporary files to $cmds. 
+ eval cmds=\"\$cmds~\$RM $delfiles\" + fi + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles + func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + + save_ifs=$IFS; IFS='~' + for cmd in $cmds; do + IFS=$sp$nl + eval cmd=\"$cmd\" + IFS=$save_ifs + $opt_quiet || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test relink = "$opt_mode"; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS=$save_ifs + + # Restore the uninstalled library and exit + if test relink = "$opt_mode"; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $? + + if test -n "$convenience"; then + if test -z "$whole_archive_flag_spec"; then + func_show_eval '${RM}r "$gentop"' + fi + fi + + exit $EXIT_SUCCESS + fi + + # Create links to the real library. + for linkname in $linknames; do + if test "$realname" != "$linkname"; then + func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?' + fi + done + + # If -module or -export-dynamic was specified, set the dlname. + if test yes = "$module" || test yes = "$export_dynamic"; then + # On all known operating systems, these are identical. + dlname=$soname + fi + fi + ;; + + obj) + if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then + func_warning "'-dlopen' is ignored for objects" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "'-l' and '-L' are ignored for objects" ;; + esac + + test -n "$rpath" && \ + func_warning "'-rpath' is ignored for objects" + + test -n "$xrpath" && \ + func_warning "'-R' is ignored for objects" + + test -n "$vinfo" && \ + func_warning "'-version-info' is ignored for objects" + + test -n "$release" && \ + func_warning "'-release' is ignored for objects" + + case $output in + *.lo) + test -n "$objs$old_deplibs" && \ + func_fatal_error "cannot build library object '$output' from non-libtool objects" + + libobj=$output + func_lo2o "$libobj" + obj=$func_lo2o_result + ;; + *) + libobj= + obj=$output + ;; + esac + + # Delete the old objects. + $opt_dry_run || $RM $obj $libobj + + # Objects from convenience libraries. This assumes + # single-version convenience libraries. Whenever we create + # different ones for PIC/non-PIC, this we'll have to duplicate + # the extraction. + reload_conv_objs= + gentop= + # if reload_cmds runs $LD directly, get rid of -Wl from + # whole_archive_flag_spec and hope we can get by with turning comma + # into space. 
+ case $reload_cmds in + *\$LD[\ \$]*) wl= ;; + esac + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec"; then + eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\" + test -n "$wl" || tmp_whole_archive_flags=`$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'` + reload_conv_objs=$reload_objs\ $tmp_whole_archive_flags + else + gentop=$output_objdir/${obj}x + func_append generated " $gentop" + + func_extract_archives $gentop $convenience + reload_conv_objs="$reload_objs $func_extract_archives_result" + fi + fi + + # If we're not building shared, we need to use non_pic_objs + test yes = "$build_libtool_libs" || libobjs=$non_pic_objects + + # Create the old-style object. + reload_objs=$objs$old_deplibs' '`$ECHO "$libobjs" | $SP2NL | $SED "/\.$libext$/d; /\.lib$/d; $lo2o" | $NL2SP`' '$reload_conv_objs + + output=$obj + func_execute_cmds "$reload_cmds" 'exit $?' + + # Exit if we aren't doing a library object file. + if test -z "$libobj"; then + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS + fi + + test yes = "$build_libtool_libs" || { + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + # Create an invalid libtool object if no PIC, so that we don't + # accidentally link it into a program. + # $show "echo timestamp > $libobj" + # $opt_dry_run || eval "echo timestamp > $libobj" || exit $? + exit $EXIT_SUCCESS + } + + if test -n "$pic_flag" || test default != "$pic_mode"; then + # Only do commands if we really have different PIC objects. + reload_objs="$libobjs $reload_conv_objs" + output=$libobj + func_execute_cmds "$reload_cmds" 'exit $?' + fi + + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS + ;; + + prog) + case $host in + *cygwin*) func_stripname '' '.exe' "$output" + output=$func_stripname_result.exe;; + esac + test -n "$vinfo" && \ + func_warning "'-version-info' is ignored for programs" + + test -n "$release" && \ + func_warning "'-release' is ignored for programs" + + $preload \ + && test unknown,unknown,unknown = "$dlopen_support,$dlopen_self,$dlopen_self_static" \ + && func_warning "'LT_INIT([dlopen])' not used. Assuming no dlopen support." + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library is the System framework + compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's/ -lc / System.ltframework /'` + finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's/ -lc / System.ltframework /'` + ;; + esac + + case $host in + *-*-darwin*) + # Don't allow lazy linking, it breaks C++ global constructors + # But is supposedly fixed on 10.4 or later (yay!). 
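+	  # Editorial note (illustrative, not part of upstream libtool): for
+	  # the C++ tag with MACOSX_DEPLOYMENT_TARGET unset or in the
+	  # 10.0-10.3 range, the check below appends $wl-bind_at_load
+	  # (typically -Wl,-bind_at_load) to both the compile and finalize
+	  # link commands.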
+ if test CXX = "$tagname"; then + case ${MACOSX_DEPLOYMENT_TARGET-10.0} in + 10.[0123]) + func_append compile_command " $wl-bind_at_load" + func_append finalize_command " $wl-bind_at_load" + ;; + esac + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $compile_deplibs " in + *" -L$path/$objdir "*) + func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $compile_deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) func_append new_libs " $deplib" ;; + esac + ;; + *) func_append new_libs " $deplib" ;; + esac + done + compile_deplibs=$new_libs + + + func_append compile_command " $compile_deplibs" + func_append finalize_command " $finalize_deplibs" + + if test -n "$rpath$xrpath"; then + # If the user specified any rpath flags, then add them. + for libdir in $rpath $xrpath; do + # This is the magic to use -rpath. + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + done + fi + + # Now hardcode the library paths + rpath= + hardcode_libdirs= + for libdir in $compile_rpath $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs=$libdir + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) func_append perm_rpath " $libdir" ;; + esac + fi + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`$ECHO "$libdir" | $SED -e 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$libdir:"*) ;; + ::) dllsearchpath=$libdir;; + *) func_append dllsearchpath ":$libdir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir=$hardcode_libdirs + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + compile_rpath=$rpath + + rpath= + hardcode_libdirs= + for libdir in $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs=$libdir + else + # Just accumulate the unique libdirs. 
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$finalize_perm_rpath " in + *" $libdir "*) ;; + *) func_append finalize_perm_rpath " $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir=$hardcode_libdirs + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + finalize_rpath=$rpath + + if test -n "$libobjs" && test yes = "$build_old_libs"; then + # Transform all the library objects into standard objects. + compile_command=`$ECHO "$compile_command" | $SP2NL | $SED "$lo2o" | $NL2SP` + finalize_command=`$ECHO "$finalize_command" | $SP2NL | $SED "$lo2o" | $NL2SP` + fi + + func_generate_dlsyms "$outputname" "@PROGRAM@" false + + # template prelinking step + if test -n "$prelink_cmds"; then + func_execute_cmds "$prelink_cmds" 'exit $?' + fi + + wrappers_required=: + case $host in + *cegcc* | *mingw32ce*) + # Disable wrappers for cegcc and mingw32ce hosts, we are cross compiling anyway. + wrappers_required=false + ;; + *cygwin* | *mingw* ) + test yes = "$build_libtool_libs" || wrappers_required=false + ;; + *) + if test no = "$need_relink" || test yes != "$build_libtool_libs"; then + wrappers_required=false + fi + ;; + esac + $wrappers_required || { + # Replace the output file specification. + compile_command=`$ECHO "$compile_command" | $SED 's%@OUTPUT@%'"$output"'%g'` + link_command=$compile_command$compile_rpath + + # We have no uninstalled library dependencies, so finalize right now. + exit_status=0 + func_show_eval "$link_command" 'exit_status=$?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + # Delete the generated files. + if test -f "$output_objdir/${outputname}S.$objext"; then + func_show_eval '$RM "$output_objdir/${outputname}S.$objext"' + fi + + exit $exit_status + } + + if test -n "$compile_shlibpath$finalize_shlibpath"; then + compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" + fi + if test -n "$finalize_shlibpath"; then + finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" + fi + + compile_var= + finalize_var= + if test -n "$runpath_var"; then + if test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + func_append rpath "$dir:" + done + compile_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + if test -n "$finalize_perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $finalize_perm_rpath; do + func_append rpath "$dir:" + done + finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + fi + + if test yes = "$no_install"; then + # We don't need to create a wrapper script. + link_command=$compile_var$compile_command$compile_rpath + # Replace the output file specification. + link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output"'%g'` + # Delete the old output file. 
+ $opt_dry_run || $RM $output + # Link the executable and exit + func_show_eval "$link_command" 'exit $?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + exit $EXIT_SUCCESS + fi + + case $hardcode_action,$fast_install in + relink,*) + # Fast installation is not supported + link_command=$compile_var$compile_command$compile_rpath + relink_command=$finalize_var$finalize_command$finalize_rpath + + func_warning "this platform does not like uninstalled shared libraries" + func_warning "'$output' will be relinked during installation" + ;; + *,yes) + link_command=$finalize_var$compile_command$finalize_rpath + relink_command=`$ECHO "$compile_var$compile_command$compile_rpath" | $SED 's%@OUTPUT@%\$progdir/\$file%g'` + ;; + *,no) + link_command=$compile_var$compile_command$compile_rpath + relink_command=$finalize_var$finalize_command$finalize_rpath + ;; + *,needless) + link_command=$finalize_var$compile_command$finalize_rpath + relink_command= + ;; + esac + + # Replace the output file specification. + link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` + + # Delete the old output files. + $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname + + func_show_eval "$link_command" 'exit $?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output_objdir/$outputname" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + # Now create the wrapper script. + func_verbose "creating $output" + + # Quote the relink command for shipping. + if test -n "$relink_command"; then + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" + fi + done + relink_command="(cd `pwd`; $relink_command)" + relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` + fi + + # Only actually do things if not in dry run mode. + $opt_dry_run || { + # win32 will think the script is a binary if it has + # a .exe suffix, so we strip it off here. + case $output in + *.exe) func_stripname '' '.exe' "$output" + output=$func_stripname_result ;; + esac + # test for cygwin because mv fails w/o .exe extensions + case $host in + *cygwin*) + exeext=.exe + func_stripname '' '.exe' "$outputname" + outputname=$func_stripname_result ;; + *) exeext= ;; + esac + case $host in + *cygwin* | *mingw* ) + func_dirname_and_basename "$output" "" "." + output_name=$func_basename_result + output_path=$func_dirname_result + cwrappersource=$output_path/$objdir/lt-$output_name.c + cwrapper=$output_path/$output_name.exe + $RM $cwrappersource $cwrapper + trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 + + func_emit_cwrapperexe_src > $cwrappersource + + # The wrapper executable is built using the $host compiler, + # because it contains $host paths and files. 
If cross- + # compiling, it, like the target executable, must be + # executed on the $host or under an emulation environment. + $opt_dry_run || { + $LTCC $LTCFLAGS -o $cwrapper $cwrappersource + $STRIP $cwrapper + } + + # Now, create the wrapper script for func_source use: + func_ltwrapper_scriptname $cwrapper + $RM $func_ltwrapper_scriptname_result + trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15 + $opt_dry_run || { + # note: this script will not be executed, so do not chmod. + if test "x$build" = "x$host"; then + $cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result + else + func_emit_wrapper no > $func_ltwrapper_scriptname_result + fi + } + ;; + * ) + $RM $output + trap "$RM $output; exit $EXIT_FAILURE" 1 2 15 + + func_emit_wrapper no > $output + chmod +x $output + ;; + esac + } + exit $EXIT_SUCCESS + ;; + esac + + # See if we need to build an old-fashioned archive. + for oldlib in $oldlibs; do + + case $build_libtool_libs in + convenience) + oldobjs="$libobjs_save $symfileobj" + addlibs=$convenience + build_libtool_libs=no + ;; + module) + oldobjs=$libobjs_save + addlibs=$old_convenience + build_libtool_libs=no + ;; + *) + oldobjs="$old_deplibs $non_pic_objects" + $preload && test -f "$symfileobj" \ + && func_append oldobjs " $symfileobj" + addlibs=$old_convenience + ;; + esac + + if test -n "$addlibs"; then + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $addlibs + func_append oldobjs " $func_extract_archives_result" + fi + + # Do each command in the archive commands. + if test -n "$old_archive_from_new_cmds" && test yes = "$build_libtool_libs"; then + cmds=$old_archive_from_new_cmds + else + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles + func_append oldobjs " $func_extract_archives_result" + fi + + # POSIX demands no paths to be encoded in archives. We have + # to avoid creating archives with duplicate basenames if we + # might have to extract them afterwards, e.g., when creating a + # static archive out of a convenience library, or when linking + # the entirety of a libtool archive into another (currently + # not supported by libtool). + if (for obj in $oldobjs + do + func_basename "$obj" + $ECHO "$func_basename_result" + done | sort | sort -uc >/dev/null 2>&1); then + : + else + echo "copying selected object files to avoid basename conflicts..." + gentop=$output_objdir/${outputname}x + func_append generated " $gentop" + func_mkdir_p "$gentop" + save_oldobjs=$oldobjs + oldobjs= + counter=1 + for obj in $save_oldobjs + do + func_basename "$obj" + objbase=$func_basename_result + case " $oldobjs " in + " ") oldobjs=$obj ;; + *[\ /]"$objbase "*) + while :; do + # Make sure we don't pick an alternate name that also + # overlaps. + newobj=lt$counter-$objbase + func_arith $counter + 1 + counter=$func_arith_result + case " $oldobjs " in + *[\ /]"$newobj "*) ;; + *) if test ! 
-f "$gentop/$newobj"; then break; fi ;; + esac + done + func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" + func_append oldobjs " $gentop/$newobj" + ;; + *) func_append oldobjs " $obj" ;; + esac + done + fi + func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 + tool_oldlib=$func_to_tool_file_result + eval cmds=\"$old_archive_cmds\" + + func_len " $cmds" + len=$func_len_result + if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + cmds=$old_archive_cmds + elif test -n "$archiver_list_spec"; then + func_verbose "using command file archive linking..." + for obj in $oldobjs + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" + done > $output_objdir/$libname.libcmd + func_to_tool_file "$output_objdir/$libname.libcmd" + oldobjs=" $archiver_list_spec$func_to_tool_file_result" + cmds=$old_archive_cmds + else + # the command line is too long to link in one step, link in parts + func_verbose "using piecewise archive linking..." + save_RANLIB=$RANLIB + RANLIB=: + objlist= + concat_cmds= + save_oldobjs=$oldobjs + oldobjs= + # Is there a better way of finding the last object in the list? + for obj in $save_oldobjs + do + last_oldobj=$obj + done + eval test_cmds=\"$old_archive_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + for obj in $save_oldobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + func_append objlist " $obj" + if test "$len" -lt "$max_cmd_len"; then + : + else + # the above command should be used before it gets too long + oldobjs=$objlist + if test "$obj" = "$last_oldobj"; then + RANLIB=$save_RANLIB + fi + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\$concat_cmds$old_archive_cmds\" + objlist= + len=$len0 + fi + done + RANLIB=$save_RANLIB + oldobjs=$objlist + if test -z "$oldobjs"; then + eval cmds=\"\$concat_cmds\" + else + eval cmds=\"\$concat_cmds~\$old_archive_cmds\" + fi + fi + fi + func_execute_cmds "$cmds" 'exit $?' + done + + test -n "$generated" && \ + func_show_eval "${RM}r$generated" + + # Now create the libtool archive. + case $output in + *.la) + old_library= + test yes = "$build_old_libs" && old_library=$libname.$libext + func_verbose "creating $output" + + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" + fi + done + # Quote the link command for shipping. + relink_command="(cd `pwd`; $SHELL \"$progpath\" $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" + relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` + if test yes = "$hardcode_automatic"; then + relink_command= + fi + + # Only create the output if not a dry run. 
+ $opt_dry_run || { + for installed in no yes; do + if test yes = "$installed"; then + if test -z "$install_libdir"; then + break + fi + output=$output_objdir/${outputname}i + # Replace all uninstalled libtool libraries with the installed ones + newdependency_libs= + for deplib in $dependency_libs; do + case $deplib in + *.la) + func_basename "$deplib" + name=$func_basename_result + func_resolve_sysroot "$deplib" + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result` + test -z "$libdir" && \ + func_fatal_error "'$deplib' is not a valid libtool archive" + func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name" + ;; + -L*) + func_stripname -L '' "$deplib" + func_replace_sysroot "$func_stripname_result" + func_append newdependency_libs " -L$func_replace_sysroot_result" + ;; + -R*) + func_stripname -R '' "$deplib" + func_replace_sysroot "$func_stripname_result" + func_append newdependency_libs " -R$func_replace_sysroot_result" + ;; + *) func_append newdependency_libs " $deplib" ;; + esac + done + dependency_libs=$newdependency_libs + newdlfiles= + + for lib in $dlfiles; do + case $lib in + *.la) + func_basename "$lib" + name=$func_basename_result + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "'$lib' is not a valid libtool archive" + func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name" + ;; + *) func_append newdlfiles " $lib" ;; + esac + done + dlfiles=$newdlfiles + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + *.la) + # Only pass preopened files to the pseudo-archive (for + # eventual linking with the app. that links it) if we + # didn't already link the preopened objects directly into + # the library: + func_basename "$lib" + name=$func_basename_result + eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "'$lib' is not a valid libtool archive" + func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name" + ;; + esac + done + dlprefiles=$newdlprefiles + else + newdlfiles= + for lib in $dlfiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs=$lib ;; + *) abs=`pwd`"/$lib" ;; + esac + func_append newdlfiles " $abs" + done + dlfiles=$newdlfiles + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs=$lib ;; + *) abs=`pwd`"/$lib" ;; + esac + func_append newdlprefiles " $abs" + done + dlprefiles=$newdlprefiles + fi + $RM $output + # place dlname in correct position for cygwin + # In fact, it would be nice if we could use this code for all target + # systems that can't hard-code library paths into their executables + # and that have no shared library path variable independent of PATH, + # but it turns out we can't easily determine that from inspecting + # libtool variables, so we have to hard-code the OSs to which it + # applies here; at the moment, that means platforms that use the PE + # object format with DLL files. See the long comment at the top of + # tests/bindir.at for full details. + tdlname=$dlname + case $host,$output,$installed,$module,$dlname in + *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll) + # If a -bindir argument was supplied, place the dll there. + if test -n "$bindir"; then + func_relative_path "$install_libdir" "$bindir" + tdlname=$func_relative_path_result/$dlname + else + # Otherwise fall back on heuristic. 
+ tdlname=../bin/$dlname + fi + ;; + esac + $ECHO > $output "\ +# $outputname - a libtool library file +# Generated by $PROGRAM (GNU $PACKAGE) $VERSION +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='$tdlname' + +# Names of this library. +library_names='$library_names' + +# The name of the static archive. +old_library='$old_library' + +# Linker flags that cannot go in dependency_libs. +inherited_linker_flags='$new_inherited_linker_flags' + +# Libraries that this one depends upon. +dependency_libs='$dependency_libs' + +# Names of additional weak libraries provided by this library +weak_library_names='$weak_libs' + +# Version information for $libname. +current=$current +age=$age +revision=$revision + +# Is this an already installed library? +installed=$installed + +# Should we warn about portability when linking against -modules? +shouldnotlink=$module + +# Files to dlopen/dlpreopen +dlopen='$dlfiles' +dlpreopen='$dlprefiles' + +# Directory that this library needs to be installed in: +libdir='$install_libdir'" + if test no,yes = "$installed,$need_relink"; then + $ECHO >> $output "\ +relink_command=\"$relink_command\"" + fi + done + } + + # Do a symbolic link so that the libtool archive can be found in + # LD_LIBRARY_PATH before the program is installed. + func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?' + ;; + esac + exit $EXIT_SUCCESS +} + +if test link = "$opt_mode" || test relink = "$opt_mode"; then + func_mode_link ${1+"$@"} +fi + + +# func_mode_uninstall arg... +func_mode_uninstall () +{ + $debug_cmd + + RM=$nonopt + files= + rmforce=false + exit_status=0 + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic=$magic + + for arg + do + case $arg in + -f) func_append RM " $arg"; rmforce=: ;; + -*) func_append RM " $arg" ;; + *) func_append files " $arg" ;; + esac + done + + test -z "$RM" && \ + func_fatal_help "you must specify an RM program" + + rmdirs= + + for file in $files; do + func_dirname "$file" "" "." + dir=$func_dirname_result + if test . = "$dir"; then + odir=$objdir + else + odir=$dir/$objdir + fi + func_basename "$file" + name=$func_basename_result + test uninstall = "$opt_mode" && odir=$dir + + # Remember odir for removal later, being careful to avoid duplicates + if test clean = "$opt_mode"; then + case " $rmdirs " in + *" $odir "*) ;; + *) func_append rmdirs " $odir" ;; + esac + fi + + # Don't error if the file doesn't exist and rm -f was used. + if { test -L "$file"; } >/dev/null 2>&1 || + { test -h "$file"; } >/dev/null 2>&1 || + test -f "$file"; then + : + elif test -d "$file"; then + exit_status=1 + continue + elif $rmforce; then + continue + fi + + rmfiles=$file + + case $name in + *.la) + # Possibly a libtool archive, so verify it. + if func_lalib_p "$file"; then + func_source $dir/$name + + # Delete the libtool libraries and symlinks. + for n in $library_names; do + func_append rmfiles " $odir/$n" + done + test -n "$old_library" && func_append rmfiles " $odir/$old_library" + + case $opt_mode in + clean) + case " $library_names " in + *" $dlname "*) ;; + *) test -n "$dlname" && func_append rmfiles " $odir/$dlname" ;; + esac + test -n "$libdir" && func_append rmfiles " $odir/$name $odir/${name}i" + ;; + uninstall) + if test -n "$library_names"; then + # Do each command in the postuninstall commands. 
+ func_execute_cmds "$postuninstall_cmds" '$rmforce || exit_status=1' + fi + + if test -n "$old_library"; then + # Do each command in the old_postuninstall commands. + func_execute_cmds "$old_postuninstall_cmds" '$rmforce || exit_status=1' + fi + # FIXME: should reinstall the best remaining shared library. + ;; + esac + fi + ;; + + *.lo) + # Possibly a libtool object, so verify it. + if func_lalib_p "$file"; then + + # Read the .lo file + func_source $dir/$name + + # Add PIC object to the list of files to remove. + if test -n "$pic_object" && test none != "$pic_object"; then + func_append rmfiles " $dir/$pic_object" + fi + + # Add non-PIC object to the list of files to remove. + if test -n "$non_pic_object" && test none != "$non_pic_object"; then + func_append rmfiles " $dir/$non_pic_object" + fi + fi + ;; + + *) + if test clean = "$opt_mode"; then + noexename=$name + case $file in + *.exe) + func_stripname '' '.exe' "$file" + file=$func_stripname_result + func_stripname '' '.exe' "$name" + noexename=$func_stripname_result + # $file with .exe has already been added to rmfiles, + # add $file without .exe + func_append rmfiles " $file" + ;; + esac + # Do a test to see if this is a libtool program. + if func_ltwrapper_p "$file"; then + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + relink_command= + func_source $func_ltwrapper_scriptname_result + func_append rmfiles " $func_ltwrapper_scriptname_result" + else + relink_command= + func_source $dir/$noexename + fi + + # note $name still contains .exe if it was in $file originally + # as does the version of $file that was added into $rmfiles + func_append rmfiles " $odir/$name $odir/${name}S.$objext" + if test yes = "$fast_install" && test -n "$relink_command"; then + func_append rmfiles " $odir/lt-$name" + fi + if test "X$noexename" != "X$name"; then + func_append rmfiles " $odir/lt-$noexename.c" + fi + fi + fi + ;; + esac + func_show_eval "$RM $rmfiles" 'exit_status=1' + done + + # Try to remove the $objdir's in the directories where we deleted files + for dir in $rmdirs; do + if test -d "$dir"; then + func_show_eval "rmdir $dir >/dev/null 2>&1" + fi + done + + exit $exit_status +} + +if test uninstall = "$opt_mode" || test clean = "$opt_mode"; then + func_mode_uninstall ${1+"$@"} +fi + +test -z "$opt_mode" && { + help=$generic_help + func_fatal_help "you must specify a MODE" +} + +test -z "$exec_cmd" && \ + func_fatal_help "invalid operation mode '$opt_mode'" + +if test -n "$exec_cmd"; then + eval exec "$exec_cmd" + exit $EXIT_FAILURE +fi + +exit $exit_status + + +# The TAGs below are defined such that we never get into a situation +# where we disable both kinds of libraries. Given conflicting +# choices, we go for a static library, that is the most portable, +# since we can't tell whether shared libraries were disabled because +# the user asked for that or because the platform doesn't support +# them. This is particularly important on AIX, because we don't +# support having both static and shared libraries enabled at the same +# time on that platform, so we default to a shared-only configuration. +# If a disable-shared tag is given, we'll fallback to a static-only +# configuration. But we'll never go from static-only to shared-only. 
+ +# ### BEGIN LIBTOOL TAG CONFIG: disable-shared +build_libtool_libs=no +build_old_libs=yes +# ### END LIBTOOL TAG CONFIG: disable-shared + +# ### BEGIN LIBTOOL TAG CONFIG: disable-static +build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` +# ### END LIBTOOL TAG CONFIG: disable-static + +# Local Variables: +# mode:shell-script +# sh-indentation:2 +# End: diff --git a/ext/libpqxx-7.7.3/config/m4/Makefile.am b/ext/libpqxx-7.7.3/config/m4/Makefile.am new file mode 100644 index 000000000..c5dd1572f --- /dev/null +++ b/ext/libpqxx-7.7.3/config/m4/Makefile.am @@ -0,0 +1,3 @@ +MAINTAINERCLEANFILES=Makefile.in config.guess config.sub install-sh \ + ltmain.sh missing mkinstalldirs + diff --git a/ext/libpqxx-7.7.3/config/m4/libtool.m4 b/ext/libpqxx-7.7.3/config/m4/libtool.m4 new file mode 100644 index 000000000..c4c02946d --- /dev/null +++ b/ext/libpqxx-7.7.3/config/m4/libtool.m4 @@ -0,0 +1,8394 @@ +# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- +# +# Copyright (C) 1996-2001, 2003-2015 Free Software Foundation, Inc. +# Written by Gordon Matzigkeit, 1996 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +m4_define([_LT_COPYING], [dnl +# Copyright (C) 2014 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program or library that is built +# using GNU Libtool, you may include this file under the same +# distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +]) + +# serial 58 LT_INIT + + +# LT_PREREQ(VERSION) +# ------------------ +# Complain and exit if this libtool version is less that VERSION. 
+m4_defun([LT_PREREQ], +[m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1, + [m4_default([$3], + [m4_fatal([Libtool version $1 or higher is required], + 63)])], + [$2])]) + + +# _LT_CHECK_BUILDDIR +# ------------------ +# Complain if the absolute build directory name contains unusual characters +m4_defun([_LT_CHECK_BUILDDIR], +[case `pwd` in + *\ * | *\ *) + AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;; +esac +]) + + +# LT_INIT([OPTIONS]) +# ------------------ +AC_DEFUN([LT_INIT], +[AC_PREREQ([2.62])dnl We use AC_PATH_PROGS_FEATURE_CHECK +AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl +AC_BEFORE([$0], [LT_LANG])dnl +AC_BEFORE([$0], [LT_OUTPUT])dnl +AC_BEFORE([$0], [LTDL_INIT])dnl +m4_require([_LT_CHECK_BUILDDIR])dnl + +dnl Autoconf doesn't catch unexpanded LT_ macros by default: +m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl +m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl +dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4 +dnl unless we require an AC_DEFUNed macro: +AC_REQUIRE([LTOPTIONS_VERSION])dnl +AC_REQUIRE([LTSUGAR_VERSION])dnl +AC_REQUIRE([LTVERSION_VERSION])dnl +AC_REQUIRE([LTOBSOLETE_VERSION])dnl +m4_require([_LT_PROG_LTMAIN])dnl + +_LT_SHELL_INIT([SHELL=${CONFIG_SHELL-/bin/sh}]) + +dnl Parse OPTIONS +_LT_SET_OPTIONS([$0], [$1]) + +# This can be used to rebuild libtool when needed +LIBTOOL_DEPS=$ltmain + +# Always use our own libtool. +LIBTOOL='$(SHELL) $(top_builddir)/libtool' +AC_SUBST(LIBTOOL)dnl + +_LT_SETUP + +# Only expand once: +m4_define([LT_INIT]) +])# LT_INIT + +# Old names: +AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT]) +AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_PROG_LIBTOOL], []) +dnl AC_DEFUN([AM_PROG_LIBTOOL], []) + + +# _LT_PREPARE_CC_BASENAME +# ----------------------- +m4_defun([_LT_PREPARE_CC_BASENAME], [ +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +func_cc_basename () +{ + for cc_temp in @S|@*""; do + case $cc_temp in + compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; + distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; + \-*) ;; + *) break;; + esac + done + func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +} +])# _LT_PREPARE_CC_BASENAME + + +# _LT_CC_BASENAME(CC) +# ------------------- +# It would be clearer to call AC_REQUIREs from _LT_PREPARE_CC_BASENAME, +# but that macro is also expanded into generated libtool script, which +# arranges for $SED and $ECHO to be set by different means. +m4_defun([_LT_CC_BASENAME], +[m4_require([_LT_PREPARE_CC_BASENAME])dnl +AC_REQUIRE([_LT_DECL_SED])dnl +AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl +func_cc_basename $1 +cc_basename=$func_cc_basename_result +]) + + +# _LT_FILEUTILS_DEFAULTS +# ---------------------- +# It is okay to use these file commands and assume they have been set +# sensibly after 'm4_require([_LT_FILEUTILS_DEFAULTS])'. 
+m4_defun([_LT_FILEUTILS_DEFAULTS], +[: ${CP="cp -f"} +: ${MV="mv -f"} +: ${RM="rm -f"} +])# _LT_FILEUTILS_DEFAULTS + + +# _LT_SETUP +# --------- +m4_defun([_LT_SETUP], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +AC_REQUIRE([_LT_PREPARE_SED_QUOTE_VARS])dnl +AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl + +_LT_DECL([], [PATH_SEPARATOR], [1], [The PATH separator for the build system])dnl +dnl +_LT_DECL([], [host_alias], [0], [The host system])dnl +_LT_DECL([], [host], [0])dnl +_LT_DECL([], [host_os], [0])dnl +dnl +_LT_DECL([], [build_alias], [0], [The build system])dnl +_LT_DECL([], [build], [0])dnl +_LT_DECL([], [build_os], [0])dnl +dnl +AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([LT_PATH_LD])dnl +AC_REQUIRE([LT_PATH_NM])dnl +dnl +AC_REQUIRE([AC_PROG_LN_S])dnl +test -z "$LN_S" && LN_S="ln -s" +_LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl +dnl +AC_REQUIRE([LT_CMD_MAX_LEN])dnl +_LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl +_LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl +dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_CHECK_SHELL_FEATURES])dnl +m4_require([_LT_PATH_CONVERSION_FUNCTIONS])dnl +m4_require([_LT_CMD_RELOAD])dnl +m4_require([_LT_CHECK_MAGIC_METHOD])dnl +m4_require([_LT_CHECK_SHAREDLIB_FROM_LINKLIB])dnl +m4_require([_LT_CMD_OLD_ARCHIVE])dnl +m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl +m4_require([_LT_WITH_SYSROOT])dnl +m4_require([_LT_CMD_TRUNCATE])dnl + +_LT_CONFIG_LIBTOOL_INIT([ +# See if we are running on zsh, and set the options that allow our +# commands through without removal of \ escapes INIT. +if test -n "\${ZSH_VERSION+set}"; then + setopt NO_GLOB_SUBST +fi +]) +if test -n "${ZSH_VERSION+set}"; then + setopt NO_GLOB_SUBST +fi + +_LT_CHECK_OBJDIR + +m4_require([_LT_TAG_COMPILER])dnl + +case $host_os in +aix3*) + # AIX sometimes has problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. + if test set != "${COLLECT_NAMES+set}"; then + COLLECT_NAMES= + export COLLECT_NAMES + fi + ;; +esac + +# Global variables: +ofile=libtool +can_build_shared=yes + +# All known linkers require a '.a' archive for static linking (except MSVC, +# which needs '.lib'). +libext=a + +with_gnu_ld=$lt_cv_prog_gnu_ld + +old_CC=$CC +old_CFLAGS=$CFLAGS + +# Set sane defaults for various variables +test -z "$CC" && CC=cc +test -z "$LTCC" && LTCC=$CC +test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS +test -z "$LD" && LD=ld +test -z "$ac_objext" && ac_objext=o + +_LT_CC_BASENAME([$compiler]) + +# Only perform the check for file, if the check method requires it +test -z "$MAGIC_CMD" && MAGIC_CMD=file +case $deplibs_check_method in +file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then + _LT_PATH_MAGIC + fi + ;; +esac + +# Use C for the default configuration in the libtool script +LT_SUPPORTED_TAG([CC]) +_LT_LANG_C_CONFIG +_LT_LANG_DEFAULT_CONFIG +_LT_CONFIG_COMMANDS +])# _LT_SETUP + + +# _LT_PREPARE_SED_QUOTE_VARS +# -------------------------- +# Define a few sed substitution that help us do robust quoting. +m4_defun([_LT_PREPARE_SED_QUOTE_VARS], +[# Backslashify metacharacters that are still active within +# double-quoted strings. +sed_quote_subst='s/\([["`$\\]]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\([["`\\]]\)/\\\1/g' + +# Sed substitution to delay expansion of an escaped shell variable in a +# double_quote_subst'ed string. 
+delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' + +# Sed substitution to delay expansion of an escaped single quote. +delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' + +# Sed substitution to avoid accidental globbing in evaled expressions +no_glob_subst='s/\*/\\\*/g' +]) + +# _LT_PROG_LTMAIN +# --------------- +# Note that this code is called both from 'configure', and 'config.status' +# now that we use AC_CONFIG_COMMANDS to generate libtool. Notably, +# 'config.status' has no value for ac_aux_dir unless we are using Automake, +# so we pass a copy along to make sure it has a sensible value anyway. +m4_defun([_LT_PROG_LTMAIN], +[m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl +_LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir']) +ltmain=$ac_aux_dir/ltmain.sh +])# _LT_PROG_LTMAIN + + +## ------------------------------------- ## +## Accumulate code for creating libtool. ## +## ------------------------------------- ## + +# So that we can recreate a full libtool script including additional +# tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS +# in macros and then make a single call at the end using the 'libtool' +# label. + + +# _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS]) +# ---------------------------------------- +# Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later. +m4_define([_LT_CONFIG_LIBTOOL_INIT], +[m4_ifval([$1], + [m4_append([_LT_OUTPUT_LIBTOOL_INIT], + [$1 +])])]) + +# Initialize. +m4_define([_LT_OUTPUT_LIBTOOL_INIT]) + + +# _LT_CONFIG_LIBTOOL([COMMANDS]) +# ------------------------------ +# Register COMMANDS to be passed to AC_CONFIG_COMMANDS later. +m4_define([_LT_CONFIG_LIBTOOL], +[m4_ifval([$1], + [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS], + [$1 +])])]) + +# Initialize. +m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS]) + + +# _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS]) +# ----------------------------------------------------- +m4_defun([_LT_CONFIG_SAVE_COMMANDS], +[_LT_CONFIG_LIBTOOL([$1]) +_LT_CONFIG_LIBTOOL_INIT([$2]) +]) + + +# _LT_FORMAT_COMMENT([COMMENT]) +# ----------------------------- +# Add leading comment marks to the start of each line, and a trailing +# full-stop to the whole comment if one is not present already. +m4_define([_LT_FORMAT_COMMENT], +[m4_ifval([$1], [ +m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])], + [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.]) +)]) + + + +## ------------------------ ## +## FIXME: Eliminate VARNAME ## +## ------------------------ ## + + +# _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?]) +# ------------------------------------------------------------------- +# CONFIGNAME is the name given to the value in the libtool script. +# VARNAME is the (base) name used in the configure script. +# VALUE may be 0, 1 or 2 for a computed quote escaped value based on +# VARNAME. Any other value will be used directly. 
+m4_define([_LT_DECL], +[lt_if_append_uniq([lt_decl_varnames], [$2], [, ], + [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name], + [m4_ifval([$1], [$1], [$2])]) + lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3]) + m4_ifval([$4], + [lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])]) + lt_dict_add_subkey([lt_decl_dict], [$2], + [tagged?], [m4_ifval([$5], [yes], [no])])]) +]) + + +# _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION]) +# -------------------------------------------------------- +m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])]) + + +# lt_decl_tag_varnames([SEPARATOR], [VARNAME1...]) +# ------------------------------------------------ +m4_define([lt_decl_tag_varnames], +[_lt_decl_filter([tagged?], [yes], $@)]) + + +# _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..]) +# --------------------------------------------------------- +m4_define([_lt_decl_filter], +[m4_case([$#], + [0], [m4_fatal([$0: too few arguments: $#])], + [1], [m4_fatal([$0: too few arguments: $#: $1])], + [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)], + [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)], + [lt_dict_filter([lt_decl_dict], $@)])[]dnl +]) + + +# lt_decl_quote_varnames([SEPARATOR], [VARNAME1...]) +# -------------------------------------------------- +m4_define([lt_decl_quote_varnames], +[_lt_decl_filter([value], [1], $@)]) + + +# lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...]) +# --------------------------------------------------- +m4_define([lt_decl_dquote_varnames], +[_lt_decl_filter([value], [2], $@)]) + + +# lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...]) +# --------------------------------------------------- +m4_define([lt_decl_varnames_tagged], +[m4_assert([$# <= 2])dnl +_$0(m4_quote(m4_default([$1], [[, ]])), + m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]), + m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))]) +m4_define([_lt_decl_varnames_tagged], +[m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])]) + + +# lt_decl_all_varnames([SEPARATOR], [VARNAME1...]) +# ------------------------------------------------ +m4_define([lt_decl_all_varnames], +[_$0(m4_quote(m4_default([$1], [[, ]])), + m4_if([$2], [], + m4_quote(lt_decl_varnames), + m4_quote(m4_shift($@))))[]dnl +]) +m4_define([_lt_decl_all_varnames], +[lt_join($@, lt_decl_varnames_tagged([$1], + lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl +]) + + +# _LT_CONFIG_STATUS_DECLARE([VARNAME]) +# ------------------------------------ +# Quote a variable value, and forward it to 'config.status' so that its +# declaration there will have the same value as in 'configure'. VARNAME +# must have a single quote delimited value for this to work. +m4_define([_LT_CONFIG_STATUS_DECLARE], +[$1='`$ECHO "$][$1" | $SED "$delay_single_quote_subst"`']) + + +# _LT_CONFIG_STATUS_DECLARATIONS +# ------------------------------ +# We delimit libtool config variables with single quotes, so when +# we write them to config.status, we have to be sure to quote all +# embedded single quotes properly. 
In configure, this macro expands +# each variable declared with _LT_DECL (and _LT_TAGDECL) into: +# +# ='`$ECHO "$" | $SED "$delay_single_quote_subst"`' +m4_defun([_LT_CONFIG_STATUS_DECLARATIONS], +[m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames), + [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])]) + + +# _LT_LIBTOOL_TAGS +# ---------------- +# Output comment and list of tags supported by the script +m4_defun([_LT_LIBTOOL_TAGS], +[_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl +available_tags='_LT_TAGS'dnl +]) + + +# _LT_LIBTOOL_DECLARE(VARNAME, [TAG]) +# ----------------------------------- +# Extract the dictionary values for VARNAME (optionally with TAG) and +# expand to a commented shell variable setting: +# +# # Some comment about what VAR is for. +# visible_name=$lt_internal_name +m4_define([_LT_LIBTOOL_DECLARE], +[_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], + [description])))[]dnl +m4_pushdef([_libtool_name], + m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl +m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])), + [0], [_libtool_name=[$]$1], + [1], [_libtool_name=$lt_[]$1], + [2], [_libtool_name=$lt_[]$1], + [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl +m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl +]) + + +# _LT_LIBTOOL_CONFIG_VARS +# ----------------------- +# Produce commented declarations of non-tagged libtool config variables +# suitable for insertion in the LIBTOOL CONFIG section of the 'libtool' +# script. Tagged libtool config variables (even for the LIBTOOL CONFIG +# section) are produced by _LT_LIBTOOL_TAG_VARS. +m4_defun([_LT_LIBTOOL_CONFIG_VARS], +[m4_foreach([_lt_var], + m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)), + [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])]) + + +# _LT_LIBTOOL_TAG_VARS(TAG) +# ------------------------- +m4_define([_LT_LIBTOOL_TAG_VARS], +[m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames), + [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])]) + + +# _LT_TAGVAR(VARNAME, [TAGNAME]) +# ------------------------------ +m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])]) + + +# _LT_CONFIG_COMMANDS +# ------------------- +# Send accumulated output to $CONFIG_STATUS. Thanks to the lists of +# variables for single and double quote escaping we saved from calls +# to _LT_DECL, we can put quote escaped variables declarations +# into 'config.status', and then the shell code to quote escape them in +# for loops in 'config.status'. Finally, any additional code accumulated +# from calls to _LT_CONFIG_LIBTOOL_INIT is expanded. +m4_defun([_LT_CONFIG_COMMANDS], +[AC_PROVIDE_IFELSE([LT_OUTPUT], + dnl If the libtool generation code has been placed in $CONFIG_LT, + dnl instead of duplicating it all over again into config.status, + dnl then we will have config.status run $CONFIG_LT later, so it + dnl needs to know what name is stored there: + [AC_CONFIG_COMMANDS([libtool], + [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])], + dnl If the libtool generation code is destined for config.status, + dnl expand the accumulated commands and init code now: + [AC_CONFIG_COMMANDS([libtool], + [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])]) +])#_LT_CONFIG_COMMANDS + + +# Initialize. +m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT], +[ + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. 
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +sed_quote_subst='$sed_quote_subst' +double_quote_subst='$double_quote_subst' +delay_variable_subst='$delay_variable_subst' +_LT_CONFIG_STATUS_DECLARATIONS +LTCC='$LTCC' +LTCFLAGS='$LTCFLAGS' +compiler='$compiler_DEFAULT' + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +\$[]1 +_LTECHO_EOF' +} + +# Quote evaled strings. +for var in lt_decl_all_varnames([[ \ +]], lt_decl_quote_varnames); do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[[\\\\\\\`\\"\\\$]]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +# Double-quote double-evaled strings. +for var in lt_decl_all_varnames([[ \ +]], lt_decl_dquote_varnames); do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[[\\\\\\\`\\"\\\$]]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +_LT_OUTPUT_LIBTOOL_INIT +]) + +# _LT_GENERATED_FILE_INIT(FILE, [COMMENT]) +# ------------------------------------ +# Generate a child script FILE with all initialization necessary to +# reuse the environment learned by the parent script, and make the +# file executable. If COMMENT is supplied, it is inserted after the +# '#!' sequence but before initialization text begins. After this +# macro, additional text can be appended to FILE to form the body of +# the child script. The macro ends with non-zero status if the +# file could not be fully written (such as if the disk is full). +m4_ifdef([AS_INIT_GENERATED], +[m4_defun([_LT_GENERATED_FILE_INIT],[AS_INIT_GENERATED($@)])], +[m4_defun([_LT_GENERATED_FILE_INIT], +[m4_require([AS_PREPARE])]dnl +[m4_pushdef([AS_MESSAGE_LOG_FD])]dnl +[lt_write_fail=0 +cat >$1 <<_ASEOF || lt_write_fail=1 +#! $SHELL +# Generated by $as_me. +$2 +SHELL=\${CONFIG_SHELL-$SHELL} +export SHELL +_ASEOF +cat >>$1 <<\_ASEOF || lt_write_fail=1 +AS_SHELL_SANITIZE +_AS_PREPARE +exec AS_MESSAGE_FD>&1 +_ASEOF +test 0 = "$lt_write_fail" && chmod +x $1[]dnl +m4_popdef([AS_MESSAGE_LOG_FD])])])# _LT_GENERATED_FILE_INIT + +# LT_OUTPUT +# --------- +# This macro allows early generation of the libtool script (before +# AC_OUTPUT is called), incase it is used in configure for compilation +# tests. +AC_DEFUN([LT_OUTPUT], +[: ${CONFIG_LT=./config.lt} +AC_MSG_NOTICE([creating $CONFIG_LT]) +_LT_GENERATED_FILE_INIT(["$CONFIG_LT"], +[# Run this file to recreate a libtool stub with the current configuration.]) + +cat >>"$CONFIG_LT" <<\_LTEOF +lt_cl_silent=false +exec AS_MESSAGE_LOG_FD>>config.log +{ + echo + AS_BOX([Running $as_me.]) +} >&AS_MESSAGE_LOG_FD + +lt_cl_help="\ +'$as_me' creates a local libtool stub from the current configuration, +for use in further configure time tests before the real libtool is +generated. + +Usage: $[0] [[OPTIONS]] + + -h, --help print this help, then exit + -V, --version print version number, then exit + -q, --quiet do not print progress messages + -d, --debug don't remove temporary files + +Report bugs to ." + +lt_cl_version="\ +m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl +m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION]) +configured by $[0], generated by m4_PACKAGE_STRING. 
+ +Copyright (C) 2011 Free Software Foundation, Inc. +This config.lt script is free software; the Free Software Foundation +gives unlimited permision to copy, distribute and modify it." + +while test 0 != $[#] +do + case $[1] in + --version | --v* | -V ) + echo "$lt_cl_version"; exit 0 ;; + --help | --h* | -h ) + echo "$lt_cl_help"; exit 0 ;; + --debug | --d* | -d ) + debug=: ;; + --quiet | --q* | --silent | --s* | -q ) + lt_cl_silent=: ;; + + -*) AC_MSG_ERROR([unrecognized option: $[1] +Try '$[0] --help' for more information.]) ;; + + *) AC_MSG_ERROR([unrecognized argument: $[1] +Try '$[0] --help' for more information.]) ;; + esac + shift +done + +if $lt_cl_silent; then + exec AS_MESSAGE_FD>/dev/null +fi +_LTEOF + +cat >>"$CONFIG_LT" <<_LTEOF +_LT_OUTPUT_LIBTOOL_COMMANDS_INIT +_LTEOF + +cat >>"$CONFIG_LT" <<\_LTEOF +AC_MSG_NOTICE([creating $ofile]) +_LT_OUTPUT_LIBTOOL_COMMANDS +AS_EXIT(0) +_LTEOF +chmod +x "$CONFIG_LT" + +# configure is writing to config.log, but config.lt does its own redirection, +# appending to config.log, which fails on DOS, as config.log is still kept +# open by configure. Here we exec the FD to /dev/null, effectively closing +# config.log, so it can be properly (re)opened and appended to by config.lt. +lt_cl_success=: +test yes = "$silent" && + lt_config_lt_args="$lt_config_lt_args --quiet" +exec AS_MESSAGE_LOG_FD>/dev/null +$SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false +exec AS_MESSAGE_LOG_FD>>config.log +$lt_cl_success || AS_EXIT(1) +])# LT_OUTPUT + + +# _LT_CONFIG(TAG) +# --------------- +# If TAG is the built-in tag, create an initial libtool script with a +# default configuration from the untagged config vars. Otherwise add code +# to config.status for appending the configuration named by TAG from the +# matching tagged config vars. +m4_defun([_LT_CONFIG], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +_LT_CONFIG_SAVE_COMMANDS([ + m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl + m4_if(_LT_TAG, [C], [ + # See if we are running on zsh, and set the options that allow our + # commands through without removal of \ escapes. + if test -n "${ZSH_VERSION+set}"; then + setopt NO_GLOB_SUBST + fi + + cfgfile=${ofile}T + trap "$RM \"$cfgfile\"; exit 1" 1 2 15 + $RM "$cfgfile" + + cat <<_LT_EOF >> "$cfgfile" +#! $SHELL +# Generated automatically by $as_me ($PACKAGE) $VERSION +# NOTE: Changes made to this file will be lost: look at ltmain.sh. + +# Provide generalized library-building support services. +# Written by Gordon Matzigkeit, 1996 + +_LT_COPYING +_LT_LIBTOOL_TAGS + +# Configured defaults for sys_lib_dlsearch_path munging. +: \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"} + +# ### BEGIN LIBTOOL CONFIG +_LT_LIBTOOL_CONFIG_VARS +_LT_LIBTOOL_TAG_VARS +# ### END LIBTOOL CONFIG + +_LT_EOF + + cat <<'_LT_EOF' >> "$cfgfile" + +# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE + +_LT_PREPARE_MUNGE_PATH_LIST +_LT_PREPARE_CC_BASENAME + +# ### END FUNCTIONS SHARED WITH CONFIGURE + +_LT_EOF + + case $host_os in + aix3*) + cat <<\_LT_EOF >> "$cfgfile" +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test set != "${COLLECT_NAMES+set}"; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +_LT_EOF + ;; + esac + + _LT_PROG_LTMAIN + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. 
This bash problem + # is reportedly fixed, but why not run on old versions too? + sed '$q' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) + + mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" +], +[cat <<_LT_EOF >> "$ofile" + +dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded +dnl in a comment (ie after a #). +# ### BEGIN LIBTOOL TAG CONFIG: $1 +_LT_LIBTOOL_TAG_VARS(_LT_TAG) +# ### END LIBTOOL TAG CONFIG: $1 +_LT_EOF +])dnl /m4_if +], +[m4_if([$1], [], [ + PACKAGE='$PACKAGE' + VERSION='$VERSION' + RM='$RM' + ofile='$ofile'], []) +])dnl /_LT_CONFIG_SAVE_COMMANDS +])# _LT_CONFIG + + +# LT_SUPPORTED_TAG(TAG) +# --------------------- +# Trace this macro to discover what tags are supported by the libtool +# --tag option, using: +# autoconf --trace 'LT_SUPPORTED_TAG:$1' +AC_DEFUN([LT_SUPPORTED_TAG], []) + + +# C support is built-in for now +m4_define([_LT_LANG_C_enabled], []) +m4_define([_LT_TAGS], []) + + +# LT_LANG(LANG) +# ------------- +# Enable libtool support for the given language if not already enabled. +AC_DEFUN([LT_LANG], +[AC_BEFORE([$0], [LT_OUTPUT])dnl +m4_case([$1], + [C], [_LT_LANG(C)], + [C++], [_LT_LANG(CXX)], + [Go], [_LT_LANG(GO)], + [Java], [_LT_LANG(GCJ)], + [Fortran 77], [_LT_LANG(F77)], + [Fortran], [_LT_LANG(FC)], + [Windows Resource], [_LT_LANG(RC)], + [m4_ifdef([_LT_LANG_]$1[_CONFIG], + [_LT_LANG($1)], + [m4_fatal([$0: unsupported language: "$1"])])])dnl +])# LT_LANG + + +# _LT_LANG(LANGNAME) +# ------------------ +m4_defun([_LT_LANG], +[m4_ifdef([_LT_LANG_]$1[_enabled], [], + [LT_SUPPORTED_TAG([$1])dnl + m4_append([_LT_TAGS], [$1 ])dnl + m4_define([_LT_LANG_]$1[_enabled], [])dnl + _LT_LANG_$1_CONFIG($1)])dnl +])# _LT_LANG + + +m4_ifndef([AC_PROG_GO], [ +############################################################ +# NOTE: This macro has been submitted for inclusion into # +# GNU Autoconf as AC_PROG_GO. When it is available in # +# a released version of Autoconf we should remove this # +# macro and use it instead. # +############################################################ +m4_defun([AC_PROG_GO], +[AC_LANG_PUSH(Go)dnl +AC_ARG_VAR([GOC], [Go compiler command])dnl +AC_ARG_VAR([GOFLAGS], [Go compiler flags])dnl +_AC_ARG_VAR_LDFLAGS()dnl +AC_CHECK_TOOL(GOC, gccgo) +if test -z "$GOC"; then + if test -n "$ac_tool_prefix"; then + AC_CHECK_PROG(GOC, [${ac_tool_prefix}gccgo], [${ac_tool_prefix}gccgo]) + fi +fi +if test -z "$GOC"; then + AC_CHECK_PROG(GOC, gccgo, gccgo, false) +fi +])#m4_defun +])#m4_ifndef + + +# _LT_LANG_DEFAULT_CONFIG +# ----------------------- +m4_defun([_LT_LANG_DEFAULT_CONFIG], +[AC_PROVIDE_IFELSE([AC_PROG_CXX], + [LT_LANG(CXX)], + [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])]) + +AC_PROVIDE_IFELSE([AC_PROG_F77], + [LT_LANG(F77)], + [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])]) + +AC_PROVIDE_IFELSE([AC_PROG_FC], + [LT_LANG(FC)], + [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])]) + +dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal +dnl pulling things in needlessly. 
+AC_PROVIDE_IFELSE([AC_PROG_GCJ], + [LT_LANG(GCJ)], + [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], + [LT_LANG(GCJ)], + [AC_PROVIDE_IFELSE([LT_PROG_GCJ], + [LT_LANG(GCJ)], + [m4_ifdef([AC_PROG_GCJ], + [m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])]) + m4_ifdef([A][M_PROG_GCJ], + [m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])]) + m4_ifdef([LT_PROG_GCJ], + [m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])]) + +AC_PROVIDE_IFELSE([AC_PROG_GO], + [LT_LANG(GO)], + [m4_define([AC_PROG_GO], defn([AC_PROG_GO])[LT_LANG(GO)])]) + +AC_PROVIDE_IFELSE([LT_PROG_RC], + [LT_LANG(RC)], + [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])]) +])# _LT_LANG_DEFAULT_CONFIG + +# Obsolete macros: +AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)]) +AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)]) +AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)]) +AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)]) +AU_DEFUN([AC_LIBTOOL_RC], [LT_LANG(Windows Resource)]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_CXX], []) +dnl AC_DEFUN([AC_LIBTOOL_F77], []) +dnl AC_DEFUN([AC_LIBTOOL_FC], []) +dnl AC_DEFUN([AC_LIBTOOL_GCJ], []) +dnl AC_DEFUN([AC_LIBTOOL_RC], []) + + +# _LT_TAG_COMPILER +# ---------------- +m4_defun([_LT_TAG_COMPILER], +[AC_REQUIRE([AC_PROG_CC])dnl + +_LT_DECL([LTCC], [CC], [1], [A C compiler])dnl +_LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl +_LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl +_LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC +])# _LT_TAG_COMPILER + + +# _LT_COMPILER_BOILERPLATE +# ------------------------ +# Check for compiler boilerplate output or warnings with +# the simple compiler test code. +m4_defun([_LT_COMPILER_BOILERPLATE], +[m4_require([_LT_DECL_SED])dnl +ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* +])# _LT_COMPILER_BOILERPLATE + + +# _LT_LINKER_BOILERPLATE +# ---------------------- +# Check for linker boilerplate output or warnings with +# the simple link test code. 
+m4_defun([_LT_LINKER_BOILERPLATE], +[m4_require([_LT_DECL_SED])dnl +ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* +])# _LT_LINKER_BOILERPLATE + +# _LT_REQUIRED_DARWIN_CHECKS +# ------------------------- +m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[ + case $host_os in + rhapsody* | darwin*) + AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:]) + AC_CHECK_TOOL([NMEDIT], [nmedit], [:]) + AC_CHECK_TOOL([LIPO], [lipo], [:]) + AC_CHECK_TOOL([OTOOL], [otool], [:]) + AC_CHECK_TOOL([OTOOL64], [otool64], [:]) + _LT_DECL([], [DSYMUTIL], [1], + [Tool to manipulate archived DWARF debug symbol files on Mac OS X]) + _LT_DECL([], [NMEDIT], [1], + [Tool to change global to local symbols on Mac OS X]) + _LT_DECL([], [LIPO], [1], + [Tool to manipulate fat objects and archives on Mac OS X]) + _LT_DECL([], [OTOOL], [1], + [ldd/readelf like tool for Mach-O binaries on Mac OS X]) + _LT_DECL([], [OTOOL64], [1], + [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4]) + + AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod], + [lt_cv_apple_cc_single_mod=no + if test -z "$LT_MULTI_MODULE"; then + # By default we will add the -single_module flag. You can override + # by either setting the environment variable LT_MULTI_MODULE + # non-empty at configure time, or by adding -multi_module to the + # link flags. + rm -rf libconftest.dylib* + echo "int foo(void){return 1;}" > conftest.c + echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ +-dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD + $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ + -dynamiclib -Wl,-single_module conftest.c 2>conftest.err + _lt_result=$? + # If there is a non-empty error log, and "single_module" + # appears in it, assume the flag caused a linker warning + if test -s conftest.err && $GREP single_module conftest.err; then + cat conftest.err >&AS_MESSAGE_LOG_FD + # Otherwise, if the output was created with a 0 exit code from + # the compiler, it worked. + elif test -f libconftest.dylib && test 0 = "$_lt_result"; then + lt_cv_apple_cc_single_mod=yes + else + cat conftest.err >&AS_MESSAGE_LOG_FD + fi + rm -rf libconftest.dylib* + rm -f conftest.* + fi]) + + AC_CACHE_CHECK([for -exported_symbols_list linker flag], + [lt_cv_ld_exported_symbols_list], + [lt_cv_ld_exported_symbols_list=no + save_LDFLAGS=$LDFLAGS + echo "_main" > conftest.sym + LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" + AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], + [lt_cv_ld_exported_symbols_list=yes], + [lt_cv_ld_exported_symbols_list=no]) + LDFLAGS=$save_LDFLAGS + ]) + + AC_CACHE_CHECK([for -force_load linker flag],[lt_cv_ld_force_load], + [lt_cv_ld_force_load=no + cat > conftest.c << _LT_EOF +int forced_loaded() { return 2;} +_LT_EOF + echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&AS_MESSAGE_LOG_FD + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&AS_MESSAGE_LOG_FD + echo "$AR cr libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD + $AR cr libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD + echo "$RANLIB libconftest.a" >&AS_MESSAGE_LOG_FD + $RANLIB libconftest.a 2>&AS_MESSAGE_LOG_FD + cat > conftest.c << _LT_EOF +int main() { return 0;} +_LT_EOF + echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&AS_MESSAGE_LOG_FD + $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err + _lt_result=$? 
+ if test -s conftest.err && $GREP force_load conftest.err; then + cat conftest.err >&AS_MESSAGE_LOG_FD + elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then + lt_cv_ld_force_load=yes + else + cat conftest.err >&AS_MESSAGE_LOG_FD + fi + rm -f conftest.err libconftest.a conftest conftest.c + rm -rf conftest.dSYM + ]) + case $host_os in + rhapsody* | darwin1.[[012]]) + _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;; + darwin1.*) + _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; + darwin*) # darwin 5.x on + # if running on 10.5 or later, the deployment target defaults + # to the OS version, if on x86, and 10.4, the deployment + # target defaults to 10.4. Don't you love it? + case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in + 10.0,*86*-darwin8*|10.0,*-darwin[[912]]*) + _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; + 10.[[012]][[,.]]*) + _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; + 10.*|11.*) + _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; + esac + ;; + esac + if test yes = "$lt_cv_apple_cc_single_mod"; then + _lt_dar_single_mod='$single_module' + fi + if test yes = "$lt_cv_ld_exported_symbols_list"; then + _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' + else + _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib' + fi + if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then + _lt_dsymutil='~$DSYMUTIL $lib || :' + else + _lt_dsymutil= + fi + ;; + esac +]) + + +# _LT_DARWIN_LINKER_FEATURES([TAG]) +# --------------------------------- +# Checks for linker and compiler features on darwin +m4_defun([_LT_DARWIN_LINKER_FEATURES], +[ + m4_require([_LT_REQUIRED_DARWIN_CHECKS]) + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_automatic, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + if test yes = "$lt_cv_ld_force_load"; then + _LT_TAGVAR(whole_archive_flag_spec, $1)='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' + m4_case([$1], [F77], [_LT_TAGVAR(compiler_needs_object, $1)=yes], + [FC], [_LT_TAGVAR(compiler_needs_object, $1)=yes]) + else + _LT_TAGVAR(whole_archive_flag_spec, $1)='' + fi + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(allow_undefined_flag, $1)=$_lt_dar_allow_undefined + case $cc_basename in + ifort*|nagfor*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac + if test yes = "$_lt_dar_can_shared"; then + output_verbose_link_cmd=func_echo_all + _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" + _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" + _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" + _LT_TAGVAR(module_expsym_cmds, $1)="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs 
\$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" + m4_if([$1], [CXX], +[ if test yes != "$lt_cv_apple_cc_single_mod"; then + _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dsymutil" + _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dar_export_syms$_lt_dsymutil" + fi +],[]) + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi +]) + +# _LT_SYS_MODULE_PATH_AIX([TAGNAME]) +# ---------------------------------- +# Links a minimal program and checks the executable +# for the system default hardcoded library path. In most cases, +# this is /usr/lib:/lib, but when the MPI compilers are used +# the location of the communication and MPI libs are included too. +# If we don't find anything, use the default library path according +# to the aix ld manual. +# Store the results from the different compilers for each TAGNAME. +# Allow to override them for all tags through lt_cv_aix_libpath. +m4_defun([_LT_SYS_MODULE_PATH_AIX], +[m4_require([_LT_DECL_SED])dnl +if test set = "${lt_cv_aix_libpath+set}"; then + aix_libpath=$lt_cv_aix_libpath +else + AC_CACHE_VAL([_LT_TAGVAR([lt_cv_aix_libpath_], [$1])], + [AC_LINK_IFELSE([AC_LANG_PROGRAM],[ + lt_aix_libpath_sed='[ + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }]' + _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. + if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then + _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi],[]) + if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then + _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=/usr/lib:/lib + fi + ]) + aix_libpath=$_LT_TAGVAR([lt_cv_aix_libpath_], [$1]) +fi +])# _LT_SYS_MODULE_PATH_AIX + + +# _LT_SHELL_INIT(ARG) +# ------------------- +m4_define([_LT_SHELL_INIT], +[m4_divert_text([M4SH-INIT], [$1 +])])# _LT_SHELL_INIT + + + +# _LT_PROG_ECHO_BACKSLASH +# ----------------------- +# Find how we can fake an echo command that does not interpret backslash. +# In particular, with Autoconf 2.60 or later we add some code to the start +# of the generated configure script that will find a shell with a builtin +# printf (that we can use as an echo command). +m4_defun([_LT_PROG_ECHO_BACKSLASH], +[ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + +AC_MSG_CHECKING([how to print strings]) +# Test print first, because it will be a builtin if present. +if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' +elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='printf %s\n' +else + # Use this function as a fallback that always works. + func_fallback_echo () + { + eval 'cat <<_LTECHO_EOF +$[]1 +_LTECHO_EOF' + } + ECHO='func_fallback_echo' +fi + +# func_echo_all arg... +# Invoke $ECHO with all args, space-separated. 
+func_echo_all () +{ + $ECHO "$*" +} + +case $ECHO in + printf*) AC_MSG_RESULT([printf]) ;; + print*) AC_MSG_RESULT([print -r]) ;; + *) AC_MSG_RESULT([cat]) ;; +esac + +m4_ifdef([_AS_DETECT_SUGGESTED], +[_AS_DETECT_SUGGESTED([ + test -n "${ZSH_VERSION+set}${BASH_VERSION+set}" || ( + ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' + ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO + ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + PATH=/empty FPATH=/empty; export PATH FPATH + test "X`printf %s $ECHO`" = "X$ECHO" \ + || test "X`print -r -- $ECHO`" = "X$ECHO" )])]) + +_LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts]) +_LT_DECL([], [ECHO], [1], [An echo program that protects backslashes]) +])# _LT_PROG_ECHO_BACKSLASH + + +# _LT_WITH_SYSROOT +# ---------------- +AC_DEFUN([_LT_WITH_SYSROOT], +[AC_MSG_CHECKING([for sysroot]) +AC_ARG_WITH([sysroot], +[AS_HELP_STRING([--with-sysroot@<:@=DIR@:>@], + [Search for dependent libraries within DIR (or the compiler's sysroot + if not specified).])], +[], [with_sysroot=no]) + +dnl lt_sysroot will always be passed unquoted. We quote it here +dnl in case the user passed a directory name. +lt_sysroot= +case $with_sysroot in #( + yes) + if test yes = "$GCC"; then + lt_sysroot=`$CC --print-sysroot 2>/dev/null` + fi + ;; #( + /*) + lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` + ;; #( + no|'') + ;; #( + *) + AC_MSG_RESULT([$with_sysroot]) + AC_MSG_ERROR([The sysroot must be an absolute path.]) + ;; +esac + + AC_MSG_RESULT([${lt_sysroot:-no}]) +_LT_DECL([], [lt_sysroot], [0], [The root where to search for ]dnl +[dependent libraries, and where our libraries should be installed.])]) + +# _LT_ENABLE_LOCK +# --------------- +m4_defun([_LT_ENABLE_LOCK], +[AC_ARG_ENABLE([libtool-lock], + [AS_HELP_STRING([--disable-libtool-lock], + [avoid locking (might break parallel builds)])]) +test no = "$enable_libtool_lock" || enable_libtool_lock=yes + +# Some flags need to be propagated to the compiler or linker for good +# libtool support. +case $host in +ia64-*-hpux*) + # Find out what ABI is being produced by ac_compile, and set mode + # options accordingly. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.$ac_objext` in + *ELF-32*) + HPUX_IA64_MODE=32 + ;; + *ELF-64*) + HPUX_IA64_MODE=64 + ;; + esac + fi + rm -rf conftest* + ;; +*-*-irix6*) + # Find out what ABI is being produced by ac_compile, and set linker + # options accordingly. + echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + if test yes = "$lt_cv_prog_gnu_ld"; then + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; + *N32*) + LD="${LD-ld} -melf32bmipn32" + ;; + *64-bit*) + LD="${LD-ld} -melf64bmip" + ;; + esac + else + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; + *N32*) + LD="${LD-ld} -n32" + ;; + *64-bit*) + LD="${LD-ld} -64" + ;; + esac + fi + fi + rm -rf conftest* + ;; + +mips64*-*linux*) + # Find out what ABI is being produced by ac_compile, and set linker + # options accordingly. 
+ echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + emul=elf + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + emul="${emul}32" + ;; + *64-bit*) + emul="${emul}64" + ;; + esac + case `/usr/bin/file conftest.$ac_objext` in + *MSB*) + emul="${emul}btsmip" + ;; + *LSB*) + emul="${emul}ltsmip" + ;; + esac + case `/usr/bin/file conftest.$ac_objext` in + *N32*) + emul="${emul}n32" + ;; + esac + LD="${LD-ld} -m $emul" + fi + rm -rf conftest* + ;; + +x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ +s390*-*linux*|s390*-*tpf*|sparc*-*linux*) + # Find out what ABI is being produced by ac_compile, and set linker + # options accordingly. Note that the listed cases only cover the + # situations where additional linker options are needed (such as when + # doing 32-bit compilation for a host where ld defaults to 64-bit, or + # vice versa); the common cases where no linker options are needed do + # not appear in the list. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.o` in + *32-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_i386_fbsd" + ;; + x86_64-*linux*) + case `/usr/bin/file conftest.o` in + *x86-64*) + LD="${LD-ld} -m elf32_x86_64" + ;; + *) + LD="${LD-ld} -m elf_i386" + ;; + esac + ;; + powerpc64le-*linux*) + LD="${LD-ld} -m elf32lppclinux" + ;; + powerpc64-*linux*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_x86_64_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + powerpcle-*linux*) + LD="${LD-ld} -m elf64lppc" + ;; + powerpc-*linux*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*|s390*-*tpf*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. + SAVE_CFLAGS=$CFLAGS + CFLAGS="$CFLAGS -belf" + AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, + [AC_LANG_PUSH(C) + AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) + AC_LANG_POP]) + if test yes != "$lt_cv_cc_needs_belf"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS=$SAVE_CFLAGS + fi + ;; +*-*solaris*) + # Find out what ABI is being produced by ac_compile, and set linker + # options accordingly. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.o` in + *64-bit*) + case $lt_cv_prog_gnu_ld in + yes*) + case $host in + i?86-*-solaris*|x86_64-*-solaris*) + LD="${LD-ld} -m elf_x86_64" + ;; + sparc*-*-solaris*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + # GNU ld 2.21 introduced _sol2 emulations. Use them if available. 
+ if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then + LD=${LD-ld}_sol2 + fi + ;; + *) + if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then + LD="${LD-ld} -64" + fi + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; +esac + +need_locks=$enable_libtool_lock +])# _LT_ENABLE_LOCK + + +# _LT_PROG_AR +# ----------- +m4_defun([_LT_PROG_AR], +[AC_CHECK_TOOLS(AR, [ar], false) +: ${AR=ar} +: ${AR_FLAGS=cr} +_LT_DECL([], [AR], [1], [The archiver]) +_LT_DECL([], [AR_FLAGS], [1], [Flags to create an archive]) + +AC_CACHE_CHECK([for archiver @FILE support], [lt_cv_ar_at_file], + [lt_cv_ar_at_file=no + AC_COMPILE_IFELSE([AC_LANG_PROGRAM], + [echo conftest.$ac_objext > conftest.lst + lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&AS_MESSAGE_LOG_FD' + AC_TRY_EVAL([lt_ar_try]) + if test 0 -eq "$ac_status"; then + # Ensure the archiver fails upon bogus file names. + rm -f conftest.$ac_objext libconftest.a + AC_TRY_EVAL([lt_ar_try]) + if test 0 -ne "$ac_status"; then + lt_cv_ar_at_file=@ + fi + fi + rm -f conftest.* libconftest.a + ]) + ]) + +if test no = "$lt_cv_ar_at_file"; then + archiver_list_spec= +else + archiver_list_spec=$lt_cv_ar_at_file +fi +_LT_DECL([], [archiver_list_spec], [1], + [How to feed a file listing to the archiver]) +])# _LT_PROG_AR + + +# _LT_CMD_OLD_ARCHIVE +# ------------------- +m4_defun([_LT_CMD_OLD_ARCHIVE], +[_LT_PROG_AR + +AC_CHECK_TOOL(STRIP, strip, :) +test -z "$STRIP" && STRIP=: +_LT_DECL([], [STRIP], [1], [A symbol stripping program]) + +AC_CHECK_TOOL(RANLIB, ranlib, :) +test -z "$RANLIB" && RANLIB=: +_LT_DECL([], [RANLIB], [1], + [Commands used to install an old-style archive]) + +# Determine commands to create old-style static archives. +old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= + +if test -n "$RANLIB"; then + case $host_os in + bitrig* | openbsd*) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" + ;; + *) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" + ;; + esac + old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" +fi + +case $host_os in + darwin*) + lock_old_archive_extraction=yes ;; + *) + lock_old_archive_extraction=no ;; +esac +_LT_DECL([], [old_postinstall_cmds], [2]) +_LT_DECL([], [old_postuninstall_cmds], [2]) +_LT_TAGDECL([], [old_archive_cmds], [2], + [Commands used to build an old-style archive]) +_LT_DECL([], [lock_old_archive_extraction], [0], + [Whether to use a lock for old archive extraction]) +])# _LT_CMD_OLD_ARCHIVE + + +# _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +# [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE]) +# ---------------------------------------------------------------- +# Check whether the given compiler option works +AC_DEFUN([_LT_COMPILER_OPTION], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_SED])dnl +AC_CACHE_CHECK([$1], [$2], + [$2=no + m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4]) + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$3" ## exclude from sc_useless_quotes_in_assignment + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. 
+ lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&AS_MESSAGE_LOG_FD + echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + $2=yes + fi + fi + $RM conftest* +]) + +if test yes = "[$]$2"; then + m4_if([$5], , :, [$5]) +else + m4_if([$6], , :, [$6]) +fi +])# _LT_COMPILER_OPTION + +# Old name: +AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], []) + + +# _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +# [ACTION-SUCCESS], [ACTION-FAILURE]) +# ---------------------------------------------------- +# Check whether the given linker option works +AC_DEFUN([_LT_LINKER_OPTION], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_SED])dnl +AC_CACHE_CHECK([$1], [$2], + [$2=no + save_LDFLAGS=$LDFLAGS + LDFLAGS="$LDFLAGS $3" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&AS_MESSAGE_LOG_FD + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + $2=yes + fi + else + $2=yes + fi + fi + $RM -r conftest* + LDFLAGS=$save_LDFLAGS +]) + +if test yes = "[$]$2"; then + m4_if([$4], , :, [$4]) +else + m4_if([$5], , :, [$5]) +fi +])# _LT_LINKER_OPTION + +# Old name: +AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], []) + + +# LT_CMD_MAX_LEN +#--------------- +AC_DEFUN([LT_CMD_MAX_LEN], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +# find the maximum length of command line arguments +AC_MSG_CHECKING([the maximum length of command line arguments]) +AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl + i=0 + teststring=ABCD + + case $build_os in + msdosdjgpp*) + # On DJGPP, this test can blow up pretty badly due to problems in libc + # (any single argument exceeding 2000 bytes causes a buffer overrun + # during glob expansion). Even if it were fixed, the result of this + # check would be larger than it should be. + lt_cv_sys_max_cmd_len=12288; # 12K is about right + ;; + + gnu*) + # Under GNU Hurd, this test is not required because there is + # no limit to the length of command line arguments. + # Libtool will interpret -1 as no limit whatsoever + lt_cv_sys_max_cmd_len=-1; + ;; + + cygwin* | mingw* | cegcc*) + # On Win9x/ME, this test blows up -- it succeeds, but takes + # about 5 minutes as the teststring grows exponentially. 
+ # Worse, since 9x/ME are not pre-emptively multitasking, + # you end up with a "frozen" computer, even though with patience + # the test eventually succeeds (with a max line length of 256k). + # Instead, let's just punt: use the minimum linelength reported by + # all of the supported platforms: 8192 (on NT/2K/XP). + lt_cv_sys_max_cmd_len=8192; + ;; + + mint*) + # On MiNT this can take a long time and run out of memory. + lt_cv_sys_max_cmd_len=8192; + ;; + + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; + + bitrig* | darwin* | dragonfly* | freebsd* | netbsd* | openbsd*) + # This has been around since 386BSD, at least. Likely further. + if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + ;; + + interix*) + # We know the value 262144 and hardcode it with a safety zone (like BSD) + lt_cv_sys_max_cmd_len=196608 + ;; + + os2*) + # The test takes a long time on OS/2. + lt_cv_sys_max_cmd_len=8192 + ;; + + osf*) + # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not + # nice to cause kernel panics so lets avoid the loop below. + # First set a reasonable default. + lt_cv_sys_max_cmd_len=16384 + # + if test -x /sbin/sysconfig; then + case `/sbin/sysconfig -q proc exec_disable_arg_limit` in + *1*) lt_cv_sys_max_cmd_len=-1 ;; + esac + fi + ;; + sco3.2v5*) + lt_cv_sys_max_cmd_len=102400 + ;; + sysv5* | sco5v6* | sysv4.2uw2*) + kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` + if test -n "$kargmax"; then + lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[ ]]//'` + else + lt_cv_sys_max_cmd_len=32768 + fi + ;; + *) + lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` + if test -n "$lt_cv_sys_max_cmd_len" && \ + test undefined != "$lt_cv_sys_max_cmd_len"; then + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + else + # Make teststring a little bigger before we do anything with it. + # a 1K string should be a reasonable start. + for i in 1 2 3 4 5 6 7 8; do + teststring=$teststring$teststring + done + SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. + while { test X`env echo "$teststring$teststring" 2>/dev/null` \ + = "X$teststring$teststring"; } >/dev/null 2>&1 && + test 17 != "$i" # 1/2 MB should be enough + do + i=`expr $i + 1` + teststring=$teststring$teststring + done + # Only check the string length outside the loop. + lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` + teststring= + # Add a significant safety factor because C++ compilers can tack on + # massive amounts of additional arguments before passing them to the + # linker. It appears as though 1/2 is a usable value. 
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2`
+ fi
+ ;;
+ esac
+])
+if test -n "$lt_cv_sys_max_cmd_len"; then
+ AC_MSG_RESULT($lt_cv_sys_max_cmd_len)
+else
+ AC_MSG_RESULT(none)
+fi
+max_cmd_len=$lt_cv_sys_max_cmd_len
+_LT_DECL([], [max_cmd_len], [0],
+ [What is the maximum length of a command?])
+])# LT_CMD_MAX_LEN
+
+# Old name:
+AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], [])
+
+
+# _LT_HEADER_DLFCN
+# ----------------
+m4_defun([_LT_HEADER_DLFCN],
+[AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl
+])# _LT_HEADER_DLFCN
+
+
+# _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE,
+# ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING)
+# ----------------------------------------------------------------
+m4_defun([_LT_TRY_DLOPEN_SELF],
+[m4_require([_LT_HEADER_DLFCN])dnl
+if test yes = "$cross_compiling"; then :
+ [$4]
+else
+ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+ lt_status=$lt_dlunknown
+ cat > conftest.$ac_ext <<_LT_EOF
+[#line $LINENO "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+# define LT_DLGLOBAL RTLD_GLOBAL
+#else
+# ifdef DL_GLOBAL
+# define LT_DLGLOBAL DL_GLOBAL
+# else
+# define LT_DLGLOBAL 0
+# endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
+ find out it does not work in some platform. */
+#ifndef LT_DLLAZY_OR_NOW
+# ifdef RTLD_LAZY
+# define LT_DLLAZY_OR_NOW RTLD_LAZY
+# else
+# ifdef DL_LAZY
+# define LT_DLLAZY_OR_NOW DL_LAZY
+# else
+# ifdef RTLD_NOW
+# define LT_DLLAZY_OR_NOW RTLD_NOW
+# else
+# ifdef DL_NOW
+# define LT_DLLAZY_OR_NOW DL_NOW
+# else
+# define LT_DLLAZY_OR_NOW 0
+# endif
+# endif
+# endif
+# endif
+#endif
+
+/* When -fvisibility=hidden is used, assume the code has been annotated
+ correspondingly for the symbols needed. */
+#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
+int fnord () __attribute__((visibility("default")));
+#endif
+
+int fnord () { return 42; }
+int main ()
+{
+ void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+ int status = $lt_dlunknown;
+
+ if (self)
+ {
+ if (dlsym (self,"fnord")) status = $lt_dlno_uscore;
+ else
+ {
+ if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore;
+ else puts (dlerror ());
+ }
+ /* dlclose (self); */
+ }
+ else
+ puts (dlerror ());
+
+ return status;
+}]
+_LT_EOF
+ if AC_TRY_EVAL(ac_link) && test -s "conftest$ac_exeext" 2>/dev/null; then
+ (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null
+ lt_status=$?
+ case x$lt_status in + x$lt_dlno_uscore) $1 ;; + x$lt_dlneed_uscore) $2 ;; + x$lt_dlunknown|x*) $3 ;; + esac + else : + # compilation failed + $3 + fi +fi +rm -fr conftest* +])# _LT_TRY_DLOPEN_SELF + + +# LT_SYS_DLOPEN_SELF +# ------------------ +AC_DEFUN([LT_SYS_DLOPEN_SELF], +[m4_require([_LT_HEADER_DLFCN])dnl +if test yes != "$enable_dlopen"; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen=load_add_on + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32* | cegcc*) + lt_cv_dlopen=LoadLibrary + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen=dlopen + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + AC_CHECK_LIB([dl], [dlopen], + [lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl],[ + lt_cv_dlopen=dyld + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ]) + ;; + + tpf*) + # Don't try to run any link tests for TPF. We know it's impossible + # because TPF is a cross-compiler, and we know how we open DSOs. + lt_cv_dlopen=dlopen + lt_cv_dlopen_libs= + lt_cv_dlopen_self=no + ;; + + *) + AC_CHECK_FUNC([shl_load], + [lt_cv_dlopen=shl_load], + [AC_CHECK_LIB([dld], [shl_load], + [lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld], + [AC_CHECK_FUNC([dlopen], + [lt_cv_dlopen=dlopen], + [AC_CHECK_LIB([dl], [dlopen], + [lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl], + [AC_CHECK_LIB([svld], [dlopen], + [lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld], + [AC_CHECK_LIB([dld], [dld_link], + [lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld]) + ]) + ]) + ]) + ]) + ]) + ;; + esac + + if test no = "$lt_cv_dlopen"; then + enable_dlopen=no + else + enable_dlopen=yes + fi + + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS=$CPPFLAGS + test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + + save_LDFLAGS=$LDFLAGS + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + + save_LIBS=$LIBS + LIBS="$lt_cv_dlopen_libs $LIBS" + + AC_CACHE_CHECK([whether a program can dlopen itself], + lt_cv_dlopen_self, [dnl + _LT_TRY_DLOPEN_SELF( + lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes, + lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross) + ]) + + if test yes = "$lt_cv_dlopen_self"; then + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" + AC_CACHE_CHECK([whether a statically linked program can dlopen itself], + lt_cv_dlopen_self_static, [dnl + _LT_TRY_DLOPEN_SELF( + lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes, + lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross) + ]) + fi + + CPPFLAGS=$save_CPPFLAGS + LDFLAGS=$save_LDFLAGS + LIBS=$save_LIBS + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi +_LT_DECL([dlopen_support], [enable_dlopen], [0], + [Whether dlopen is supported]) +_LT_DECL([dlopen_self], [enable_dlopen_self], [0], + [Whether dlopen of programs is supported]) +_LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0], + [Whether dlopen of statically linked programs is supported]) +])# LT_SYS_DLOPEN_SELF + +# Old name: +AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], []) + + +# _LT_COMPILER_C_O([TAGNAME]) +# 
--------------------------- +# Check to see if options -c and -o are simultaneously supported by compiler. +# This macro does not hard code the compiler like AC_PROG_CC_C_O. +m4_defun([_LT_COMPILER_C_O], +[m4_require([_LT_DECL_SED])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_TAG_COMPILER])dnl +AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], + [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)], + [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&AS_MESSAGE_LOG_FD + echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes + fi + fi + chmod u+w . 2>&AS_MESSAGE_LOG_FD + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* +]) +_LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1], + [Does compiler simultaneously support -c and -o options?]) +])# _LT_COMPILER_C_O + + +# _LT_COMPILER_FILE_LOCKS([TAGNAME]) +# ---------------------------------- +# Check to see if we can do hard links to lock some files if needed +m4_defun([_LT_COMPILER_FILE_LOCKS], +[m4_require([_LT_ENABLE_LOCK])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +_LT_COMPILER_C_O([$1]) + +hard_links=nottested +if test no = "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" && test no != "$need_locks"; then + # do not overwrite the value of need_locks provided by the user + AC_MSG_CHECKING([if we can lock with hard links]) + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + AC_MSG_RESULT([$hard_links]) + if test no = "$hard_links"; then + AC_MSG_WARN(['$CC' does not support '-c -o', so 'make -j' may be unsafe]) + need_locks=warn + fi +else + need_locks=no +fi +_LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?]) +])# _LT_COMPILER_FILE_LOCKS + + +# _LT_CHECK_OBJDIR +# ---------------- +m4_defun([_LT_CHECK_OBJDIR], +[AC_CACHE_CHECK([for objdir], [lt_cv_objdir], +[rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. 
+ lt_cv_objdir=_libs +fi +rmdir .libs 2>/dev/null]) +objdir=$lt_cv_objdir +_LT_DECL([], [objdir], [0], + [The name of the directory that contains temporary libtool files])dnl +m4_pattern_allow([LT_OBJDIR])dnl +AC_DEFINE_UNQUOTED([LT_OBJDIR], "$lt_cv_objdir/", + [Define to the sub-directory where libtool stores uninstalled libraries.]) +])# _LT_CHECK_OBJDIR + + +# _LT_LINKER_HARDCODE_LIBPATH([TAGNAME]) +# -------------------------------------- +# Check hardcoding attributes. +m4_defun([_LT_LINKER_HARDCODE_LIBPATH], +[AC_MSG_CHECKING([how to hardcode library paths into programs]) +_LT_TAGVAR(hardcode_action, $1)= +if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" || + test -n "$_LT_TAGVAR(runpath_var, $1)" || + test yes = "$_LT_TAGVAR(hardcode_automatic, $1)"; then + + # We can hardcode non-existent directories. + if test no != "$_LT_TAGVAR(hardcode_direct, $1)" && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" && + test no != "$_LT_TAGVAR(hardcode_minus_L, $1)"; then + # Linking always hardcodes the temporary library directory. + _LT_TAGVAR(hardcode_action, $1)=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + _LT_TAGVAR(hardcode_action, $1)=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. + _LT_TAGVAR(hardcode_action, $1)=unsupported +fi +AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)]) + +if test relink = "$_LT_TAGVAR(hardcode_action, $1)" || + test yes = "$_LT_TAGVAR(inherit_rpath, $1)"; then + # Fast installation is not supported + enable_fast_install=no +elif test yes = "$shlibpath_overrides_runpath" || + test no = "$enable_shared"; then + # Fast installation is not necessary + enable_fast_install=needless +fi +_LT_TAGDECL([], [hardcode_action], [0], + [How to hardcode a shared library path into an executable]) +])# _LT_LINKER_HARDCODE_LIBPATH + + +# _LT_CMD_STRIPLIB +# ---------------- +m4_defun([_LT_CMD_STRIPLIB], +[m4_require([_LT_DECL_EGREP]) +striplib= +old_striplib= +AC_MSG_CHECKING([whether stripping libraries is possible]) +if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + AC_MSG_RESULT([yes]) +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP"; then + striplib="$STRIP -x" + old_striplib="$STRIP -S" + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + fi + ;; + *) + AC_MSG_RESULT([no]) + ;; + esac +fi +_LT_DECL([], [old_striplib], [1], [Commands to strip libraries]) +_LT_DECL([], [striplib], [1]) +])# _LT_CMD_STRIPLIB + + +# _LT_PREPARE_MUNGE_PATH_LIST +# --------------------------- +# Make sure func_munge_path_list() is defined correctly. 
+m4_defun([_LT_PREPARE_MUNGE_PATH_LIST], +[[# func_munge_path_list VARIABLE PATH +# ----------------------------------- +# VARIABLE is name of variable containing _space_ separated list of +# directories to be munged by the contents of PATH, which is string +# having a format: +# "DIR[:DIR]:" +# string "DIR[ DIR]" will be prepended to VARIABLE +# ":DIR[:DIR]" +# string "DIR[ DIR]" will be appended to VARIABLE +# "DIRP[:DIRP]::[DIRA:]DIRA" +# string "DIRP[ DIRP]" will be prepended to VARIABLE and string +# "DIRA[ DIRA]" will be appended to VARIABLE +# "DIR[:DIR]" +# VARIABLE will be replaced by "DIR[ DIR]" +func_munge_path_list () +{ + case x@S|@2 in + x) + ;; + *:) + eval @S|@1=\"`$ECHO @S|@2 | $SED 's/:/ /g'` \@S|@@S|@1\" + ;; + x:*) + eval @S|@1=\"\@S|@@S|@1 `$ECHO @S|@2 | $SED 's/:/ /g'`\" + ;; + *::*) + eval @S|@1=\"\@S|@@S|@1\ `$ECHO @S|@2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" + eval @S|@1=\"`$ECHO @S|@2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \@S|@@S|@1\" + ;; + *) + eval @S|@1=\"`$ECHO @S|@2 | $SED 's/:/ /g'`\" + ;; + esac +} +]])# _LT_PREPARE_PATH_LIST + + +# _LT_SYS_DYNAMIC_LINKER([TAG]) +# ----------------------------- +# PORTME Fill in your ld.so characteristics +m4_defun([_LT_SYS_DYNAMIC_LINKER], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_OBJDUMP])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_CHECK_SHELL_FEATURES])dnl +m4_require([_LT_PREPARE_MUNGE_PATH_LIST])dnl +AC_MSG_CHECKING([dynamic linker characteristics]) +m4_if([$1], + [], [ +if test yes = "$GCC"; then + case $host_os in + darwin*) lt_awk_arg='/^libraries:/,/LR/' ;; + *) lt_awk_arg='/^libraries:/' ;; + esac + case $host_os in + mingw* | cegcc*) lt_sed_strip_eq='s|=\([[A-Za-z]]:\)|\1|g' ;; + *) lt_sed_strip_eq='s|=/|/|g' ;; + esac + lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` + case $lt_search_path_spec in + *\;*) + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` + ;; + *) + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` + ;; + esac + # Ok, now we have the path, separated by spaces, we can step through it + # and add multilib dir if necessary... + lt_tmp_lt_search_path_spec= + lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` + # ...but if some path component already ends with the multilib dir we assume + # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer). 
+ case "$lt_multi_os_dir; $lt_search_path_spec " in + "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*) + lt_multi_os_dir= + ;; + esac + for lt_sys_path in $lt_search_path_spec; do + if test -d "$lt_sys_path$lt_multi_os_dir"; then + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir" + elif test -n "$lt_multi_os_dir"; then + test -d "$lt_sys_path" && \ + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" + fi + done + lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' +BEGIN {RS = " "; FS = "/|\n";} { + lt_foo = ""; + lt_count = 0; + for (lt_i = NF; lt_i > 0; lt_i--) { + if ($lt_i != "" && $lt_i != ".") { + if ($lt_i == "..") { + lt_count++; + } else { + if (lt_count == 0) { + lt_foo = "/" $lt_i lt_foo; + } else { + lt_count--; + } + } + } + } + if (lt_foo != "") { lt_freq[[lt_foo]]++; } + if (lt_freq[[lt_foo]] == 1) { print lt_foo; } +}'` + # AWK program above erroneously prepends '/' to C:/dos/paths + # for these hosts. + case $host_os in + mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ + $SED 's|/\([[A-Za-z]]:\)|\1|g'` ;; + esac + sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi]) +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=.so +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +AC_ARG_VAR([LT_SYS_LIBRARY_PATH], +[User-defined run-time library search path.]) + +case $host_os in +aix3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='$libname$release$shared_ext$major' + ;; + +aix[[4-9]]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test ia64 = "$host_cpu"; then + # AIX 5 supports IA64 + library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line '#! .'. This would cause the generated library to + # depend on '.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[[01]] | aix4.[[01]].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # Using Import Files as archive members, it is possible to support + # filename-based versioning of shared library archives on AIX. While + # this would work for both with and without runtime linking, it will + # prevent static linking of such archives. So we do filename-based + # shared library versioning with .so extension only, which is used + # when both runtime linking and shared linking is enabled. 
+ # Unfortunately, runtime linking may impact performance, so we do + # not want this to be the default eventually. Also, we use the + # versioned .so libs for executables only if there is the -brtl + # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. + # To allow for filename-based versioning support, we need to create + # libNAME.so.V as an archive file, containing: + # *) an Import File, referring to the versioned filename of the + # archive as well as the shared archive member, telling the + # bitwidth (32 or 64) of that shared object, and providing the + # list of exported symbols of that shared object, eventually + # decorated with the 'weak' keyword + # *) the shared object with the F_LOADONLY flag set, to really avoid + # it being seen by the linker. + # At run time we better use the real file rather than another symlink, + # but for link time we create the symlink libNAME.so -> libNAME.so.V + + case $with_aix_soname,$aix_use_runtimelinking in + # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + aix,yes) # traditional libtool + dynamic_linker='AIX unversionable lib.so' + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + ;; + aix,no) # traditional AIX only + dynamic_linker='AIX lib.a[(]lib.so.V[)]' + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='$libname$release.a $libname.a' + soname_spec='$libname$release$shared_ext$major' + ;; + svr4,*) # full svr4 only + dynamic_linker="AIX lib.so.V[(]$shared_archive_member_spec.o[)]" + library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' + # We do not specify a path in Import Files, so LIBPATH fires. + shlibpath_overrides_runpath=yes + ;; + *,yes) # both, prefer svr4 + dynamic_linker="AIX lib.so.V[(]$shared_archive_member_spec.o[)], lib.a[(]lib.so.V[)]" + library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' + # unpreferred sharedlib libNAME.a needs extra handling + postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' + postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' + # We do not specify a path in Import Files, so LIBPATH fires. 
+ shlibpath_overrides_runpath=yes + ;; + *,no) # both, prefer aix + dynamic_linker="AIX lib.a[(]lib.so.V[)], lib.so.V[(]$shared_archive_member_spec.o[)]" + library_names_spec='$libname$release.a $libname.a' + soname_spec='$libname$release$shared_ext$major' + # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling + postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' + postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' + ;; + esac + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. + # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + +beos*) + library_names_spec='$libname$shared_ext' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[[45]]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32* | cegcc*) + version_type=windows + shrext_cmds=.dll + need_version=no + need_lib_prefix=no + + case $GCC,$cc_basename in + yes,*) + # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo $libname | sed -e 's/^lib/cyg/'``echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' +m4_if([$1], [],[ + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"]) + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo $libname | sed -e 's/^lib/pw/'``echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' + ;; + esac + dynamic_linker='Win32 ld.exe' + ;; + + *,cl*) + # Native MSVC + libname_spec='$name' + soname_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' + library_names_spec='$libname.dll.lib' + + case $build_os in + mingw*) + sys_lib_search_path_spec= + lt_save_ifs=$IFS + IFS=';' + for lt_path in $LIB + do + IFS=$lt_save_ifs + # Let DOS variable expansion print the short 8.3 style file name. + lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` + sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" + done + IFS=$lt_save_ifs + # Convert to MSYS style. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([[a-zA-Z]]\\):| /\\1|g' -e 's|^ ||'` + ;; + cygwin*) + # Convert to unix form, then to dos form, then back to unix form + # but this time dos style (no spaces!) so that the unix form looks + # like /cygdrive/c/PROGRA~1:/cygdr... + sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` + sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` + sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + ;; + *) + sys_lib_search_path_spec=$LIB + if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then + # It is most probably a Windows format PATH. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # FIXME: find the short name or the path components, as spaces are + # common. (e.g. "Program Files" -> "PROGRA~1") + ;; + esac + + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + dynamic_linker='Win32 link.exe' + ;; + + *) + # Assume MSVC wrapper + library_names_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext $libname.lib' + dynamic_linker='Win32 ld.exe' + ;; + esac + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' + soname_spec='$libname$release$major$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' +m4_if([$1], [],[ + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"]) + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. + if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[[23]].*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2.*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[[01]]* | freebsdelf3.[[01]]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \ + freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + *) # from 4.6 on, and DragonFly + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +haiku*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + dynamic_linker="$host_os runtime_loader" + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=no + sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + if test 32 = "$HPUX_IA64_MODE"; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + sys_lib_dlsearch_path_spec=/usr/lib/hpux32 + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + sys_lib_dlsearch_path_spec=/usr/lib/hpux64 + fi + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555, ... + postinstall_cmds='chmod 555 $lib' + # or fails outright, so override atomically: + install_override_mode=555 + ;; + +interix[[3-9]]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test yes = "$lt_cv_prog_gnu_ld"; then + version_type=linux # correct to gnu/linux during the next big refactor + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='$libname$release$shared_ext$major' + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" + sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +linux*android*) + version_type=none # Android doesn't support versioned libraries. 
+ need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext' + soname_spec='$libname$release$shared_ext' + finish_cmds= + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + dynamic_linker='Android linker' + # Don't embed -rpath directories since the linker doesn't support them. + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + ;; + +# This must be glibc/ELF. +linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + + # Some binutils ld are patched to set DT_RUNPATH + AC_CACHE_VAL([lt_cv_shlibpath_overrides_runpath], + [lt_cv_shlibpath_overrides_runpath=no + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \ + LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\"" + AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], + [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null], + [lt_cv_shlibpath_overrides_runpath=yes])]) + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir + ]) + shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Ideally, we could use ldconfig to report *all* directores which are + # searched for libraries, however this is still not possible. Aside from not + # being certain /sbin/ldconfig is available, command + # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, + # even though it is searched at run-time. Try to do the best guess by + # appending ld.so.conf contents (and includes) to the search path. + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. 
+ dynamic_linker='GNU/Linux ld.so' + ;; + +netbsdelf*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='NetBSD ld.elf_so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +*nto* | *qnx*) + version_type=qnx + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; + +openbsd* | bitrig*) + version_type=sunos + sys_lib_dlsearch_path_spec=/usr/lib + need_lib_prefix=no + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then + need_version=no + else + need_version=yes + fi + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +os2*) + libname_spec='$name' + version_type=windows + shrext_cmds=.dll + need_version=no + need_lib_prefix=no + # OS/2 can only load a DLL with a base name of 8 characters or less. + soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; + v=$($ECHO $release$versuffix | tr -d .-); + n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); + $ECHO $n$v`$shared_ext' + library_names_spec='${libname}_dll.$libext' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=BEGINLIBPATH + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; $ECHO \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='$libname$release$shared_ext$major' + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + +rdos*) + dynamic_linker=no + ;; + +solaris*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test yes = "$with_gnu_ld"; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec; then + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' + soname_spec='$libname$shared_ext.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=sco + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + if test yes = "$with_gnu_ld"; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + case $host_os in + sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
+ version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +uts4*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +AC_MSG_RESULT([$dynamic_linker]) +test no = "$dynamic_linker" && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test yes = "$GCC"; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then + sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec +fi + +if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then + sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec +fi + +# remember unaugmented sys_lib_dlsearch_path content for libtool script decls... +configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec + +# ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code +func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" + +# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool +configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH + +_LT_DECL([], [variables_saved_for_relink], [1], + [Variables whose values should be saved in libtool wrapper scripts and + restored at link time]) +_LT_DECL([], [need_lib_prefix], [0], + [Do we need the "lib" prefix for modules?]) +_LT_DECL([], [need_version], [0], [Do we need a version for libraries?]) +_LT_DECL([], [version_type], [0], [Library versioning type]) +_LT_DECL([], [runpath_var], [0], [Shared library runtime path variable]) +_LT_DECL([], [shlibpath_var], [0],[Shared library path variable]) +_LT_DECL([], [shlibpath_overrides_runpath], [0], + [Is shlibpath searched before the hard-coded library search path?]) +_LT_DECL([], [libname_spec], [1], [Format of library name prefix]) +_LT_DECL([], [library_names_spec], [1], + [[List of archive names. First name is the real one, the rest are links. 
+ The last name is the one that the linker finds with -lNAME]]) +_LT_DECL([], [soname_spec], [1], + [[The coded name of the library, if different from the real name]]) +_LT_DECL([], [install_override_mode], [1], + [Permission mode override for installation of shared libraries]) +_LT_DECL([], [postinstall_cmds], [2], + [Command to use after installation of a shared archive]) +_LT_DECL([], [postuninstall_cmds], [2], + [Command to use after uninstallation of a shared archive]) +_LT_DECL([], [finish_cmds], [2], + [Commands used to finish a libtool library installation in a directory]) +_LT_DECL([], [finish_eval], [1], + [[As "finish_cmds", except a single script fragment to be evaled but + not shown]]) +_LT_DECL([], [hardcode_into_libs], [0], + [Whether we should hardcode library paths into libraries]) +_LT_DECL([], [sys_lib_search_path_spec], [2], + [Compile-time system search path for libraries]) +_LT_DECL([sys_lib_dlsearch_path_spec], [configure_time_dlsearch_path], [2], + [Detected run-time system search path for libraries]) +_LT_DECL([], [configure_time_lt_sys_library_path], [2], + [Explicit LT_SYS_LIBRARY_PATH set during ./configure time]) +])# _LT_SYS_DYNAMIC_LINKER + + +# _LT_PATH_TOOL_PREFIX(TOOL) +# -------------------------- +# find a file program that can recognize shared library +AC_DEFUN([_LT_PATH_TOOL_PREFIX], +[m4_require([_LT_DECL_EGREP])dnl +AC_MSG_CHECKING([for $1]) +AC_CACHE_VAL(lt_cv_path_MAGIC_CMD, +[case $MAGIC_CMD in +[[\\/*] | ?:[\\/]*]) + lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD=$MAGIC_CMD + lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR +dnl $ac_dummy forces splitting on constant user-supplied paths. +dnl POSIX.2 word splitting is done only on the output of word expansions, +dnl not every word. This closes a longstanding sh security hole. + ac_dummy="m4_if([$2], , $PATH, [$2])" + for ac_dir in $ac_dummy; do + IFS=$lt_save_ifs + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$1"; then + lt_cv_path_MAGIC_CMD=$ac_dir/"$1" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD=$lt_cv_path_MAGIC_CMD + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. 
Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS=$lt_save_ifs + MAGIC_CMD=$lt_save_MAGIC_CMD + ;; +esac]) +MAGIC_CMD=$lt_cv_path_MAGIC_CMD +if test -n "$MAGIC_CMD"; then + AC_MSG_RESULT($MAGIC_CMD) +else + AC_MSG_RESULT(no) +fi +_LT_DECL([], [MAGIC_CMD], [0], + [Used to examine libraries when file_magic_cmd begins with "file"])dnl +])# _LT_PATH_TOOL_PREFIX + +# Old name: +AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], []) + + +# _LT_PATH_MAGIC +# -------------- +# find a file program that can recognize a shared library +m4_defun([_LT_PATH_MAGIC], +[_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH) +if test -z "$lt_cv_path_MAGIC_CMD"; then + if test -n "$ac_tool_prefix"; then + _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH) + else + MAGIC_CMD=: + fi +fi +])# _LT_PATH_MAGIC + + +# LT_PATH_LD +# ---------- +# find the pathname to the GNU or non-GNU linker +AC_DEFUN([LT_PATH_LD], +[AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_PROG_ECHO_BACKSLASH])dnl + +AC_ARG_WITH([gnu-ld], + [AS_HELP_STRING([--with-gnu-ld], + [assume the C compiler uses GNU ld @<:@default=no@:>@])], + [test no = "$withval" || with_gnu_ld=yes], + [with_gnu_ld=no])dnl + +ac_prog=ld +if test yes = "$GCC"; then + # Check if gcc -print-prog-name=ld gives a path. + AC_MSG_CHECKING([for ld used by $CC]) + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return, which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [[\\/]]* | ?:[[\\/]]*) + re_direlt='/[[^/]][[^/]]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` + while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do + ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD=$ac_prog + ;; + "") + # If it fails, then pretend we aren't using GCC. + ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test yes = "$with_gnu_ld"; then + AC_MSG_CHECKING([for GNU ld]) +else + AC_MSG_CHECKING([for non-GNU ld]) +fi +AC_CACHE_VAL(lt_cv_path_LD, +[if test -z "$LD"; then + lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS=$lt_save_ifs + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + lt_cv_path_LD=$ac_dir/$ac_prog + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some variants of GNU ld only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. 
+ case `"$lt_cv_path_LD" -v 2>&1 &1 conftest.i +cat conftest.i conftest.i >conftest2.i +: ${lt_DD:=$DD} +AC_PATH_PROGS_FEATURE_CHECK([lt_DD], [dd], +[if "$ac_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then + cmp -s conftest.i conftest.out \ + && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=: +fi]) +rm -f conftest.i conftest2.i conftest.out]) +])# _LT_PATH_DD + + +# _LT_CMD_TRUNCATE +# ---------------- +# find command to truncate a binary pipe +m4_defun([_LT_CMD_TRUNCATE], +[m4_require([_LT_PATH_DD]) +AC_CACHE_CHECK([how to truncate binary pipes], [lt_cv_truncate_bin], +[printf 0123456789abcdef0123456789abcdef >conftest.i +cat conftest.i conftest.i >conftest2.i +lt_cv_truncate_bin= +if "$ac_cv_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then + cmp -s conftest.i conftest.out \ + && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1" +fi +rm -f conftest.i conftest2.i conftest.out +test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q"]) +_LT_DECL([lt_truncate_bin], [lt_cv_truncate_bin], [1], + [Command to truncate a binary pipe]) +])# _LT_CMD_TRUNCATE + + +# _LT_CHECK_MAGIC_METHOD +# ---------------------- +# how to check for library dependencies +# -- PORTME fill in with the dynamic library characteristics +m4_defun([_LT_CHECK_MAGIC_METHOD], +[m4_require([_LT_DECL_EGREP]) +m4_require([_LT_DECL_OBJDUMP]) +AC_CACHE_CHECK([how to recognize dependent libraries], +lt_cv_deplibs_check_method, +[lt_cv_file_magic_cmd='$MAGIC_CMD' +lt_cv_file_magic_test_file= +lt_cv_deplibs_check_method='unknown' +# Need to set the preceding variable on all platforms that support +# interlibrary dependencies. +# 'none' -- dependencies not supported. +# 'unknown' -- same as none, but documents that we really don't know. +# 'pass_all' -- all dependencies passed with no checks. +# 'test_compile' -- check by making test program. +# 'file_magic [[regex]]' -- check by looking for files in library path +# that responds to the $file_magic_cmd with a given extended regex. +# If you have 'file' or equivalent on your system and you're not sure +# whether 'pass_all' will *always* work, you probably want this one. + +case $host_os in +aix[[4-9]]*) + lt_cv_deplibs_check_method=pass_all + ;; + +beos*) + lt_cv_deplibs_check_method=pass_all + ;; + +bsdi[[45]]*) + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib)' + lt_cv_file_magic_cmd='/usr/bin/file -L' + lt_cv_file_magic_test_file=/shlib/libc.so + ;; + +cygwin*) + # func_win32_libid is a shell function defined in ltmain.sh + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + ;; + +mingw* | pw32*) + # Base MSYS/MinGW do not provide the 'file' command needed by + # func_win32_libid shell function, so use a weaker test based on 'objdump', + # unless we find 'file', for example because we are cross-compiling. + if ( file / ) >/dev/null 2>&1; then + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else + # Keep this pattern in sync with the one in func_win32_libid. + lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; + +cegcc*) + # use the weaker test based on 'objdump'. See mingw*. + lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' 
+ lt_cv_file_magic_cmd='$OBJDUMP -f' + ;; + +darwin* | rhapsody*) + lt_cv_deplibs_check_method=pass_all + ;; + +freebsd* | dragonfly*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. + lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac + else + lt_cv_deplibs_check_method=pass_all + fi + ;; + +haiku*) + lt_cv_deplibs_check_method=pass_all + ;; + +hpux10.20* | hpux11*) + lt_cv_file_magic_cmd=/usr/bin/file + case $host_cpu in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64' + lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so + ;; + hppa*64*) + [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]'] + lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl + ;; + *) + lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]]\.[[0-9]]) shared library' + lt_cv_file_magic_test_file=/usr/lib/libc.sl + ;; + esac + ;; + +interix[[3-9]]*) + # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$' + ;; + +irix5* | irix6* | nonstopux*) + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; + esac + lt_cv_deplibs_check_method=pass_all + ;; + +# This must be glibc/ELF. +linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + lt_cv_deplibs_check_method=pass_all + ;; + +netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$' + fi + ;; + +newos6*) + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; + +*nto* | *qnx*) + lt_cv_deplibs_check_method=pass_all + ;; + +openbsd* | bitrig*) + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' + fi + ;; + +osf3* | osf4* | osf5*) + lt_cv_deplibs_check_method=pass_all + ;; + +rdos*) + lt_cv_deplibs_check_method=pass_all + ;; + +solaris*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv4 | sysv4.3*) + case $host_vendor in + motorola) + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]' + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + ;; + ncr) + lt_cv_deplibs_check_method=pass_all + ;; + sequent) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' + ;; + sni) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib" + 
lt_cv_file_magic_test_file=/lib/libc.so + ;; + siemens) + lt_cv_deplibs_check_method=pass_all + ;; + pc) + lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; + +tpf*) + lt_cv_deplibs_check_method=pass_all + ;; +os2*) + lt_cv_deplibs_check_method=pass_all + ;; +esac +]) + +file_magic_glob= +want_nocaseglob=no +if test "$build" = "$host"; then + case $host_os in + mingw* | pw32*) + if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then + want_nocaseglob=yes + else + file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[[\1]]\/[[\1]]\/g;/g"` + fi + ;; + esac +fi + +file_magic_cmd=$lt_cv_file_magic_cmd +deplibs_check_method=$lt_cv_deplibs_check_method +test -z "$deplibs_check_method" && deplibs_check_method=unknown + +_LT_DECL([], [deplibs_check_method], [1], + [Method to check whether dependent libraries are shared objects]) +_LT_DECL([], [file_magic_cmd], [1], + [Command to use when deplibs_check_method = "file_magic"]) +_LT_DECL([], [file_magic_glob], [1], + [How to find potential files when deplibs_check_method = "file_magic"]) +_LT_DECL([], [want_nocaseglob], [1], + [Find potential files using nocaseglob when deplibs_check_method = "file_magic"]) +])# _LT_CHECK_MAGIC_METHOD + + +# LT_PATH_NM +# ---------- +# find the pathname to a BSD- or MS-compatible name lister +AC_DEFUN([LT_PATH_NM], +[AC_REQUIRE([AC_PROG_CC])dnl +AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM, +[if test -n "$NM"; then + # Let the user override the test. + lt_cv_path_NM=$NM +else + lt_nm_to_check=${ac_tool_prefix}nm + if test -n "$ac_tool_prefix" && test "$build" = "$host"; then + lt_nm_to_check="$lt_nm_to_check nm" + fi + for lt_tmp_nm in $lt_nm_to_check; do + lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR + for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do + IFS=$lt_save_ifs + test -z "$ac_dir" && ac_dir=. + tmp_nm=$ac_dir/$lt_tmp_nm + if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then + # Check to see if the nm accepts a BSD-compat flag. + # Adding the 'sed 1q' prevents false positives on HP-UX, which says: + # nm: unknown option "B" ignored + # Tru64's nm complains that /dev/null is an invalid object file + # MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty + case $build_os in + mingw*) lt_bad_file=conftest.nm/nofile ;; + *) lt_bad_file=/dev/null ;; + esac + case `"$tmp_nm" -B $lt_bad_file 2>&1 | sed '1q'` in + *$lt_bad_file* | *'Invalid file or object type'*) + lt_cv_path_NM="$tmp_nm -B" + break 2 + ;; + *) + case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in + */dev/null*) + lt_cv_path_NM="$tmp_nm -p" + break 2 + ;; + *) + lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but + continue # so that we can try to find one that supports BSD flags + ;; + esac + ;; + esac + fi + done + IFS=$lt_save_ifs + done + : ${lt_cv_path_NM=no} +fi]) +if test no != "$lt_cv_path_NM"; then + NM=$lt_cv_path_NM +else + # Didn't find any BSD compatible name lister, look for dumpbin. + if test -n "$DUMPBIN"; then : + # Let the user override the test. 
+ else + AC_CHECK_TOOLS(DUMPBIN, [dumpbin "link -dump"], :) + case `$DUMPBIN -symbols -headers /dev/null 2>&1 | sed '1q'` in + *COFF*) + DUMPBIN="$DUMPBIN -symbols -headers" + ;; + *) + DUMPBIN=: + ;; + esac + fi + AC_SUBST([DUMPBIN]) + if test : != "$DUMPBIN"; then + NM=$DUMPBIN + fi +fi +test -z "$NM" && NM=nm +AC_SUBST([NM]) +_LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl + +AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface], + [lt_cv_nm_interface="BSD nm" + echo "int some_variable = 0;" > conftest.$ac_ext + (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$ac_compile" 2>conftest.err) + cat conftest.err >&AS_MESSAGE_LOG_FD + (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD) + (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) + cat conftest.err >&AS_MESSAGE_LOG_FD + (eval echo "\"\$as_me:$LINENO: output\"" >&AS_MESSAGE_LOG_FD) + cat conftest.out >&AS_MESSAGE_LOG_FD + if $GREP 'External.*some_variable' conftest.out > /dev/null; then + lt_cv_nm_interface="MS dumpbin" + fi + rm -f conftest*]) +])# LT_PATH_NM + +# Old names: +AU_ALIAS([AM_PROG_NM], [LT_PATH_NM]) +AU_ALIAS([AC_PROG_NM], [LT_PATH_NM]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_PROG_NM], []) +dnl AC_DEFUN([AC_PROG_NM], []) + +# _LT_CHECK_SHAREDLIB_FROM_LINKLIB +# -------------------------------- +# how to determine the name of the shared library +# associated with a specific link library. +# -- PORTME fill in with the dynamic library characteristics +m4_defun([_LT_CHECK_SHAREDLIB_FROM_LINKLIB], +[m4_require([_LT_DECL_EGREP]) +m4_require([_LT_DECL_OBJDUMP]) +m4_require([_LT_DECL_DLLTOOL]) +AC_CACHE_CHECK([how to associate runtime and link libraries], +lt_cv_sharedlib_from_linklib_cmd, +[lt_cv_sharedlib_from_linklib_cmd='unknown' + +case $host_os in +cygwin* | mingw* | pw32* | cegcc*) + # two different shell functions defined in ltmain.sh; + # decide which one to use based on capabilities of $DLLTOOL + case `$DLLTOOL --help 2>&1` in + *--identify-strict*) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib + ;; + *) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback + ;; + esac + ;; +*) + # fallback: assume linklib IS sharedlib + lt_cv_sharedlib_from_linklib_cmd=$ECHO + ;; +esac +]) +sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd +test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO + +_LT_DECL([], [sharedlib_from_linklib_cmd], [1], + [Command to associate shared and link libraries]) +])# _LT_CHECK_SHAREDLIB_FROM_LINKLIB + + +# _LT_PATH_MANIFEST_TOOL +# ---------------------- +# locate the manifest tool +m4_defun([_LT_PATH_MANIFEST_TOOL], +[AC_CHECK_TOOL(MANIFEST_TOOL, mt, :) +test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt +AC_CACHE_CHECK([if $MANIFEST_TOOL is a manifest tool], [lt_cv_path_mainfest_tool], + [lt_cv_path_mainfest_tool=no + echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&AS_MESSAGE_LOG_FD + $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out + cat conftest.err >&AS_MESSAGE_LOG_FD + if $GREP 'Manifest Tool' conftest.out > /dev/null; then + lt_cv_path_mainfest_tool=yes + fi + rm -f conftest*]) +if test yes != "$lt_cv_path_mainfest_tool"; then + MANIFEST_TOOL=: +fi +_LT_DECL([], [MANIFEST_TOOL], [1], [Manifest tool])dnl +])# _LT_PATH_MANIFEST_TOOL + + +# _LT_DLL_DEF_P([FILE]) +# --------------------- +# True iff FILE is a Windows DLL '.def' file. 
+# Keep in sync with func_dll_def_p in the libtool script +AC_DEFUN([_LT_DLL_DEF_P], +[dnl + test DEF = "`$SED -n dnl + -e '\''s/^[[ ]]*//'\'' dnl Strip leading whitespace + -e '\''/^\(;.*\)*$/d'\'' dnl Delete empty lines and comments + -e '\''s/^\(EXPORTS\|LIBRARY\)\([[ ]].*\)*$/DEF/p'\'' dnl + -e q dnl Only consider the first "real" line + $1`" dnl +])# _LT_DLL_DEF_P + + +# LT_LIB_M +# -------- +# check for math library +AC_DEFUN([LT_LIB_M], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +LIBM= +case $host in +*-*-beos* | *-*-cegcc* | *-*-cygwin* | *-*-haiku* | *-*-pw32* | *-*-darwin*) + # These system don't have libm, or don't need it + ;; +*-ncr-sysv4.3*) + AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM=-lmw) + AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm") + ;; +*) + AC_CHECK_LIB(m, cos, LIBM=-lm) + ;; +esac +AC_SUBST([LIBM]) +])# LT_LIB_M + +# Old name: +AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_CHECK_LIBM], []) + + +# _LT_COMPILER_NO_RTTI([TAGNAME]) +# ------------------------------- +m4_defun([_LT_COMPILER_NO_RTTI], +[m4_require([_LT_TAG_COMPILER])dnl + +_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= + +if test yes = "$GCC"; then + case $cc_basename in + nvcc*) + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -Xcompiler -fno-builtin' ;; + *) + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' ;; + esac + + _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions], + lt_cv_prog_compiler_rtti_exceptions, + [-fno-rtti -fno-exceptions], [], + [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"]) +fi +_LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1], + [Compiler flag to turn off builtin functions]) +])# _LT_COMPILER_NO_RTTI + + +# _LT_CMD_GLOBAL_SYMBOLS +# ---------------------- +m4_defun([_LT_CMD_GLOBAL_SYMBOLS], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([AC_PROG_AWK])dnl +AC_REQUIRE([LT_PATH_NM])dnl +AC_REQUIRE([LT_PATH_LD])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_TAG_COMPILER])dnl + +# Check for command to grab the raw symbol name followed by C symbol from nm. +AC_MSG_CHECKING([command to parse $NM output from $compiler object]) +AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], +[ +# These are sane defaults that work on at least a few old systems. +# [They come from Ultrix. What could be older than Ultrix?!! ;)] + +# Character class describing NM global symbol codes. +symcode='[[BCDEGRST]]' + +# Regexp to match symbols that can be accessed directly from C. +sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' + +# Define system-specific variables. +case $host_os in +aix*) + symcode='[[BCDT]]' + ;; +cygwin* | mingw* | pw32* | cegcc*) + symcode='[[ABCDGISTW]]' + ;; +hpux*) + if test ia64 = "$host_cpu"; then + symcode='[[ABCDEGRST]]' + fi + ;; +irix* | nonstopux*) + symcode='[[BCDEGRST]]' + ;; +osf*) + symcode='[[BCDEGQRST]]' + ;; +solaris*) + symcode='[[BDRT]]' + ;; +sco3.2v5*) + symcode='[[DT]]' + ;; +sysv4.2uw2*) + symcode='[[DT]]' + ;; +sysv5* | sco5v6* | unixware* | OpenUNIX*) + symcode='[[ABDT]]' + ;; +sysv4) + symcode='[[DFNSTU]]' + ;; +esac + +# If we're using GNU nm, then use its standard symbol codes. +case `$NM -V 2>&1` in +*GNU* | *'with BFD'*) + symcode='[[ABCDGIRSTW]]' ;; +esac + +if test "$lt_cv_nm_interface" = "MS dumpbin"; then + # Gets list of data symbols to import. 
+ lt_cv_sys_global_symbol_to_import="sed -n -e 's/^I .* \(.*\)$/\1/p'" + # Adjust the below global symbol transforms to fixup imported variables. + lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'" + lt_c_name_hook=" -e 's/^I .* \(.*\)$/ {\"\1\", (void *) 0},/p'" + lt_c_name_lib_hook="\ + -e 's/^I .* \(lib.*\)$/ {\"\1\", (void *) 0},/p'\ + -e 's/^I .* \(.*\)$/ {\"lib\1\", (void *) 0},/p'" +else + # Disable hooks by default. + lt_cv_sys_global_symbol_to_import= + lt_cdecl_hook= + lt_c_name_hook= + lt_c_name_lib_hook= +fi + +# Transform an extracted symbol line into a proper C declaration. +# Some systems (esp. on ia64) link data and code symbols differently, +# so use this general approach. +lt_cv_sys_global_symbol_to_cdecl="sed -n"\ +$lt_cdecl_hook\ +" -e 's/^T .* \(.*\)$/extern int \1();/p'"\ +" -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'" + +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="sed -n"\ +$lt_c_name_hook\ +" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ +" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/p'" + +# Transform an extracted symbol line into symbol name with lib prefix and +# symbol address. +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n"\ +$lt_c_name_lib_hook\ +" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ +" -e 's/^$symcode$symcode* .* \(lib.*\)$/ {\"\1\", (void *) \&\1},/p'"\ +" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"lib\1\", (void *) \&\1},/p'" + +# Handle CRLF in mingw tool chain +opt_cr= +case $build_os in +mingw*) + opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp + ;; +esac + +# Try without a prefix underscore, then with it. +for ac_symprfx in "" "_"; do + + # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. + symxfrm="\\1 $ac_symprfx\\2 \\2" + + # Write the raw and C identifiers. + if test "$lt_cv_nm_interface" = "MS dumpbin"; then + # Fake it for dumpbin and say T for any non-static function, + # D for any global variable and I for any imported variable. + # Also find C++ and __fastcall symbols from MSVC++, + # which start with @ or ?. + lt_cv_sys_global_symbol_pipe="$AWK ['"\ +" {last_section=section; section=\$ 3};"\ +" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ +" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ +" /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\ +" /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\ +" /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\ +" \$ 0!~/External *\|/{next};"\ +" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ +" {if(hide[section]) next};"\ +" {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\ +" {split(\$ 0,a,/\||\r/); split(a[2],s)};"\ +" s[1]~/^[@?]/{print f,s[1],s[1]; next};"\ +" s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\ +" ' prfx=^$ac_symprfx]" + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi + lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" + + # Check to see that the pipe works correctly. 
+ pipe_works=no + + rm -f conftest* + cat > conftest.$ac_ext <<_LT_EOF +#ifdef __cplusplus +extern "C" { +#endif +char nm_test_var; +void nm_test_func(void); +void nm_test_func(void){} +#ifdef __cplusplus +} +#endif +int main(){nm_test_var='a';nm_test_func();return(0);} +_LT_EOF + + if AC_TRY_EVAL(ac_compile); then + # Now try to grab the symbols. + nlist=conftest.nm + $ECHO "$as_me:$LINENO: $NM conftest.$ac_objext | $lt_cv_sys_global_symbol_pipe > $nlist" >&AS_MESSAGE_LOG_FD + if eval "$NM" conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist 2>&AS_MESSAGE_LOG_FD && test -s "$nlist"; then + # Try sorting and uniquifying the output. + if sort "$nlist" | uniq > "$nlist"T; then + mv -f "$nlist"T "$nlist" + else + rm -f "$nlist"T + fi + + # Make sure that we snagged all the symbols we need. + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE +/* DATA imports from DLLs on WIN32 can't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT@&t@_DLSYM_CONST +#elif defined __osf__ +/* This system does not cope well with relocations in const data. */ +# define LT@&t@_DLSYM_CONST +#else +# define LT@&t@_DLSYM_CONST const +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +_LT_EOF + # Now generate the symbol file. + eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' + + cat <<_LT_EOF >> conftest.$ac_ext + +/* The mapping between symbol names and symbols. */ +LT@&t@_DLSYM_CONST struct { + const char *name; + void *address; +} +lt__PROGRAM__LTX_preloaded_symbols[[]] = +{ + { "@PROGRAM@", (void *) 0 }, +_LT_EOF + $SED "s/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext + cat <<\_LT_EOF >> conftest.$ac_ext + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt__PROGRAM__LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif +_LT_EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext + lt_globsym_save_LIBS=$LIBS + lt_globsym_save_CFLAGS=$CFLAGS + LIBS=conftstm.$ac_objext + CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" + if AC_TRY_EVAL(ac_link) && test -s conftest$ac_exeext; then + pipe_works=yes + fi + LIBS=$lt_globsym_save_LIBS + CFLAGS=$lt_globsym_save_CFLAGS + else + echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD + fi + else + echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD + fi + else + echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD + fi + else + echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD + cat conftest.$ac_ext >&5 + fi + rm -rf conftest* conftst* + + # Do not use the global_symbol_pipe unless it works. + if test yes = "$pipe_works"; then + break + else + lt_cv_sys_global_symbol_pipe= + fi +done +]) +if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= +fi +if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then + AC_MSG_RESULT(failed) +else + AC_MSG_RESULT(ok) +fi + +# Response file support. 
+if test "$lt_cv_nm_interface" = "MS dumpbin"; then + nm_file_list_spec='@' +elif $NM --help 2>/dev/null | grep '[[@]]FILE' >/dev/null; then + nm_file_list_spec='@' +fi + +_LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1], + [Take the output of nm and produce a listing of raw symbols and C names]) +_LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1], + [Transform the output of nm in a proper C declaration]) +_LT_DECL([global_symbol_to_import], [lt_cv_sys_global_symbol_to_import], [1], + [Transform the output of nm into a list of symbols to manually relocate]) +_LT_DECL([global_symbol_to_c_name_address], + [lt_cv_sys_global_symbol_to_c_name_address], [1], + [Transform the output of nm in a C name address pair]) +_LT_DECL([global_symbol_to_c_name_address_lib_prefix], + [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1], + [Transform the output of nm in a C name address pair when lib prefix is needed]) +_LT_DECL([nm_interface], [lt_cv_nm_interface], [1], + [The name lister interface]) +_LT_DECL([], [nm_file_list_spec], [1], + [Specify filename containing input files for $NM]) +]) # _LT_CMD_GLOBAL_SYMBOLS + + +# _LT_COMPILER_PIC([TAGNAME]) +# --------------------------- +m4_defun([_LT_COMPILER_PIC], +[m4_require([_LT_TAG_COMPILER])dnl +_LT_TAGVAR(lt_prog_compiler_wl, $1)= +_LT_TAGVAR(lt_prog_compiler_pic, $1)= +_LT_TAGVAR(lt_prog_compiler_static, $1)= + +m4_if([$1], [CXX], [ + # C++ specific cases for pic, static, wl, etc. + if test yes = "$GXX"; then + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test ia64 = "$host_cpu"; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the '-m68020' flag to GCC prevents building anything better, + # like '-m68040'. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + case $host_os in + os2*) + _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static' + ;; + esac + ;; + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' + ;; + *djgpp*) + # DJGPP does not support shared libraries at all + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + ;; + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + _LT_TAGVAR(lt_prog_compiler_static, $1)= + ;; + interix[[3-9]]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. 
+ ;; + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic + fi + ;; + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + else + case $host_os in + aix[[4-9]]*) + # All AIX code is PIC. + if test ia64 = "$host_cpu"; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + else + _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' + fi + ;; + chorus*) + case $cc_basename in + cxch68*) + # Green Hills C++ Compiler + # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" + ;; + esac + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; + dgux*) + case $cc_basename in + ec++*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + ;; + ghcx*) + # Green Hills C++ Compiler + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + *) + ;; + esac + ;; + freebsd* | dragonfly*) + # FreeBSD uses GNU C++ + ;; + hpux9* | hpux10* | hpux11*) + case $cc_basename in + CC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive' + if test ia64 != "$host_cpu"; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + fi + ;; + aCC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive' + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + ;; + esac + ;; + *) + ;; + esac + ;; + interix*) + # This is c89, which is MS Visual C++ (no shared libs) + # Anyone wants to do a port? + ;; + irix5* | irix6* | nonstopux*) + case $cc_basename in + CC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + # CC pic flag -KPIC is the default. + ;; + *) + ;; + esac + ;; + linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + case $cc_basename in + KCC*) + # KAI C++ Compiler + _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + ecpc* ) + # old Intel C++ for x86_64, which still supported -KPIC. + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + icpc* ) + # Intel C++, used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + cxx*) + # Compaq C++ + # Make sure the PIC flag is empty. 
It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + xlc* | xlC* | bgxl[[cC]]* | mpixl[[cC]]*) + # IBM XL 8.0, 9.0 on PPC and BlueGene + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + ;; + esac + ;; + esac + ;; + lynxos*) + ;; + m88k*) + ;; + mvs*) + case $cc_basename in + cxx*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' + ;; + *) + ;; + esac + ;; + netbsd* | netbsdelf*-gnu) + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' + ;; + RCC*) + # Rational C++ 2.4.1 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + cxx*) + # Digital/Compaq C++ + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + *) + ;; + esac + ;; + psos*) + ;; + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + ;; + gcx*) + # Green Hills C++ Compiler + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + ;; + *) + ;; + esac + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + lcc*) + # Lucid + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + *) + ;; + esac + ;; + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + case $cc_basename in + CC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + ;; + *) + ;; + esac + ;; + vxworks*) + ;; + *) + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + esac + fi +], +[ + if test yes = "$GCC"; then + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test ia64 = "$host_cpu"; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the '-m68020' flag to GCC prevents building anything better, + # like '-m68040'. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. 
+ ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + case $host_os in + os2*) + _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static' + ;; + esac + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' + ;; + + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + _LT_TAGVAR(lt_prog_compiler_static, $1)= + ;; + + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + # +Z the default + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + ;; + + interix[[3-9]]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + enable_shared=no + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic + fi + ;; + + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + + case $cc_basename in + nvcc*) # Cuda Compiler Driver 2.2 + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Xlinker ' + if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)="-Xcompiler $_LT_TAGVAR(lt_prog_compiler_pic, $1)" + fi + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + if test ia64 = "$host_cpu"; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + else + _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' + fi + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' + case $cc_basename in + nagfor*) + # NAG Fortran compiler + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + esac + ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + case $host_os in + os2*) + _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static' + ;; + esac + ;; + + hpux9* | hpux10* | hpux11*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. 
+ case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # PIC (with -KPIC) is the default. + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + case $cc_basename in + # old Intel for x86_64, which still supported -KPIC. + ecc*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + # flang / f18. f95 an alias for gfortran or flang on Debian + flang* | f18* | f95*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + # icc used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + icc* | ifort*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + # Lahey Fortran 8.1. + lf95*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared' + _LT_TAGVAR(lt_prog_compiler_static, $1)='--static' + ;; + nagfor*) + # NAG Fortran compiler + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + tcc*) + # Fabrice Bellard et al's Tiny C Compiler + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + ccc*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # All Alpha code is PIC. 
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + xl* | bgxl* | bgf* | mpixl*) + # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [[1-7]].* | *Sun*Fortran*\ 8.[[0-3]]*) + # Sun Fortran 8.3 passes all unrecognized flags to the linker + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='' + ;; + *Sun\ F* | *Sun*Fortran*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + ;; + *Sun\ C*) + # Sun C 5.9 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + ;; + *Intel*\ [[CF]]*Compiler*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + *Portland\ Group*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + esac + ;; + esac + ;; + + newsos6) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + + osf3* | osf4* | osf5*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # All OSF/1 code is PIC. + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + rdos*) + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + solaris*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + case $cc_basename in + f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; + *) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; + esac + ;; + + sunos4*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + unicos*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + + uts4*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + *) + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + esac + fi +]) +case $host_os in + # For platforms that do not support PIC, -DPIC is meaningless: + *djgpp*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])" + ;; +esac + +AC_CACHE_CHECK([for 
$compiler option to produce PIC], + [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)], + [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_prog_compiler_pic, $1)]) +_LT_TAGVAR(lt_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_cv_prog_compiler_pic, $1) + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then + _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works], + [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)], + [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [], + [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in + "" | " "*) ;; + *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;; + esac], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) +fi +_LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1], + [Additional compiler flags for building library objects]) + +_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1], + [How to pass a linker flag through the compiler]) +# +# Check to make sure the static flag actually works. +# +wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\" +_LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works], + _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1), + $lt_tmp_static_flag, + [], + [_LT_TAGVAR(lt_prog_compiler_static, $1)=]) +_LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1], + [Compiler flag to prevent dynamic linking]) +])# _LT_COMPILER_PIC + + +# _LT_LINKER_SHLIBS([TAGNAME]) +# ---------------------------- +# See if the linker supports building shared libraries. +m4_defun([_LT_LINKER_SHLIBS], +[AC_REQUIRE([LT_PATH_LD])dnl +AC_REQUIRE([LT_PATH_NM])dnl +m4_require([_LT_PATH_MANIFEST_TOOL])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl +m4_require([_LT_TAG_COMPILER])dnl +AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) +m4_if([$1], [CXX], [ + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] + case $host_os in + aix[[4-9]]*) + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to GNU nm, but means don't demangle to AIX nm. + # Without the "-l" option, or with the "-B" option, AIX nm treats + # weak defined symbols like other global defined symbols, whereas + # GNU nm marks them as "W". + # While the 'weak' keyword is ignored in the Export File, we need + # it in the Import File for the 'aix-soname' feature, so we have + # to replace the "-B" option with "-P" for AIX nm. 
+ if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' + else + _LT_TAGVAR(export_symbols_cmds, $1)='`func_echo_all $NM | $SED -e '\''s/B\([[^B]]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && ([substr](\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' + fi + ;; + pw32*) + _LT_TAGVAR(export_symbols_cmds, $1)=$ltdll_cmds + ;; + cygwin* | mingw* | cegcc*) + case $cc_basename in + cl*) + _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' + ;; + *) + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] + ;; + esac + ;; + linux* | k*bsd*-gnu | gnu*) + _LT_TAGVAR(link_all_deplibs, $1)=no + ;; + *) + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + ;; + esac +], [ + runpath_var= + _LT_TAGVAR(allow_undefined_flag, $1)= + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(archive_cmds, $1)= + _LT_TAGVAR(archive_expsym_cmds, $1)= + _LT_TAGVAR(compiler_needs_object, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + _LT_TAGVAR(export_dynamic_flag_spec, $1)= + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(hardcode_automatic, $1)=no + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_TAGVAR(hardcode_libdir_separator, $1)= + _LT_TAGVAR(hardcode_minus_L, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + _LT_TAGVAR(inherit_rpath, $1)=no + _LT_TAGVAR(link_all_deplibs, $1)=unknown + _LT_TAGVAR(module_cmds, $1)= + _LT_TAGVAR(module_expsym_cmds, $1)= + _LT_TAGVAR(old_archive_from_new_cmds, $1)= + _LT_TAGVAR(old_archive_from_expsyms_cmds, $1)= + _LT_TAGVAR(thread_safe_flag_spec, $1)= + _LT_TAGVAR(whole_archive_flag_spec, $1)= + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + _LT_TAGVAR(include_expsyms, $1)= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ' (' and ')$', so one must not match beginning or + # end of line. Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc', + # as well as any symbol that contains 'd'. + _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. 
+ # Exclude shared library initialization/finalization symbols. +dnl Note also adjust exclude_expsyms for C++ above. + extract_expsyms_cmds= + + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test yes != "$GCC"; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd* | bitrig*) + with_gnu_ld=no + ;; + linux* | k*bsd*-gnu | gnu*) + _LT_TAGVAR(link_all_deplibs, $1)=no + ;; + esac + + _LT_TAGVAR(ld_shlibs, $1)=yes + + # On some targets, GNU ld is compatible enough with the native linker + # that we're better off using the native interface for both. + lt_use_gnu_ld_interface=no + if test yes = "$with_gnu_ld"; then + case $host_os in + aix*) + # The AIX port of GNU ld has always aspired to compatibility + # with the native linker. However, as the warning in the GNU ld + # block says, versions before 2.19.5* couldn't really create working + # shared libraries, regardless of the interface used. + case `$LD -v 2>&1` in + *\ \(GNU\ Binutils\)\ 2.19.5*) ;; + *\ \(GNU\ Binutils\)\ 2.[[2-9]]*) ;; + *\ \(GNU\ Binutils\)\ [[3-9]]*) ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + fi + + if test yes = "$lt_use_gnu_ld_interface"; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='$wl' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then + _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' + else + _LT_TAGVAR(whole_archive_flag_spec, $1)= + fi + supports_anon_versioning=no + case `$LD -v | $SED -e 's/([^)]\+)\s\+//' 2>&1` in + *GNU\ gold*) supports_anon_versioning=yes ;; + *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix[[3-9]]*) + # On AIX/PPC, the GNU linker is very broken + if test ia64 != "$host_cpu"; then + _LT_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 + +*** Warning: the GNU linker, at least up to release 2.19, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to install binutils +*** 2.20 or above, or modify your PATH so that a non-GNU linker is found. +*** You will then need to restart the configuration process. 
+ +_LT_EOF + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='' + ;; + m68k) + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, + # as there is no search path for DLLs. + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-all-symbols' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file, use it as + # is; otherwise, prepend EXPORTS... 
+ _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + haiku*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + os2*) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + shrext_cmds=.dll + _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + prefix_cmds="$SED"~ + if test EXPORTS = "`$SED 1q $export_symbols`"; then + prefix_cmds="$prefix_cmds -e 1d"; + fi~ + prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ + cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + _LT_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + ;; + + interix[[3-9]]*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) + tmp_diet=no + if test linux-dietlibc = "$host_os"; then + case $cc_basename in + diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) + esac + fi + if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ + && test no = "$tmp_diet" + then + tmp_addflag=' $pic_flag' + tmp_sharedflag='-shared' + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group f77 and f90 compilers + _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + lf95*) # Lahey Fortran 8.1 + _LT_TAGVAR(whole_archive_flag_spec, $1)= + tmp_sharedflag='--shared' ;; + nagfor*) # NAGFOR 5.3 + tmp_sharedflag='-Wl,-shared' ;; + xl[[cC]]* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below) + tmp_sharedflag='-qmkshrobj' + tmp_addflag= ;; + nvcc*) # Cuda Compiler Driver 2.2 + _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + _LT_TAGVAR(compiler_needs_object, $1)=yes + ;; + esac + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) # Sun C 5.9 + _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + _LT_TAGVAR(compiler_needs_object, $1)=yes + tmp_sharedflag='-G' ;; + *Sun\ F*) # Sun Fortran 8.3 + tmp_sharedflag='-G' ;; + esac + _LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + + if test yes = "$supports_anon_versioning"; then + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' + fi + + case $cc_basename in + tcc*) + _LT_TAGVAR(export_dynamic_flag_spec, $1)='-rdynamic' + ;; + xlf* | bgf* | bgxlf* | mpixlf*) + # 
IBM XL Fortran 10.1 on PPC cannot create shared libs itself + _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' + _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' + if test yes = "$supports_anon_versioning"; then + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then + _LT_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*) + _LT_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + ;; + *) + # For security reasons, it is highly recommended that you always + # use absolute paths for naming shared libraries, and exclude the + # DT_RUNPATH tag from executables and libraries. But doing so + # requires that you compile everything twice, which is a pain. 
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + sunos4*) + _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + + if test no = "$_LT_TAGVAR(ld_shlibs, $1)"; then + runpath_var= + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_TAGVAR(export_dynamic_flag_spec, $1)= + _LT_TAGVAR(whole_archive_flag_spec, $1)= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=yes + _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + _LT_TAGVAR(hardcode_minus_L, $1)=yes + if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + _LT_TAGVAR(hardcode_direct, $1)=unsupported + fi + ;; + + aix[[4-9]]*) + if test ia64 = "$host_cpu"; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag= + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to GNU nm, but means don't demangle to AIX nm. + # Without the "-l" option, or with the "-B" option, AIX nm treats + # weak defined symbols like other global defined symbols, whereas + # GNU nm marks them as "W". + # While the 'weak' keyword is ignored in the Export File, we need + # it in the Import File for the 'aix-soname' feature, so we have + # to replace the "-B" option with "-P" for AIX nm. 
+ if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' + else + _LT_TAGVAR(export_symbols_cmds, $1)='`func_echo_all $NM | $SED -e '\''s/B\([[^B]]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && ([substr](\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # have runtime linking enabled, and use it for executables. + # For shared libraries, we enable/disable runtime linking + # depending on the kind of the shared library created - + # when "with_aix_soname,aix_use_runtimelinking" is: + # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables + # "aix,yes" lib.so shared, rtl:yes, for executables + # lib.a static archive + # "both,no" lib.so.V(shr.o) shared, rtl:yes + # lib.a(lib.so.V) shared, rtl:no, for executables + # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables + # lib.a(lib.so.V) shared, rtl:no + # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables + # lib.a static archive + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) + for ld_flag in $LDFLAGS; do + if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then + aix_use_runtimelinking=yes + break + fi + done + if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then + # With aix-soname=svr4, we create the lib.so.V shared archives only, + # so we don't have lib.a shared libs to link our executables. + # We have to force runtime linking in this case. + aix_use_runtimelinking=yes + LDFLAGS="$LDFLAGS -Wl,-brtl" + fi + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + _LT_TAGVAR(archive_cmds, $1)='' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='$wl-f,' + case $with_aix_soname,$aix_use_runtimelinking in + aix,*) ;; # traditional, no import file + svr4,* | *,yes) # use import file + # The Import File defines what to hardcode. 
+ _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=no + ;; + esac + + if test yes = "$GCC"; then + case $host_os in aix4.[[012]]|aix4.[[012]].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`$CC -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + _LT_TAGVAR(hardcode_direct, $1)=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)= + fi + ;; + esac + shared_flag='-shared' + if test yes = "$aix_use_runtimelinking"; then + shared_flag="$shared_flag "'$wl-G' + fi + # Need to ensure runtime linking is disabled for the traditional + # shared library, or the linker may eventually find shared libraries + # /with/ Import File - we do not want to mix them. + shared_flag_aix='-shared' + shared_flag_svr4='-shared $wl-G' + else + # not using gcc + if test ia64 = "$host_cpu"; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test yes = "$aix_use_runtimelinking"; then + shared_flag='$wl-G' + else + shared_flag='$wl-bM:SRE' + fi + shared_flag_aix='$wl-bM:SRE' + shared_flag_svr4='$wl-G' + fi + fi + + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + _LT_TAGVAR(always_export_symbols, $1)=yes + if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + _LT_TAGVAR(allow_undefined_flag, $1)='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. + _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag + else + if test ia64 = "$host_cpu"; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $libdir:/usr/lib:/lib' + _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + _LT_TAGVAR(no_undefined_flag, $1)=' $wl-bernotok' + _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-berok' + if test yes = "$with_gnu_ld"; then + # We only use this code for GNU lds that support --whole-archive. 
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + _LT_TAGVAR(archive_expsym_cmds, $1)='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' + # -brtl affects multiple linker settings, -berok does not and is overridden later + compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([[, ]]\\)%-berok\\1%g"`' + if test svr4 != "$with_aix_soname"; then + # This is similar to how AIX traditionally builds its shared libraries. + _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' + fi + if test aix != "$with_aix_soname"; then + _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' + else + # used by -dlpreopen to get the symbols + _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$MV $output_objdir/$realname.d/$soname $output_objdir' + fi + _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$RM -r $output_objdir/$realname.d' + fi + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='' + ;; + m68k) + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + ;; + + bsdi[[45]]*) + _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + case $cc_basename in + cl*) + # Native MSVC + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. 
+ shrext_cmds=.dll + # FIXME: Setting linknames here is a bad hack. + _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' + _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then + cp "$export_symbols" "$output_objdir/$soname.def"; + echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; + else + $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. + # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1,DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' + # Don't use ranlib + _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' + _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile=$lt_outputfile.exe + lt_tool_outputfile=$lt_tool_outputfile.exe + ;; + esac~ + if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # Assume MSVC wrapper + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=.dll + # FIXME: Setting linknames here is a bad hack. + _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' + # FIXME: Should let the user specify the lib program. + _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + ;; + esac + ;; + + darwin* | rhapsody*) + _LT_DARWIN_LINKER_FEATURES($1) + ;; + + dgux*) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. 
+ freebsd2.*) + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | dragonfly*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + hpux9*) + if test yes = "$GCC"; then + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' + else + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(hardcode_direct, $1)=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' + ;; + + hpux10*) + if test yes,no = "$GCC,$with_gnu_ld"; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test no = "$with_gnu_ld"; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes + fi + ;; + + hpux11*) + if test yes,no = "$GCC,$with_gnu_ld"; then + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + m4_if($1, [], [ + # Older versions of the 11.00 compiler do not understand -b yet + # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) + _LT_LINKER_OPTION([if $CC understands -b], + _LT_TAGVAR(lt_cv_prog_compiler__b, $1), [-b], + [_LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'], + [_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'])], + [_LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags']) + ;; + esac + fi + if test no = "$with_gnu_ld"; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + case $host_cpu in + hppa*64*|ia64*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + *) + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + _LT_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test yes = "$GCC"; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. + # This should be the same for all languages, so no per-tag cache variable. 
+ AC_CACHE_CHECK([whether the $host_os linker accepts -exported_symbol], + [lt_cv_irix_exported_symbol], + [save_LDFLAGS=$LDFLAGS + LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null" + AC_LINK_IFELSE( + [AC_LANG_SOURCE( + [AC_LANG_CASE([C], [[int foo (void) { return 0; }]], + [C++], [[int foo (void) { return 0; }]], + [Fortran 77], [[ + subroutine foo + end]], + [Fortran], [[ + subroutine foo + end]])])], + [lt_cv_irix_exported_symbol=yes], + [lt_cv_irix_exported_symbol=no]) + LDFLAGS=$save_LDFLAGS]) + if test yes = "$lt_cv_irix_exported_symbol"; then + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib' + fi + _LT_TAGVAR(link_all_deplibs, $1)=no + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -exports_file $export_symbols -o $lib' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)='no' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(inherit_rpath, $1)=yes + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + linux*) + case $cc_basename in + tcc*) + # Fabrice Bellard et al's Tiny C Compiler + _LT_TAGVAR(ld_shlibs, $1)=yes + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + _LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + newsos6) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *nto* | *qnx*) + ;; + + openbsd* | bitrig*) + if test -f /usr/libexec/ld.so; then + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags $wl-retain-symbols-file,$export_symbols' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' + fi + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + os2*) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + 
_LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + shrext_cmds=.dll + _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + prefix_cmds="$SED"~ + if test EXPORTS = "`$SED 1q $export_symbols`"; then + prefix_cmds="$prefix_cmds -e 1d"; + fi~ + prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ + cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + _LT_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + ;; + + osf3*) + if test yes = "$GCC"; then + _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + else + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)='no' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test yes = "$GCC"; then + _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' + else + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry 
$output_objdir/so_locations -o $lib~$RM $lib.exp' + + # Both c and cxx compiler support -rpath directly + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)='no' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + ;; + + solaris*) + _LT_TAGVAR(no_undefined_flag, $1)=' -z defs' + if test yes = "$GCC"; then + wlarc='$wl' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) + wlarc='' + _LT_TAGVAR(archive_cmds, $1)='$LD -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' + ;; + *) + wlarc='$wl' + _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + ;; + esac + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands '-z linker_flag'. GCC discards it without '$wl', + # but is careful enough not to reorder. + # Supported since Solaris 2.6 (maybe 2.5.1?) + if test yes = "$GCC"; then + _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' + else + _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' + fi + ;; + esac + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + sunos4*) + if test sequent = "$host_vendor"; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. + _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + sysv4) + case $host_vendor in + sni) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. 
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' + _LT_TAGVAR(hardcode_direct, $1)=no + ;; + motorola) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + sysv4.3*) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + _LT_TAGVAR(ld_shlibs, $1)=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) + _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var='LD_RUN_PATH' + + if test yes = "$GCC"; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We CANNOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' + _LT_TAGVAR(allow_undefined_flag, $1)='$wl-z,nodefs' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R,$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Bexport' + runpath_var='LD_RUN_PATH' + + if test yes = "$GCC"; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *) + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + + if test sni = "$host_vendor"; then + case $host in + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Blargedynsym' + ;; + esac + fi + fi +]) +AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) +test no = "$_LT_TAGVAR(ld_shlibs, $1)" && can_build_shared=no + +_LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld + +_LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl +_LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl +_LT_DECL([], [extract_expsyms_cmds], [2], + [The commands to extract the exported symbol list from a shared archive]) + +# +# Do we need to explicitly link libc? +# +case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in +x|xyes) + # Assume -lc should be added + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + + if test yes,yes = "$GCC,$enable_shared"; then + case $_LT_TAGVAR(archive_cmds, $1) in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + AC_CACHE_CHECK([whether -lc should be explicitly linked in], + [lt_cv_]_LT_TAGVAR(archive_cmds_need_lc, $1), + [$RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + if AC_TRY_EVAL(ac_compile) 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) + pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1) + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. 
+ libname=conftest + lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1) + _LT_TAGVAR(allow_undefined_flag, $1)= + if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) + then + lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=no + else + lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=yes + fi + _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* + ]) + _LT_TAGVAR(archive_cmds_need_lc, $1)=$lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1) + ;; + esac + fi + ;; +esac + +_LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0], + [Whether or not to add -lc for building shared libraries]) +_LT_TAGDECL([allow_libtool_libs_with_static_runtimes], + [enable_shared_with_static_runtimes], [0], + [Whether or not to disallow shared libs when runtime libs are static]) +_LT_TAGDECL([], [export_dynamic_flag_spec], [1], + [Compiler flag to allow reflexive dlopens]) +_LT_TAGDECL([], [whole_archive_flag_spec], [1], + [Compiler flag to generate shared objects directly from archives]) +_LT_TAGDECL([], [compiler_needs_object], [1], + [Whether the compiler copes with passing no objects directly]) +_LT_TAGDECL([], [old_archive_from_new_cmds], [2], + [Create an old-style archive from a shared archive]) +_LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2], + [Create a temporary old-style archive to link instead of a shared archive]) +_LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive]) +_LT_TAGDECL([], [archive_expsym_cmds], [2]) +_LT_TAGDECL([], [module_cmds], [2], + [Commands used to build a loadable module if different from building + a shared archive.]) +_LT_TAGDECL([], [module_expsym_cmds], [2]) +_LT_TAGDECL([], [with_gnu_ld], [1], + [Whether we are building with GNU ld or not]) +_LT_TAGDECL([], [allow_undefined_flag], [1], + [Flag that allows shared libraries with undefined symbols to be built]) +_LT_TAGDECL([], [no_undefined_flag], [1], + [Flag that enforces no undefined symbols]) +_LT_TAGDECL([], [hardcode_libdir_flag_spec], [1], + [Flag to hardcode $libdir into a binary during linking. 
+ This must work even if $libdir does not exist]) +_LT_TAGDECL([], [hardcode_libdir_separator], [1], + [Whether we need a single "-rpath" flag with a separated argument]) +_LT_TAGDECL([], [hardcode_direct], [0], + [Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes + DIR into the resulting binary]) +_LT_TAGDECL([], [hardcode_direct_absolute], [0], + [Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes + DIR into the resulting binary and the resulting library dependency is + "absolute", i.e impossible to change by setting $shlibpath_var if the + library is relocated]) +_LT_TAGDECL([], [hardcode_minus_L], [0], + [Set to "yes" if using the -LDIR flag during linking hardcodes DIR + into the resulting binary]) +_LT_TAGDECL([], [hardcode_shlibpath_var], [0], + [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR + into the resulting binary]) +_LT_TAGDECL([], [hardcode_automatic], [0], + [Set to "yes" if building a shared library automatically hardcodes DIR + into the library and all subsequent libraries and executables linked + against it]) +_LT_TAGDECL([], [inherit_rpath], [0], + [Set to yes if linker adds runtime paths of dependent libraries + to runtime path list]) +_LT_TAGDECL([], [link_all_deplibs], [0], + [Whether libtool must link a program against all its dependency libraries]) +_LT_TAGDECL([], [always_export_symbols], [0], + [Set to "yes" if exported symbols are required]) +_LT_TAGDECL([], [export_symbols_cmds], [2], + [The commands to list exported symbols]) +_LT_TAGDECL([], [exclude_expsyms], [1], + [Symbols that should not be listed in the preloaded symbols]) +_LT_TAGDECL([], [include_expsyms], [1], + [Symbols that must always be exported]) +_LT_TAGDECL([], [prelink_cmds], [2], + [Commands necessary for linking programs (against libraries) with templates]) +_LT_TAGDECL([], [postlink_cmds], [2], + [Commands necessary for finishing linking programs]) +_LT_TAGDECL([], [file_list_spec], [1], + [Specify filename containing input files]) +dnl FIXME: Not yet implemented +dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1], +dnl [Compiler flag to generate thread safe objects]) +])# _LT_LINKER_SHLIBS + + +# _LT_LANG_C_CONFIG([TAG]) +# ------------------------ +# Ensure that the configuration variables for a C compiler are suitably +# defined. These variables are subsequently used by _LT_CONFIG to write +# the compiler configuration to 'libtool'. +m4_defun([_LT_LANG_C_CONFIG], +[m4_require([_LT_DECL_EGREP])dnl +lt_save_CC=$CC +AC_LANG_PUSH(C) + +# Source file extension for C test sources. +ac_ext=c + +# Object file extension for compiled C test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(){return(0);}' + +_LT_TAG_COMPILER +# Save the default compiler, since it gets overwritten when the other +# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. +compiler_DEFAULT=$CC + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... 
+if test -n "$compiler"; then + _LT_COMPILER_NO_RTTI($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + LT_SYS_DLOPEN_SELF + _LT_CMD_STRIPLIB + + # Report what library types will actually be built + AC_MSG_CHECKING([if libtool supports shared libraries]) + AC_MSG_RESULT([$can_build_shared]) + + AC_MSG_CHECKING([whether to build shared libraries]) + test no = "$can_build_shared" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test yes = "$enable_shared" && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + + aix[[4-9]]*) + if test ia64 != "$host_cpu"; then + case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in + yes,aix,yes) ;; # shared object as lib.so file only + yes,svr4,*) ;; # shared object as lib.so archive member only + yes,*) enable_static=no ;; # shared object in lib.a archive as well + esac + fi + ;; + esac + AC_MSG_RESULT([$enable_shared]) + + AC_MSG_CHECKING([whether to build static libraries]) + # Make sure either enable_shared or enable_static is yes. + test yes = "$enable_shared" || enable_static=yes + AC_MSG_RESULT([$enable_static]) + + _LT_CONFIG($1) +fi +AC_LANG_POP +CC=$lt_save_CC +])# _LT_LANG_C_CONFIG + + +# _LT_LANG_CXX_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for a C++ compiler are suitably +# defined. These variables are subsequently used by _LT_CONFIG to write +# the compiler configuration to 'libtool'. +m4_defun([_LT_LANG_CXX_CONFIG], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_PATH_MANIFEST_TOOL])dnl +if test -n "$CXX" && ( test no != "$CXX" && + ( (test g++ = "$CXX" && `g++ -v >/dev/null 2>&1` ) || + (test g++ != "$CXX"))); then + AC_PROG_CXXCPP +else + _lt_caught_CXX_error=yes +fi + +AC_LANG_PUSH(C++) +_LT_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_TAGVAR(allow_undefined_flag, $1)= +_LT_TAGVAR(always_export_symbols, $1)=no +_LT_TAGVAR(archive_expsym_cmds, $1)= +_LT_TAGVAR(compiler_needs_object, $1)=no +_LT_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_TAGVAR(hardcode_direct, $1)=no +_LT_TAGVAR(hardcode_direct_absolute, $1)=no +_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_TAGVAR(hardcode_libdir_separator, $1)= +_LT_TAGVAR(hardcode_minus_L, $1)=no +_LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported +_LT_TAGVAR(hardcode_automatic, $1)=no +_LT_TAGVAR(inherit_rpath, $1)=no +_LT_TAGVAR(module_cmds, $1)= +_LT_TAGVAR(module_expsym_cmds, $1)= +_LT_TAGVAR(link_all_deplibs, $1)=unknown +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds +_LT_TAGVAR(no_undefined_flag, $1)= +_LT_TAGVAR(whole_archive_flag_spec, $1)= +_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Source file extension for C++ test sources. +ac_ext=cpp + +# Object file extension for compiled C++ test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# No sense in running all these tests if we already determined that +# the CXX compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. 
+if test yes != "$_lt_caught_CXX_error"; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="int some_variable = 0;" + + # Code to be used in simple link tests + lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }' + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + _LT_TAG_COMPILER + + # save warnings/boilerplate of simple test code + _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. + lt_save_CC=$CC + lt_save_CFLAGS=$CFLAGS + lt_save_LD=$LD + lt_save_GCC=$GCC + GCC=$GXX + lt_save_with_gnu_ld=$with_gnu_ld + lt_save_path_LD=$lt_cv_path_LD + if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then + lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx + else + $as_unset lt_cv_prog_gnu_ld + fi + if test -n "${lt_cv_path_LDCXX+set}"; then + lt_cv_path_LD=$lt_cv_path_LDCXX + else + $as_unset lt_cv_path_LD + fi + test -z "${LDCXX+set}" || LD=$LDCXX + CC=${CXX-"c++"} + CFLAGS=$CXXFLAGS + compiler=$CC + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) + + if test -n "$compiler"; then + # We don't want -fno-exception when compiling C++ code, so set the + # no_builtin_flag separately + if test yes = "$GXX"; then + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' + else + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= + fi + + if test yes = "$GXX"; then + # Set up default GNU C++ configuration + + LT_PATH_LD + + # Check if GNU C++ uses GNU ld as the underlying linker, since the + # archiving commands below assume that GNU ld is being used. + if test yes = "$with_gnu_ld"; then + _LT_TAGVAR(archive_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' + + # If archive_cmds runs LD, not CC, wlarc should be empty + # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to + # investigate it a little bit more. (MM) + wlarc='$wl' + + # ancient GNU ld didn't support --whole-archive et. al. + if eval "`$CC -print-prog-name=ld` --help 2>&1" | + $GREP 'no-whole-archive' > /dev/null; then + _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' + else + _LT_TAGVAR(whole_archive_flag_spec, $1)= + fi + else + with_gnu_ld=no + wlarc= + + # A generic and very simple default shared library creation + # command for GNU C++ for the case where it uses the native + # linker, instead of GNU ld. If possible, this setting should + # overridden to take advantage of the native linker features on + # the platform it is being used on. + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + fi + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP " \-L"' + + else + GXX=no + with_gnu_ld=no + wlarc= + fi + + # PORTME: fill in a description of your system's C++ link characteristics + AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) + _LT_TAGVAR(ld_shlibs, $1)=yes + case $host_os in + aix3*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + aix[[4-9]]*) + if test ia64 = "$host_cpu"; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag= + else + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # have runtime linking enabled, and use it for executables. + # For shared libraries, we enable/disable runtime linking + # depending on the kind of the shared library created - + # when "with_aix_soname,aix_use_runtimelinking" is: + # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables + # "aix,yes" lib.so shared, rtl:yes, for executables + # lib.a static archive + # "both,no" lib.so.V(shr.o) shared, rtl:yes + # lib.a(lib.so.V) shared, rtl:no, for executables + # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables + # lib.a(lib.so.V) shared, rtl:no + # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables + # lib.a static archive + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) + for ld_flag in $LDFLAGS; do + case $ld_flag in + *-brtl*) + aix_use_runtimelinking=yes + break + ;; + esac + done + if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then + # With aix-soname=svr4, we create the lib.so.V shared archives only, + # so we don't have lib.a shared libs to link our executables. + # We have to force runtime linking in this case. + aix_use_runtimelinking=yes + LDFLAGS="$LDFLAGS -Wl,-brtl" + fi + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + _LT_TAGVAR(archive_cmds, $1)='' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='$wl-f,' + case $with_aix_soname,$aix_use_runtimelinking in + aix,*) ;; # no import file + svr4,* | *,yes) # use import file + # The Import File defines what to hardcode. + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=no + ;; + esac + + if test yes = "$GXX"; then + case $host_os in aix4.[[012]]|aix4.[[012]].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`$CC -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + _LT_TAGVAR(hardcode_direct, $1)=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. 
Setting hardcode_minus_L + # to unsupported forces relinking + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)= + fi + esac + shared_flag='-shared' + if test yes = "$aix_use_runtimelinking"; then + shared_flag=$shared_flag' $wl-G' + fi + # Need to ensure runtime linking is disabled for the traditional + # shared library, or the linker may eventually find shared libraries + # /with/ Import File - we do not want to mix them. + shared_flag_aix='-shared' + shared_flag_svr4='-shared $wl-G' + else + # not using gcc + if test ia64 = "$host_cpu"; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test yes = "$aix_use_runtimelinking"; then + shared_flag='$wl-G' + else + shared_flag='$wl-bM:SRE' + fi + shared_flag_aix='$wl-bM:SRE' + shared_flag_svr4='$wl-G' + fi + fi + + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to + # export. + _LT_TAGVAR(always_export_symbols, $1)=yes + if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + # The "-G" linker flag allows undefined symbols. + _LT_TAGVAR(no_undefined_flag, $1)='-bernotok' + # Determine the default libpath from the value encoded in an empty + # executable. + _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" + + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag + else + if test ia64 = "$host_cpu"; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $libdir:/usr/lib:/lib' + _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + _LT_TAGVAR(no_undefined_flag, $1)=' $wl-bernotok' + _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-berok' + if test yes = "$with_gnu_ld"; then + # We only use this code for GNU lds that support --whole-archive. 
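+            # Roughly, with g++ $wl becomes -Wl, so for a hypothetical
+            # convenience archive .libs/libhelper.a the spec below lands on
+            # the link line as
+            #   -Wl,--whole-archive .libs/libhelper.a -Wl,--no-whole-archive
+            # forcing every member of the convenience archive into the shared
+            # object rather than only the members that resolve a symbol.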
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + _LT_TAGVAR(archive_expsym_cmds, $1)='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' + # -brtl affects multiple linker settings, -berok does not and is overridden later + compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([[, ]]\\)%-berok\\1%g"`' + if test svr4 != "$with_aix_soname"; then + # This is similar to how AIX traditionally builds its shared + # libraries. Need -bnortl late, we may have -brtl in LDFLAGS. + _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' + fi + if test aix != "$with_aix_soname"; then + _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' + else + # used by -dlpreopen to get the symbols + _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$MV $output_objdir/$realname.d/$soname $output_objdir' + fi + _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$RM -r $output_objdir/$realname.d' + fi + fi + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + chorus*) + case $cc_basename in + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + cygwin* | mingw* | pw32* | cegcc*) + case $GXX,$cc_basename in + ,cl* | no,cl*) + # Native MSVC + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=.dll + # FIXME: Setting linknames here is a bad hack. 
+ _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' + _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then + cp "$export_symbols" "$output_objdir/$soname.def"; + echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; + else + $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. + # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + # Don't use ranlib + _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' + _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile=$lt_outputfile.exe + lt_tool_outputfile=$lt_tool_outputfile.exe + ;; + esac~ + func_to_tool_file "$lt_outputfile"~ + if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # g++ + # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, + # as there is no search path for DLLs. + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-all-symbols' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file, use it as + # is; otherwise, prepend EXPORTS... 
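+          # For example (hypothetical symbol names), a plain export list
+          #     my_init
+          #     my_cleanup
+          # becomes $output_objdir/$soname.def containing
+          #     EXPORTS
+          #     my_init
+          #     my_cleanup
+          # before being passed on the g++ link line below.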
+ _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + darwin* | rhapsody*) + _LT_DARWIN_LINKER_FEATURES($1) + ;; + + os2*) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + shrext_cmds=.dll + _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + prefix_cmds="$SED"~ + if test EXPORTS = "`$SED 1q $export_symbols`"; then + prefix_cmds="$prefix_cmds -e 1d"; + fi~ + prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ + cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + _LT_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + ;; + + dgux*) + case $cc_basename in + ec++*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + ghcx*) + # Green Hills C++ Compiler + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + freebsd2.*) + # C++ shared libraries reported to be fairly broken before + # switch to ELF + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + freebsd-elf*) + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + ;; + + freebsd* | dragonfly*) + # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF + # conventions + _LT_TAGVAR(ld_shlibs, $1)=yes + ;; + + haiku*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + hpux9*) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, + # but as the default + # location of the library. 
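+    # With a hypothetical $libdir of /opt/foo/lib, the '+b' spec above expands
+    # to '-Wl,+b -Wl,/opt/foo/lib', asking the HP-UX linker to record that
+    # directory as the embedded run-time search path of the library.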
+ + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + aCC*) + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP " \-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test yes = "$GXX"; then + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + hpux10*|hpux11*) + if test no = "$with_gnu_ld"; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + case $host_cpu in + hppa*64*|ia64*) + ;; + *) + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' + ;; + esac + fi + case $host_cpu in + hppa*64*|ia64*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + *) + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, + # but as the default + # location of the library. + ;; + esac + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + aCC*) + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP " \-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test yes = "$GXX"; then + if test no = "$with_gnu_ld"; then + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + interix[[3-9]]*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + irix5* | irix6*) + case $cc_basename in + CC*) + # SGI C++ + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + + # Archives containing C++ object files must be created using + # "CC -ar", where "CC" is the IRIX C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
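+      # Rough illustration (hypothetical file names): building libfoo.a from
+      # a.o and b.o goes through the C++ driver instead of plain ar, e.g.
+      #   CC -ar -WR,-u -o libfoo.a a.o b.o
+      # so the compiler can instantiate any templates those objects still
+      # need and place the generated code in the archive as well.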
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' + ;; + *) + if test yes = "$GXX"; then + if test no = "$with_gnu_ld"; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` -o $lib' + fi + fi + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + esac + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(inherit_rpath, $1)=yes + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib $wl-retain-symbols-file,$export_symbols; mv \$templib $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' + ;; + icpc* | ecpc* ) + # Intel C++ + with_gnu_ld=yes + # version 8.0 and above of icpc choke on multiply defined symbols + # if we add $predep_objects and $postdep_objects, however 7.1 and + # earlier do not add the objects themselves. 
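+        # The version probe below keys off `$CC -V`; hypothetically, icpc 7.1
+        # prints a banner along the lines of
+        #   Intel(R) C++ Compiler ..., Version 7.1  Build ...
+        # which the *"Version 7."* pattern matches, while any newer banner
+        # falls through to the 8.0-and-later branch.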
+ case `$CC -V 2>&1` in + *"Version 7."*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 8.0 or newer + tmp_idyn= + case $host_cpu in + ia64*) tmp_idyn=' -i_dynamic';; + esac + _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' + _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + case `$CC -V` in + *pgCC\ [[1-5]].* | *pgcpp\ [[1-5]].*) + _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ + compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' + _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ + $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ + $RANLIB $oldlib' + _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 6 and above use weak symbols + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl--rpath $wl$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' + _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + ;; + cxx*) + # Compaq C++ + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib $wl-retain-symbols-file $wl$export_symbols' + 
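+        # runpath_var names an environment variable that ltmain exports while
+        # running the link commands; with a hypothetical prefix this amounts to
+        #   LD_RUN_PATH=/opt/foo/lib $CC -shared ... -o libfoo.so ...
+        # giving the linker a run-time search path in addition to the explicit
+        # -rpath flag configured just below.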
+ runpath_var=LD_RUN_PATH + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' + ;; + xl* | mpixl* | bgxl*) + # IBM XL 8.0 on PPC, with GNU ld + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' + _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + if test yes = "$supports_anon_versioning"; then + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' + fi + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' + _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file $wl$export_symbols' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + _LT_TAGVAR(compiler_needs_object, $1)=yes + + # Not sure whether something based on + # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 + # would be better. + output_verbose_link_cmd='func_echo_all' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' + ;; + esac + ;; + esac + ;; + + lynxos*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + m88k*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + mvs*) + case $cc_basename in + cxx*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' + wlarc= + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + fi + # Workaround some broken pre-1.5 toolchains + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' + ;; + + *nto* | *qnx*) + _LT_TAGVAR(ld_shlibs, $1)=yes + ;; + + openbsd* | bitrig*) + if test -f /usr/libexec/ld.so; then + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`"; then + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file,$export_symbols -o $lib' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' + _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' + fi + output_verbose_link_cmd=func_echo_all + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Archives containing C++ object files must be created using + # the KAI C++ compiler. 
+ case $host in + osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; + *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;; + esac + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + cxx*) + case $host in + osf3*) + _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $soname `test -n "$verstring" && func_echo_all "$wl-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' + ;; + *) + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ + echo "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname $wl-input $wl$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~ + $RM $lib.exp' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + ;; + esac + + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test yes,no = "$GXX,$with_gnu_ld"; then + _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' + case $host in + osf3*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + ;; + esac + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP " \-L"' + + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + psos*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + lcc*) + # Lucid + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + _LT_TAGVAR(archive_cmds_need_lc,$1)=yes + _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' + _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G$allow_undefined_flag $wl-M $wl$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands '-z linker_flag'. + # Supported since Solaris 2.6 (maybe 2.5.1?) + _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' + ;; + esac + _LT_TAGVAR(link_all_deplibs, $1)=yes + + output_verbose_link_cmd='func_echo_all' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' + ;; + gcx*) + # Green Hills C++ Compiler + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' + + # The C++ compiler must be used to create the archive. + _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' + ;; + *) + # GNU C++ compiler with Solaris linker + if test yes,no = "$GXX,$with_gnu_ld"; then + _LT_TAGVAR(no_undefined_flag, $1)=' $wl-z ${wl}defs' + if $CC --version | $GREP -v '^2\.7' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP " \-L"' + else + # g++ 2.7 appears to require '-G' NOT '-shared' on this + # platform. 
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP " \-L"' + fi + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $wl$libdir' + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' + ;; + esac + fi + ;; + esac + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) + _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We CANNOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
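+      # In other words (hypothetical example): linking with '-z defs' would
+      # reject even the libc references we intentionally leave unresolved,
+      #   cc -G -z defs ... -o libfoo.so      # fails: printf undefined
+      # whereas '-z text', used below, only rejects relocations against the
+      # text segment, such as those caused by linking non-PIC objects.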
+ _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' + _LT_TAGVAR(allow_undefined_flag, $1)='$wl-z,nodefs' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R,$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Bexport' + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(old_archive_cmds, $1)='$CC -Tprelink_objects $oldobjs~ + '"$_LT_TAGVAR(old_archive_cmds, $1)" + _LT_TAGVAR(reload_cmds, $1)='$CC -Tprelink_objects $reload_objs~ + '"$_LT_TAGVAR(reload_cmds, $1)" + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + vxworks*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + + AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) + test no = "$_LT_TAGVAR(ld_shlibs, $1)" && can_build_shared=no + + _LT_TAGVAR(GCC, $1)=$GXX + _LT_TAGVAR(LD, $1)=$LD + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + _LT_SYS_HIDDEN_LIBDEPS($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) + fi # test -n "$compiler" + + CC=$lt_save_CC + CFLAGS=$lt_save_CFLAGS + LDCXX=$LD + LD=$lt_save_LD + GCC=$lt_save_GCC + with_gnu_ld=$lt_save_with_gnu_ld + lt_cv_path_LDCXX=$lt_cv_path_LD + lt_cv_path_LD=$lt_save_path_LD + lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld + lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld +fi # test yes != "$_lt_caught_CXX_error" + +AC_LANG_POP +])# _LT_LANG_CXX_CONFIG + + +# _LT_FUNC_STRIPNAME_CNF +# ---------------------- +# func_stripname_cnf prefix suffix name +# strip PREFIX and SUFFIX off of NAME. +# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +# +# This function is identical to the (non-XSI) version of func_stripname, +# except this one can be used by m4 code that may be executed by configure, +# rather than the libtool script. 
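+# For example, once configure has emitted the function, a call such as
+#   func_stripname_cnf '-L' '' '-L/usr/local/lib'
+# leaves '/usr/local/lib' in $func_stripname_result; the @S|@ quadrigraphs in
+# the body below are m4 escapes that expand to a literal '$' in the output.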
+m4_defun([_LT_FUNC_STRIPNAME_CNF],[dnl +AC_REQUIRE([_LT_DECL_SED]) +AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH]) +func_stripname_cnf () +{ + case @S|@2 in + .*) func_stripname_result=`$ECHO "@S|@3" | $SED "s%^@S|@1%%; s%\\\\@S|@2\$%%"`;; + *) func_stripname_result=`$ECHO "@S|@3" | $SED "s%^@S|@1%%; s%@S|@2\$%%"`;; + esac +} # func_stripname_cnf +])# _LT_FUNC_STRIPNAME_CNF + + +# _LT_SYS_HIDDEN_LIBDEPS([TAGNAME]) +# --------------------------------- +# Figure out "hidden" library dependencies from verbose +# compiler output when linking a shared library. +# Parse the compiler output and extract the necessary +# objects, libraries and library flags. +m4_defun([_LT_SYS_HIDDEN_LIBDEPS], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +AC_REQUIRE([_LT_FUNC_STRIPNAME_CNF])dnl +# Dependencies to place before and after the object being linked: +_LT_TAGVAR(predep_objects, $1)= +_LT_TAGVAR(postdep_objects, $1)= +_LT_TAGVAR(predeps, $1)= +_LT_TAGVAR(postdeps, $1)= +_LT_TAGVAR(compiler_lib_search_path, $1)= + +dnl we can't use the lt_simple_compile_test_code here, +dnl because it contains code intended for an executable, +dnl not a library. It's possible we should let each +dnl tag define a new lt_????_link_test_code variable, +dnl but it's only used here... +m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF +int a; +void foo (void) { a = 0; } +_LT_EOF +], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF +class Foo +{ +public: + Foo (void) { a = 0; } +private: + int a; +}; +_LT_EOF +], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF + subroutine foo + implicit none + integer*4 a + a=0 + return + end +_LT_EOF +], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF + subroutine foo + implicit none + integer a + a=0 + return + end +_LT_EOF +], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF +public class foo { + private int a; + public void bar (void) { + a = 0; + } +}; +_LT_EOF +], [$1], [GO], [cat > conftest.$ac_ext <<_LT_EOF +package foo +func foo() { +} +_LT_EOF +]) + +_lt_libdeps_save_CFLAGS=$CFLAGS +case "$CC $CFLAGS " in #( +*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; +*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; +*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;; +esac + +dnl Parse the compiler output and extract the necessary +dnl objects, libraries and library flags. +if AC_TRY_EVAL(ac_compile); then + # Parse the compiler output and extract the necessary + # objects, libraries and library flags. + + # Sentinel used to keep track of whether or not we are before + # the conftest object file. + pre_test_object_deps_done=no + + for p in `eval "$output_verbose_link_cmd"`; do + case $prev$p in + + -L* | -R* | -l*) + # Some compilers place space between "-{L,R}" and the path. + # Remove the space. + if test x-L = "$p" || + test x-R = "$p"; then + prev=$p + continue + fi + + # Expand the sysroot to ease extracting the directories later. + if test -z "$prev"; then + case $p in + -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; + -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; + -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; + esac + fi + case $p in + =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; + esac + if test no = "$pre_test_object_deps_done"; then + case $prev in + -L | -R) + # Internal compiler library paths should come after those + # provided the user. The postdeps already come after the + # user supplied libs so there is no need to process them. 
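+          # On a typical g++ installation (paths hypothetical) this parse ends
+          # up collecting something like
+          #   compiler_lib_search_path: -L/usr/lib/gcc/x86_64-linux-gnu/13 -L/usr/lib
+          #   postdeps:                 -lstdc++ -lm -lgcc_s -lc
+          # i.e. the directories and libraries the compiler driver itself adds
+          # around the objects being linked.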
+ if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then + _LT_TAGVAR(compiler_lib_search_path, $1)=$prev$p + else + _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} $prev$p" + fi + ;; + # The "-l" case would never come before the object being + # linked, so don't bother handling this case. + esac + else + if test -z "$_LT_TAGVAR(postdeps, $1)"; then + _LT_TAGVAR(postdeps, $1)=$prev$p + else + _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} $prev$p" + fi + fi + prev= + ;; + + *.lto.$objext) ;; # Ignore GCC LTO objects + *.$objext) + # This assumes that the test object file only shows up + # once in the compiler output. + if test "$p" = "conftest.$objext"; then + pre_test_object_deps_done=yes + continue + fi + + if test no = "$pre_test_object_deps_done"; then + if test -z "$_LT_TAGVAR(predep_objects, $1)"; then + _LT_TAGVAR(predep_objects, $1)=$p + else + _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p" + fi + else + if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then + _LT_TAGVAR(postdep_objects, $1)=$p + else + _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p" + fi + fi + ;; + + *) ;; # Ignore the rest. + + esac + done + + # Clean up. + rm -f a.out a.exe +else + echo "libtool.m4: error: problem compiling $1 test program" +fi + +$RM -f confest.$objext +CFLAGS=$_lt_libdeps_save_CFLAGS + +# PORTME: override above test on systems where it is broken +m4_if([$1], [CXX], +[case $host_os in +interix[[3-9]]*) + # Interix 3.5 installs completely hosed .la files for C++, so rather than + # hack all around it, let's just trust "g++" to DTRT. + _LT_TAGVAR(predep_objects,$1)= + _LT_TAGVAR(postdep_objects,$1)= + _LT_TAGVAR(postdeps,$1)= + ;; +esac +]) + +case " $_LT_TAGVAR(postdeps, $1) " in +*" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;; +esac + _LT_TAGVAR(compiler_lib_search_dirs, $1)= +if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then + _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | $SED -e 's! -L! !g' -e 's!^ !!'` +fi +_LT_TAGDECL([], [compiler_lib_search_dirs], [1], + [The directories searched by this compiler when creating a shared library]) +_LT_TAGDECL([], [predep_objects], [1], + [Dependencies to place before and after the objects being linked to + create a shared library]) +_LT_TAGDECL([], [postdep_objects], [1]) +_LT_TAGDECL([], [predeps], [1]) +_LT_TAGDECL([], [postdeps], [1]) +_LT_TAGDECL([], [compiler_lib_search_path], [1], + [The library search path used internally by the compiler when linking + a shared library]) +])# _LT_SYS_HIDDEN_LIBDEPS + + +# _LT_LANG_F77_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for a Fortran 77 compiler are +# suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to 'libtool'. 
+m4_defun([_LT_LANG_F77_CONFIG], +[AC_LANG_PUSH(Fortran 77) +if test -z "$F77" || test no = "$F77"; then + _lt_disable_F77=yes +fi + +_LT_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_TAGVAR(allow_undefined_flag, $1)= +_LT_TAGVAR(always_export_symbols, $1)=no +_LT_TAGVAR(archive_expsym_cmds, $1)= +_LT_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_TAGVAR(hardcode_direct, $1)=no +_LT_TAGVAR(hardcode_direct_absolute, $1)=no +_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_TAGVAR(hardcode_libdir_separator, $1)= +_LT_TAGVAR(hardcode_minus_L, $1)=no +_LT_TAGVAR(hardcode_automatic, $1)=no +_LT_TAGVAR(inherit_rpath, $1)=no +_LT_TAGVAR(module_cmds, $1)= +_LT_TAGVAR(module_expsym_cmds, $1)= +_LT_TAGVAR(link_all_deplibs, $1)=unknown +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds +_LT_TAGVAR(no_undefined_flag, $1)= +_LT_TAGVAR(whole_archive_flag_spec, $1)= +_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Source file extension for f77 test sources. +ac_ext=f + +# Object file extension for compiled f77 test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# No sense in running all these tests if we already determined that +# the F77 compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. +if test yes != "$_lt_disable_F77"; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="\ + subroutine t + return + end +" + + # Code to be used in simple link tests + lt_simple_link_test_code="\ + program t + end +" + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + _LT_TAG_COMPILER + + # save warnings/boilerplate of simple test code + _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. + lt_save_CC=$CC + lt_save_GCC=$GCC + lt_save_CFLAGS=$CFLAGS + CC=${F77-"f77"} + CFLAGS=$FFLAGS + compiler=$CC + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) + GCC=$G77 + if test -n "$compiler"; then + AC_MSG_CHECKING([if libtool supports shared libraries]) + AC_MSG_RESULT([$can_build_shared]) + + AC_MSG_CHECKING([whether to build shared libraries]) + test no = "$can_build_shared" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test yes = "$enable_shared" && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + aix[[4-9]]*) + if test ia64 != "$host_cpu"; then + case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in + yes,aix,yes) ;; # shared object as lib.so file only + yes,svr4,*) ;; # shared object as lib.so archive member only + yes,*) enable_static=no ;; # shared object in lib.a archive as well + esac + fi + ;; + esac + AC_MSG_RESULT([$enable_shared]) + + AC_MSG_CHECKING([whether to build static libraries]) + # Make sure either enable_shared or enable_static is yes. + test yes = "$enable_shared" || enable_static=yes + AC_MSG_RESULT([$enable_static]) + + _LT_TAGVAR(GCC, $1)=$G77 + _LT_TAGVAR(LD, $1)=$LD + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... 
+ _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) + fi # test -n "$compiler" + + GCC=$lt_save_GCC + CC=$lt_save_CC + CFLAGS=$lt_save_CFLAGS +fi # test yes != "$_lt_disable_F77" + +AC_LANG_POP +])# _LT_LANG_F77_CONFIG + + +# _LT_LANG_FC_CONFIG([TAG]) +# ------------------------- +# Ensure that the configuration variables for a Fortran compiler are +# suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to 'libtool'. +m4_defun([_LT_LANG_FC_CONFIG], +[AC_LANG_PUSH(Fortran) + +if test -z "$FC" || test no = "$FC"; then + _lt_disable_FC=yes +fi + +_LT_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_TAGVAR(allow_undefined_flag, $1)= +_LT_TAGVAR(always_export_symbols, $1)=no +_LT_TAGVAR(archive_expsym_cmds, $1)= +_LT_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_TAGVAR(hardcode_direct, $1)=no +_LT_TAGVAR(hardcode_direct_absolute, $1)=no +_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_TAGVAR(hardcode_libdir_separator, $1)= +_LT_TAGVAR(hardcode_minus_L, $1)=no +_LT_TAGVAR(hardcode_automatic, $1)=no +_LT_TAGVAR(inherit_rpath, $1)=no +_LT_TAGVAR(module_cmds, $1)= +_LT_TAGVAR(module_expsym_cmds, $1)= +_LT_TAGVAR(link_all_deplibs, $1)=unknown +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds +_LT_TAGVAR(no_undefined_flag, $1)= +_LT_TAGVAR(whole_archive_flag_spec, $1)= +_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Source file extension for fc test sources. +ac_ext=${ac_fc_srcext-f} + +# Object file extension for compiled fc test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# No sense in running all these tests if we already determined that +# the FC compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. +if test yes != "$_lt_disable_FC"; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="\ + subroutine t + return + end +" + + # Code to be used in simple link tests + lt_simple_link_test_code="\ + program t + end +" + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + _LT_TAG_COMPILER + + # save warnings/boilerplate of simple test code + _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. + lt_save_CC=$CC + lt_save_GCC=$GCC + lt_save_CFLAGS=$CFLAGS + CC=${FC-"f95"} + CFLAGS=$FCFLAGS + compiler=$CC + GCC=$ac_cv_fc_compiler_gnu + + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) + + if test -n "$compiler"; then + AC_MSG_CHECKING([if libtool supports shared libraries]) + AC_MSG_RESULT([$can_build_shared]) + + AC_MSG_CHECKING([whether to build shared libraries]) + test no = "$can_build_shared" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. 
+ case $host_os in + aix3*) + test yes = "$enable_shared" && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + aix[[4-9]]*) + if test ia64 != "$host_cpu"; then + case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in + yes,aix,yes) ;; # shared object as lib.so file only + yes,svr4,*) ;; # shared object as lib.so archive member only + yes,*) enable_static=no ;; # shared object in lib.a archive as well + esac + fi + ;; + esac + AC_MSG_RESULT([$enable_shared]) + + AC_MSG_CHECKING([whether to build static libraries]) + # Make sure either enable_shared or enable_static is yes. + test yes = "$enable_shared" || enable_static=yes + AC_MSG_RESULT([$enable_static]) + + _LT_TAGVAR(GCC, $1)=$ac_cv_fc_compiler_gnu + _LT_TAGVAR(LD, $1)=$LD + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + _LT_SYS_HIDDEN_LIBDEPS($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) + fi # test -n "$compiler" + + GCC=$lt_save_GCC + CC=$lt_save_CC + CFLAGS=$lt_save_CFLAGS +fi # test yes != "$_lt_disable_FC" + +AC_LANG_POP +])# _LT_LANG_FC_CONFIG + + +# _LT_LANG_GCJ_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for the GNU Java Compiler compiler +# are suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to 'libtool'. +m4_defun([_LT_LANG_GCJ_CONFIG], +[AC_REQUIRE([LT_PROG_GCJ])dnl +AC_LANG_SAVE + +# Source file extension for Java test sources. +ac_ext=java + +# Object file extension for compiled Java test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="class foo {}" + +# Code to be used in simple link tests +lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }' + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. +_LT_TAG_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +# Allow CC to be a program name with arguments. +lt_save_CC=$CC +lt_save_CFLAGS=$CFLAGS +lt_save_GCC=$GCC +GCC=yes +CC=${GCJ-"gcj"} +CFLAGS=$GCJFLAGS +compiler=$CC +_LT_TAGVAR(compiler, $1)=$CC +_LT_TAGVAR(LD, $1)=$LD +_LT_CC_BASENAME([$compiler]) + +# GCJ did not exist at the time GCC didn't implicitly link libc in. +_LT_TAGVAR(archive_cmds_need_lc, $1)=no + +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... +if test -n "$compiler"; then + _LT_COMPILER_NO_RTTI($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) +fi + +AC_LANG_RESTORE + +GCC=$lt_save_GCC +CC=$lt_save_CC +CFLAGS=$lt_save_CFLAGS +])# _LT_LANG_GCJ_CONFIG + + +# _LT_LANG_GO_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for the GNU Go compiler +# are suitably defined. 
These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to 'libtool'. +m4_defun([_LT_LANG_GO_CONFIG], +[AC_REQUIRE([LT_PROG_GO])dnl +AC_LANG_SAVE + +# Source file extension for Go test sources. +ac_ext=go + +# Object file extension for compiled Go test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="package main; func main() { }" + +# Code to be used in simple link tests +lt_simple_link_test_code='package main; func main() { }' + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. +_LT_TAG_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +# Allow CC to be a program name with arguments. +lt_save_CC=$CC +lt_save_CFLAGS=$CFLAGS +lt_save_GCC=$GCC +GCC=yes +CC=${GOC-"gccgo"} +CFLAGS=$GOFLAGS +compiler=$CC +_LT_TAGVAR(compiler, $1)=$CC +_LT_TAGVAR(LD, $1)=$LD +_LT_CC_BASENAME([$compiler]) + +# Go did not exist at the time GCC didn't implicitly link libc in. +_LT_TAGVAR(archive_cmds_need_lc, $1)=no + +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... +if test -n "$compiler"; then + _LT_COMPILER_NO_RTTI($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) +fi + +AC_LANG_RESTORE + +GCC=$lt_save_GCC +CC=$lt_save_CC +CFLAGS=$lt_save_CFLAGS +])# _LT_LANG_GO_CONFIG + + +# _LT_LANG_RC_CONFIG([TAG]) +# ------------------------- +# Ensure that the configuration variables for the Windows resource compiler +# are suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to 'libtool'. +m4_defun([_LT_LANG_RC_CONFIG], +[AC_REQUIRE([LT_PROG_RC])dnl +AC_LANG_SAVE + +# Source file extension for RC test sources. +ac_ext=rc + +# Object file extension for compiled RC test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }' + +# Code to be used in simple link tests +lt_simple_link_test_code=$lt_simple_compile_test_code + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. +_LT_TAG_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +# Allow CC to be a program name with arguments. 
+lt_save_CC=$CC +lt_save_CFLAGS=$CFLAGS +lt_save_GCC=$GCC +GCC= +CC=${RC-"windres"} +CFLAGS= +compiler=$CC +_LT_TAGVAR(compiler, $1)=$CC +_LT_CC_BASENAME([$compiler]) +_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes + +if test -n "$compiler"; then + : + _LT_CONFIG($1) +fi + +GCC=$lt_save_GCC +AC_LANG_RESTORE +CC=$lt_save_CC +CFLAGS=$lt_save_CFLAGS +])# _LT_LANG_RC_CONFIG + + +# LT_PROG_GCJ +# ----------- +AC_DEFUN([LT_PROG_GCJ], +[m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ], + [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ], + [AC_CHECK_TOOL(GCJ, gcj,) + test set = "${GCJFLAGS+set}" || GCJFLAGS="-g -O2" + AC_SUBST(GCJFLAGS)])])[]dnl +]) + +# Old name: +AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([LT_AC_PROG_GCJ], []) + + +# LT_PROG_GO +# ---------- +AC_DEFUN([LT_PROG_GO], +[AC_CHECK_TOOL(GOC, gccgo,) +]) + + +# LT_PROG_RC +# ---------- +AC_DEFUN([LT_PROG_RC], +[AC_CHECK_TOOL(RC, windres,) +]) + +# Old name: +AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([LT_AC_PROG_RC], []) + + +# _LT_DECL_EGREP +# -------------- +# If we don't have a new enough Autoconf to choose the best grep +# available, choose the one first in the user's PATH. +m4_defun([_LT_DECL_EGREP], +[AC_REQUIRE([AC_PROG_EGREP])dnl +AC_REQUIRE([AC_PROG_FGREP])dnl +test -z "$GREP" && GREP=grep +_LT_DECL([], [GREP], [1], [A grep program that handles long lines]) +_LT_DECL([], [EGREP], [1], [An ERE matcher]) +_LT_DECL([], [FGREP], [1], [A literal string matcher]) +dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too +AC_SUBST([GREP]) +]) + + +# _LT_DECL_OBJDUMP +# -------------- +# If we don't have a new enough Autoconf to choose the best objdump +# available, choose the one first in the user's PATH. +m4_defun([_LT_DECL_OBJDUMP], +[AC_CHECK_TOOL(OBJDUMP, objdump, false) +test -z "$OBJDUMP" && OBJDUMP=objdump +_LT_DECL([], [OBJDUMP], [1], [An object symbol dumper]) +AC_SUBST([OBJDUMP]) +]) + +# _LT_DECL_DLLTOOL +# ---------------- +# Ensure DLLTOOL variable is set. +m4_defun([_LT_DECL_DLLTOOL], +[AC_CHECK_TOOL(DLLTOOL, dlltool, false) +test -z "$DLLTOOL" && DLLTOOL=dlltool +_LT_DECL([], [DLLTOOL], [1], [DLL creation program]) +AC_SUBST([DLLTOOL]) +]) + +# _LT_DECL_SED +# ------------ +# Check for a fully-functional sed program, that truncates +# as few characters as possible. Prefer GNU sed if found. +m4_defun([_LT_DECL_SED], +[AC_PROG_SED +test -z "$SED" && SED=sed +Xsed="$SED -e 1s/^X//" +_LT_DECL([], [SED], [1], [A sed program that does not truncate output]) +_LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"], + [Sed that helps us avoid accidentally triggering echo(1) options like -n]) +])# _LT_DECL_SED + +m4_ifndef([AC_PROG_SED], [ +############################################################ +# NOTE: This macro has been submitted for inclusion into # +# GNU Autoconf as AC_PROG_SED. When it is available in # +# a released version of Autoconf we should remove this # +# macro and use it instead. # +############################################################ + +m4_defun([AC_PROG_SED], +[AC_MSG_CHECKING([for a sed that does not truncate output]) +AC_CACHE_VAL(lt_cv_path_SED, +[# Loop through the user's path and test for sed and gsed. +# Then use that list of sed's as ones to test for truncation. +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for lt_ac_prog in sed gsed; do + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then + lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" + fi + done + done +done +IFS=$as_save_IFS +lt_ac_max=0 +lt_ac_count=0 +# Add /usr/xpg4/bin/sed as it is typically found on Solaris +# along with /bin/sed that truncates output. +for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do + test ! -f "$lt_ac_sed" && continue + cat /dev/null > conftest.in + lt_ac_count=0 + echo $ECHO_N "0123456789$ECHO_C" >conftest.in + # Check for GNU sed and select it if it is found. + if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then + lt_cv_path_SED=$lt_ac_sed + break + fi + while true; do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo >>conftest.nl + $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break + cmp -s conftest.out conftest.nl || break + # 10000 chars as input seems more than enough + test 10 -lt "$lt_ac_count" && break + lt_ac_count=`expr $lt_ac_count + 1` + if test "$lt_ac_count" -gt "$lt_ac_max"; then + lt_ac_max=$lt_ac_count + lt_cv_path_SED=$lt_ac_sed + fi + done +done +]) +SED=$lt_cv_path_SED +AC_SUBST([SED]) +AC_MSG_RESULT([$SED]) +])#AC_PROG_SED +])#m4_ifndef + +# Old name: +AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([LT_AC_PROG_SED], []) + + +# _LT_CHECK_SHELL_FEATURES +# ------------------------ +# Find out whether the shell is Bourne or XSI compatible, +# or has some other useful features. +m4_defun([_LT_CHECK_SHELL_FEATURES], +[if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + lt_unset=unset +else + lt_unset=false +fi +_LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl + +# test EBCDIC or ASCII +case `echo X|tr X '\101'` in + A) # ASCII based system + # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr + lt_SP2NL='tr \040 \012' + lt_NL2SP='tr \015\012 \040\040' + ;; + *) # EBCDIC based system + lt_SP2NL='tr \100 \n' + lt_NL2SP='tr \r\n \100\100' + ;; +esac +_LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl +_LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl +])# _LT_CHECK_SHELL_FEATURES + + +# _LT_PATH_CONVERSION_FUNCTIONS +# ----------------------------- +# Determine what file name conversion functions should be used by +# func_to_host_file (and, implicitly, by func_to_host_path). These are needed +# for certain cross-compile configurations and native mingw. 
+m4_defun([_LT_PATH_CONVERSION_FUNCTIONS], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +AC_MSG_CHECKING([how to convert $build file names to $host format]) +AC_CACHE_VAL(lt_cv_to_host_file_cmd, +[case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 + ;; + esac + ;; + *-*-cygwin* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin + ;; + esac + ;; + * ) # unhandled hosts (and "normal" native builds) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; +esac +]) +to_host_file_cmd=$lt_cv_to_host_file_cmd +AC_MSG_RESULT([$lt_cv_to_host_file_cmd]) +_LT_DECL([to_host_file_cmd], [lt_cv_to_host_file_cmd], + [0], [convert $build file names to $host format])dnl + +AC_MSG_CHECKING([how to convert $build file names to toolchain format]) +AC_CACHE_VAL(lt_cv_to_tool_file_cmd, +[#assume ordinary cross tools, or native build. +lt_cv_to_tool_file_cmd=func_convert_file_noop +case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 + ;; + esac + ;; +esac +]) +to_tool_file_cmd=$lt_cv_to_tool_file_cmd +AC_MSG_RESULT([$lt_cv_to_tool_file_cmd]) +_LT_DECL([to_tool_file_cmd], [lt_cv_to_tool_file_cmd], + [0], [convert $build files to toolchain format])dnl +])# _LT_PATH_CONVERSION_FUNCTIONS diff --git a/ext/libpqxx-7.7.3/config/m4/ltoptions.m4 b/ext/libpqxx-7.7.3/config/m4/ltoptions.m4 new file mode 100644 index 000000000..94b082976 --- /dev/null +++ b/ext/libpqxx-7.7.3/config/m4/ltoptions.m4 @@ -0,0 +1,437 @@ +# Helper functions for option handling. -*- Autoconf -*- +# +# Copyright (C) 2004-2005, 2007-2009, 2011-2015 Free Software +# Foundation, Inc. +# Written by Gary V. Vaughan, 2004 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# serial 8 ltoptions.m4 + +# This is to help aclocal find these macros, as it can't see m4_define. +AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])]) + + +# _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME) +# ------------------------------------------ +m4_define([_LT_MANGLE_OPTION], +[[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])]) + + +# _LT_SET_OPTION(MACRO-NAME, OPTION-NAME) +# --------------------------------------- +# Set option OPTION-NAME for macro MACRO-NAME, and if there is a +# matching handler defined, dispatch to it. Other OPTION-NAMEs are +# saved as a flag. +m4_define([_LT_SET_OPTION], +[m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl +m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]), + _LT_MANGLE_DEFUN([$1], [$2]), + [m4_warning([Unknown $1 option '$2'])])[]dnl +]) + + +# _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET]) +# ------------------------------------------------------------ +# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. 
+m4_define([_LT_IF_OPTION], +[m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])]) + + +# _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET) +# ------------------------------------------------------- +# Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME +# are set. +m4_define([_LT_UNLESS_OPTIONS], +[m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), + [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option), + [m4_define([$0_found])])])[]dnl +m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3 +])[]dnl +]) + + +# _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST) +# ---------------------------------------- +# OPTION-LIST is a space-separated list of Libtool options associated +# with MACRO-NAME. If any OPTION has a matching handler declared with +# LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about +# the unknown option and exit. +m4_defun([_LT_SET_OPTIONS], +[# Set options +m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), + [_LT_SET_OPTION([$1], _LT_Option)]) + +m4_if([$1],[LT_INIT],[ + dnl + dnl Simply set some default values (i.e off) if boolean options were not + dnl specified: + _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no + ]) + _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no + ]) + dnl + dnl If no reference was made to various pairs of opposing options, then + dnl we run the default mode handler for the pair. For example, if neither + dnl 'shared' nor 'disable-shared' was passed, we enable building of shared + dnl archives by default: + _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED]) + _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC]) + _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC]) + _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install], + [_LT_ENABLE_FAST_INSTALL]) + _LT_UNLESS_OPTIONS([LT_INIT], [aix-soname=aix aix-soname=both aix-soname=svr4], + [_LT_WITH_AIX_SONAME([aix])]) + ]) +])# _LT_SET_OPTIONS + + +## --------------------------------- ## +## Macros to handle LT_INIT options. ## +## --------------------------------- ## + +# _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME) +# ----------------------------------------- +m4_define([_LT_MANGLE_DEFUN], +[[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])]) + + +# LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE) +# ----------------------------------------------- +m4_define([LT_OPTION_DEFINE], +[m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl +])# LT_OPTION_DEFINE + + +# dlopen +# ------ +LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes +]) + +AU_DEFUN([AC_LIBTOOL_DLOPEN], +[_LT_SET_OPTION([LT_INIT], [dlopen]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you +put the 'dlopen' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], []) + + +# win32-dll +# --------- +# Declare package support for building win32 dll's. 
+LT_OPTION_DEFINE([LT_INIT], [win32-dll], +[enable_win32_dll=yes + +case $host in +*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-cegcc*) + AC_CHECK_TOOL(AS, as, false) + AC_CHECK_TOOL(DLLTOOL, dlltool, false) + AC_CHECK_TOOL(OBJDUMP, objdump, false) + ;; +esac + +test -z "$AS" && AS=as +_LT_DECL([], [AS], [1], [Assembler program])dnl + +test -z "$DLLTOOL" && DLLTOOL=dlltool +_LT_DECL([], [DLLTOOL], [1], [DLL creation program])dnl + +test -z "$OBJDUMP" && OBJDUMP=objdump +_LT_DECL([], [OBJDUMP], [1], [Object dumper program])dnl +])# win32-dll + +AU_DEFUN([AC_LIBTOOL_WIN32_DLL], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +_LT_SET_OPTION([LT_INIT], [win32-dll]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you +put the 'win32-dll' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], []) + + +# _LT_ENABLE_SHARED([DEFAULT]) +# ---------------------------- +# implement the --enable-shared flag, and supports the 'shared' and +# 'disable-shared' LT_INIT options. +# DEFAULT is either 'yes' or 'no'. If omitted, it defaults to 'yes'. +m4_define([_LT_ENABLE_SHARED], +[m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl +AC_ARG_ENABLE([shared], + [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@], + [build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, + for pkg in $enableval; do + IFS=$lt_save_ifs + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done + IFS=$lt_save_ifs + ;; + esac], + [enable_shared=]_LT_ENABLE_SHARED_DEFAULT) + + _LT_DECL([build_libtool_libs], [enable_shared], [0], + [Whether or not to build shared libraries]) +])# _LT_ENABLE_SHARED + +LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])]) +LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])]) + +# Old names: +AC_DEFUN([AC_ENABLE_SHARED], +[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared]) +]) + +AC_DEFUN([AC_DISABLE_SHARED], +[_LT_SET_OPTION([LT_INIT], [disable-shared]) +]) + +AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) +AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_ENABLE_SHARED], []) +dnl AC_DEFUN([AM_DISABLE_SHARED], []) + + + +# _LT_ENABLE_STATIC([DEFAULT]) +# ---------------------------- +# implement the --enable-static flag, and support the 'static' and +# 'disable-static' LT_INIT options. +# DEFAULT is either 'yes' or 'no'. If omitted, it defaults to 'yes'. +m4_define([_LT_ENABLE_STATIC], +[m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl +AC_ARG_ENABLE([static], + [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@], + [build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; + *) + enable_static=no + # Look at the argument we got. We use all the common list separators. 
+ lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, + for pkg in $enableval; do + IFS=$lt_save_ifs + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS=$lt_save_ifs + ;; + esac], + [enable_static=]_LT_ENABLE_STATIC_DEFAULT) + + _LT_DECL([build_old_libs], [enable_static], [0], + [Whether or not to build static libraries]) +])# _LT_ENABLE_STATIC + +LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])]) +LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])]) + +# Old names: +AC_DEFUN([AC_ENABLE_STATIC], +[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static]) +]) + +AC_DEFUN([AC_DISABLE_STATIC], +[_LT_SET_OPTION([LT_INIT], [disable-static]) +]) + +AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) +AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_ENABLE_STATIC], []) +dnl AC_DEFUN([AM_DISABLE_STATIC], []) + + + +# _LT_ENABLE_FAST_INSTALL([DEFAULT]) +# ---------------------------------- +# implement the --enable-fast-install flag, and support the 'fast-install' +# and 'disable-fast-install' LT_INIT options. +# DEFAULT is either 'yes' or 'no'. If omitted, it defaults to 'yes'. +m4_define([_LT_ENABLE_FAST_INSTALL], +[m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl +AC_ARG_ENABLE([fast-install], + [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], + [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, + for pkg in $enableval; do + IFS=$lt_save_ifs + if test "X$pkg" = "X$p"; then + enable_fast_install=yes + fi + done + IFS=$lt_save_ifs + ;; + esac], + [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT) + +_LT_DECL([fast_install], [enable_fast_install], [0], + [Whether or not to optimize for fast installation])dnl +])# _LT_ENABLE_FAST_INSTALL + +LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])]) +LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])]) + +# Old names: +AU_DEFUN([AC_ENABLE_FAST_INSTALL], +[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you put +the 'fast-install' option into LT_INIT's first parameter.]) +]) + +AU_DEFUN([AC_DISABLE_FAST_INSTALL], +[_LT_SET_OPTION([LT_INIT], [disable-fast-install]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you put +the 'disable-fast-install' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], []) +dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], []) + + +# _LT_WITH_AIX_SONAME([DEFAULT]) +# ---------------------------------- +# implement the --with-aix-soname flag, and support the `aix-soname=aix' +# and `aix-soname=both' and `aix-soname=svr4' LT_INIT options. DEFAULT +# is either `aix', `both' or `svr4'. If omitted, it defaults to `aix'. 
+m4_define([_LT_WITH_AIX_SONAME], +[m4_define([_LT_WITH_AIX_SONAME_DEFAULT], [m4_if($1, svr4, svr4, m4_if($1, both, both, aix))])dnl +shared_archive_member_spec= +case $host,$enable_shared in +power*-*-aix[[5-9]]*,yes) + AC_MSG_CHECKING([which variant of shared library versioning to provide]) + AC_ARG_WITH([aix-soname], + [AS_HELP_STRING([--with-aix-soname=aix|svr4|both], + [shared library versioning (aka "SONAME") variant to provide on AIX, @<:@default=]_LT_WITH_AIX_SONAME_DEFAULT[@:>@.])], + [case $withval in + aix|svr4|both) + ;; + *) + AC_MSG_ERROR([Unknown argument to --with-aix-soname]) + ;; + esac + lt_cv_with_aix_soname=$with_aix_soname], + [AC_CACHE_VAL([lt_cv_with_aix_soname], + [lt_cv_with_aix_soname=]_LT_WITH_AIX_SONAME_DEFAULT) + with_aix_soname=$lt_cv_with_aix_soname]) + AC_MSG_RESULT([$with_aix_soname]) + if test aix != "$with_aix_soname"; then + # For the AIX way of multilib, we name the shared archive member + # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o', + # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File. + # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag, + # the AIX toolchain works better with OBJECT_MODE set (default 32). + if test 64 = "${OBJECT_MODE-32}"; then + shared_archive_member_spec=shr_64 + else + shared_archive_member_spec=shr + fi + fi + ;; +*) + with_aix_soname=aix + ;; +esac + +_LT_DECL([], [shared_archive_member_spec], [0], + [Shared archive member basename, for filename based shared library versioning on AIX])dnl +])# _LT_WITH_AIX_SONAME + +LT_OPTION_DEFINE([LT_INIT], [aix-soname=aix], [_LT_WITH_AIX_SONAME([aix])]) +LT_OPTION_DEFINE([LT_INIT], [aix-soname=both], [_LT_WITH_AIX_SONAME([both])]) +LT_OPTION_DEFINE([LT_INIT], [aix-soname=svr4], [_LT_WITH_AIX_SONAME([svr4])]) + + +# _LT_WITH_PIC([MODE]) +# -------------------- +# implement the --with-pic flag, and support the 'pic-only' and 'no-pic' +# LT_INIT options. +# MODE is either 'yes' or 'no'. If omitted, it defaults to 'both'. +m4_define([_LT_WITH_PIC], +[AC_ARG_WITH([pic], + [AS_HELP_STRING([--with-pic@<:@=PKGS@:>@], + [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], + [lt_p=${PACKAGE-default} + case $withval in + yes|no) pic_mode=$withval ;; + *) + pic_mode=default + # Look at the argument we got. We use all the common list separators. 
+ lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, + for lt_pkg in $withval; do + IFS=$lt_save_ifs + if test "X$lt_pkg" = "X$lt_p"; then + pic_mode=yes + fi + done + IFS=$lt_save_ifs + ;; + esac], + [pic_mode=m4_default([$1], [default])]) + +_LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl +])# _LT_WITH_PIC + +LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])]) +LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])]) + +# Old name: +AU_DEFUN([AC_LIBTOOL_PICMODE], +[_LT_SET_OPTION([LT_INIT], [pic-only]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you +put the 'pic-only' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_PICMODE], []) + +## ----------------- ## +## LTDL_INIT Options ## +## ----------------- ## + +m4_define([_LTDL_MODE], []) +LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive], + [m4_define([_LTDL_MODE], [nonrecursive])]) +LT_OPTION_DEFINE([LTDL_INIT], [recursive], + [m4_define([_LTDL_MODE], [recursive])]) +LT_OPTION_DEFINE([LTDL_INIT], [subproject], + [m4_define([_LTDL_MODE], [subproject])]) + +m4_define([_LTDL_TYPE], []) +LT_OPTION_DEFINE([LTDL_INIT], [installable], + [m4_define([_LTDL_TYPE], [installable])]) +LT_OPTION_DEFINE([LTDL_INIT], [convenience], + [m4_define([_LTDL_TYPE], [convenience])]) diff --git a/ext/libpqxx-7.7.3/config/m4/ltsugar.m4 b/ext/libpqxx-7.7.3/config/m4/ltsugar.m4 new file mode 100644 index 000000000..48bc9344a --- /dev/null +++ b/ext/libpqxx-7.7.3/config/m4/ltsugar.m4 @@ -0,0 +1,124 @@ +# ltsugar.m4 -- libtool m4 base layer. -*-Autoconf-*- +# +# Copyright (C) 2004-2005, 2007-2008, 2011-2015 Free Software +# Foundation, Inc. +# Written by Gary V. Vaughan, 2004 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# serial 6 ltsugar.m4 + +# This is to help aclocal find these macros, as it can't see m4_define. +AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])]) + + +# lt_join(SEP, ARG1, [ARG2...]) +# ----------------------------- +# Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their +# associated separator. +# Needed until we can rely on m4_join from Autoconf 2.62, since all earlier +# versions in m4sugar had bugs. +m4_define([lt_join], +[m4_if([$#], [1], [], + [$#], [2], [[$2]], + [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])]) +m4_define([_lt_join], +[m4_if([$#$2], [2], [], + [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])]) + + +# lt_car(LIST) +# lt_cdr(LIST) +# ------------ +# Manipulate m4 lists. +# These macros are necessary as long as will still need to support +# Autoconf-2.59, which quotes differently. +m4_define([lt_car], [[$1]]) +m4_define([lt_cdr], +[m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])], + [$#], 1, [], + [m4_dquote(m4_shift($@))])]) +m4_define([lt_unquote], $1) + + +# lt_append(MACRO-NAME, STRING, [SEPARATOR]) +# ------------------------------------------ +# Redefine MACRO-NAME to hold its former content plus 'SEPARATOR''STRING'. +# Note that neither SEPARATOR nor STRING are expanded; they are appended +# to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked). +# No SEPARATOR is output if MACRO-NAME was previously undefined (different +# than defined and empty). 
+# +# This macro is needed until we can rely on Autoconf 2.62, since earlier +# versions of m4sugar mistakenly expanded SEPARATOR but not STRING. +m4_define([lt_append], +[m4_define([$1], + m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])]) + + + +# lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...]) +# ---------------------------------------------------------- +# Produce a SEP delimited list of all paired combinations of elements of +# PREFIX-LIST with SUFFIX1 through SUFFIXn. Each element of the list +# has the form PREFIXmINFIXSUFFIXn. +# Needed until we can rely on m4_combine added in Autoconf 2.62. +m4_define([lt_combine], +[m4_if(m4_eval([$# > 3]), [1], + [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl +[[m4_foreach([_Lt_prefix], [$2], + [m4_foreach([_Lt_suffix], + ]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[, + [_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])]) + + +# lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ]) +# ----------------------------------------------------------------------- +# Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited +# by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ. +m4_define([lt_if_append_uniq], +[m4_ifdef([$1], + [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1], + [lt_append([$1], [$2], [$3])$4], + [$5])], + [lt_append([$1], [$2], [$3])$4])]) + + +# lt_dict_add(DICT, KEY, VALUE) +# ----------------------------- +m4_define([lt_dict_add], +[m4_define([$1($2)], [$3])]) + + +# lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE) +# -------------------------------------------- +m4_define([lt_dict_add_subkey], +[m4_define([$1($2:$3)], [$4])]) + + +# lt_dict_fetch(DICT, KEY, [SUBKEY]) +# ---------------------------------- +m4_define([lt_dict_fetch], +[m4_ifval([$3], + m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]), + m4_ifdef([$1($2)], [m4_defn([$1($2)])]))]) + + +# lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE]) +# ----------------------------------------------------------------- +m4_define([lt_if_dict_fetch], +[m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4], + [$5], + [$6])]) + + +# lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...]) +# -------------------------------------------------------------- +m4_define([lt_dict_filter], +[m4_if([$5], [], [], + [lt_join(m4_quote(m4_default([$4], [[, ]])), + lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]), + [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl +]) diff --git a/ext/libpqxx-7.7.3/config/m4/ltversion.m4 b/ext/libpqxx-7.7.3/config/m4/ltversion.m4 new file mode 100644 index 000000000..fa04b52a3 --- /dev/null +++ b/ext/libpqxx-7.7.3/config/m4/ltversion.m4 @@ -0,0 +1,23 @@ +# ltversion.m4 -- version numbers -*- Autoconf -*- +# +# Copyright (C) 2004, 2011-2015 Free Software Foundation, Inc. +# Written by Scott James Remnant, 2004 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. 
+ +# @configure_input@ + +# serial 4179 ltversion.m4 +# This file is part of GNU Libtool + +m4_define([LT_PACKAGE_VERSION], [2.4.6]) +m4_define([LT_PACKAGE_REVISION], [2.4.6]) + +AC_DEFUN([LTVERSION_VERSION], +[macro_version='2.4.6' +macro_revision='2.4.6' +_LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?]) +_LT_DECL(, macro_revision, 0) +]) diff --git a/ext/libpqxx-7.7.3/config/m4/lt~obsolete.m4 b/ext/libpqxx-7.7.3/config/m4/lt~obsolete.m4 new file mode 100644 index 000000000..c6b26f88f --- /dev/null +++ b/ext/libpqxx-7.7.3/config/m4/lt~obsolete.m4 @@ -0,0 +1,99 @@ +# lt~obsolete.m4 -- aclocal satisfying obsolete definitions. -*-Autoconf-*- +# +# Copyright (C) 2004-2005, 2007, 2009, 2011-2015 Free Software +# Foundation, Inc. +# Written by Scott James Remnant, 2004. +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# serial 5 lt~obsolete.m4 + +# These exist entirely to fool aclocal when bootstrapping libtool. +# +# In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN), +# which have later been changed to m4_define as they aren't part of the +# exported API, or moved to Autoconf or Automake where they belong. +# +# The trouble is, aclocal is a bit thick. It'll see the old AC_DEFUN +# in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us +# using a macro with the same name in our local m4/libtool.m4 it'll +# pull the old libtool.m4 in (it doesn't see our shiny new m4_define +# and doesn't know about Autoconf macros at all.) +# +# So we provide this file, which has a silly filename so it's always +# included after everything else. This provides aclocal with the +# AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything +# because those macros already exist, or will be overwritten later. +# We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6. +# +# Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here. +# Yes, that means every name once taken will need to remain here until +# we give up compatibility with versions before 1.7, at which point +# we need to keep only those names which we still refer to. + +# This is to help aclocal find these macros, as it can't see m4_define. 
+AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])]) + +m4_ifndef([AC_LIBTOOL_LINKER_OPTION], [AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])]) +m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP])]) +m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])]) +m4_ifndef([_LT_AC_SHELL_INIT], [AC_DEFUN([_LT_AC_SHELL_INIT])]) +m4_ifndef([_LT_AC_SYS_LIBPATH_AIX], [AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])]) +m4_ifndef([_LT_PROG_LTMAIN], [AC_DEFUN([_LT_PROG_LTMAIN])]) +m4_ifndef([_LT_AC_TAGVAR], [AC_DEFUN([_LT_AC_TAGVAR])]) +m4_ifndef([AC_LTDL_ENABLE_INSTALL], [AC_DEFUN([AC_LTDL_ENABLE_INSTALL])]) +m4_ifndef([AC_LTDL_PREOPEN], [AC_DEFUN([AC_LTDL_PREOPEN])]) +m4_ifndef([_LT_AC_SYS_COMPILER], [AC_DEFUN([_LT_AC_SYS_COMPILER])]) +m4_ifndef([_LT_AC_LOCK], [AC_DEFUN([_LT_AC_LOCK])]) +m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE], [AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])]) +m4_ifndef([_LT_AC_TRY_DLOPEN_SELF], [AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])]) +m4_ifndef([AC_LIBTOOL_PROG_CC_C_O], [AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])]) +m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])]) +m4_ifndef([AC_LIBTOOL_OBJDIR], [AC_DEFUN([AC_LIBTOOL_OBJDIR])]) +m4_ifndef([AC_LTDL_OBJDIR], [AC_DEFUN([AC_LTDL_OBJDIR])]) +m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])]) +m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP], [AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])]) +m4_ifndef([AC_PATH_MAGIC], [AC_DEFUN([AC_PATH_MAGIC])]) +m4_ifndef([AC_PROG_LD_GNU], [AC_DEFUN([AC_PROG_LD_GNU])]) +m4_ifndef([AC_PROG_LD_RELOAD_FLAG], [AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])]) +m4_ifndef([AC_DEPLIBS_CHECK_METHOD], [AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])]) +m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])]) +m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])]) +m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])]) +m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS], [AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])]) +m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP], [AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])]) +m4_ifndef([LT_AC_PROG_EGREP], [AC_DEFUN([LT_AC_PROG_EGREP])]) +m4_ifndef([LT_AC_PROG_SED], [AC_DEFUN([LT_AC_PROG_SED])]) +m4_ifndef([_LT_CC_BASENAME], [AC_DEFUN([_LT_CC_BASENAME])]) +m4_ifndef([_LT_COMPILER_BOILERPLATE], [AC_DEFUN([_LT_COMPILER_BOILERPLATE])]) +m4_ifndef([_LT_LINKER_BOILERPLATE], [AC_DEFUN([_LT_LINKER_BOILERPLATE])]) +m4_ifndef([_AC_PROG_LIBTOOL], [AC_DEFUN([_AC_PROG_LIBTOOL])]) +m4_ifndef([AC_LIBTOOL_SETUP], [AC_DEFUN([AC_LIBTOOL_SETUP])]) +m4_ifndef([_LT_AC_CHECK_DLFCN], [AC_DEFUN([_LT_AC_CHECK_DLFCN])]) +m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER], [AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])]) +m4_ifndef([_LT_AC_TAGCONFIG], [AC_DEFUN([_LT_AC_TAGCONFIG])]) +m4_ifndef([AC_DISABLE_FAST_INSTALL], [AC_DEFUN([AC_DISABLE_FAST_INSTALL])]) +m4_ifndef([_LT_AC_LANG_CXX], [AC_DEFUN([_LT_AC_LANG_CXX])]) +m4_ifndef([_LT_AC_LANG_F77], [AC_DEFUN([_LT_AC_LANG_F77])]) +m4_ifndef([_LT_AC_LANG_GCJ], [AC_DEFUN([_LT_AC_LANG_GCJ])]) +m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])]) +m4_ifndef([_LT_AC_LANG_C_CONFIG], [AC_DEFUN([_LT_AC_LANG_C_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])]) +m4_ifndef([_LT_AC_LANG_CXX_CONFIG], [AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])]) +m4_ifndef([_LT_AC_LANG_F77_CONFIG], [AC_DEFUN([_LT_AC_LANG_F77_CONFIG])]) 
+m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])]) +m4_ifndef([_LT_AC_LANG_GCJ_CONFIG], [AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])]) +m4_ifndef([_LT_AC_LANG_RC_CONFIG], [AC_DEFUN([_LT_AC_LANG_RC_CONFIG])]) +m4_ifndef([AC_LIBTOOL_CONFIG], [AC_DEFUN([AC_LIBTOOL_CONFIG])]) +m4_ifndef([_LT_AC_FILE_LTDLL_C], [AC_DEFUN([_LT_AC_FILE_LTDLL_C])]) +m4_ifndef([_LT_REQUIRED_DARWIN_CHECKS], [AC_DEFUN([_LT_REQUIRED_DARWIN_CHECKS])]) +m4_ifndef([_LT_AC_PROG_CXXCPP], [AC_DEFUN([_LT_AC_PROG_CXXCPP])]) +m4_ifndef([_LT_PREPARE_SED_QUOTE_VARS], [AC_DEFUN([_LT_PREPARE_SED_QUOTE_VARS])]) +m4_ifndef([_LT_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_PROG_ECHO_BACKSLASH])]) +m4_ifndef([_LT_PROG_F77], [AC_DEFUN([_LT_PROG_F77])]) +m4_ifndef([_LT_PROG_FC], [AC_DEFUN([_LT_PROG_FC])]) +m4_ifndef([_LT_PROG_CXX], [AC_DEFUN([_LT_PROG_CXX])]) diff --git a/ext/libpqxx-7.7.3/config/missing b/ext/libpqxx-7.7.3/config/missing new file mode 100755 index 000000000..1fe1611f1 --- /dev/null +++ b/ext/libpqxx-7.7.3/config/missing @@ -0,0 +1,215 @@ +#! /bin/sh +# Common wrapper for a few potentially missing GNU programs. + +scriptversion=2018-03-07.03; # UTC + +# Copyright (C) 1996-2021 Free Software Foundation, Inc. +# Originally written by Fran,cois Pinard , 1996. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +if test $# -eq 0; then + echo 1>&2 "Try '$0 --help' for more information" + exit 1 +fi + +case $1 in + + --is-lightweight) + # Used by our autoconf macros to check whether the available missing + # script is modern enough. + exit 0 + ;; + + --run) + # Back-compat with the calling convention used by older automake. + shift + ;; + + -h|--h|--he|--hel|--help) + echo "\ +$0 [OPTION]... PROGRAM [ARGUMENT]... + +Run 'PROGRAM [ARGUMENT]...', returning a proper advice when this fails due +to PROGRAM being missing or too old. + +Options: + -h, --help display this help and exit + -v, --version output version information and exit + +Supported PROGRAM values: + aclocal autoconf autoheader autom4te automake makeinfo + bison yacc flex lex help2man + +Version suffixes to PROGRAM as well as the prefixes 'gnu-', 'gnu', and +'g' are ignored when checking the name. + +Send bug reports to ." + exit $? + ;; + + -v|--v|--ve|--ver|--vers|--versi|--versio|--version) + echo "missing $scriptversion (GNU Automake)" + exit $? + ;; + + -*) + echo 1>&2 "$0: unknown '$1' option" + echo 1>&2 "Try '$0 --help' for more information" + exit 1 + ;; + +esac + +# Run the given program, remember its exit status. +"$@"; st=$? + +# If it succeeded, we are done. 
+test $st -eq 0 && exit 0 + +# Also exit now if we it failed (or wasn't found), and '--version' was +# passed; such an option is passed most likely to detect whether the +# program is present and works. +case $2 in --version|--help) exit $st;; esac + +# Exit code 63 means version mismatch. This often happens when the user +# tries to use an ancient version of a tool on a file that requires a +# minimum version. +if test $st -eq 63; then + msg="probably too old" +elif test $st -eq 127; then + # Program was missing. + msg="missing on your system" +else + # Program was found and executed, but failed. Give up. + exit $st +fi + +perl_URL=https://www.perl.org/ +flex_URL=https://github.com/westes/flex +gnu_software_URL=https://www.gnu.org/software + +program_details () +{ + case $1 in + aclocal|automake) + echo "The '$1' program is part of the GNU Automake package:" + echo "<$gnu_software_URL/automake>" + echo "It also requires GNU Autoconf, GNU m4 and Perl in order to run:" + echo "<$gnu_software_URL/autoconf>" + echo "<$gnu_software_URL/m4/>" + echo "<$perl_URL>" + ;; + autoconf|autom4te|autoheader) + echo "The '$1' program is part of the GNU Autoconf package:" + echo "<$gnu_software_URL/autoconf/>" + echo "It also requires GNU m4 and Perl in order to run:" + echo "<$gnu_software_URL/m4/>" + echo "<$perl_URL>" + ;; + esac +} + +give_advice () +{ + # Normalize program name to check for. + normalized_program=`echo "$1" | sed ' + s/^gnu-//; t + s/^gnu//; t + s/^g//; t'` + + printf '%s\n' "'$1' is $msg." + + configure_deps="'configure.ac' or m4 files included by 'configure.ac'" + case $normalized_program in + autoconf*) + echo "You should only need it if you modified 'configure.ac'," + echo "or m4 files included by it." + program_details 'autoconf' + ;; + autoheader*) + echo "You should only need it if you modified 'acconfig.h' or" + echo "$configure_deps." + program_details 'autoheader' + ;; + automake*) + echo "You should only need it if you modified 'Makefile.am' or" + echo "$configure_deps." + program_details 'automake' + ;; + aclocal*) + echo "You should only need it if you modified 'acinclude.m4' or" + echo "$configure_deps." + program_details 'aclocal' + ;; + autom4te*) + echo "You might have modified some maintainer files that require" + echo "the 'autom4te' program to be rebuilt." + program_details 'autom4te' + ;; + bison*|yacc*) + echo "You should only need it if you modified a '.y' file." + echo "You may want to install the GNU Bison package:" + echo "<$gnu_software_URL/bison/>" + ;; + lex*|flex*) + echo "You should only need it if you modified a '.l' file." + echo "You may want to install the Fast Lexical Analyzer package:" + echo "<$flex_URL>" + ;; + help2man*) + echo "You should only need it if you modified a dependency" \ + "of a man page." + echo "You may want to install the GNU Help2man package:" + echo "<$gnu_software_URL/help2man/>" + ;; + makeinfo*) + echo "You should only need it if you modified a '.texi' file, or" + echo "any other file indirectly affecting the aspect of the manual." + echo "You might want to install the Texinfo package:" + echo "<$gnu_software_URL/texinfo/>" + echo "The spurious makeinfo call might also be the consequence of" + echo "using a buggy 'make' (AIX, DU, IRIX), in which case you might" + echo "want to install GNU make:" + echo "<$gnu_software_URL/make/>" + ;; + *) + echo "You might have modified some files without having the proper" + echo "tools for further handling them. 
Check the 'README' file, it" + echo "often tells you about the needed prerequisites for installing" + echo "this package. You may also peek at any GNU archive site, in" + echo "case some other package contains this missing '$1' program." + ;; + esac +} + +give_advice "$1" | sed -e '1s/^/WARNING: /' \ + -e '2,$s/^/ /' >&2 + +# Propagate the correct exit status (expected to be 127 for a program +# not found, 63 for a program that failed due to version mismatch). +exit $st + +# Local variables: +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC0" +# time-stamp-end: "; # UTC" +# End: diff --git a/ext/libpqxx-7.7.3/config/mkinstalldirs b/ext/libpqxx-7.7.3/config/mkinstalldirs new file mode 100755 index 000000000..6b3b5fc5d --- /dev/null +++ b/ext/libpqxx-7.7.3/config/mkinstalldirs @@ -0,0 +1,40 @@ +#! /bin/sh +# mkinstalldirs --- make directory hierarchy +# Author: Noah Friedman +# Created: 1993-05-16 +# Public domain + +# $Id$ + +errstatus=0 + +for file +do + set fnord `echo ":$file" | sed -ne 's/^:\//#/;s/^://;s/\// /g;s/^#/\//;p'` + shift + + pathcomp= + for d + do + pathcomp="$pathcomp$d" + case "$pathcomp" in + -* ) pathcomp=./$pathcomp ;; + esac + + if test ! -d "$pathcomp"; then + echo "mkdir $pathcomp" + + mkdir "$pathcomp" || lasterr=$? + + if test ! -d "$pathcomp"; then + errstatus=$lasterr + fi + fi + + pathcomp="$pathcomp/" + done +done + +exit $errstatus + +# mkinstalldirs ends here diff --git a/ext/libpqxx-7.7.3/config/test-driver b/ext/libpqxx-7.7.3/config/test-driver new file mode 100755 index 000000000..8e575b017 --- /dev/null +++ b/ext/libpqxx-7.7.3/config/test-driver @@ -0,0 +1,148 @@ +#! /bin/sh +# test-driver - basic testsuite driver script. + +scriptversion=2013-07-13.22; # UTC + +# Copyright (C) 2011-2014 Free Software Foundation, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# This file is maintained in Automake, please report +# bugs to or send patches to +# . + +# Make unconditional expansion of undefined variables an error. This +# helps a lot in preventing typo-related bugs. +set -u + +usage_error () +{ + echo "$0: $*" >&2 + print_usage >&2 + exit 2 +} + +print_usage () +{ + cat <$log_file 2>&1 +estatus=$? 
+ +if test $enable_hard_errors = no && test $estatus -eq 99; then + tweaked_estatus=1 +else + tweaked_estatus=$estatus +fi + +case $tweaked_estatus:$expect_failure in + 0:yes) col=$red res=XPASS recheck=yes gcopy=yes;; + 0:*) col=$grn res=PASS recheck=no gcopy=no;; + 77:*) col=$blu res=SKIP recheck=no gcopy=yes;; + 99:*) col=$mgn res=ERROR recheck=yes gcopy=yes;; + *:yes) col=$lgn res=XFAIL recheck=no gcopy=yes;; + *:*) col=$red res=FAIL recheck=yes gcopy=yes;; +esac + +# Report the test outcome and exit status in the logs, so that one can +# know whether the test passed or failed simply by looking at the '.log' +# file, without the need of also peaking into the corresponding '.trs' +# file (automake bug#11814). +echo "$res $test_name (exit status: $estatus)" >>$log_file + +# Report outcome to console. +echo "${col}${res}${std}: $test_name" + +# Register the test result, and other relevant metadata. +echo ":test-result: $res" > $trs_file +echo ":global-test-result: $res" >> $trs_file +echo ":recheck: $recheck" >> $trs_file +echo ":copy-in-global-log: $gcopy" >> $trs_file + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/ext/libpqxx-7.7.3/configitems b/ext/libpqxx-7.7.3/configitems new file mode 100644 index 000000000..e6f3925ef --- /dev/null +++ b/ext/libpqxx-7.7.3/configitems @@ -0,0 +1,26 @@ +PACKAGE internal autotools +PACKAGE_BUGREPORT internal autotools +PACKAGE_NAME internal autotools +PACKAGE_STRING internal autotools +PACKAGE_TARNAME internal autotools +PACKAGE_VERSION internal autotools +PQXX_HAVE_CHARCONV_INT internal compiler +PQXX_HAVE_CHARCONV_FLOAT internal compiler +PQXX_HAVE_CMP public compiler +PQXX_HAVE_CONCEPTS public compiler +PQXX_HAVE_CXA_DEMANGLE internal compiler +PQXX_HAVE_GCC_PURE public compiler +PQXX_HAVE_GCC_VISIBILITY public compiler +PQXX_HAVE_LIKELY public compiler +PQXX_HAVE_MULTIDIMENSIONAL_SUBSCRIPT public compiler +PQXX_HAVE_PATH public compiler +PQXX_HAVE_POLL internal compiler +PQXX_HAVE_PQENCRYPTPASSWORDCONN internal libpq +PQXX_HAVE_PQ_PIPELINE internal libpq +PQXX_HAVE_SLEEP_FOR internal compiler +PQXX_HAVE_SPAN public compiler +PQXX_HAVE_STRERROR_R internal compiler +PQXX_HAVE_STRERROR_S internal compiler +PQXX_HAVE_THREAD_LOCAL internal compiler +PQXX_HAVE_YEAR_MONTH_DAY public compiler +VERSION internal autotools diff --git a/ext/libpqxx-7.7.3/configure b/ext/libpqxx-7.7.3/configure new file mode 100755 index 000000000..4c58f19c2 --- /dev/null +++ b/ext/libpqxx-7.7.3/configure @@ -0,0 +1,20443 @@ +#! /bin/sh +# Guess values for system-dependent variables and create Makefiles. +# Generated by GNU Autoconf 2.69 for libpqxx 7.7.3. +# +# Report bugs to . +# +# +# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. +# +# +# This configure script is free software; the Free Software Foundation +# gives unlimited permission to copy, distribute and modify it. +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. 
+ alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. +if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +# Use a proper internal environment variable to ensure we don't fall + # into an infinite loop, continuously re-executing ourselves. 
+ if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then + _as_can_reexec=no; export _as_can_reexec; + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. +BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. +$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 +as_fn_exit 255 + fi + # We don't want this to propagate to other subprocesses. + { _as_can_reexec=; unset _as_can_reexec;} +if test "x$CONFIG_SHELL" = x; then + as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. + alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi +" + as_required="as_fn_return () { (exit \$1); } +as_fn_success () { as_fn_return 0; } +as_fn_failure () { as_fn_return 1; } +as_fn_ret_success () { return 0; } +as_fn_ret_failure () { return 1; } + +exitcode=0 +as_fn_success || { exitcode=1; echo as_fn_success failed.; } +as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } +as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } +as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } +if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : + +else + exitcode=1; echo positional parameters were not saved. +fi +test x\$exitcode = x0 || exit 1 +test -x / || exit 1" + as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO + as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO + eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && + test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 + + test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || ( + ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' + ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO + ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO + PATH=/empty FPATH=/empty; export PATH FPATH + test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\ + || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1 +test \$(( 1 + 1 )) = 2 || exit 1" + if (eval "$as_required") 2>/dev/null; then : + as_have_required=yes +else + as_have_required=no +fi + if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : + +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +as_found=false +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + as_found=: + case $as_dir in #( + /*) + for as_base in sh bash ksh sh5; do + # Try only shells that exist, to save several forks. 
+ as_shell=$as_dir/$as_base + if { test -f "$as_shell" || test -f "$as_shell.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : + CONFIG_SHELL=$as_shell as_have_required=yes + if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : + break 2 +fi +fi + done;; + esac + as_found=false +done +$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : + CONFIG_SHELL=$SHELL as_have_required=yes +fi; } +IFS=$as_save_IFS + + + if test "x$CONFIG_SHELL" != x; then : + export CONFIG_SHELL + # We cannot yet assume a decent shell, so we have to provide a +# neutralization value for shells without unset; and this also +# works around shells that cannot unset nonexistent variables. +# Preserve -v and -x to the replacement shell. +BASH_ENV=/dev/null +ENV=/dev/null +(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV +case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; +esac +exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} +# Admittedly, this is quite paranoid, since all the known shells bail +# out after a failed `exec'. +$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 +exit 255 +fi + + if test x$as_have_required = xno; then : + $as_echo "$0: This script requires a shell more modern than all" + $as_echo "$0: the shells that I found on your system." + if test x${ZSH_VERSION+set} = xset ; then + $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" + $as_echo "$0: be upgraded to zsh 4.3.4 or later." + else + $as_echo "$0: Please tell bug-autoconf@gnu.org and Jeroen T. +$0: Vermeulen about your system, including any error +$0: possibly output before this message. Then install a +$0: modern shell, or manually run the script under such a +$0: shell if you do have one." + fi + exit 1 +fi +fi +fi +SHELL=${CONFIG_SHELL-/bin/sh} +export SHELL +# Unset more variables known to interfere with behavior of common tools. +CLICOLOR_FORCE= GREP_OPTIONS= +unset CLICOLOR_FORCE GREP_OPTIONS + +## --------------------- ## +## M4sh Shell Functions. ## +## --------------------- ## +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 
2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + + + as_lineno_1=$LINENO as_lineno_1a=$LINENO + as_lineno_2=$LINENO as_lineno_2a=$LINENO + eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && + test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { + # Blame Lee E. McMahon (1931-1989) for sed's syntax. 
:-)
+  sed -n '
+    p
+    /[$]LINENO/=
+  ' <$as_myself |
+    sed '
+      s/[$]LINENO.*/&-/
+      t lineno
+      b
+      :lineno
+      N
+      :loop
+      s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
+      t loop
+      s/-\n.*//
+    ' >$as_me.lineno &&
+  chmod +x "$as_me.lineno" ||
+    { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
+
+  # If we had to re-execute with $CONFIG_SHELL, we're ensured to have
+  # already done that, so ensure we don't try to do so again and fall
+  # in an infinite loop.  This has already happened in practice.
+  _as_can_reexec=no; export _as_can_reexec
+  # Don't try to exec as it changes $[0], causing all sort of problems
+  # (the dirname of $[0] is not the place where we might find the
+  # original and so on.  Autoconf is especially sensitive to this).
+  . "./$as_me.lineno"
+  # Exit status is that of the last command.
+  exit
+}
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in #(((((
+-n*)
+  case `echo 'xy\c'` in
+  *c*) ECHO_T=' ';;	# ECHO_T is single tab character.
+  xy)  ECHO_C='\c';;
+  *)   echo `echo ksh88 bug on AIX 6.1` > /dev/null
+       ECHO_T=' ';;
+  esac;;
+*)
+  ECHO_N='-n';;
+esac
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+  rm -f conf$$.dir/conf$$.file
+else
+  rm -f conf$$.dir
+  mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+  if ln -s conf$$.file conf$$ 2>/dev/null; then
+    as_ln_s='ln -s'
+    # ... but there are two gotchas:
+    # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+    # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+    # In both cases, we have to default to `cp -pR'.
+    ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+      as_ln_s='cp -pR'
+  elif ln conf$$.file conf$$ 2>/dev/null; then
+    as_ln_s=ln
+  else
+    as_ln_s='cp -pR'
+  fi
+else
+  as_ln_s='cp -pR'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+if mkdir -p . 2>/dev/null; then
+  as_mkdir_p='mkdir -p "$as_dir"'
+else
+  test -d ./-p && rmdir ./-p
+  as_mkdir_p=false
+fi
+
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+SHELL=${CONFIG_SHELL-/bin/sh}
+
+
+test -n "$DJDIR" || exec 7<&0 </dev/null 6>&1
+
+# Name of the host.
+# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status,
+# so uname gets run too.
+ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
+
+#
+# Initializations.
+#
+ac_default_prefix=/usr/local
+ac_clean_files=
+ac_config_libobj_dir=.
+LIBOBJS=
+cross_compiling=no
+subdirs=
+MFLAGS=
+MAKEFLAGS=
+
+# Identity of this package.
+PACKAGE_NAME='libpqxx'
+PACKAGE_TARNAME='libpqxx'
+PACKAGE_VERSION='7.7.3'
+PACKAGE_STRING='libpqxx 7.7.3'
+PACKAGE_BUGREPORT='Jeroen T. Vermeulen'
+PACKAGE_URL=''
+
+ac_unique_file="src/connection.cxx"
+ac_default_prefix=/usr/local
+# Factoring default headers for most tests.
+ac_includes_default="\
+#include <stdio.h>
+#ifdef HAVE_SYS_TYPES_H
+# include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_STAT_H
+# include <sys/stat.h>
+#endif
+#ifdef STDC_HEADERS
+# include <stdlib.h>
+# include <stddef.h>
+#else
+# ifdef HAVE_STDLIB_H
+# include <stdlib.h>
+# endif
+#endif
+#ifdef HAVE_STRING_H
+# if !defined STDC_HEADERS && defined HAVE_MEMORY_H
+# include <memory.h>
+# endif
+# include <string.h>
+#endif
+#ifdef HAVE_STRINGS_H
+# include <strings.h>
+#endif
+#ifdef HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#ifdef HAVE_STDINT_H
+# include <stdint.h>
+#endif
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif"
+
+ac_subst_vars='am__EXEEXT_FALSE
+am__EXEEXT_TRUE
+LTLIBOBJS
+LIBOBJS
+with_postgres_lib
+POSTGRES_INCLUDE
+PG_CONFIG
+PKG_CONFIG
+MAINT
+MAINTAINER_MODE_FALSE
+MAINTAINER_MODE_TRUE
+BUILD_REFERENCE_FALSE
+BUILD_REFERENCE_TRUE
+HAVE_DOT
+DOXYGEN
+MKDIR
+CXXCPP
+CPP
+LT_SYS_LIBRARY_PATH
+OTOOL64
+OTOOL
+LIPO
+NMEDIT
+DSYMUTIL
+MANIFEST_TOOL
+RANLIB
+ac_ct_AR
+AR
+DLLTOOL
+OBJDUMP
+LN_S
+NM
+ac_ct_DUMPBIN
+DUMPBIN
+LD
+FGREP
+EGREP
+GREP
+SED
+am__fastdepCC_FALSE
+am__fastdepCC_TRUE
+CCDEPMODE
+ac_ct_CC
+CFLAGS
+CC
+host_os
+host_vendor
+host_cpu
+host
+build_os
+build_vendor
+build_cpu
+build
+LIBTOOL
+am__fastdepCXX_FALSE
+am__fastdepCXX_TRUE
+CXXDEPMODE
+am__nodep
+AMDEPBACKSLASH
+AMDEP_FALSE
+AMDEP_TRUE
+am__include
+DEPDIR
+OBJEXT
+EXEEXT
+ac_ct_CXX
+CPPFLAGS
+LDFLAGS
+CXXFLAGS
+CXX
+PQXX_ABI
+PQXXVERSION
+AM_BACKSLASH
+AM_DEFAULT_VERBOSITY
+AM_DEFAULT_V
+AM_V
+CSCOPE
+ETAGS
+CTAGS
+am__untar
+am__tar
+AMTAR
+am__leading_dot
+SET_MAKE
+AWK
+mkdir_p
+MKDIR_P
+INSTALL_STRIP_PROGRAM
+STRIP
+install_sh
+MAKEINFO
+AUTOHEADER
+AUTOMAKE
+AUTOCONF
+ACLOCAL
+VERSION
+PACKAGE
+CYGPATH_W
+am__isrc
+INSTALL_DATA
+INSTALL_SCRIPT
+INSTALL_PROGRAM
+target_alias
+host_alias
+build_alias
+LIBS
+ECHO_T
+ECHO_N
+ECHO_C
+DEFS
+mandir
+localedir
+libdir
+psdir
+pdfdir
+dvidir
+htmldir
+infodir
+docdir
+oldincludedir
+includedir
+runstatedir
+localstatedir
+sharedstatedir
+sysconfdir
+datadir
+datarootdir
+libexecdir
+sbindir
+bindir
+program_transform_name
+prefix
+exec_prefix
+PACKAGE_URL
+PACKAGE_BUGREPORT
+PACKAGE_STRING
+PACKAGE_VERSION
+PACKAGE_TARNAME
+PACKAGE_NAME
+PATH_SEPARATOR
+SHELL
+am__quote'
+ac_subst_files=''
+ac_user_opts='
+enable_option_checking
+enable_silent_rules
+enable_dependency_tracking
+enable_shared
+enable_static
+with_pic
+enable_fast_install
+with_aix_soname
+with_gnu_ld
+with_sysroot
+enable_libtool_lock
+enable_documentation
+enable_maintainer_mode
+enable_audit
+enable_suggest
+with_postgres_include
+with_postgres_lib
+'
+ ac_precious_vars='build_alias
+host_alias
+target_alias
+CXX
+CXXFLAGS
+LDFLAGS
+LIBS
+CPPFLAGS
+CCC
+CC
+CFLAGS
+LT_SYS_LIBRARY_PATH
+CPP
+CXXCPP
+DOXYGEN
+HAVE_DOT'
+
+
+# Initialize some variables set by options.
+ac_init_help=
+ac_init_version=false
+ac_unrecognized_opts=
+ac_unrecognized_sep=
+# The variables have the same names as the options, with
+# dashes changed to underlines.
+cache_file=/dev/null
+exec_prefix=NONE
+no_create=
+no_recursion=
+prefix=NONE
+program_prefix=NONE
+program_suffix=NONE
+program_transform_name=s,x,x,
+silent=
+site=
+srcdir=
+verbose=
+x_includes=NONE
+x_libraries=NONE
+
+# Installation directory options.
+# These are left unexpanded so users can "make install exec_prefix=/foo"
+# and all the variables that are supposed to be based on exec_prefix
+# by default will actually change.
+# Use braces instead of parens because sh, perl, etc. also accept them.
+# (The list follows the same order as the GNU Coding Standards.)
+bindir='${exec_prefix}/bin' +sbindir='${exec_prefix}/sbin' +libexecdir='${exec_prefix}/libexec' +datarootdir='${prefix}/share' +datadir='${datarootdir}' +sysconfdir='${prefix}/etc' +sharedstatedir='${prefix}/com' +localstatedir='${prefix}/var' +runstatedir='${localstatedir}/run' +includedir='${prefix}/include' +oldincludedir='/usr/include' +docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' +infodir='${datarootdir}/info' +htmldir='${docdir}' +dvidir='${docdir}' +pdfdir='${docdir}' +psdir='${docdir}' +libdir='${exec_prefix}/lib' +localedir='${datarootdir}/locale' +mandir='${datarootdir}/man' + +ac_prev= +ac_dashdash= +for ac_option +do + # If the previous option needs an argument, assign it. + if test -n "$ac_prev"; then + eval $ac_prev=\$ac_option + ac_prev= + continue + fi + + case $ac_option in + *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; + *=) ac_optarg= ;; + *) ac_optarg=yes ;; + esac + + # Accept the important Cygnus configure options, so we can diagnose typos. + + case $ac_dashdash$ac_option in + --) + ac_dashdash=yes ;; + + -bindir | --bindir | --bindi | --bind | --bin | --bi) + ac_prev=bindir ;; + -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) + bindir=$ac_optarg ;; + + -build | --build | --buil | --bui | --bu) + ac_prev=build_alias ;; + -build=* | --build=* | --buil=* | --bui=* | --bu=*) + build_alias=$ac_optarg ;; + + -cache-file | --cache-file | --cache-fil | --cache-fi \ + | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) + ac_prev=cache_file ;; + -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ + | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) + cache_file=$ac_optarg ;; + + --config-cache | -C) + cache_file=config.cache ;; + + -datadir | --datadir | --datadi | --datad) + ac_prev=datadir ;; + -datadir=* | --datadir=* | --datadi=* | --datad=*) + datadir=$ac_optarg ;; + + -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ + | --dataroo | --dataro | --datar) + ac_prev=datarootdir ;; + -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ + | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) + datarootdir=$ac_optarg ;; + + -disable-* | --disable-*) + ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=no ;; + + -docdir | --docdir | --docdi | --doc | --do) + ac_prev=docdir ;; + -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) + docdir=$ac_optarg ;; + + -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) + ac_prev=dvidir ;; + -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) + dvidir=$ac_optarg ;; + + -enable-* | --enable-*) + ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? 
"invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=\$ac_optarg ;; + + -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ + | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ + | --exec | --exe | --ex) + ac_prev=exec_prefix ;; + -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ + | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ + | --exec=* | --exe=* | --ex=*) + exec_prefix=$ac_optarg ;; + + -gas | --gas | --ga | --g) + # Obsolete; use --with-gas. + with_gas=yes ;; + + -help | --help | --hel | --he | -h) + ac_init_help=long ;; + -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) + ac_init_help=recursive ;; + -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) + ac_init_help=short ;; + + -host | --host | --hos | --ho) + ac_prev=host_alias ;; + -host=* | --host=* | --hos=* | --ho=*) + host_alias=$ac_optarg ;; + + -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) + ac_prev=htmldir ;; + -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ + | --ht=*) + htmldir=$ac_optarg ;; + + -includedir | --includedir | --includedi | --included | --include \ + | --includ | --inclu | --incl | --inc) + ac_prev=includedir ;; + -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ + | --includ=* | --inclu=* | --incl=* | --inc=*) + includedir=$ac_optarg ;; + + -infodir | --infodir | --infodi | --infod | --info | --inf) + ac_prev=infodir ;; + -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) + infodir=$ac_optarg ;; + + -libdir | --libdir | --libdi | --libd) + ac_prev=libdir ;; + -libdir=* | --libdir=* | --libdi=* | --libd=*) + libdir=$ac_optarg ;; + + -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ + | --libexe | --libex | --libe) + ac_prev=libexecdir ;; + -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ + | --libexe=* | --libex=* | --libe=*) + libexecdir=$ac_optarg ;; + + -localedir | --localedir | --localedi | --localed | --locale) + ac_prev=localedir ;; + -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) + localedir=$ac_optarg ;; + + -localstatedir | --localstatedir | --localstatedi | --localstated \ + | --localstate | --localstat | --localsta | --localst | --locals) + ac_prev=localstatedir ;; + -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ + | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) + localstatedir=$ac_optarg ;; + + -mandir | --mandir | --mandi | --mand | --man | --ma | --m) + ac_prev=mandir ;; + -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) + mandir=$ac_optarg ;; + + -nfp | --nfp | --nf) + # Obsolete; use --without-fp. 
+ with_fp=no ;; + + -no-create | --no-create | --no-creat | --no-crea | --no-cre \ + | --no-cr | --no-c | -n) + no_create=yes ;; + + -no-recursion | --no-recursion | --no-recursio | --no-recursi \ + | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) + no_recursion=yes ;; + + -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ + | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ + | --oldin | --oldi | --old | --ol | --o) + ac_prev=oldincludedir ;; + -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ + | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ + | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) + oldincludedir=$ac_optarg ;; + + -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) + ac_prev=prefix ;; + -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) + prefix=$ac_optarg ;; + + -program-prefix | --program-prefix | --program-prefi | --program-pref \ + | --program-pre | --program-pr | --program-p) + ac_prev=program_prefix ;; + -program-prefix=* | --program-prefix=* | --program-prefi=* \ + | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) + program_prefix=$ac_optarg ;; + + -program-suffix | --program-suffix | --program-suffi | --program-suff \ + | --program-suf | --program-su | --program-s) + ac_prev=program_suffix ;; + -program-suffix=* | --program-suffix=* | --program-suffi=* \ + | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) + program_suffix=$ac_optarg ;; + + -program-transform-name | --program-transform-name \ + | --program-transform-nam | --program-transform-na \ + | --program-transform-n | --program-transform- \ + | --program-transform | --program-transfor \ + | --program-transfo | --program-transf \ + | --program-trans | --program-tran \ + | --progr-tra | --program-tr | --program-t) + ac_prev=program_transform_name ;; + -program-transform-name=* | --program-transform-name=* \ + | --program-transform-nam=* | --program-transform-na=* \ + | --program-transform-n=* | --program-transform-=* \ + | --program-transform=* | --program-transfor=* \ + | --program-transfo=* | --program-transf=* \ + | --program-trans=* | --program-tran=* \ + | --progr-tra=* | --program-tr=* | --program-t=*) + program_transform_name=$ac_optarg ;; + + -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) + ac_prev=pdfdir ;; + -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) + pdfdir=$ac_optarg ;; + + -psdir | --psdir | --psdi | --psd | --ps) + ac_prev=psdir ;; + -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) + psdir=$ac_optarg ;; + + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + silent=yes ;; + + -runstatedir | --runstatedir | --runstatedi | --runstated \ + | --runstate | --runstat | --runsta | --runst | --runs \ + | --run | --ru | --r) + ac_prev=runstatedir ;; + -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \ + | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \ + | --run=* | --ru=* | --r=*) + runstatedir=$ac_optarg ;; + + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ + | --sbi=* | --sb=*) + sbindir=$ac_optarg ;; + + -sharedstatedir | --sharedstatedir | --sharedstatedi \ + | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ + | --sharedst | --shareds | --shared | --share | --shar \ + | --sha | --sh) + 
ac_prev=sharedstatedir ;; + -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ + | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ + | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ + | --sha=* | --sh=*) + sharedstatedir=$ac_optarg ;; + + -site | --site | --sit) + ac_prev=site ;; + -site=* | --site=* | --sit=*) + site=$ac_optarg ;; + + -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) + ac_prev=srcdir ;; + -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) + srcdir=$ac_optarg ;; + + -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ + | --syscon | --sysco | --sysc | --sys | --sy) + ac_prev=sysconfdir ;; + -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ + | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) + sysconfdir=$ac_optarg ;; + + -target | --target | --targe | --targ | --tar | --ta | --t) + ac_prev=target_alias ;; + -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) + target_alias=$ac_optarg ;; + + -v | -verbose | --verbose | --verbos | --verbo | --verb) + verbose=yes ;; + + -version | --version | --versio | --versi | --vers | -V) + ac_init_version=: ;; + + -with-* | --with-*) + ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=\$ac_optarg ;; + + -without-* | --without-*) + ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=no ;; + + --x) + # Obsolete; use --with-x. + with_x=yes ;; + + -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ + | --x-incl | --x-inc | --x-in | --x-i) + ac_prev=x_includes ;; + -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ + | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) + x_includes=$ac_optarg ;; + + -x-libraries | --x-libraries | --x-librarie | --x-librari \ + | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) + ac_prev=x_libraries ;; + -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ + | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) + x_libraries=$ac_optarg ;; + + -*) as_fn_error $? "unrecognized option: \`$ac_option' +Try \`$0 --help' for more information" + ;; + + *=*) + ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` + # Reject names that are not valid shell variable names. + case $ac_envvar in #( + '' | [0-9]* | *[!_$as_cr_alnum]* ) + as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; + esac + eval $ac_envvar=\$ac_optarg + export $ac_envvar ;; + + *) + # FIXME: should be removed in autoconf 3.0. 
+ $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 + expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && + $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 + : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" + ;; + + esac +done + +if test -n "$ac_prev"; then + ac_option=--`echo $ac_prev | sed 's/_/-/g'` + as_fn_error $? "missing argument to $ac_option" +fi + +if test -n "$ac_unrecognized_opts"; then + case $enable_option_checking in + no) ;; + fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; + *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; + esac +fi + +# Check all directory arguments for consistency. +for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ + datadir sysconfdir sharedstatedir localstatedir includedir \ + oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ + libdir localedir mandir runstatedir +do + eval ac_val=\$$ac_var + # Remove trailing slashes. + case $ac_val in + */ ) + ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` + eval $ac_var=\$ac_val;; + esac + # Be sure to have absolute directory names. + case $ac_val in + [\\/$]* | ?:[\\/]* ) continue;; + NONE | '' ) case $ac_var in *prefix ) continue;; esac;; + esac + as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" +done + +# There might be people who depend on the old broken behavior: `$host' +# used to hold the argument of --host etc. +# FIXME: To remove some day. +build=$build_alias +host=$host_alias +target=$target_alias + +# FIXME: To remove some day. +if test "x$host_alias" != x; then + if test "x$build_alias" = x; then + cross_compiling=maybe + elif test "x$build_alias" != "x$host_alias"; then + cross_compiling=yes + fi +fi + +ac_tool_prefix= +test -n "$host_alias" && ac_tool_prefix=$host_alias- + +test "$silent" = yes && exec 6>/dev/null + + +ac_pwd=`pwd` && test -n "$ac_pwd" && +ac_ls_di=`ls -di .` && +ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || + as_fn_error $? "working directory cannot be determined" +test "X$ac_ls_di" = "X$ac_pwd_ls_di" || + as_fn_error $? "pwd does not report name of working directory" + + +# Find the source files, if location was not specified. +if test -z "$srcdir"; then + ac_srcdir_defaulted=yes + # Try the directory containing this script, then the parent directory. + ac_confdir=`$as_dirname -- "$as_myself" || +$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_myself" : 'X\(//\)[^/]' \| \ + X"$as_myself" : 'X\(//\)$' \| \ + X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_myself" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + srcdir=$ac_confdir + if test ! -r "$srcdir/$ac_unique_file"; then + srcdir=.. + fi +else + ac_srcdir_defaulted=no +fi +if test ! -r "$srcdir/$ac_unique_file"; then + test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." + as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" +fi +ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" +ac_abs_confdir=`( + cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" + pwd)` +# When building in place, set srcdir=. +if test "$ac_abs_confdir" = "$ac_pwd"; then + srcdir=. +fi +# Remove unnecessary trailing slashes from srcdir. +# Double slashes in file names in object file debugging info +# mess up M-x gdb in Emacs. 
+case $srcdir in +*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; +esac +for ac_var in $ac_precious_vars; do + eval ac_env_${ac_var}_set=\${${ac_var}+set} + eval ac_env_${ac_var}_value=\$${ac_var} + eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} + eval ac_cv_env_${ac_var}_value=\$${ac_var} +done + +# +# Report the --help message. +# +if test "$ac_init_help" = "long"; then + # Omit some internal or obsolete options to make the list less imposing. + # This message is too long to be a string in the A/UX 3.1 sh. + cat <<_ACEOF +\`configure' configures libpqxx 7.7.3 to adapt to many kinds of systems. + +Usage: $0 [OPTION]... [VAR=VALUE]... + +To assign environment variables (e.g., CC, CFLAGS...), specify them as +VAR=VALUE. See below for descriptions of some of the useful variables. + +Defaults for the options are specified in brackets. + +Configuration: + -h, --help display this help and exit + --help=short display options specific to this package + --help=recursive display the short help of all the included packages + -V, --version display version information and exit + -q, --quiet, --silent do not print \`checking ...' messages + --cache-file=FILE cache test results in FILE [disabled] + -C, --config-cache alias for \`--cache-file=config.cache' + -n, --no-create do not create output files + --srcdir=DIR find the sources in DIR [configure dir or \`..'] + +Installation directories: + --prefix=PREFIX install architecture-independent files in PREFIX + [$ac_default_prefix] + --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX + [PREFIX] + +By default, \`make install' will install all the files in +\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify +an installation prefix other than \`$ac_default_prefix' using \`--prefix', +for instance \`--prefix=\$HOME'. + +For better control, use the options below. 
+ +Fine tuning of the installation directories: + --bindir=DIR user executables [EPREFIX/bin] + --sbindir=DIR system admin executables [EPREFIX/sbin] + --libexecdir=DIR program executables [EPREFIX/libexec] + --sysconfdir=DIR read-only single-machine data [PREFIX/etc] + --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] + --localstatedir=DIR modifiable single-machine data [PREFIX/var] + --runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run] + --libdir=DIR object code libraries [EPREFIX/lib] + --includedir=DIR C header files [PREFIX/include] + --oldincludedir=DIR C header files for non-gcc [/usr/include] + --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] + --datadir=DIR read-only architecture-independent data [DATAROOTDIR] + --infodir=DIR info documentation [DATAROOTDIR/info] + --localedir=DIR locale-dependent data [DATAROOTDIR/locale] + --mandir=DIR man documentation [DATAROOTDIR/man] + --docdir=DIR documentation root [DATAROOTDIR/doc/libpqxx] + --htmldir=DIR html documentation [DOCDIR] + --dvidir=DIR dvi documentation [DOCDIR] + --pdfdir=DIR pdf documentation [DOCDIR] + --psdir=DIR ps documentation [DOCDIR] +_ACEOF + + cat <<\_ACEOF + +Program names: + --program-prefix=PREFIX prepend PREFIX to installed program names + --program-suffix=SUFFIX append SUFFIX to installed program names + --program-transform-name=PROGRAM run sed PROGRAM on installed program names + +System types: + --build=BUILD configure for building on BUILD [guessed] + --host=HOST cross-compile to build programs to run on HOST [BUILD] +_ACEOF +fi + +if test -n "$ac_init_help"; then + case $ac_init_help in + short | recursive ) echo "Configuration of libpqxx 7.7.3:";; + esac + cat <<\_ACEOF + +Optional Features: + --disable-option-checking ignore unrecognized --enable/--with options + --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) + --enable-FEATURE[=ARG] include FEATURE [ARG=yes] + --enable-silent-rules less verbose build output (undo: "make V=1") + --disable-silent-rules verbose build output (undo: "make V=0") + --enable-dependency-tracking + do not reject slow dependency extractors + --disable-dependency-tracking + speeds up one-time build + --enable-shared[=PKGS] build shared libraries [default=no] + --enable-static[=PKGS] build static libraries [default=yes] + --enable-fast-install[=PKGS] + optimize for fast installation [default=yes] + --disable-libtool-lock avoid locking (might break parallel builds) + --enable-documentation Generate documentation + --enable-maintainer-mode + enable make rules and dependencies not useful (and + sometimes confusing) to the casual installer + + +Optional Packages: + --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] + --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) + --with-pic[=PKGS] try to use only PIC/non-PIC objects [default=use + both] + --with-aix-soname=aix|svr4|both + shared library versioning (aka "SONAME") variant to + provide on AIX, [default=aix]. + --with-gnu-ld assume the C compiler uses GNU ld [default=no] + --with-sysroot[=DIR] Search for dependent libraries within DIR (or the + compiler's sysroot if not specified). + --with-postgres-include=DIR + Use PostgreSQL includes from DIR. Defaults to + querying pg_config or pkg-config, whichever is + available. + --with-postgres-lib=DIR Use PostgreSQL libraries from DIR. Defaults to + querying pg_config. + +Some influential environment variables: + CXX C++ compiler command + CXXFLAGS C++ compiler flags + LDFLAGS linker flags, e.g. 
-L<lib dir> if you have libraries in a
+              nonstandard directory <lib dir>
+  LIBS        libraries to pass to the linker, e.g. -l<library>
+  CPPFLAGS    (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
+              you have headers in a nonstandard directory <include dir>
+  CC          C compiler command
+  CFLAGS      C compiler flags
+  LT_SYS_LIBRARY_PATH
+              User-defined run-time library search path.
+  CPP         C preprocessor
+  CXXCPP      C++ preprocessor
+  DOXYGEN     Path to doxygen needed to build reference documentation
+  HAVE_DOT    Variable used by doxygen to declare availibility of dot
+
+Use these variables to override the choices made by `configure' or to help
+it to find libraries and programs with nonstandard names/locations.
+
+Report bugs to <Jeroen T. Vermeulen>.
+_ACEOF
+ac_status=$?
+fi
+
+if test "$ac_init_help" = "recursive"; then
+  # If there are subdirs, report their specific --help.
+  for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
+    test -d "$ac_dir" ||
+      { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
+      continue
+    ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+  ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+  # A ".." for each directory in $ac_dir_suffix.
+  ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+  case $ac_top_builddir_sub in
+  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+  esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+  .)  # We are building in place.
+    ac_srcdir=.
+    ac_top_srcdir=$ac_top_builddir_sub
+    ac_abs_top_srcdir=$ac_pwd ;;
+  [\\/]* | ?:[\\/]* )  # Absolute name.
+    ac_srcdir=$srcdir$ac_dir_suffix;
+    ac_top_srcdir=$srcdir
+    ac_abs_top_srcdir=$srcdir ;;
+  *) # Relative name.
+    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+    ac_top_srcdir=$ac_top_build_prefix$srcdir
+    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+    cd "$ac_dir" || { ac_status=$?; continue; }
+    # Check for guested configure.
+    if test -f "$ac_srcdir/configure.gnu"; then
+      echo &&
+      $SHELL "$ac_srcdir/configure.gnu" --help=recursive
+    elif test -f "$ac_srcdir/configure"; then
+      echo &&
+      $SHELL "$ac_srcdir/configure" --help=recursive
+    else
+      $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+    fi || ac_status=$?
+    cd "$ac_pwd" || { ac_status=$?; break; }
+  done
+fi
+
+test -n "$ac_init_help" && exit $ac_status
+if $ac_init_version; then
+  cat <<\_ACEOF
+libpqxx configure 7.7.3
+generated by GNU Autoconf 2.69
+
+Copyright (C) 2012 Free Software Foundation, Inc.
+This configure script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it.
+_ACEOF
+  exit
+fi
+
+## ------------------------ ##
+## Autoconf initialization. ##
+## ------------------------ ##
+
+# ac_fn_cxx_try_compile LINENO
+# ----------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_compile ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext
+  if { { ac_try="$ac_compile"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compile") 2>conftest.err
+  ac_status=$?
+ if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_compile + +# ac_fn_c_try_compile LINENO +# -------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_compile + +# ac_fn_c_try_link LINENO +# ----------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_link () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext conftest$ac_exeext + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + test -x conftest$ac_exeext + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information + # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would + # interfere with the next link command; also delete a directory that is + # left behind by Apple's compiler. We do this before executing the actions. + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_link + +# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES +# ------------------------------------------------------- +# Tests whether HEADER exists and can be compiled using the include files in +# INCLUDES, setting the cache variable VAR accordingly. 
+ac_fn_c_check_header_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +#include <$2> +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_header_compile + +# ac_fn_c_try_cpp LINENO +# ---------------------- +# Try to preprocess conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_cpp () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } > conftest.i && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_cpp + +# ac_fn_c_try_run LINENO +# ---------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes +# that executables *can* be run. +ac_fn_c_try_run () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5
+  test $ac_status = 0; }; }; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: program exited with status $ac_status" >&5
+       $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+       ac_retval=$ac_status
+fi
+  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_run
+
+# ac_fn_c_check_func LINENO FUNC VAR
+# ----------------------------------
+# Tests whether FUNC exists, setting the cache variable VAR accordingly
+ac_fn_c_check_func ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+/* Define $2 to an innocuous variant, in case <limits.h> declares $2.
+   For example, HP-UX 11i <limits.h> declares gettimeofday.  */
+#define $2 innocuous_$2
+
+/* System header to define __stub macros and hopefully few prototypes,
+    which can conflict with char $2 (); below.
+    Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+    <limits.h> exists even on freestanding compilers.  */
+
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+
+#undef $2
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+#ifdef __cplusplus
+extern "C"
+#endif
+char $2 ();
+/* The GNU C library defines this for functions which it implements
+    to always fail with ENOSYS.  Some functions are actually named
+    something starting with __ and the normal name is an alias.  */
+#if defined __stub_$2 || defined __stub___$2
+choke me
+#endif
+
+int
+main ()
+{
+return $2 ();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  eval "$3=yes"
+else
+  eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_func
+
+# ac_fn_cxx_try_cpp LINENO
+# ------------------------
+# Try to preprocess conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_cpp ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  if { { ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } > conftest.i && {
+	 test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" ||
+	 test ! -s conftest.err
+       }; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+    ac_retval=1
+fi
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_cpp
+
+# ac_fn_cxx_try_link LINENO
+# -------------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_link () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext conftest$ac_exeext + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + test -x conftest$ac_exeext + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information + # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would + # interfere with the next link command; also delete a directory that is + # left behind by Apple's compiler. We do this before executing the actions. + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_link + +# ac_fn_cxx_check_header_mongrel LINENO HEADER VAR INCLUDES +# --------------------------------------------------------- +# Tests whether HEADER exists, giving a warning if it cannot be compiled using +# the include files in INCLUDES and setting the cache variable VAR +# accordingly. +ac_fn_cxx_check_header_mongrel () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + if eval \${$3+:} false; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +else + # Is the header compilable? +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 +$as_echo_n "checking $2 usability... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$4 +#include <$2> +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_header_compiler=yes +else + ac_header_compiler=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 +$as_echo "$ac_header_compiler" >&6; } + +# Is the header present? +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 +$as_echo_n "checking $2 presence... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <$2> +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + ac_header_preproc=yes +else + ac_header_preproc=no +fi +rm -f conftest.err conftest.i conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 +$as_echo "$ac_header_preproc" >&6; } + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_cxx_preproc_warn_flag in #(( + yes:no: ) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 +$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" 
>&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} + ;; + no:yes:* ) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 +$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 +$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 +$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 +$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} +( $as_echo "## ---------------------------------- ## +## Report this to Jeroen T. Vermeulen ## +## ---------------------------------- ##" + ) | sed "s/^/$as_me: WARNING: /" >&2 + ;; +esac + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + eval "$3=\$ac_header_compiler" +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_cxx_check_header_mongrel +cat >config.log <<_ACEOF +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. + +It was created by libpqxx $as_me 7.7.3, which was +generated by GNU Autoconf 2.69. Invocation command line was + + $ $0 $@ + +_ACEOF +exec 5>>config.log +{ +cat <<_ASUNAME +## --------- ## +## Platform. ## +## --------- ## + +hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` + +/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` +/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` +/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` +/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` + +_ASUNAME + +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + $as_echo "PATH: $as_dir" + done +IFS=$as_save_IFS + +} >&5 + +cat >&5 <<_ACEOF + + +## ----------- ## +## Core tests. ## +## ----------- ## + +_ACEOF + + +# Keep a trace of the command line. +# Strip out --no-create and --no-recursion so they do not pile up. +# Strip out --silent because we don't want to record it for future runs. +# Also quote any args containing shell meta-characters. +# Make two passes to allow for proper duplicate-argument suppression. 
+ac_configure_args= +ac_configure_args0= +ac_configure_args1= +ac_must_keep_next=false +for ac_pass in 1 2 +do + for ac_arg + do + case $ac_arg in + -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + continue ;; + *\'*) + ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + case $ac_pass in + 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; + 2) + as_fn_append ac_configure_args1 " '$ac_arg'" + if test $ac_must_keep_next = true; then + ac_must_keep_next=false # Got value, back to normal. + else + case $ac_arg in + *=* | --config-cache | -C | -disable-* | --disable-* \ + | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ + | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ + | -with-* | --with-* | -without-* | --without-* | --x) + case "$ac_configure_args0 " in + "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; + esac + ;; + -* ) ac_must_keep_next=true ;; + esac + fi + as_fn_append ac_configure_args " '$ac_arg'" + ;; + esac + done +done +{ ac_configure_args0=; unset ac_configure_args0;} +{ ac_configure_args1=; unset ac_configure_args1;} + +# When interrupted or exit'd, cleanup temporary files, and complete +# config.log. We remove comments because anyway the quotes in there +# would cause problems or look ugly. +# WARNING: Use '\'' to represent an apostrophe within the trap. +# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. +trap 'exit_status=$? + # Save into config.log some information that might help in debugging. + { + echo + + $as_echo "## ---------------- ## +## Cache variables. ## +## ---------------- ##" + echo + # The following way of writing the cache mishandles newlines in values, +( + for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + (set) 2>&1 | + case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + sed -n \ + "s/'\''/'\''\\\\'\'''\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" + ;; #( + *) + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) + echo + + $as_echo "## ----------------- ## +## Output variables. ## +## ----------------- ##" + echo + for ac_var in $ac_subst_vars + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + + if test -n "$ac_subst_files"; then + $as_echo "## ------------------- ## +## File substitutions. ## +## ------------------- ##" + echo + for ac_var in $ac_subst_files + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + fi + + if test -s confdefs.h; then + $as_echo "## ----------- ## +## confdefs.h. 
## +## ----------- ##" + echo + cat confdefs.h + echo + fi + test "$ac_signal" != 0 && + $as_echo "$as_me: caught signal $ac_signal" + $as_echo "$as_me: exit $exit_status" + } >&5 + rm -f core *.core core.conftest.* && + rm -f -r conftest* confdefs* conf$$* $ac_clean_files && + exit $exit_status +' 0 +for ac_signal in 1 2 13 15; do + trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal +done +ac_signal=0 + +# confdefs.h avoids OS command line length limits that DEFS can exceed. +rm -f -r conftest* confdefs.h + +$as_echo "/* confdefs.h */" > confdefs.h + +# Predefined preprocessor variables. + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_NAME "$PACKAGE_NAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_TARNAME "$PACKAGE_TARNAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_VERSION "$PACKAGE_VERSION" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_STRING "$PACKAGE_STRING" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_URL "$PACKAGE_URL" +_ACEOF + + +# Let the site file select an alternate cache file if it wants to. +# Prefer an explicitly selected file to automatically selected ones. +ac_site_file1=NONE +ac_site_file2=NONE +if test -n "$CONFIG_SITE"; then + # We do not want a PATH search for config.site. + case $CONFIG_SITE in #(( + -*) ac_site_file1=./$CONFIG_SITE;; + */*) ac_site_file1=$CONFIG_SITE;; + *) ac_site_file1=./$CONFIG_SITE;; + esac +elif test "x$prefix" != xNONE; then + ac_site_file1=$prefix/share/config.site + ac_site_file2=$prefix/etc/config.site +else + ac_site_file1=$ac_default_prefix/share/config.site + ac_site_file2=$ac_default_prefix/etc/config.site +fi +for ac_site_file in "$ac_site_file1" "$ac_site_file2" +do + test "x$ac_site_file" = xNONE && continue + if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 +$as_echo "$as_me: loading site script $ac_site_file" >&6;} + sed 's/^/| /' "$ac_site_file" >&5 + . "$ac_site_file" \ + || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "failed to load site script $ac_site_file +See \`config.log' for more details" "$LINENO" 5; } + fi +done + +if test -r "$cache_file"; then + # Some versions of bash will fail to source /dev/null (special files + # actually), so we avoid doing that. DJGPP emulates it as a regular file. + if test /dev/null != "$cache_file" && test -f "$cache_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 +$as_echo "$as_me: loading cache $cache_file" >&6;} + case $cache_file in + [\\/]* | ?:[\\/]* ) . "$cache_file";; + *) . "./$cache_file";; + esac + fi +else + { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 +$as_echo "$as_me: creating cache $cache_file" >&6;} + >$cache_file +fi + +# Check that the precious variables saved in the cache have kept the same +# value. 
+ac_cache_corrupted=false +for ac_var in $ac_precious_vars; do + eval ac_old_set=\$ac_cv_env_${ac_var}_set + eval ac_new_set=\$ac_env_${ac_var}_set + eval ac_old_val=\$ac_cv_env_${ac_var}_value + eval ac_new_val=\$ac_env_${ac_var}_value + case $ac_old_set,$ac_new_set in + set,) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,set) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,);; + *) + if test "x$ac_old_val" != "x$ac_new_val"; then + # differences in whitespace do not lead to failure. + ac_old_val_w=`echo x $ac_old_val` + ac_new_val_w=`echo x $ac_new_val` + if test "$ac_old_val_w" != "$ac_new_val_w"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 +$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + ac_cache_corrupted=: + else + { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 +$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} + eval $ac_var=\$ac_old_val + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 +$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 +$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} + fi;; + esac + # Pass precious variables to config.status. + if test "$ac_new_set" = set; then + case $ac_new_val in + *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *) ac_arg=$ac_var=$ac_new_val ;; + esac + case " $ac_configure_args " in + *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. + *) as_fn_append ac_configure_args " '$ac_arg'" ;; + esac + fi +done +if $ac_cache_corrupted; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 +$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} + as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 +fi +## -------------------- ## +## Main body of script. ## +## -------------------- ## + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + + +ac_aux_dir= +for ac_dir in config "$srcdir"/config; do + if test -f "$ac_dir/install-sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install-sh -c" + break + elif test -f "$ac_dir/install.sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install.sh -c" + break + elif test -f "$ac_dir/shtool"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/shtool install -c" + break + fi +done +if test -z "$ac_aux_dir"; then + as_fn_error $? 
"cannot find install-sh, install.sh, or shtool in config \"$srcdir\"/config" "$LINENO" 5 +fi + +# These three variables are undocumented and unsupported, +# and are intended to be withdrawn in a future Autoconf release. +# They can cause serious problems if a builder's source tree is in a directory +# whose full name contains unusual characters. +ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. +ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. +ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. + + + +am__api_version='1.16' + +# Find a good install program. We prefer a C program (faster), +# so one script is as good as another. But avoid the broken or +# incompatible versions: +# SysV /etc/install, /usr/sbin/install +# SunOS /usr/etc/install +# IRIX /sbin/install +# AIX /bin/install +# AmigaOS /C/install, which installs bootblocks on floppy discs +# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag +# AFS /usr/afsws/bin/install, which mishandles nonexistent args +# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" +# OS/2's system install, which has a completely different semantic +# ./install, which can be erroneously created by make from ./install.sh. +# Reject install programs that cannot install multiple files. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 +$as_echo_n "checking for a BSD-compatible install... " >&6; } +if test -z "$INSTALL"; then +if ${ac_cv_path_install+:} false; then : + $as_echo_n "(cached) " >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + # Account for people who put trailing slashes in PATH elements. +case $as_dir/ in #(( + ./ | .// | /[cC]/* | \ + /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ + ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ + /usr/ucb/* ) ;; + *) + # OSF1 and SCO ODT 3.0 have their own names for install. + # Don't use installbsd from OSF since it installs stuff as root + # by default. + for ac_prog in ginstall scoinst install; do + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then + if test $ac_prog = install && + grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # AIX install. It has an incompatible calling convention. + : + elif test $ac_prog = install && + grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # program-specific install script used by HP pwplus--don't use. + : + else + rm -rf conftest.one conftest.two conftest.dir + echo one > conftest.one + echo two > conftest.two + mkdir conftest.dir + if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && + test -s conftest.one && test -s conftest.two && + test -s conftest.dir/conftest.one && + test -s conftest.dir/conftest.two + then + ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" + break 3 + fi + fi + fi + done + done + ;; +esac + + done +IFS=$as_save_IFS + +rm -rf conftest.one conftest.two conftest.dir + +fi + if test "${ac_cv_path_install+set}" = set; then + INSTALL=$ac_cv_path_install + else + # As a last resort, use the slow shell script. Don't cache a + # value for INSTALL within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the value is a relative name. 
+ INSTALL=$ac_install_sh + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 +$as_echo "$INSTALL" >&6; } + +# Use test -z because SunOS4 sh mishandles braces in ${var-val}. +# It thinks the first close brace ends the variable substitution. +test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' + +test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' + +test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5 +$as_echo_n "checking whether build environment is sane... " >&6; } +# Reject unsafe characters in $srcdir or the absolute working directory +# name. Accept space and tab only in the latter. +am_lf=' +' +case `pwd` in + *[\\\"\#\$\&\'\`$am_lf]*) + as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;; +esac +case $srcdir in + *[\\\"\#\$\&\'\`$am_lf\ \ ]*) + as_fn_error $? "unsafe srcdir value: '$srcdir'" "$LINENO" 5;; +esac + +# Do 'set' in a subshell so we don't clobber the current shell's +# arguments. Must try -L first in case configure is actually a +# symlink; some systems play weird games with the mod time of symlinks +# (eg FreeBSD returns the mod time of the symlink's containing +# directory). +if ( + am_has_slept=no + for am_try in 1 2; do + echo "timestamp, slept: $am_has_slept" > conftest.file + set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` + if test "$*" = "X"; then + # -L didn't work. + set X `ls -t "$srcdir/configure" conftest.file` + fi + if test "$*" != "X $srcdir/configure conftest.file" \ + && test "$*" != "X conftest.file $srcdir/configure"; then + + # If neither matched, then we have a broken ls. This can happen + # if, for instance, CONFIG_SHELL is bash and it inherits a + # broken ls alias from the environment. This has actually + # happened. Such a system could not be considered "sane". + as_fn_error $? "ls -t appears to fail. Make sure there is not a broken + alias in your environment" "$LINENO" 5 + fi + if test "$2" = conftest.file || test $am_try -eq 2; then + break + fi + # Just in case. + sleep 1 + am_has_slept=yes + done + test "$2" = conftest.file + ) +then + # Ok. + : +else + as_fn_error $? "newly created file is older than distributed files! +Check your system clock" "$LINENO" 5 +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +# If we didn't sleep, we still need to ensure time stamps of config.status and +# generated files are strictly newer. +am_sleep_pid= +if grep 'slept: no' conftest.file >/dev/null 2>&1; then + ( sleep 1 ) & + am_sleep_pid=$! +fi + +rm -f conftest.file + +test "$program_prefix" != NONE && + program_transform_name="s&^&$program_prefix&;$program_transform_name" +# Use a double $ so make ignores it. +test "$program_suffix" != NONE && + program_transform_name="s&\$&$program_suffix&;$program_transform_name" +# Double any \ or $. +# By default was `s,x,x', remove it if useless. +ac_script='s/[\\$]/&&/g;s/;s,x,x,$//' +program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"` + +# Expand $ac_aux_dir to an absolute path. 
+am_aux_dir=`cd "$ac_aux_dir" && pwd` + +if test x"${MISSING+set}" != xset; then + MISSING="\${SHELL} '$am_aux_dir/missing'" +fi +# Use eval to expand $SHELL +if eval "$MISSING --is-lightweight"; then + am_missing_run="$MISSING " +else + am_missing_run= + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5 +$as_echo "$as_me: WARNING: 'missing' script is too old or missing" >&2;} +fi + +if test x"${install_sh+set}" != xset; then + case $am_aux_dir in + *\ * | *\ *) + install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; + *) + install_sh="\${SHELL} $am_aux_dir/install-sh" + esac +fi + +# Installed binaries are usually stripped using 'strip' when the user +# run "make install-strip". However 'strip' might not be the right +# tool to use in cross-compilation environments, therefore Automake +# will honor the 'STRIP' environment variable to overrule this program. +if test "$cross_compiling" != no; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. +set dummy ${ac_tool_prefix}strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +STRIP=$ac_cv_prog_STRIP +if test -n "$STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 +$as_echo "$STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. +set dummy strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_STRIP="strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP +if test -n "$ac_ct_STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 +$as_echo "$ac_ct_STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_STRIP" = x; then + STRIP=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + STRIP=$ac_ct_STRIP + fi +else + STRIP="$ac_cv_prog_STRIP" +fi + +fi +INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5 +$as_echo_n "checking for a thread-safe mkdir -p... " >&6; } +if test -z "$MKDIR_P"; then + if ${ac_cv_path_mkdir+:} false; then : + $as_echo_n "(cached) " >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in mkdir gmkdir; do + for ac_exec_ext in '' $ac_executable_extensions; do + as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext" || continue + case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #( + 'mkdir (GNU coreutils) '* | \ + 'mkdir (coreutils) '* | \ + 'mkdir (fileutils) '4.1*) + ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext + break 3;; + esac + done + done + done +IFS=$as_save_IFS + +fi + + test -d ./--version && rmdir ./--version + if test "${ac_cv_path_mkdir+set}" = set; then + MKDIR_P="$ac_cv_path_mkdir -p" + else + # As a last resort, use the slow shell script. Don't cache a + # value for MKDIR_P within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the value is a relative name. + MKDIR_P="$ac_install_sh -d" + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5 +$as_echo "$MKDIR_P" >&6; } + +for ac_prog in gawk mawk nawk awk +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AWK+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AWK"; then + ac_cv_prog_AWK="$AWK" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_AWK="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AWK=$ac_cv_prog_AWK +if test -n "$AWK"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 +$as_echo "$AWK" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$AWK" && break +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 +$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... 
" >&6; } +set x ${MAKE-make} +ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` +if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat >conftest.make <<\_ACEOF +SHELL = /bin/sh +all: + @echo '@@@%%%=$(MAKE)=@@@%%%' +_ACEOF +# GNU make sometimes prints "make[1]: Entering ...", which would confuse us. +case `${MAKE-make} -f conftest.make 2>/dev/null` in + *@@@%%%=?*=@@@%%%*) + eval ac_cv_prog_make_${ac_make}_set=yes;; + *) + eval ac_cv_prog_make_${ac_make}_set=no;; +esac +rm -f conftest.make +fi +if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + SET_MAKE= +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + SET_MAKE="MAKE=${MAKE-make}" +fi + +rm -rf .tst 2>/dev/null +mkdir .tst 2>/dev/null +if test -d .tst; then + am__leading_dot=. +else + am__leading_dot=_ +fi +rmdir .tst 2>/dev/null + +# Check whether --enable-silent-rules was given. +if test "${enable_silent_rules+set}" = set; then : + enableval=$enable_silent_rules; +fi + +case $enable_silent_rules in # ((( + yes) AM_DEFAULT_VERBOSITY=0;; + no) AM_DEFAULT_VERBOSITY=1;; + *) AM_DEFAULT_VERBOSITY=1;; +esac +am_make=${MAKE-make} +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5 +$as_echo_n "checking whether $am_make supports nested variables... " >&6; } +if ${am_cv_make_support_nested_variables+:} false; then : + $as_echo_n "(cached) " >&6 +else + if $as_echo 'TRUE=$(BAR$(V)) +BAR0=false +BAR1=true +V=1 +am__doit: + @$(TRUE) +.PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then + am_cv_make_support_nested_variables=yes +else + am_cv_make_support_nested_variables=no +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5 +$as_echo "$am_cv_make_support_nested_variables" >&6; } +if test $am_cv_make_support_nested_variables = yes; then + AM_V='$(V)' + AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' +else + AM_V=$AM_DEFAULT_VERBOSITY + AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY +fi +AM_BACKSLASH='\' + +if test "`cd $srcdir && pwd`" != "`pwd`"; then + # Use -I$(srcdir) only when $(srcdir) != ., so that make's output + # is not polluted with repeated "-I." + am__isrc=' -I$(srcdir)' + # test to see if srcdir already configured + if test -f $srcdir/config.status; then + as_fn_error $? "source directory already configured; run \"make distclean\" there first" "$LINENO" 5 + fi +fi + +# test whether we have cygpath +if test -z "$CYGPATH_W"; then + if (cygpath --version) >/dev/null 2>/dev/null; then + CYGPATH_W='cygpath -w' + else + CYGPATH_W=echo + fi +fi + + +# Define the identity of the package. + PACKAGE='libpqxx' + VERSION='7.7.3' + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE "$PACKAGE" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define VERSION "$VERSION" +_ACEOF + +# Some tools Automake needs. + +ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"} + + +AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"} + + +AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"} + + +AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"} + + +MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} + +# For better backward compatibility. To be removed once Automake 1.9.x +# dies out for good. For more background, see: +# +# +mkdir_p='$(MKDIR_P)' + +# We need awk for the "check" target (and possibly the TAP driver). The +# system "awk" is bad on some platforms. 
+# Always define AMTAR for backward compatibility. Yes, it's still used +# in the wild :-( We should find a proper way to deprecate it ... +AMTAR='$${TAR-tar}' + + +# We'll loop over all known methods to create a tar archive until one works. +_am_tools='gnutar pax cpio none' + +am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -' + + + + + +# Variables for tags utilities; see am/tags.am +if test -z "$CTAGS"; then + CTAGS=ctags +fi + +if test -z "$ETAGS"; then + ETAGS=etags +fi + +if test -z "$CSCOPE"; then + CSCOPE=cscope +fi + + + +# POSIX will say in a future version that running "rm -f" with no argument +# is OK; and we want to be able to make that assumption in our Makefile +# recipes. So use an aggressive probe to check that the usage we want is +# actually supported "in the wild" to an acceptable degree. +# See automake bug#10828. +# To make any issue more visible, cause the running configure to be aborted +# by default if the 'rm' program in use doesn't match our expectations; the +# user can still override this though. +if rm -f && rm -fr && rm -rf; then : OK; else + cat >&2 <<'END' +Oops! + +Your 'rm' program seems unable to run without file operands specified +on the command line, even when the '-f' option is present. This is contrary +to the behaviour of most rm programs out there, and not conforming with +the upcoming POSIX standard: + +Please tell bug-automake@gnu.org about your system, including the value +of your $PATH and any error possibly output before this message. This +can help us improve future automake versions. + +END + if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then + echo 'Configuration will proceed anyway, since you have set the' >&2 + echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 + echo >&2 + else + cat >&2 <<'END' +Aborting the configuration process, to ensure you take notice of the issue. + +You can download and install GNU coreutils to get an 'rm' implementation +that behaves properly: . + +If you want to complete the configuration process using your problematic +'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM +to "yes", and re-run configure. + +END + as_fn_error $? "Your 'rm' program is bad, sorry." "$LINENO" 5 + fi +fi + + +PQXX_ABI=7.7 +PQXXVERSION=$PACKAGE_VERSION + + + +ac_config_headers="$ac_config_headers include/pqxx/config.h" + + +# Default prefix for installs. + + + +# Read test programme from config-test. + + + +# Checks for programs. +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +if test -z "$CXX"; then + if test -n "$CCC"; then + CXX=$CCC + else + if test -n "$ac_tool_prefix"; then + for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CXX"; then + ac_cv_prog_CXX="$CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CXX=$ac_cv_prog_CXX +if test -n "$CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 +$as_echo "$CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CXX" && break + done +fi +if test -z "$CXX"; then + ac_ct_CXX=$CXX + for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CXX"; then + ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CXX="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CXX=$ac_cv_prog_ac_ct_CXX +if test -n "$ac_ct_CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 +$as_echo "$ac_ct_CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CXX" && break +done + + if test "x$ac_ct_CXX" = x; then + CXX="g++" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CXX=$ac_ct_CXX + fi +fi + + fi +fi +# Provide some information about the compiler. +$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" +# Try to create an executable without -o first, disregard a.out. +# It will help us diagnose broken compilers, and finding out an intuition +# of exeext. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C++ compiler works" >&5 +$as_echo_n "checking whether the C++ compiler works... 
" >&6; } +ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` + +# The possible output files: +ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" + +ac_rmfiles= +for ac_file in $ac_files +do + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + * ) ac_rmfiles="$ac_rmfiles $ac_file";; + esac +done +rm -f $ac_rmfiles + +if { { ac_try="$ac_link_default" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link_default") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. +# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' +# in a Makefile. We should not override ac_cv_exeext if it was cached, +# so that the user can short-circuit this test for compilers unknown to +# Autoconf. +for ac_file in $ac_files '' +do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) + ;; + [ab].out ) + # We found the default executable, but exeext='' is most + # certainly right. + break;; + *.* ) + if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; + then :; else + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + fi + # We set ac_cv_exeext here because the later test for it is not + # safe: cross compilers may not add the suffix if given an `-o' + # argument, so we may need to know it at that point already. + # Even if this section looks crufty: it has the advantage of + # actually working. + break;; + * ) + break;; + esac +done +test "$ac_cv_exeext" = no && ac_cv_exeext= + +else + ac_file='' +fi +if test -z "$ac_file"; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +$as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "C++ compiler cannot create executables +See \`config.log' for more details" "$LINENO" 5; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler default output file name" >&5 +$as_echo_n "checking for C++ compiler default output file name... " >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 +$as_echo "$ac_file" >&6; } +ac_exeext=$ac_cv_exeext + +rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 +$as_echo_n "checking for suffix of executables... " >&6; } +if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # If both `conftest.exe' and `conftest' are `present' (well, observable) +# catch `conftest.exe'. 
For instance with Cygwin, `ls conftest' will +# work properly (i.e., refer to `conftest.exe'), while it won't with +# `rm'. +for ac_file in conftest.exe conftest conftest.*; do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + break;; + * ) break;; + esac +done +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest conftest$ac_cv_exeext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 +$as_echo "$ac_cv_exeext" >&6; } + +rm -f conftest.$ac_ext +EXEEXT=$ac_cv_exeext +ac_exeext=$EXEEXT +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +FILE *f = fopen ("conftest.out", "w"); + return ferror (f) || fclose (f) != 0; + + ; + return 0; +} +_ACEOF +ac_clean_files="$ac_clean_files conftest.out" +# Check that the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 +$as_echo_n "checking whether we are cross compiling... " >&6; } +if test "$cross_compiling" != yes; then + { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if { ac_try='./conftest$ac_cv_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then + cross_compiling=no + else + if test "$cross_compiling" = maybe; then + cross_compiling=yes + else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot run C++ compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details" "$LINENO" 5; } + fi + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 +$as_echo "$cross_compiling" >&6; } + +rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 +$as_echo_n "checking for suffix of object files... " >&6; } +if ${ac_cv_objext+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.o conftest.obj +if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; }; then : + for ac_file in conftest.o conftest.obj conftest.*; do + test -f "$ac_file" || continue; + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; + *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` + break;; + esac +done +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of object files: cannot compile +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest.$ac_cv_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 +$as_echo "$ac_cv_objext" >&6; } +OBJEXT=$ac_cv_objext +ac_objext=$OBJEXT +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5 +$as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } +if ${ac_cv_cxx_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_cxx_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 +$as_echo "$ac_cv_cxx_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GXX=yes +else + GXX= +fi +ac_test_CXXFLAGS=${CXXFLAGS+set} +ac_save_CXXFLAGS=$CXXFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 +$as_echo_n "checking whether $CXX accepts -g... " >&6; } +if ${ac_cv_prog_cxx_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_cxx_werror_flag=$ac_cxx_werror_flag + ac_cxx_werror_flag=yes + ac_cv_prog_cxx_g=no + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes +else + CXXFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +else + ac_cxx_werror_flag=$ac_save_cxx_werror_flag + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cxx_werror_flag=$ac_save_cxx_werror_flag +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 +$as_echo "$ac_cv_prog_cxx_g" >&6; } +if test "$ac_test_CXXFLAGS" = set; then + CXXFLAGS=$ac_save_CXXFLAGS +elif test $ac_cv_prog_cxx_g = yes; then + if test "$GXX" = yes; then + CXXFLAGS="-g -O2" + else + CXXFLAGS="-g" + fi +else + if test "$GXX" = yes; then + CXXFLAGS="-O2" + else + CXXFLAGS= + fi +fi +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +DEPDIR="${am__leading_dot}deps" + +ac_config_commands="$ac_config_commands depfiles" + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} supports the include directive" >&5 +$as_echo_n "checking whether ${MAKE-make} supports the include directive... " >&6; } +cat > confinc.mk << 'END' +am__doit: + @echo this is the am__doit target >confinc.out +.PHONY: am__doit +END +am__include="#" +am__quote= +# BSD make does it like this. +echo '.include "confinc.mk" # ignored' > confmf.BSD +# Other make implementations (GNU, Solaris 10, AIX) do it like this. +echo 'include confinc.mk # ignored' > confmf.GNU +_am_result=no +for s in GNU BSD; do + { echo "$as_me:$LINENO: ${MAKE-make} -f confmf.$s && cat confinc.out" >&5 + (${MAKE-make} -f confmf.$s && cat confinc.out) >&5 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } + case $?:`cat confinc.out 2>/dev/null` in #( + '0:this is the am__doit target') : + case $s in #( + BSD) : + am__include='.include' am__quote='"' ;; #( + *) : + am__include='include' am__quote='' ;; +esac ;; #( + *) : + ;; +esac + if test "$am__include" != "#"; then + _am_result="yes ($s style)" + break + fi +done +rm -f confinc.* confmf.* +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${_am_result}" >&5 +$as_echo "${_am_result}" >&6; } + +# Check whether --enable-dependency-tracking was given. +if test "${enable_dependency_tracking+set}" = set; then : + enableval=$enable_dependency_tracking; +fi + +if test "x$enable_dependency_tracking" != xno; then + am_depcomp="$ac_aux_dir/depcomp" + AMDEPBACKSLASH='\' + am__nodep='_no' +fi + if test "x$enable_dependency_tracking" != xno; then + AMDEP_TRUE= + AMDEP_FALSE='#' +else + AMDEP_TRUE='#' + AMDEP_FALSE= +fi + + + +depcc="$CXX" am_compiler_list= + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 +$as_echo_n "checking dependency style of $depcc... " >&6; } +if ${am_cv_CXX_dependencies_compiler_type+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named 'D' -- because '-MD' means "put the output + # in D". + rm -rf conftest.dir + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. 
+ cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. + mkdir sub + + am_cv_CXX_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` + fi + am__universal=false + case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac + + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with + # Solaris 10 /bin/sh. + echo '/* dummy */' > sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + # We check with '-c' and '-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle '-M -o', and we need to detect this. Also, some Intel + # versions had trouble with output in subdirs. + am__obj=sub/conftest.${OBJEXT-o} + am__minus_obj="-o $am__obj" + case $depmode in + gcc) + # This depmode causes a compiler race in universal mode. + test "$am__universal" = false || continue + ;; + nosideeffect) + # After this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested. + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + msvc7 | msvc7msys | msvisualcpp | msvcmsys) + # This compiler won't grok '-c -o', but also, the minuso test has + # not run yet. These depmodes are late enough in the game, and + # so weak that their functioning should not be impacted. + am__obj=conftest.${OBJEXT-o} + am__minus_obj= + ;; + none) break ;; + esac + if depmode=$depmode \ + source=sub/conftest.c object=$am__obj \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep $am__obj sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_CXX_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. 
+ rm -rf conftest.dir +else + am_cv_CXX_dependencies_compiler_type=none +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5 +$as_echo "$am_cv_CXX_dependencies_compiler_type" >&6; } +CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type + + if + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then + am__fastdepCXX_TRUE= + am__fastdepCXX_FALSE='#' +else + am__fastdepCXX_TRUE='#' + am__fastdepCXX_FALSE= +fi + + + +# Check whether --enable-shared was given. +if test "${enable_shared+set}" = set; then : + enableval=$enable_shared; p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, + for pkg in $enableval; do + IFS=$lt_save_ifs + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done + IFS=$lt_save_ifs + ;; + esac +else + enable_shared=no +fi + + + + + + + + + +case `pwd` in + *\ * | *\ *) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 +$as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; +esac + + + +macro_version='2.4.6' +macro_revision='2.4.6' + + + + + + + + + + + + + +ltmain=$ac_aux_dir/ltmain.sh + +# Make sure we can run config.sub. +$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || + as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 +$as_echo_n "checking build system type... " >&6; } +if ${ac_cv_build+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_build_alias=$build_alias +test "x$ac_build_alias" = x && + ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` +test "x$ac_build_alias" = x && + as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 +ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 +$as_echo "$ac_cv_build" >&6; } +case $ac_cv_build in +*-*-*) ;; +*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;; +esac +build=$ac_cv_build +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_build +shift +build_cpu=$1 +build_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +build_os=$* +IFS=$ac_save_IFS +case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 +$as_echo_n "checking host system type... " >&6; } +if ${ac_cv_host+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "x$host_alias" = x; then + ac_cv_host=$ac_cv_build +else + ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 +$as_echo "$ac_cv_host" >&6; } +case $ac_cv_host in +*-*-*) ;; +*) as_fn_error $? 
"invalid value of canonical host" "$LINENO" 5;; +esac +host=$ac_cv_host +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_host +shift +host_cpu=$1 +host_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +host_os=$* +IFS=$ac_save_IFS +case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac + + +# Backslashify metacharacters that are still active within +# double-quoted strings. +sed_quote_subst='s/\(["`$\\]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\(["`\\]\)/\\\1/g' + +# Sed substitution to delay expansion of an escaped shell variable in a +# double_quote_subst'ed string. +delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' + +# Sed substitution to delay expansion of an escaped single quote. +delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' + +# Sed substitution to avoid accidental globbing in evaled expressions +no_glob_subst='s/\*/\\\*/g' + +ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 +$as_echo_n "checking how to print strings... " >&6; } +# Test print first, because it will be a builtin if present. +if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' +elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='printf %s\n' +else + # Use this function as a fallback that always works. + func_fallback_echo () + { + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' + } + ECHO='func_fallback_echo' +fi + +# func_echo_all arg... +# Invoke $ECHO with all args, space-separated. +func_echo_all () +{ + $ECHO "" +} + +case $ECHO in + printf*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: printf" >&5 +$as_echo "printf" >&6; } ;; + print*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: print -r" >&5 +$as_echo "print -r" >&6; } ;; + *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: cat" >&5 +$as_echo "cat" >&6; } ;; +esac + + + + + + + + + + + + + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. +set dummy ${ac_tool_prefix}gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "gcc", so it can be a program name with args. +set dummy gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +else + CC="$ac_cv_prog_CC" +fi + +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. +set dummy ${ac_tool_prefix}cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="${ac_tool_prefix}cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + fi +fi +if test -z "$CC"; then + # Extract the first word of "cc", so it can be a program name with args. +set dummy cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. 
+else + ac_prog_rejected=no +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + ac_prog_rejected=yes + continue + fi + ac_cv_prog_CC="cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +if test $ac_prog_rejected = yes; then + # We found a bogon in the path, so make sure we never use it. + set dummy $ac_cv_prog_CC + shift + if test $# != 0; then + # We chose a different compiler from the bogus one. + # However, it has the same basename, so the bogon will be chosen + # first if we set CC to just the basename; use the full file name. + shift + ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" + fi +fi +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + for ac_prog in cl.exe + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_CC="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CC" && break + done +fi +if test -z "$CC"; then + ac_ct_CC=$CC + for ac_prog in cl.exe +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_CC="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CC" && break +done + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +fi + +fi + + +test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "no acceptable C compiler found in \$PATH +See \`config.log' for more details" "$LINENO" 5; } + +# Provide some information about the compiler. +$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 +$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } +if ${ac_cv_c_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_c_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 +$as_echo "$ac_cv_c_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GCC=yes +else + GCC= +fi +ac_test_CFLAGS=${CFLAGS+set} +ac_save_CFLAGS=$CFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 +$as_echo_n "checking whether $CC accepts -g... " >&6; } +if ${ac_cv_prog_cc_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_c_werror_flag=$ac_c_werror_flag + ac_c_werror_flag=yes + ac_cv_prog_cc_g=no + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +else + CFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+  ac_c_werror_flag=$ac_save_c_werror_flag
+  CFLAGS="-g"
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_prog_cc_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+   ac_c_werror_flag=$ac_save_c_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
+$as_echo "$ac_cv_prog_cc_g" >&6; }
+if test "$ac_test_CFLAGS" = set; then
+  CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+  if test "$GCC" = yes; then
+    CFLAGS="-g -O2"
+  else
+    CFLAGS="-g"
+  fi
+else
+  if test "$GCC" = yes; then
+    CFLAGS="-O2"
+  else
+    CFLAGS=
+  fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
+$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
+if ${ac_cv_prog_cc_c89+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_cv_prog_cc_c89=no
+ac_save_CC=$CC
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdarg.h>
+#include <stdio.h>
+struct stat;
+/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */
+struct buf { int x; };
+FILE * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+     char **p;
+     int i;
+{
+  return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+  char *s;
+  va_list v;
+  va_start (v,p);
+  s = g (p, va_arg (v,int));
+  va_end (v);
+  return s;
+}
+
+/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default.  It has
+   function prototypes and stuff, but not '\xHH' hex character constants.
+   These don't provoke an error unfortunately, instead are silently treated
+   as 'x'.  The following induces an error, until -std is added to get
+   proper ANSI mode.  Curiously '\x00'!='x' always comes out true, for an
+   array size at least.  It's necessary to write '\x00'==0 to get something
+   that's true only with -std.  */
+int osf4_cc_array ['\x00' == 0 ? 1 : -1];
+
+/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+   inside strings and character constants.  */
+#define FOO(x) 'x'
+int xlc6_cc_array[FOO(a) == 'x' ?
1 : -1]; + +int test (int i, double x); +struct s1 {int (*f) (int a);}; +struct s2 {int (*f) (double a);}; +int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); +int argc; +char **argv; +int +main () +{ +return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; + ; + return 0; +} +_ACEOF +for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ + -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_c89=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext + test "x$ac_cv_prog_cc_c89" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC + +fi +# AC_CACHE_VAL +case "x$ac_cv_prog_cc_c89" in + x) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +$as_echo "none needed" >&6; } ;; + xno) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +$as_echo "unsupported" >&6; } ;; + *) + CC="$CC $ac_cv_prog_cc_c89" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 +$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; +esac +if test "x$ac_cv_prog_cc_c89" != xno; then : + +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5 +$as_echo_n "checking whether $CC understands -c and -o together... " >&6; } +if ${am_cv_prog_cc_c_o+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF + # Make sure it works both with $CC and with simple cc. + # Following AC_PROG_CC_C_O, we do the test twice because some + # compilers refuse to overwrite an existing .o file with -o, + # though they will create one. + am_cv_prog_cc_c_o=yes + for am_i in 1 2; do + if { echo "$as_me:$LINENO: $CC -c conftest.$ac_ext -o conftest2.$ac_objext" >&5 + ($CC -c conftest.$ac_ext -o conftest2.$ac_objext) >&5 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } \ + && test -f conftest2.$ac_objext; then + : OK + else + am_cv_prog_cc_c_o=no + break + fi + done + rm -f core conftest* + unset am_i +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5 +$as_echo "$am_cv_prog_cc_c_o" >&6; } +if test "$am_cv_prog_cc_c_o" != yes; then + # Losing compiler, so override with the script. + # FIXME: It is wrong to rewrite CC. + # But if we don't then we get into trouble of one sort or another. + # A longer-term fix would be to have automake use am__CC in this case, + # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" + CC="$am_aux_dir/compile $CC" +fi +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + +depcc="$CC" am_compiler_list= + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 +$as_echo_n "checking dependency style of $depcc... 
" >&6; } +if ${am_cv_CC_dependencies_compiler_type+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named 'D' -- because '-MD' means "put the output + # in D". + rm -rf conftest.dir + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. + mkdir sub + + am_cv_CC_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` + fi + am__universal=false + case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac + + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with + # Solaris 10 /bin/sh. + echo '/* dummy */' > sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + # We check with '-c' and '-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle '-M -o', and we need to detect this. Also, some Intel + # versions had trouble with output in subdirs. + am__obj=sub/conftest.${OBJEXT-o} + am__minus_obj="-o $am__obj" + case $depmode in + gcc) + # This depmode causes a compiler race in universal mode. + test "$am__universal" = false || continue + ;; + nosideeffect) + # After this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested. + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + msvc7 | msvc7msys | msvisualcpp | msvcmsys) + # This compiler won't grok '-c -o', but also, the minuso test has + # not run yet. These depmodes are late enough in the game, and + # so weak that their functioning should not be impacted. + am__obj=conftest.${OBJEXT-o} + am__minus_obj= + ;; + none) break ;; + esac + if depmode=$depmode \ + source=sub/conftest.c object=$am__obj \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep $am__obj sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). 
So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_CC_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. + rm -rf conftest.dir +else + am_cv_CC_dependencies_compiler_type=none +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 +$as_echo "$am_cv_CC_dependencies_compiler_type" >&6; } +CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type + + if + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then + am__fastdepCC_TRUE= + am__fastdepCC_FALSE='#' +else + am__fastdepCC_TRUE='#' + am__fastdepCC_FALSE= +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 +$as_echo_n "checking for a sed that does not truncate output... " >&6; } +if ${ac_cv_path_SED+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ + for ac_i in 1 2 3 4 5 6 7; do + ac_script="$ac_script$as_nl$ac_script" + done + echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed + { ac_script=; unset ac_script;} + if test -z "$SED"; then + ac_path_SED_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in sed gsed; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_SED" || continue +# Check for GNU ac_path_SED and select it if it is found. + # Check for GNU $ac_path_SED +case `"$ac_path_SED" --version 2>&1` in +*GNU*) + ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo '' >> "conftest.nl" + "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_SED_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_SED="$ac_path_SED" + ac_path_SED_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_SED_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_SED"; then + as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5 + fi +else + ac_cv_path_SED=$SED +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 +$as_echo "$ac_cv_path_SED" >&6; } + SED="$ac_cv_path_SED" + rm -f conftest.sed + +test -z "$SED" && SED=sed +Xsed="$SED -e 1s/^X//" + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 +$as_echo_n "checking for grep that handles long lines and -e... 
" >&6; } +if ${ac_cv_path_GREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$GREP"; then + ac_path_GREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in grep ggrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_GREP" || continue +# Check for GNU ac_path_GREP and select it if it is found. + # Check for GNU $ac_path_GREP +case `"$ac_path_GREP" --version 2>&1` in +*GNU*) + ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'GREP' >> "conftest.nl" + "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_GREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_GREP="$ac_path_GREP" + ac_path_GREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_GREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_GREP"; then + as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_GREP=$GREP +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 +$as_echo "$ac_cv_path_GREP" >&6; } + GREP="$ac_cv_path_GREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 +$as_echo_n "checking for egrep... " >&6; } +if ${ac_cv_path_EGREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 + then ac_cv_path_EGREP="$GREP -E" + else + if test -z "$EGREP"; then + ac_path_EGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in egrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_EGREP" || continue +# Check for GNU ac_path_EGREP and select it if it is found. 
+ # Check for GNU $ac_path_EGREP +case `"$ac_path_EGREP" --version 2>&1` in +*GNU*) + ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'EGREP' >> "conftest.nl" + "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_EGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_EGREP="$ac_path_EGREP" + ac_path_EGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_EGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_EGREP"; then + as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_EGREP=$EGREP +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 +$as_echo "$ac_cv_path_EGREP" >&6; } + EGREP="$ac_cv_path_EGREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5 +$as_echo_n "checking for fgrep... " >&6; } +if ${ac_cv_path_FGREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 + then ac_cv_path_FGREP="$GREP -F" + else + if test -z "$FGREP"; then + ac_path_FGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in fgrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_FGREP" || continue +# Check for GNU ac_path_FGREP and select it if it is found. + # Check for GNU $ac_path_FGREP +case `"$ac_path_FGREP" --version 2>&1` in +*GNU*) + ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'FGREP' >> "conftest.nl" + "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_FGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_FGREP="$ac_path_FGREP" + ac_path_FGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_FGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_FGREP"; then + as_fn_error $? "no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_FGREP=$FGREP +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5 +$as_echo "$ac_cv_path_FGREP" >&6; } + FGREP="$ac_cv_path_FGREP" + + +test -z "$GREP" && GREP=grep + + + + + + + + + + + + + + + + + + + +# Check whether --with-gnu-ld was given. 
+if test "${with_gnu_ld+set}" = set; then :
+  withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes
+else
+  with_gnu_ld=no
+fi
+
+ac_prog=ld
+if test yes = "$GCC"; then
+  # Check if gcc -print-prog-name=ld gives a path.
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5
+$as_echo_n "checking for ld used by $CC... " >&6; }
+  case $host in
+  *-*-mingw*)
+    # gcc leaves a trailing carriage return, which upsets mingw
+    ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+  *)
+    ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+  esac
+  case $ac_prog in
+    # Accept absolute paths.
+    [\\/]* | ?:[\\/]*)
+      re_direlt='/[^/][^/]*/\.\./'
+      # Canonicalize the pathname of ld
+      ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
+      while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+        ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+      done
+      test -z "$LD" && LD=$ac_prog
+      ;;
+  "")
+    # If it fails, then pretend we aren't using GCC.
+    ac_prog=ld
+    ;;
+  *)
+    # If it is relative, then search for the first ld in PATH.
+    with_gnu_ld=unknown
+    ;;
+  esac
+elif test yes = "$with_gnu_ld"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5
+$as_echo_n "checking for GNU ld... " >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
+$as_echo_n "checking for non-GNU ld... " >&6; }
+fi
+if ${lt_cv_path_LD+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -z "$LD"; then
+  lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
+  for ac_dir in $PATH; do
+    IFS=$lt_save_ifs
+    test -z "$ac_dir" && ac_dir=.
+    if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+      lt_cv_path_LD=$ac_dir/$ac_prog
+      # Check to see if the program is GNU ld.  I'd rather use --version,
+      # but apparently some variants of GNU ld only accept -v.
+      # Break only if it was the GNU/non-GNU ld that we prefer.
+      case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
+      *GNU* | *'with BFD'*)
+        test no != "$with_gnu_ld" && break
+        ;;
+      *)
+        test yes != "$with_gnu_ld" && break
+        ;;
+      esac
+    fi
+  done
+  IFS=$lt_save_ifs
+else
+  lt_cv_path_LD=$LD # Let the user override the test with a path.
+fi
+fi
+
+LD=$lt_cv_path_LD
+if test -n "$LD"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5
+$as_echo "$LD" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5
+$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; }
+if ${lt_cv_prog_gnu_ld+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  # I'd rather use --version here, but apparently some GNU lds only accept -v.
+case `$LD -v 2>&1 </dev/null` in
+*GNU* | *'with BFD'*)
+  lt_cv_prog_gnu_ld=yes
+  ;;
+*)
+  lt_cv_prog_gnu_ld=no
+  ;;
+esac
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_gnu_ld" >&5
+$as_echo "$lt_cv_prog_gnu_ld" >&6; }
+with_gnu_ld=$lt_cv_prog_gnu_ld
+
+
+
+
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5
+$as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; }
+if ${lt_cv_path_NM+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$NM"; then
+  # Let the user override the test.
+  lt_cv_path_NM=$NM
+else
+  lt_nm_to_check=${ac_tool_prefix}nm
+  if test -n "$ac_tool_prefix" && test "$build" = "$host"; then
+    lt_nm_to_check="$lt_nm_to_check nm"
+  fi
+  for lt_tmp_nm in $lt_nm_to_check; do
+    lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
+    for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do
+      IFS=$lt_save_ifs
+      test -z "$ac_dir" && ac_dir=.
+      tmp_nm=$ac_dir/$lt_tmp_nm
+      if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then
+        # Check to see if the nm accepts a BSD-compat flag.
+ # Adding the 'sed 1q' prevents false positives on HP-UX, which says: + # nm: unknown option "B" ignored + # Tru64's nm complains that /dev/null is an invalid object file + # MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty + case $build_os in + mingw*) lt_bad_file=conftest.nm/nofile ;; + *) lt_bad_file=/dev/null ;; + esac + case `"$tmp_nm" -B $lt_bad_file 2>&1 | sed '1q'` in + *$lt_bad_file* | *'Invalid file or object type'*) + lt_cv_path_NM="$tmp_nm -B" + break 2 + ;; + *) + case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in + */dev/null*) + lt_cv_path_NM="$tmp_nm -p" + break 2 + ;; + *) + lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but + continue # so that we can try to find one that supports BSD flags + ;; + esac + ;; + esac + fi + done + IFS=$lt_save_ifs + done + : ${lt_cv_path_NM=no} +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5 +$as_echo "$lt_cv_path_NM" >&6; } +if test no != "$lt_cv_path_NM"; then + NM=$lt_cv_path_NM +else + # Didn't find any BSD compatible name lister, look for dumpbin. + if test -n "$DUMPBIN"; then : + # Let the user override the test. + else + if test -n "$ac_tool_prefix"; then + for ac_prog in dumpbin "link -dump" + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_DUMPBIN+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$DUMPBIN"; then + ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DUMPBIN=$ac_cv_prog_DUMPBIN +if test -n "$DUMPBIN"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5 +$as_echo "$DUMPBIN" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$DUMPBIN" && break + done +fi +if test -z "$DUMPBIN"; then + ac_ct_DUMPBIN=$DUMPBIN + for ac_prog in dumpbin "link -dump" +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_DUMPBIN+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_DUMPBIN"; then + ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN +if test -n "$ac_ct_DUMPBIN"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5 +$as_echo "$ac_ct_DUMPBIN" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_DUMPBIN" && break +done + + if test "x$ac_ct_DUMPBIN" = x; then + DUMPBIN=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DUMPBIN=$ac_ct_DUMPBIN + fi +fi + + case `$DUMPBIN -symbols -headers /dev/null 2>&1 | sed '1q'` in + *COFF*) + DUMPBIN="$DUMPBIN -symbols -headers" + ;; + *) + DUMPBIN=: + ;; + esac + fi + + if test : != "$DUMPBIN"; then + NM=$DUMPBIN + fi +fi +test -z "$NM" && NM=nm + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5 +$as_echo_n "checking the name lister ($NM) interface... " >&6; } +if ${lt_cv_nm_interface+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_nm_interface="BSD nm" + echo "int some_variable = 0;" > conftest.$ac_ext + (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5) + (eval "$ac_compile" 2>conftest.err) + cat conftest.err >&5 + (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5) + (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) + cat conftest.err >&5 + (eval echo "\"\$as_me:$LINENO: output\"" >&5) + cat conftest.out >&5 + if $GREP 'External.*some_variable' conftest.out > /dev/null; then + lt_cv_nm_interface="MS dumpbin" + fi + rm -f conftest* +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5 +$as_echo "$lt_cv_nm_interface" >&6; } + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 +$as_echo_n "checking whether ln -s works... " >&6; } +LN_S=$as_ln_s +if test "$LN_S" = "ln -s"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 +$as_echo "no, using $LN_S" >&6; } +fi + +# find the maximum length of command line arguments +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5 +$as_echo_n "checking the maximum length of command line arguments... " >&6; } +if ${lt_cv_sys_max_cmd_len+:} false; then : + $as_echo_n "(cached) " >&6 +else + i=0 + teststring=ABCD + + case $build_os in + msdosdjgpp*) + # On DJGPP, this test can blow up pretty badly due to problems in libc + # (any single argument exceeding 2000 bytes causes a buffer overrun + # during glob expansion). Even if it were fixed, the result of this + # check would be larger than it should be. + lt_cv_sys_max_cmd_len=12288; # 12K is about right + ;; + + gnu*) + # Under GNU Hurd, this test is not required because there is + # no limit to the length of command line arguments. 
+ # Libtool will interpret -1 as no limit whatsoever + lt_cv_sys_max_cmd_len=-1; + ;; + + cygwin* | mingw* | cegcc*) + # On Win9x/ME, this test blows up -- it succeeds, but takes + # about 5 minutes as the teststring grows exponentially. + # Worse, since 9x/ME are not pre-emptively multitasking, + # you end up with a "frozen" computer, even though with patience + # the test eventually succeeds (with a max line length of 256k). + # Instead, let's just punt: use the minimum linelength reported by + # all of the supported platforms: 8192 (on NT/2K/XP). + lt_cv_sys_max_cmd_len=8192; + ;; + + mint*) + # On MiNT this can take a long time and run out of memory. + lt_cv_sys_max_cmd_len=8192; + ;; + + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; + + bitrig* | darwin* | dragonfly* | freebsd* | netbsd* | openbsd*) + # This has been around since 386BSD, at least. Likely further. + if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + ;; + + interix*) + # We know the value 262144 and hardcode it with a safety zone (like BSD) + lt_cv_sys_max_cmd_len=196608 + ;; + + os2*) + # The test takes a long time on OS/2. + lt_cv_sys_max_cmd_len=8192 + ;; + + osf*) + # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not + # nice to cause kernel panics so lets avoid the loop below. + # First set a reasonable default. + lt_cv_sys_max_cmd_len=16384 + # + if test -x /sbin/sysconfig; then + case `/sbin/sysconfig -q proc exec_disable_arg_limit` in + *1*) lt_cv_sys_max_cmd_len=-1 ;; + esac + fi + ;; + sco3.2v5*) + lt_cv_sys_max_cmd_len=102400 + ;; + sysv5* | sco5v6* | sysv4.2uw2*) + kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` + if test -n "$kargmax"; then + lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` + else + lt_cv_sys_max_cmd_len=32768 + fi + ;; + *) + lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` + if test -n "$lt_cv_sys_max_cmd_len" && \ + test undefined != "$lt_cv_sys_max_cmd_len"; then + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + else + # Make teststring a little bigger before we do anything with it. + # a 1K string should be a reasonable start. + for i in 1 2 3 4 5 6 7 8; do + teststring=$teststring$teststring + done + SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. + while { test X`env echo "$teststring$teststring" 2>/dev/null` \ + = "X$teststring$teststring"; } >/dev/null 2>&1 && + test 17 != "$i" # 1/2 MB should be enough + do + i=`expr $i + 1` + teststring=$teststring$teststring + done + # Only check the string length outside the loop. + lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` + teststring= + # Add a significant safety factor because C++ compilers can tack on + # massive amounts of additional arguments before passing them to the + # linker. It appears as though 1/2 is a usable value. 
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` + fi + ;; + esac + +fi + +if test -n "$lt_cv_sys_max_cmd_len"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5 +$as_echo "$lt_cv_sys_max_cmd_len" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5 +$as_echo "none" >&6; } +fi +max_cmd_len=$lt_cv_sys_max_cmd_len + + + + + + +: ${CP="cp -f"} +: ${MV="mv -f"} +: ${RM="rm -f"} + +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + lt_unset=unset +else + lt_unset=false +fi + + + + + +# test EBCDIC or ASCII +case `echo X|tr X '\101'` in + A) # ASCII based system + # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr + lt_SP2NL='tr \040 \012' + lt_NL2SP='tr \015\012 \040\040' + ;; + *) # EBCDIC based system + lt_SP2NL='tr \100 \n' + lt_NL2SP='tr \r\n \100\100' + ;; +esac + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 +$as_echo_n "checking how to convert $build file names to $host format... " >&6; } +if ${lt_cv_to_host_file_cmd+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 + ;; + esac + ;; + *-*-cygwin* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin + ;; + esac + ;; + * ) # unhandled hosts (and "normal" native builds) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; +esac + +fi + +to_host_file_cmd=$lt_cv_to_host_file_cmd +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 +$as_echo "$lt_cv_to_host_file_cmd" >&6; } + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 +$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } +if ${lt_cv_to_tool_file_cmd+:} false; then : + $as_echo_n "(cached) " >&6 +else + #assume ordinary cross tools, or native build. +lt_cv_to_tool_file_cmd=func_convert_file_noop +case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 + ;; + esac + ;; +esac + +fi + +to_tool_file_cmd=$lt_cv_to_tool_file_cmd +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 +$as_echo "$lt_cv_to_tool_file_cmd" >&6; } + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 +$as_echo_n "checking for $LD option to reload object files... 
" >&6; } +if ${lt_cv_ld_reload_flag+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ld_reload_flag='-r' +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5 +$as_echo "$lt_cv_ld_reload_flag" >&6; } +reload_flag=$lt_cv_ld_reload_flag +case $reload_flag in +"" | " "*) ;; +*) reload_flag=" $reload_flag" ;; +esac +reload_cmds='$LD$reload_flag -o $output$reload_objs' +case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + if test yes != "$GCC"; then + reload_cmds=false + fi + ;; + darwin*) + if test yes = "$GCC"; then + reload_cmds='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs' + else + reload_cmds='$LD$reload_flag -o $output$reload_objs' + fi + ;; +esac + + + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. +set dummy ${ac_tool_prefix}objdump; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_OBJDUMP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$OBJDUMP"; then + ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OBJDUMP=$ac_cv_prog_OBJDUMP +if test -n "$OBJDUMP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 +$as_echo "$OBJDUMP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OBJDUMP"; then + ac_ct_OBJDUMP=$OBJDUMP + # Extract the first word of "objdump", so it can be a program name with args. +set dummy objdump; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_OBJDUMP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_OBJDUMP"; then + ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OBJDUMP="objdump" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP +if test -n "$ac_ct_OBJDUMP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 +$as_echo "$ac_ct_OBJDUMP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_OBJDUMP" = x; then + OBJDUMP="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OBJDUMP=$ac_ct_OBJDUMP + fi +else + OBJDUMP="$ac_cv_prog_OBJDUMP" +fi + +test -z "$OBJDUMP" && OBJDUMP=objdump + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5 +$as_echo_n "checking how to recognize dependent libraries... " >&6; } +if ${lt_cv_deplibs_check_method+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_file_magic_cmd='$MAGIC_CMD' +lt_cv_file_magic_test_file= +lt_cv_deplibs_check_method='unknown' +# Need to set the preceding variable on all platforms that support +# interlibrary dependencies. +# 'none' -- dependencies not supported. +# 'unknown' -- same as none, but documents that we really don't know. +# 'pass_all' -- all dependencies passed with no checks. +# 'test_compile' -- check by making test program. +# 'file_magic [[regex]]' -- check by looking for files in library path +# that responds to the $file_magic_cmd with a given extended regex. +# If you have 'file' or equivalent on your system and you're not sure +# whether 'pass_all' will *always* work, you probably want this one. + +case $host_os in +aix[4-9]*) + lt_cv_deplibs_check_method=pass_all + ;; + +beos*) + lt_cv_deplibs_check_method=pass_all + ;; + +bsdi[45]*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' + lt_cv_file_magic_cmd='/usr/bin/file -L' + lt_cv_file_magic_test_file=/shlib/libc.so + ;; + +cygwin*) + # func_win32_libid is a shell function defined in ltmain.sh + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + ;; + +mingw* | pw32*) + # Base MSYS/MinGW do not provide the 'file' command needed by + # func_win32_libid shell function, so use a weaker test based on 'objdump', + # unless we find 'file', for example because we are cross-compiling. + if ( file / ) >/dev/null 2>&1; then + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else + # Keep this pattern in sync with the one in func_win32_libid. + lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; + +cegcc*) + # use the weaker test based on 'objdump'. See mingw*. + lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' 
+ lt_cv_file_magic_cmd='$OBJDUMP -f' + ;; + +darwin* | rhapsody*) + lt_cv_deplibs_check_method=pass_all + ;; + +freebsd* | dragonfly*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. + lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac + else + lt_cv_deplibs_check_method=pass_all + fi + ;; + +haiku*) + lt_cv_deplibs_check_method=pass_all + ;; + +hpux10.20* | hpux11*) + lt_cv_file_magic_cmd=/usr/bin/file + case $host_cpu in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' + lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so + ;; + hppa*64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]' + lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl + ;; + *) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library' + lt_cv_file_magic_test_file=/usr/lib/libc.sl + ;; + esac + ;; + +interix[3-9]*) + # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' + ;; + +irix5* | irix6* | nonstopux*) + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; + esac + lt_cv_deplibs_check_method=pass_all + ;; + +# This must be glibc/ELF. +linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + lt_cv_deplibs_check_method=pass_all + ;; + +netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' + fi + ;; + +newos6*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; + +*nto* | *qnx*) + lt_cv_deplibs_check_method=pass_all + ;; + +openbsd* | bitrig*) + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + fi + ;; + +osf3* | osf4* | osf5*) + lt_cv_deplibs_check_method=pass_all + ;; + +rdos*) + lt_cv_deplibs_check_method=pass_all + ;; + +solaris*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv4 | sysv4.3*) + case $host_vendor in + motorola) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + ;; + ncr) + lt_cv_deplibs_check_method=pass_all + ;; + sequent) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' + ;; + sni) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" + lt_cv_file_magic_test_file=/lib/libc.so + ;; + siemens) + 
lt_cv_deplibs_check_method=pass_all + ;; + pc) + lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; + +tpf*) + lt_cv_deplibs_check_method=pass_all + ;; +os2*) + lt_cv_deplibs_check_method=pass_all + ;; +esac + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 +$as_echo "$lt_cv_deplibs_check_method" >&6; } + +file_magic_glob= +want_nocaseglob=no +if test "$build" = "$host"; then + case $host_os in + mingw* | pw32*) + if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then + want_nocaseglob=yes + else + file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` + fi + ;; + esac +fi + +file_magic_cmd=$lt_cv_file_magic_cmd +deplibs_check_method=$lt_cv_deplibs_check_method +test -z "$deplibs_check_method" && deplibs_check_method=unknown + + + + + + + + + + + + + + + + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. +set dummy ${ac_tool_prefix}dlltool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_DLLTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$DLLTOOL"; then + ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DLLTOOL=$ac_cv_prog_DLLTOOL +if test -n "$DLLTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 +$as_echo "$DLLTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_DLLTOOL"; then + ac_ct_DLLTOOL=$DLLTOOL + # Extract the first word of "dlltool", so it can be a program name with args. +set dummy dlltool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_DLLTOOL"; then + ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_DLLTOOL="dlltool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL +if test -n "$ac_ct_DLLTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 +$as_echo "$ac_ct_DLLTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_DLLTOOL" = x; then + DLLTOOL="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DLLTOOL=$ac_ct_DLLTOOL + fi +else + DLLTOOL="$ac_cv_prog_DLLTOOL" +fi + +test -z "$DLLTOOL" && DLLTOOL=dlltool + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 +$as_echo_n "checking how to associate runtime and link libraries... " >&6; } +if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_sharedlib_from_linklib_cmd='unknown' + +case $host_os in +cygwin* | mingw* | pw32* | cegcc*) + # two different shell functions defined in ltmain.sh; + # decide which one to use based on capabilities of $DLLTOOL + case `$DLLTOOL --help 2>&1` in + *--identify-strict*) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib + ;; + *) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback + ;; + esac + ;; +*) + # fallback: assume linklib IS sharedlib + lt_cv_sharedlib_from_linklib_cmd=$ECHO + ;; +esac + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 +$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } +sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd +test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO + + + + + + + + +if test -n "$ac_tool_prefix"; then + for ac_prog in ar + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AR"; then + ac_cv_prog_AR="$AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_AR="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AR=$ac_cv_prog_AR +if test -n "$AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 +$as_echo "$AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$AR" && break + done +fi +if test -z "$AR"; then + ac_ct_AR=$AR + for ac_prog in ar +do + # Extract the first word of "$ac_prog", so it can be a program name with args. 
+set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_AR"; then + ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_AR="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_AR=$ac_cv_prog_ac_ct_AR +if test -n "$ac_ct_AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 +$as_echo "$ac_ct_AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_AR" && break +done + + if test "x$ac_ct_AR" = x; then + AR="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + AR=$ac_ct_AR + fi +fi + +: ${AR=ar} +: ${AR_FLAGS=cr} + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 +$as_echo_n "checking for archiver @FILE support... " >&6; } +if ${lt_cv_ar_at_file+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ar_at_file=no + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + echo conftest.$ac_objext > conftest.lst + lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' + { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 + (eval $lt_ar_try) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if test 0 -eq "$ac_status"; then + # Ensure the archiver fails upon bogus file names. + rm -f conftest.$ac_objext libconftest.a + { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 + (eval $lt_ar_try) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if test 0 -ne "$ac_status"; then + lt_cv_ar_at_file=@ + fi + fi + rm -f conftest.* libconftest.a + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 +$as_echo "$lt_cv_ar_at_file" >&6; } + +if test no = "$lt_cv_ar_at_file"; then + archiver_list_spec= +else + archiver_list_spec=$lt_cv_ar_at_file +fi + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. +set dummy ${ac_tool_prefix}strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +STRIP=$ac_cv_prog_STRIP +if test -n "$STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 +$as_echo "$STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. +set dummy strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_STRIP="strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP +if test -n "$ac_ct_STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 +$as_echo "$ac_ct_STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_STRIP" = x; then + STRIP=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + STRIP=$ac_ct_STRIP + fi +else + STRIP="$ac_cv_prog_STRIP" +fi + +test -z "$STRIP" && STRIP=: + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. +set dummy ${ac_tool_prefix}ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$RANLIB"; then + ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +RANLIB=$ac_cv_prog_RANLIB +if test -n "$RANLIB"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 +$as_echo "$RANLIB" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_RANLIB"; then + ac_ct_RANLIB=$RANLIB + # Extract the first word of "ranlib", so it can be a program name with args. +set dummy ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_RANLIB"; then + ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_RANLIB="ranlib" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB +if test -n "$ac_ct_RANLIB"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 +$as_echo "$ac_ct_RANLIB" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_RANLIB" = x; then + RANLIB=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + RANLIB=$ac_ct_RANLIB + fi +else + RANLIB="$ac_cv_prog_RANLIB" +fi + +test -z "$RANLIB" && RANLIB=: + + + + + + +# Determine commands to create old-style static archives. +old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= + +if test -n "$RANLIB"; then + case $host_os in + bitrig* | openbsd*) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" + ;; + *) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" + ;; + esac + old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" +fi + +case $host_os in + darwin*) + lock_old_archive_extraction=yes ;; + *) + lock_old_archive_extraction=no ;; +esac + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + +# Check for command to grab the raw symbol name followed by C symbol from nm. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5 +$as_echo_n "checking command to parse $NM output from $compiler object... " >&6; } +if ${lt_cv_sys_global_symbol_pipe+:} false; then : + $as_echo_n "(cached) " >&6 +else + +# These are sane defaults that work on at least a few old systems. +# [They come from Ultrix. What could be older than Ultrix?!! ;)] + +# Character class describing NM global symbol codes. +symcode='[BCDEGRST]' + +# Regexp to match symbols that can be accessed directly from C. +sympat='\([_A-Za-z][_A-Za-z0-9]*\)' + +# Define system-specific variables. +case $host_os in +aix*) + symcode='[BCDT]' + ;; +cygwin* | mingw* | pw32* | cegcc*) + symcode='[ABCDGISTW]' + ;; +hpux*) + if test ia64 = "$host_cpu"; then + symcode='[ABCDEGRST]' + fi + ;; +irix* | nonstopux*) + symcode='[BCDEGRST]' + ;; +osf*) + symcode='[BCDEGQRST]' + ;; +solaris*) + symcode='[BDRT]' + ;; +sco3.2v5*) + symcode='[DT]' + ;; +sysv4.2uw2*) + symcode='[DT]' + ;; +sysv5* | sco5v6* | unixware* | OpenUNIX*) + symcode='[ABDT]' + ;; +sysv4) + symcode='[DFNSTU]' + ;; +esac + +# If we're using GNU nm, then use its standard symbol codes. 
+case `$NM -V 2>&1` in +*GNU* | *'with BFD'*) + symcode='[ABCDGIRSTW]' ;; +esac + +if test "$lt_cv_nm_interface" = "MS dumpbin"; then + # Gets list of data symbols to import. + lt_cv_sys_global_symbol_to_import="sed -n -e 's/^I .* \(.*\)$/\1/p'" + # Adjust the below global symbol transforms to fixup imported variables. + lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'" + lt_c_name_hook=" -e 's/^I .* \(.*\)$/ {\"\1\", (void *) 0},/p'" + lt_c_name_lib_hook="\ + -e 's/^I .* \(lib.*\)$/ {\"\1\", (void *) 0},/p'\ + -e 's/^I .* \(.*\)$/ {\"lib\1\", (void *) 0},/p'" +else + # Disable hooks by default. + lt_cv_sys_global_symbol_to_import= + lt_cdecl_hook= + lt_c_name_hook= + lt_c_name_lib_hook= +fi + +# Transform an extracted symbol line into a proper C declaration. +# Some systems (esp. on ia64) link data and code symbols differently, +# so use this general approach. +lt_cv_sys_global_symbol_to_cdecl="sed -n"\ +$lt_cdecl_hook\ +" -e 's/^T .* \(.*\)$/extern int \1();/p'"\ +" -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'" + +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="sed -n"\ +$lt_c_name_hook\ +" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ +" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/p'" + +# Transform an extracted symbol line into symbol name with lib prefix and +# symbol address. +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n"\ +$lt_c_name_lib_hook\ +" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ +" -e 's/^$symcode$symcode* .* \(lib.*\)$/ {\"\1\", (void *) \&\1},/p'"\ +" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"lib\1\", (void *) \&\1},/p'" + +# Handle CRLF in mingw tool chain +opt_cr= +case $build_os in +mingw*) + opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp + ;; +esac + +# Try without a prefix underscore, then with it. +for ac_symprfx in "" "_"; do + + # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. + symxfrm="\\1 $ac_symprfx\\2 \\2" + + # Write the raw and C identifiers. + if test "$lt_cv_nm_interface" = "MS dumpbin"; then + # Fake it for dumpbin and say T for any non-static function, + # D for any global variable and I for any imported variable. + # Also find C++ and __fastcall symbols from MSVC++, + # which start with @ or ?. + lt_cv_sys_global_symbol_pipe="$AWK '"\ +" {last_section=section; section=\$ 3};"\ +" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ +" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ +" /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\ +" /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\ +" /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\ +" \$ 0!~/External *\|/{next};"\ +" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ +" {if(hide[section]) next};"\ +" {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\ +" {split(\$ 0,a,/\||\r/); split(a[2],s)};"\ +" s[1]~/^[@?]/{print f,s[1],s[1]; next};"\ +" s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\ +" ' prfx=^$ac_symprfx" + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi + lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" + + # Check to see that the pipe works correctly. 
+ pipe_works=no + + rm -f conftest* + cat > conftest.$ac_ext <<_LT_EOF +#ifdef __cplusplus +extern "C" { +#endif +char nm_test_var; +void nm_test_func(void); +void nm_test_func(void){} +#ifdef __cplusplus +} +#endif +int main(){nm_test_var='a';nm_test_func();return(0);} +_LT_EOF + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + # Now try to grab the symbols. + nlist=conftest.nm + $ECHO "$as_me:$LINENO: $NM conftest.$ac_objext | $lt_cv_sys_global_symbol_pipe > $nlist" >&5 + if eval "$NM" conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist 2>&5 && test -s "$nlist"; then + # Try sorting and uniquifying the output. + if sort "$nlist" | uniq > "$nlist"T; then + mv -f "$nlist"T "$nlist" + else + rm -f "$nlist"T + fi + + # Make sure that we snagged all the symbols we need. + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE +/* DATA imports from DLLs on WIN32 can't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT_DLSYM_CONST +#elif defined __osf__ +/* This system does not cope well with relocations in const data. */ +# define LT_DLSYM_CONST +#else +# define LT_DLSYM_CONST const +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +_LT_EOF + # Now generate the symbol file. + eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' + + cat <<_LT_EOF >> conftest.$ac_ext + +/* The mapping between symbol names and symbols. */ +LT_DLSYM_CONST struct { + const char *name; + void *address; +} +lt__PROGRAM__LTX_preloaded_symbols[] = +{ + { "@PROGRAM@", (void *) 0 }, +_LT_EOF + $SED "s/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext + cat <<\_LT_EOF >> conftest.$ac_ext + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt__PROGRAM__LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif +_LT_EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext + lt_globsym_save_LIBS=$LIBS + lt_globsym_save_CFLAGS=$CFLAGS + LIBS=conftstm.$ac_objext + CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s conftest$ac_exeext; then + pipe_works=yes + fi + LIBS=$lt_globsym_save_LIBS + CFLAGS=$lt_globsym_save_CFLAGS + else + echo "cannot find nm_test_func in $nlist" >&5 + fi + else + echo "cannot find nm_test_var in $nlist" >&5 + fi + else + echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 + fi + else + echo "$progname: failed program was:" >&5 + cat conftest.$ac_ext >&5 + fi + rm -rf conftest* conftst* + + # Do not use the global_symbol_pipe unless it works. 
+ if test yes = "$pipe_works"; then + break + else + lt_cv_sys_global_symbol_pipe= + fi +done + +fi + +if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= +fi +if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5 +$as_echo "failed" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 +$as_echo "ok" >&6; } +fi + +# Response file support. +if test "$lt_cv_nm_interface" = "MS dumpbin"; then + nm_file_list_spec='@' +elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then + nm_file_list_spec='@' +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 +$as_echo_n "checking for sysroot... " >&6; } + +# Check whether --with-sysroot was given. +if test "${with_sysroot+set}" = set; then : + withval=$with_sysroot; +else + with_sysroot=no +fi + + +lt_sysroot= +case $with_sysroot in #( + yes) + if test yes = "$GCC"; then + lt_sysroot=`$CC --print-sysroot 2>/dev/null` + fi + ;; #( + /*) + lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` + ;; #( + no|'') + ;; #( + *) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_sysroot" >&5 +$as_echo "$with_sysroot" >&6; } + as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 + ;; +esac + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 +$as_echo "${lt_sysroot:-no}" >&6; } + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a working dd" >&5 +$as_echo_n "checking for a working dd... " >&6; } +if ${ac_cv_path_lt_DD+:} false; then : + $as_echo_n "(cached) " >&6 +else + printf 0123456789abcdef0123456789abcdef >conftest.i +cat conftest.i conftest.i >conftest2.i +: ${lt_DD:=$DD} +if test -z "$lt_DD"; then + ac_path_lt_DD_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in dd; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_lt_DD="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_lt_DD" || continue +if "$ac_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then + cmp -s conftest.i conftest.out \ + && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=: +fi + $ac_path_lt_DD_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_lt_DD"; then + : + fi +else + ac_cv_path_lt_DD=$lt_DD +fi + +rm -f conftest.i conftest2.i conftest.out +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_lt_DD" >&5 +$as_echo "$ac_cv_path_lt_DD" >&6; } + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to truncate binary pipes" >&5 +$as_echo_n "checking how to truncate binary pipes... " >&6; } +if ${lt_cv_truncate_bin+:} false; then : + $as_echo_n "(cached) " >&6 +else + printf 0123456789abcdef0123456789abcdef >conftest.i +cat conftest.i conftest.i >conftest2.i +lt_cv_truncate_bin= +if "$ac_cv_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then + cmp -s conftest.i conftest.out \ + && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1" +fi +rm -f conftest.i conftest2.i conftest.out +test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_truncate_bin" >&5 +$as_echo "$lt_cv_truncate_bin" >&6; } + + + + + + + +# Calculate cc_basename. 
Skip known compiler wrappers and cross-prefix. +func_cc_basename () +{ + for cc_temp in $*""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac + done + func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +} + + +# Check whether --enable-libtool-lock was given. +if test "${enable_libtool_lock+set}" = set; then : + enableval=$enable_libtool_lock; +fi + +test no = "$enable_libtool_lock" || enable_libtool_lock=yes + +# Some flags need to be propagated to the compiler or linker for good +# libtool support. +case $host in +ia64-*-hpux*) + # Find out what ABI is being produced by ac_compile, and set mode + # options accordingly. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + case `/usr/bin/file conftest.$ac_objext` in + *ELF-32*) + HPUX_IA64_MODE=32 + ;; + *ELF-64*) + HPUX_IA64_MODE=64 + ;; + esac + fi + rm -rf conftest* + ;; +*-*-irix6*) + # Find out what ABI is being produced by ac_compile, and set linker + # options accordingly. + echo '#line '$LINENO' "configure"' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + if test yes = "$lt_cv_prog_gnu_ld"; then + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; + *N32*) + LD="${LD-ld} -melf32bmipn32" + ;; + *64-bit*) + LD="${LD-ld} -melf64bmip" + ;; + esac + else + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; + *N32*) + LD="${LD-ld} -n32" + ;; + *64-bit*) + LD="${LD-ld} -64" + ;; + esac + fi + fi + rm -rf conftest* + ;; + +mips64*-*linux*) + # Find out what ABI is being produced by ac_compile, and set linker + # options accordingly. + echo '#line '$LINENO' "configure"' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + emul=elf + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + emul="${emul}32" + ;; + *64-bit*) + emul="${emul}64" + ;; + esac + case `/usr/bin/file conftest.$ac_objext` in + *MSB*) + emul="${emul}btsmip" + ;; + *LSB*) + emul="${emul}ltsmip" + ;; + esac + case `/usr/bin/file conftest.$ac_objext` in + *N32*) + emul="${emul}n32" + ;; + esac + LD="${LD-ld} -m $emul" + fi + rm -rf conftest* + ;; + +x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ +s390*-*linux*|s390*-*tpf*|sparc*-*linux*) + # Find out what ABI is being produced by ac_compile, and set linker + # options accordingly. Note that the listed cases only cover the + # situations where additional linker options are needed (such as when + # doing 32-bit compilation for a host where ld defaults to 64-bit, or + # vice versa); the common cases where no linker options are needed do + # not appear in the list. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; }; then + case `/usr/bin/file conftest.o` in + *32-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_i386_fbsd" + ;; + x86_64-*linux*) + case `/usr/bin/file conftest.o` in + *x86-64*) + LD="${LD-ld} -m elf32_x86_64" + ;; + *) + LD="${LD-ld} -m elf_i386" + ;; + esac + ;; + powerpc64le-*linux*) + LD="${LD-ld} -m elf32lppclinux" + ;; + powerpc64-*linux*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_x86_64_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + powerpcle-*linux*) + LD="${LD-ld} -m elf64lppc" + ;; + powerpc-*linux*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*|s390*-*tpf*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. + SAVE_CFLAGS=$CFLAGS + CFLAGS="$CFLAGS -belf" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5 +$as_echo_n "checking whether the C compiler needs -belf... " >&6; } +if ${lt_cv_cc_needs_belf+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + lt_cv_cc_needs_belf=yes +else + lt_cv_cc_needs_belf=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5 +$as_echo "$lt_cv_cc_needs_belf" >&6; } + if test yes != "$lt_cv_cc_needs_belf"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS=$SAVE_CFLAGS + fi + ;; +*-*solaris*) + # Find out what ABI is being produced by ac_compile, and set linker + # options accordingly. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + case `/usr/bin/file conftest.o` in + *64-bit*) + case $lt_cv_prog_gnu_ld in + yes*) + case $host in + i?86-*-solaris*|x86_64-*-solaris*) + LD="${LD-ld} -m elf_x86_64" + ;; + sparc*-*-solaris*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + # GNU ld 2.21 introduced _sol2 emulations. Use them if available. + if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then + LD=${LD-ld}_sol2 + fi + ;; + *) + if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then + LD="${LD-ld} -64" + fi + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; +esac + +need_locks=$enable_libtool_lock + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. 
+set dummy ${ac_tool_prefix}mt; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$MANIFEST_TOOL"; then + ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL +if test -n "$MANIFEST_TOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 +$as_echo "$MANIFEST_TOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_MANIFEST_TOOL"; then + ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL + # Extract the first word of "mt", so it can be a program name with args. +set dummy mt; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_MANIFEST_TOOL"; then + ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL +if test -n "$ac_ct_MANIFEST_TOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 +$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_MANIFEST_TOOL" = x; then + MANIFEST_TOOL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL + fi +else + MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" +fi + +test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 +$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } +if ${lt_cv_path_mainfest_tool+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_path_mainfest_tool=no + echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 + $MANIFEST_TOOL '-?' 
2>conftest.err > conftest.out + cat conftest.err >&5 + if $GREP 'Manifest Tool' conftest.out > /dev/null; then + lt_cv_path_mainfest_tool=yes + fi + rm -f conftest* +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 +$as_echo "$lt_cv_path_mainfest_tool" >&6; } +if test yes != "$lt_cv_path_mainfest_tool"; then + MANIFEST_TOOL=: +fi + + + + + + + case $host_os in + rhapsody* | darwin*) + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. +set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_DSYMUTIL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$DSYMUTIL"; then + ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DSYMUTIL=$ac_cv_prog_DSYMUTIL +if test -n "$DSYMUTIL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5 +$as_echo "$DSYMUTIL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_DSYMUTIL"; then + ac_ct_DSYMUTIL=$DSYMUTIL + # Extract the first word of "dsymutil", so it can be a program name with args. +set dummy dsymutil; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_DSYMUTIL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_DSYMUTIL"; then + ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL +if test -n "$ac_ct_DSYMUTIL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5 +$as_echo "$ac_ct_DSYMUTIL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_DSYMUTIL" = x; then + DSYMUTIL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DSYMUTIL=$ac_ct_DSYMUTIL + fi +else + DSYMUTIL="$ac_cv_prog_DSYMUTIL" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. +set dummy ${ac_tool_prefix}nmedit; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_prog_NMEDIT+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$NMEDIT"; then + ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +NMEDIT=$ac_cv_prog_NMEDIT +if test -n "$NMEDIT"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5 +$as_echo "$NMEDIT" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_NMEDIT"; then + ac_ct_NMEDIT=$NMEDIT + # Extract the first word of "nmedit", so it can be a program name with args. +set dummy nmedit; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_NMEDIT+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_NMEDIT"; then + ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_NMEDIT="nmedit" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT +if test -n "$ac_ct_NMEDIT"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5 +$as_echo "$ac_ct_NMEDIT" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_NMEDIT" = x; then + NMEDIT=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + NMEDIT=$ac_ct_NMEDIT + fi +else + NMEDIT="$ac_cv_prog_NMEDIT" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. +set dummy ${ac_tool_prefix}lipo; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_LIPO+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$LIPO"; then + ac_cv_prog_LIPO="$LIPO" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_LIPO="${ac_tool_prefix}lipo" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +LIPO=$ac_cv_prog_LIPO +if test -n "$LIPO"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5 +$as_echo "$LIPO" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_LIPO"; then + ac_ct_LIPO=$LIPO + # Extract the first word of "lipo", so it can be a program name with args. +set dummy lipo; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_LIPO+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_LIPO"; then + ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_LIPO="lipo" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO +if test -n "$ac_ct_LIPO"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5 +$as_echo "$ac_ct_LIPO" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_LIPO" = x; then + LIPO=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + LIPO=$ac_ct_LIPO + fi +else + LIPO="$ac_cv_prog_LIPO" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. +set dummy ${ac_tool_prefix}otool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_OTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$OTOOL"; then + ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_OTOOL="${ac_tool_prefix}otool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OTOOL=$ac_cv_prog_OTOOL +if test -n "$OTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5 +$as_echo "$OTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OTOOL"; then + ac_ct_OTOOL=$OTOOL + # Extract the first word of "otool", so it can be a program name with args. +set dummy otool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_prog_ac_ct_OTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_OTOOL"; then + ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OTOOL="otool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL +if test -n "$ac_ct_OTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5 +$as_echo "$ac_ct_OTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_OTOOL" = x; then + OTOOL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OTOOL=$ac_ct_OTOOL + fi +else + OTOOL="$ac_cv_prog_OTOOL" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args. +set dummy ${ac_tool_prefix}otool64; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_OTOOL64+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$OTOOL64"; then + ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OTOOL64=$ac_cv_prog_OTOOL64 +if test -n "$OTOOL64"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5 +$as_echo "$OTOOL64" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OTOOL64"; then + ac_ct_OTOOL64=$OTOOL64 + # Extract the first word of "otool64", so it can be a program name with args. +set dummy otool64; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_OTOOL64+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_OTOOL64"; then + ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_OTOOL64="otool64" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 +if test -n "$ac_ct_OTOOL64"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5 +$as_echo "$ac_ct_OTOOL64" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_OTOOL64" = x; then + OTOOL64=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OTOOL64=$ac_ct_OTOOL64 + fi +else + OTOOL64="$ac_cv_prog_OTOOL64" +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5 +$as_echo_n "checking for -single_module linker flag... " >&6; } +if ${lt_cv_apple_cc_single_mod+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_apple_cc_single_mod=no + if test -z "$LT_MULTI_MODULE"; then + # By default we will add the -single_module flag. You can override + # by either setting the environment variable LT_MULTI_MODULE + # non-empty at configure time, or by adding -multi_module to the + # link flags. + rm -rf libconftest.dylib* + echo "int foo(void){return 1;}" > conftest.c + echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ +-dynamiclib -Wl,-single_module conftest.c" >&5 + $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ + -dynamiclib -Wl,-single_module conftest.c 2>conftest.err + _lt_result=$? + # If there is a non-empty error log, and "single_module" + # appears in it, assume the flag caused a linker warning + if test -s conftest.err && $GREP single_module conftest.err; then + cat conftest.err >&5 + # Otherwise, if the output was created with a 0 exit code from + # the compiler, it worked. + elif test -f libconftest.dylib && test 0 = "$_lt_result"; then + lt_cv_apple_cc_single_mod=yes + else + cat conftest.err >&5 + fi + rm -rf libconftest.dylib* + rm -f conftest.* + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5 +$as_echo "$lt_cv_apple_cc_single_mod" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5 +$as_echo_n "checking for -exported_symbols_list linker flag... " >&6; } +if ${lt_cv_ld_exported_symbols_list+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ld_exported_symbols_list=no + save_LDFLAGS=$LDFLAGS + echo "_main" > conftest.sym + LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + lt_cv_ld_exported_symbols_list=yes +else + lt_cv_ld_exported_symbols_list=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5 +$as_echo "$lt_cv_ld_exported_symbols_list" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5 +$as_echo_n "checking for -force_load linker flag... 
" >&6; } +if ${lt_cv_ld_force_load+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ld_force_load=no + cat > conftest.c << _LT_EOF +int forced_loaded() { return 2;} +_LT_EOF + echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5 + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 + echo "$AR cr libconftest.a conftest.o" >&5 + $AR cr libconftest.a conftest.o 2>&5 + echo "$RANLIB libconftest.a" >&5 + $RANLIB libconftest.a 2>&5 + cat > conftest.c << _LT_EOF +int main() { return 0;} +_LT_EOF + echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5 + $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err + _lt_result=$? + if test -s conftest.err && $GREP force_load conftest.err; then + cat conftest.err >&5 + elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then + lt_cv_ld_force_load=yes + else + cat conftest.err >&5 + fi + rm -f conftest.err libconftest.a conftest conftest.c + rm -rf conftest.dSYM + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5 +$as_echo "$lt_cv_ld_force_load" >&6; } + case $host_os in + rhapsody* | darwin1.[012]) + _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;; + darwin1.*) + _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; + darwin*) # darwin 5.x on + # if running on 10.5 or later, the deployment target defaults + # to the OS version, if on x86, and 10.4, the deployment + # target defaults to 10.4. Don't you love it? + case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in + 10.0,*86*-darwin8*|10.0,*-darwin[912]*) + _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; + 10.[012][,.]*) + _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; + 10.*|11.*) + _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; + esac + ;; + esac + if test yes = "$lt_cv_apple_cc_single_mod"; then + _lt_dar_single_mod='$single_module' + fi + if test yes = "$lt_cv_ld_exported_symbols_list"; then + _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' + else + _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib' + fi + if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then + _lt_dsymutil='~$DSYMUTIL $lib || :' + else + _lt_dsymutil= + fi + ;; + esac + +# func_munge_path_list VARIABLE PATH +# ----------------------------------- +# VARIABLE is name of variable containing _space_ separated list of +# directories to be munged by the contents of PATH, which is string +# having a format: +# "DIR[:DIR]:" +# string "DIR[ DIR]" will be prepended to VARIABLE +# ":DIR[:DIR]" +# string "DIR[ DIR]" will be appended to VARIABLE +# "DIRP[:DIRP]::[DIRA:]DIRA" +# string "DIRP[ DIRP]" will be prepended to VARIABLE and string +# "DIRA[ DIRA]" will be appended to VARIABLE +# "DIR[:DIR]" +# VARIABLE will be replaced by "DIR[ DIR]" +func_munge_path_list () +{ + case x$2 in + x) + ;; + *:) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" + ;; + x:*) + eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" + ;; + *::*) + eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" + eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" + ;; + *) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" + ;; + esac +} + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' 
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5
+$as_echo_n "checking how to run the C preprocessor... " >&6; }
+# On Suns, sometimes $CPP names a directory.
+if test -n "$CPP" && test -d "$CPP"; then
+  CPP=
+fi
+if test -z "$CPP"; then
+  if ${ac_cv_prog_CPP+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+      # Double quotes because CPP needs to be expanded
+    for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp"
+    do
+      ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+  # Use a header file that comes with gcc, so configuring glibc
+  # with a fresh cross-compiler works.
+  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+  # <limits.h> exists even on freestanding compilers.
+  # On the NeXT, cc -E runs the code through the compiler's parser,
+  # not just through cpp. "Syntax error" is here to catch this case.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+  Syntax error
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+
+else
+  # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+  # OK, works on sane cases. Now check whether nonexistent headers
+  # can be detected and how.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+  # Broken: success on invalid input.
+continue
+else
+  # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+  break
+fi
+
+    done
+    ac_cv_prog_CPP=$CPP
+
+fi
+  CPP=$ac_cv_prog_CPP
+else
+  ac_cv_prog_CPP=$CPP
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5
+$as_echo "$CPP" >&6; }
+ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+  # Use a header file that comes with gcc, so configuring glibc
+  # with a fresh cross-compiler works.
+  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+  # <limits.h> exists even on freestanding compilers.
+  # On the NeXT, cc -E runs the code through the compiler's parser,
+  # not just through cpp. "Syntax error" is here to catch this case.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+  Syntax error
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+
+else
+  # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+  # OK, works on sane cases. Now check whether nonexistent headers
+  # can be detected and how.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+  # Broken: success on invalid input.
+continue
+else
+  # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+
+else
+  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
+$as_echo_n "checking for ANSI C header files... " >&6; }
+if ${ac_cv_header_stdc+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <float.h>
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_header_stdc=yes
+else
+  ac_cv_header_stdc=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+if test $ac_cv_header_stdc = yes; then
+  # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <string.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+  $EGREP "memchr" >/dev/null 2>&1; then :
+
+else
+  ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+  # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <stdlib.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+  $EGREP "free" >/dev/null 2>&1; then :
+
+else
+  ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+  # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
+  if test "$cross_compiling" = yes; then :
+  :
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <ctype.h>
+#include <stdlib.h>
+#if ((' ' & 0x0FF) == 0x020)
+# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
+# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
+#else
+# define ISLOWER(c) \
+  (('a' <= (c) && (c) <= 'i') \
+    || ('j' <= (c) && (c) <= 'r') \
+    || ('s' <= (c) && (c) <= 'z'))
+# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
+#endif
+
+#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
+int
+main ()
+{
+  int i;
+  for (i = 0; i < 256; i++)
+    if (XOR (islower (i), ISLOWER (i))
+        || toupper (i) != TOUPPER (i))
+      return 2;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+
+else
+  ac_cv_header_stdc=no
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+  conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5
+$as_echo "$ac_cv_header_stdc" >&6; }
+if test $ac_cv_header_stdc = yes; then
+
+$as_echo "#define STDC_HEADERS 1" >>confdefs.h
+
+fi
+
+# On IRIX 5.3, sys/types and inttypes.h are conflicting.
+for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ + inttypes.h stdint.h unistd.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default +" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + +for ac_header in dlfcn.h +do : + ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default +" +if test "x$ac_cv_header_dlfcn_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_DLFCN_H 1 +_ACEOF + +fi + +done + + + +func_stripname_cnf () +{ + case $2 in + .*) func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; s%\\\\$2\$%%"`;; + *) func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; s%$2\$%%"`;; + esac +} # func_stripname_cnf + + + + + +# Set options + + + + enable_dlopen=no + + + enable_win32_dll=no + + + + # Check whether --enable-static was given. +if test "${enable_static+set}" = set; then : + enableval=$enable_static; p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; + *) + enable_static=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, + for pkg in $enableval; do + IFS=$lt_save_ifs + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS=$lt_save_ifs + ;; + esac +else + enable_static=yes +fi + + + + + + + + + + +# Check whether --with-pic was given. +if test "${with_pic+set}" = set; then : + withval=$with_pic; lt_p=${PACKAGE-default} + case $withval in + yes|no) pic_mode=$withval ;; + *) + pic_mode=default + # Look at the argument we got. We use all the common list separators. + lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, + for lt_pkg in $withval; do + IFS=$lt_save_ifs + if test "X$lt_pkg" = "X$lt_p"; then + pic_mode=yes + fi + done + IFS=$lt_save_ifs + ;; + esac +else + pic_mode=default +fi + + + + + + + + + # Check whether --enable-fast-install was given. +if test "${enable_fast_install+set}" = set; then : + enableval=$enable_fast_install; p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, + for pkg in $enableval; do + IFS=$lt_save_ifs + if test "X$pkg" = "X$p"; then + enable_fast_install=yes + fi + done + IFS=$lt_save_ifs + ;; + esac +else + enable_fast_install=yes +fi + + + + + + + + + shared_archive_member_spec= +case $host,$enable_shared in +power*-*-aix[5-9]*,yes) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking which variant of shared library versioning to provide" >&5 +$as_echo_n "checking which variant of shared library versioning to provide... " >&6; } + +# Check whether --with-aix-soname was given. +if test "${with_aix_soname+set}" = set; then : + withval=$with_aix_soname; case $withval in + aix|svr4|both) + ;; + *) + as_fn_error $? 
"Unknown argument to --with-aix-soname" "$LINENO" 5 + ;; + esac + lt_cv_with_aix_soname=$with_aix_soname +else + if ${lt_cv_with_aix_soname+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_with_aix_soname=aix +fi + + with_aix_soname=$lt_cv_with_aix_soname +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_aix_soname" >&5 +$as_echo "$with_aix_soname" >&6; } + if test aix != "$with_aix_soname"; then + # For the AIX way of multilib, we name the shared archive member + # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o', + # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File. + # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag, + # the AIX toolchain works better with OBJECT_MODE set (default 32). + if test 64 = "${OBJECT_MODE-32}"; then + shared_archive_member_spec=shr_64 + else + shared_archive_member_spec=shr + fi + fi + ;; +*) + with_aix_soname=aix + ;; +esac + + + + + + + + + + +# This can be used to rebuild libtool when needed +LIBTOOL_DEPS=$ltmain + +# Always use our own libtool. +LIBTOOL='$(SHELL) $(top_builddir)/libtool' + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +test -z "$LN_S" && LN_S="ln -s" + + + + + + + + + + + + + + +if test -n "${ZSH_VERSION+set}"; then + setopt NO_GLOB_SUBST +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5 +$as_echo_n "checking for objdir... " >&6; } +if ${lt_cv_objdir+:} false; then : + $as_echo_n "(cached) " >&6 +else + rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. + lt_cv_objdir=_libs +fi +rmdir .libs 2>/dev/null +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5 +$as_echo "$lt_cv_objdir" >&6; } +objdir=$lt_cv_objdir + + + + + +cat >>confdefs.h <<_ACEOF +#define LT_OBJDIR "$lt_cv_objdir/" +_ACEOF + + + + +case $host_os in +aix3*) + # AIX sometimes has problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. + if test set != "${COLLECT_NAMES+set}"; then + COLLECT_NAMES= + export COLLECT_NAMES + fi + ;; +esac + +# Global variables: +ofile=libtool +can_build_shared=yes + +# All known linkers require a '.a' archive for static linking (except MSVC, +# which needs '.lib'). +libext=a + +with_gnu_ld=$lt_cv_prog_gnu_ld + +old_CC=$CC +old_CFLAGS=$CFLAGS + +# Set sane defaults for various variables +test -z "$CC" && CC=cc +test -z "$LTCC" && LTCC=$CC +test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS +test -z "$LD" && LD=ld +test -z "$ac_objext" && ac_objext=o + +func_cc_basename $compiler +cc_basename=$func_cc_basename_result + + +# Only perform the check for file, if the check method requires it +test -z "$MAGIC_CMD" && MAGIC_CMD=file +case $deplibs_check_method in +file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5 +$as_echo_n "checking for ${ac_tool_prefix}file... " >&6; } +if ${lt_cv_path_MAGIC_CMD+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD=$MAGIC_CMD + lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS=$lt_save_ifs + test -z "$ac_dir" && ac_dir=. 
+ if test -f "$ac_dir/${ac_tool_prefix}file"; then + lt_cv_path_MAGIC_CMD=$ac_dir/"${ac_tool_prefix}file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD=$lt_cv_path_MAGIC_CMD + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS=$lt_save_ifs + MAGIC_CMD=$lt_save_MAGIC_CMD + ;; +esac +fi + +MAGIC_CMD=$lt_cv_path_MAGIC_CMD +if test -n "$MAGIC_CMD"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 +$as_echo "$MAGIC_CMD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + + + +if test -z "$lt_cv_path_MAGIC_CMD"; then + if test -n "$ac_tool_prefix"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for file" >&5 +$as_echo_n "checking for file... " >&6; } +if ${lt_cv_path_MAGIC_CMD+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD=$MAGIC_CMD + lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS=$lt_save_ifs + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/file"; then + lt_cv_path_MAGIC_CMD=$ac_dir/"file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD=$lt_cv_path_MAGIC_CMD + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. 
Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS=$lt_save_ifs + MAGIC_CMD=$lt_save_MAGIC_CMD + ;; +esac +fi + +MAGIC_CMD=$lt_cv_path_MAGIC_CMD +if test -n "$MAGIC_CMD"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 +$as_echo "$MAGIC_CMD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + else + MAGIC_CMD=: + fi +fi + + fi + ;; +esac + +# Use C for the default configuration in the libtool script + +lt_save_CC=$CC +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +# Source file extension for C test sources. +ac_ext=c + +# Object file extension for compiled C test sources. +objext=o +objext=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(){return(0);}' + + + + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + +# Save the default compiler, since it gets overwritten when the other +# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. +compiler_DEFAULT=$CC + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* + +ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* + + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... +if test -n "$compiler"; then + +lt_prog_compiler_no_builtin_flag= + +if test yes = "$GCC"; then + case $cc_basename in + nvcc*) + lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;; + *) + lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;; + esac + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 +$as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; } +if ${lt_cv_prog_compiler_rtti_exceptions+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_rtti_exceptions=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="-fno-rtti -fno-exceptions" ## exclude from sc_useless_quotes_in_assignment + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. 
+ lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_rtti_exceptions=yes + fi + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 +$as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; } + +if test yes = "$lt_cv_prog_compiler_rtti_exceptions"; then + lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" +else + : +fi + +fi + + + + + + + lt_prog_compiler_wl= +lt_prog_compiler_pic= +lt_prog_compiler_static= + + + if test yes = "$GCC"; then + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_static='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test ia64 = "$host_cpu"; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + fi + lt_prog_compiler_pic='-fPIC' + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + lt_prog_compiler_pic='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the '-m68020' flag to GCC prevents building anything better, + # like '-m68040'. + lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + lt_prog_compiler_pic='-DDLL_EXPORT' + case $host_os in + os2*) + lt_prog_compiler_static='$wl-static' + ;; + esac + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic='-fno-common' + ;; + + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + lt_prog_compiler_static= + ;; + + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + ;; + + interix[3-9]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + lt_prog_compiler_can_build_shared=no + enable_shared=no + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. 
+ lt_prog_compiler_pic='-fPIC -shared' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic=-Kconform_pic + fi + ;; + + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + + case $cc_basename in + nvcc*) # Cuda Compiler Driver 2.2 + lt_prog_compiler_wl='-Xlinker ' + if test -n "$lt_prog_compiler_pic"; then + lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic" + fi + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + lt_prog_compiler_wl='-Wl,' + if test ia64 = "$host_cpu"; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + else + lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' + fi + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic='-fno-common' + case $cc_basename in + nagfor*) + # NAG Fortran compiler + lt_prog_compiler_wl='-Wl,-Wl,,' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + esac + ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic='-DDLL_EXPORT' + case $host_os in + os2*) + lt_prog_compiler_static='$wl-static' + ;; + esac + ;; + + hpux9* | hpux10* | hpux11*) + lt_prog_compiler_wl='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + lt_prog_compiler_static='$wl-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + lt_prog_compiler_wl='-Wl,' + # PIC (with -KPIC) is the default. + lt_prog_compiler_static='-non_shared' + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + case $cc_basename in + # old Intel for x86_64, which still supported -KPIC. + ecc*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-static' + ;; + # flang / f18. f95 an alias for gfortran or flang on Debian + flang* | f18* | f95*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fPIC' + lt_prog_compiler_static='-static' + ;; + # icc used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + icc* | ifort*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fPIC' + lt_prog_compiler_static='-static' + ;; + # Lahey Fortran 8.1. + lf95*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='--shared' + lt_prog_compiler_static='--static' + ;; + nagfor*) + # NAG Fortran compiler + lt_prog_compiler_wl='-Wl,-Wl,,' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + tcc*) + # Fabrice Bellard et al's Tiny C Compiler + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fPIC' + lt_prog_compiler_static='-static' + ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fpic' + lt_prog_compiler_static='-Bstatic' + ;; + ccc*) + lt_prog_compiler_wl='-Wl,' + # All Alpha code is PIC. 
+ lt_prog_compiler_static='-non_shared' + ;; + xl* | bgxl* | bgf* | mpixl*) + # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-qpic' + lt_prog_compiler_static='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*) + # Sun Fortran 8.3 passes all unrecognized flags to the linker + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='' + ;; + *Sun\ F* | *Sun*Fortran*) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='-Qoption ld ' + ;; + *Sun\ C*) + # Sun C 5.9 + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='-Wl,' + ;; + *Intel*\ [CF]*Compiler*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fPIC' + lt_prog_compiler_static='-static' + ;; + *Portland\ Group*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fpic' + lt_prog_compiler_static='-Bstatic' + ;; + esac + ;; + esac + ;; + + newsos6) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic='-fPIC -shared' + ;; + + osf3* | osf4* | osf5*) + lt_prog_compiler_wl='-Wl,' + # All OSF/1 code is PIC. + lt_prog_compiler_static='-non_shared' + ;; + + rdos*) + lt_prog_compiler_static='-non_shared' + ;; + + solaris*) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + case $cc_basename in + f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + lt_prog_compiler_wl='-Qoption ld ';; + *) + lt_prog_compiler_wl='-Wl,';; + esac + ;; + + sunos4*) + lt_prog_compiler_wl='-Qoption ld ' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic='-Kconform_pic' + lt_prog_compiler_static='-Bstatic' + fi + ;; + + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + unicos*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_can_build_shared=no + ;; + + uts4*) + lt_prog_compiler_pic='-pic' + lt_prog_compiler_static='-Bstatic' + ;; + + *) + lt_prog_compiler_can_build_shared=no + ;; + esac + fi + +case $host_os in + # For platforms that do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic= + ;; + *) + lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" + ;; +esac + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +$as_echo_n "checking for $compiler option to produce PIC... " >&6; } +if ${lt_cv_prog_compiler_pic+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic=$lt_prog_compiler_pic +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 +$as_echo "$lt_cv_prog_compiler_pic" >&6; } +lt_prog_compiler_pic=$lt_cv_prog_compiler_pic + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$lt_prog_compiler_pic"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 +$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... 
" >&6; } +if ${lt_cv_prog_compiler_pic_works+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic_works=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic -DPIC" ## exclude from sc_useless_quotes_in_assignment + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_pic_works=yes + fi + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5 +$as_echo "$lt_cv_prog_compiler_pic_works" >&6; } + +if test yes = "$lt_cv_prog_compiler_pic_works"; then + case $lt_prog_compiler_pic in + "" | " "*) ;; + *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; + esac +else + lt_prog_compiler_pic= + lt_prog_compiler_can_build_shared=no +fi + +fi + + + + + + + + + + + +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } +if ${lt_cv_prog_compiler_static_works+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_static_works=no + save_LDFLAGS=$LDFLAGS + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_static_works=yes + fi + else + lt_cv_prog_compiler_static_works=yes + fi + fi + $RM -r conftest* + LDFLAGS=$save_LDFLAGS + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5 +$as_echo "$lt_cv_prog_compiler_static_works" >&6; } + +if test yes = "$lt_cv_prog_compiler_static_works"; then + : +else + lt_prog_compiler_static= +fi + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... 
" >&6; } +if ${lt_cv_prog_compiler_c_o+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 +$as_echo "$lt_cv_prog_compiler_c_o" >&6; } + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if ${lt_cv_prog_compiler_c_o+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. 
+ $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 +$as_echo "$lt_cv_prog_compiler_c_o" >&6; } + + + + +hard_links=nottested +if test no = "$lt_cv_prog_compiler_c_o" && test no != "$need_locks"; then + # do not overwrite the value of need_locks provided by the user + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 +$as_echo_n "checking if we can lock with hard links... " >&6; } + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 +$as_echo "$hard_links" >&6; } + if test no = "$hard_links"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5 +$as_echo "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } + + runpath_var= + allow_undefined_flag= + always_export_symbols=no + archive_cmds= + archive_expsym_cmds= + compiler_needs_object=no + enable_shared_with_static_runtimes=no + export_dynamic_flag_spec= + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + hardcode_automatic=no + hardcode_direct=no + hardcode_direct_absolute=no + hardcode_libdir_flag_spec= + hardcode_libdir_separator= + hardcode_minus_L=no + hardcode_shlibpath_var=unsupported + inherit_rpath=no + link_all_deplibs=unknown + module_cmds= + module_expsym_cmds= + old_archive_from_new_cmds= + old_archive_from_expsyms_cmds= + thread_safe_flag_spec= + whole_archive_flag_spec= + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + include_expsyms= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ' (' and ')$', so one must not match beginning or + # end of line. Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc', + # as well as any symbol that contains 'd'. + exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + # Exclude shared library initialization/finalization symbols. + extract_expsyms_cmds= + + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test yes != "$GCC"; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd* | bitrig*) + with_gnu_ld=no + ;; + linux* | k*bsd*-gnu | gnu*) + link_all_deplibs=no + ;; + esac + + ld_shlibs=yes + + # On some targets, GNU ld is compatible enough with the native linker + # that we're better off using the native interface for both. 
+ lt_use_gnu_ld_interface=no + if test yes = "$with_gnu_ld"; then + case $host_os in + aix*) + # The AIX port of GNU ld has always aspired to compatibility + # with the native linker. However, as the warning in the GNU ld + # block says, versions before 2.19.5* couldn't really create working + # shared libraries, regardless of the interface used. + case `$LD -v 2>&1` in + *\ \(GNU\ Binutils\)\ 2.19.5*) ;; + *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;; + *\ \(GNU\ Binutils\)\ [3-9]*) ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + fi + + if test yes = "$lt_use_gnu_ld_interface"; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='$wl' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + export_dynamic_flag_spec='$wl--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' + else + whole_archive_flag_spec= + fi + supports_anon_versioning=no + case `$LD -v | $SED -e 's/(^)\+)\s\+//' 2>&1` in + *GNU\ gold*) supports_anon_versioning=yes ;; + *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix[3-9]*) + # On AIX/PPC, the GNU linker is very broken + if test ia64 != "$host_cpu"; then + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: the GNU linker, at least up to release 2.19, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to install binutils +*** 2.20 or above, or modify your PATH so that a non-GNU linker is found. +*** You will then need to restart the configuration process. + +_LT_EOF + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='' + ;; + m68k) + archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + esac + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + else + ld_shlibs=no + fi + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, + # as there is no search path for DLLs. 
+ hardcode_libdir_flag_spec='-L$libdir' + export_dynamic_flag_spec='$wl--export-all-symbols' + allow_undefined_flag=unsupported + always_export_symbols=no + enable_shared_with_static_runtimes=yes + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' + exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file, use it as + # is; otherwise, prepend EXPORTS... + archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs=no + fi + ;; + + haiku*) + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + link_all_deplibs=yes + ;; + + os2*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + allow_undefined_flag=unsupported + shrext_cmds=.dll + archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + prefix_cmds="$SED"~ + if test EXPORTS = "`$SED 1q $export_symbols`"; then + prefix_cmds="$prefix_cmds -e 1d"; + fi~ + prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ + cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' + enable_shared_with_static_runtimes=yes + ;; + + interix[3-9]*) + hardcode_direct=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='$wl-rpath,$libdir' + export_dynamic_flag_spec='$wl-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. 
Moving up from 0x10000000 also allows more sbrk(2) space. + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) + tmp_diet=no + if test linux-dietlibc = "$host_os"; then + case $cc_basename in + diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) + esac + fi + if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ + && test no = "$tmp_diet" + then + tmp_addflag=' $pic_flag' + tmp_sharedflag='-shared' + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group f77 and f90 compilers + whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + lf95*) # Lahey Fortran 8.1 + whole_archive_flag_spec= + tmp_sharedflag='--shared' ;; + nagfor*) # NAGFOR 5.3 + tmp_sharedflag='-Wl,-shared' ;; + xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below) + tmp_sharedflag='-qmkshrobj' + tmp_addflag= ;; + nvcc*) # Cuda Compiler Driver 2.2 + whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + compiler_needs_object=yes + ;; + esac + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) # Sun C 5.9 + whole_archive_flag_spec='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + compiler_needs_object=yes + tmp_sharedflag='-G' ;; + *Sun\ F*) # Sun Fortran 8.3 + tmp_sharedflag='-G' ;; + esac + archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + + if test yes = "$supports_anon_versioning"; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' + fi + + case $cc_basename in + tcc*) + export_dynamic_flag_spec='-rdynamic' + ;; + xlf* | bgf* | bgxlf* | mpixlf*) + # IBM XL Fortran 10.1 on PPC cannot create shared libs itself + whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' + 
hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' + if test yes = "$supports_anon_versioning"; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac + else + ld_shlibs=no + fi + ;; + + netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + ;; + *) + # For security reasons, it is highly recommended that you always + # use absolute paths for naming shared libraries, and exclude the + # DT_RUNPATH tag from executables and libraries. But doing so + # requires that you compile everything twice, which is a pain. 
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + ;; + + sunos4*) + archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + + if test no = "$ld_shlibs"; then + runpath_var= + hardcode_libdir_flag_spec= + export_dynamic_flag_spec= + whole_archive_flag_spec= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + allow_undefined_flag=unsupported + always_export_symbols=yes + archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + hardcode_minus_L=yes + if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + hardcode_direct=unsupported + fi + ;; + + aix[4-9]*) + if test ia64 = "$host_cpu"; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag= + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to GNU nm, but means don't demangle to AIX nm. + # Without the "-l" option, or with the "-B" option, AIX nm treats + # weak defined symbols like other global defined symbols, whereas + # GNU nm marks them as "W". + # While the 'weak' keyword is ignored in the Export File, we need + # it in the Import File for the 'aix-soname' feature, so we have + # to replace the "-B" option with "-P" for AIX nm. + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # have runtime linking enabled, and use it for executables. 
+ # For shared libraries, we enable/disable runtime linking + # depending on the kind of the shared library created - + # when "with_aix_soname,aix_use_runtimelinking" is: + # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables + # "aix,yes" lib.so shared, rtl:yes, for executables + # lib.a static archive + # "both,no" lib.so.V(shr.o) shared, rtl:yes + # lib.a(lib.so.V) shared, rtl:no, for executables + # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables + # lib.a(lib.so.V) shared, rtl:no + # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables + # lib.a static archive + case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) + for ld_flag in $LDFLAGS; do + if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then + aix_use_runtimelinking=yes + break + fi + done + if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then + # With aix-soname=svr4, we create the lib.so.V shared archives only, + # so we don't have lib.a shared libs to link our executables. + # We have to force runtime linking in this case. + aix_use_runtimelinking=yes + LDFLAGS="$LDFLAGS -Wl,-brtl" + fi + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + archive_cmds='' + hardcode_direct=yes + hardcode_direct_absolute=yes + hardcode_libdir_separator=':' + link_all_deplibs=yes + file_list_spec='$wl-f,' + case $with_aix_soname,$aix_use_runtimelinking in + aix,*) ;; # traditional, no import file + svr4,* | *,yes) # use import file + # The Import File defines what to hardcode. + hardcode_direct=no + hardcode_direct_absolute=no + ;; + esac + + if test yes = "$GCC"; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`$CC -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + hardcode_direct=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L=yes + hardcode_libdir_flag_spec='-L$libdir' + hardcode_libdir_separator= + fi + ;; + esac + shared_flag='-shared' + if test yes = "$aix_use_runtimelinking"; then + shared_flag="$shared_flag "'$wl-G' + fi + # Need to ensure runtime linking is disabled for the traditional + # shared library, or the linker may eventually find shared libraries + # /with/ Import File - we do not want to mix them. + shared_flag_aix='-shared' + shared_flag_svr4='-shared $wl-G' + else + # not using gcc + if test ia64 = "$host_cpu"; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. 
The following line is correct: + shared_flag='-G' + else + if test yes = "$aix_use_runtimelinking"; then + shared_flag='$wl-G' + else + shared_flag='$wl-bM:SRE' + fi + shared_flag_aix='$wl-bM:SRE' + shared_flag_svr4='$wl-G' + fi + fi + + export_dynamic_flag_spec='$wl-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + always_export_symbols=yes + if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. + if test set = "${lt_cv_aix_libpath+set}"; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath_+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=/usr/lib:/lib + fi + +fi + + aix_libpath=$lt_cv_aix_libpath_ +fi + + hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag + else + if test ia64 = "$host_cpu"; then + hardcode_libdir_flag_spec='$wl-R $libdir:/usr/lib:/lib' + allow_undefined_flag="-z nodefs" + archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + if test set = "${lt_cv_aix_libpath+set}"; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath_+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. 
+ if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=/usr/lib:/lib + fi + +fi + + aix_libpath=$lt_cv_aix_libpath_ +fi + + hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag=' $wl-bernotok' + allow_undefined_flag=' $wl-berok' + if test yes = "$with_gnu_ld"; then + # We only use this code for GNU lds that support --whole-archive. + whole_archive_flag_spec='$wl--whole-archive$convenience $wl--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec='$convenience' + fi + archive_cmds_need_lc=yes + archive_expsym_cmds='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' + # -brtl affects multiple linker settings, -berok does not and is overridden later + compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`' + if test svr4 != "$with_aix_soname"; then + # This is similar to how AIX traditionally builds its shared libraries. + archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' + fi + if test aix != "$with_aix_soname"; then + archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' + else + # used by -dlpreopen to get the symbols + archive_expsym_cmds="$archive_expsym_cmds"'~$MV $output_objdir/$realname.d/$soname $output_objdir' + fi + archive_expsym_cmds="$archive_expsym_cmds"'~$RM -r $output_objdir/$realname.d' + fi + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds='' + ;; + m68k) + archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + esac + ;; + + bsdi[45]*) + export_dynamic_flag_spec=-rdynamic + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. 
+ # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + case $cc_basename in + cl*) + # Native MSVC + hardcode_libdir_flag_spec=' ' + allow_undefined_flag=unsupported + always_export_symbols=yes + file_list_spec='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=.dll + # FIXME: Setting linknames here is a bad hack. + archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' + archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then + cp "$export_symbols" "$output_objdir/$soname.def"; + echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; + else + $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. + # _LT_TAGVAR(old_archive_from_new_cmds, )='true' + enable_shared_with_static_runtimes=yes + exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' + # Don't use ranlib + old_postinstall_cmds='chmod 644 $oldlib' + postlink_cmds='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile=$lt_outputfile.exe + lt_tool_outputfile=$lt_tool_outputfile.exe + ;; + esac~ + if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # Assume MSVC wrapper + hardcode_libdir_flag_spec=' ' + allow_undefined_flag=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=.dll + # FIXME: Setting linknames here is a bad hack. + archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + old_archive_from_new_cmds='true' + # FIXME: Should let the user specify the lib program. 
+ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' + enable_shared_with_static_runtimes=yes + ;; + esac + ;; + + darwin* | rhapsody*) + + + archive_cmds_need_lc=no + hardcode_direct=no + hardcode_automatic=yes + hardcode_shlibpath_var=unsupported + if test yes = "$lt_cv_ld_force_load"; then + whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' + + else + whole_archive_flag_spec='' + fi + link_all_deplibs=yes + allow_undefined_flag=$_lt_dar_allow_undefined + case $cc_basename in + ifort*|nagfor*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac + if test yes = "$_lt_dar_can_shared"; then + output_verbose_link_cmd=func_echo_all + archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" + module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" + archive_expsym_cmds="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" + module_expsym_cmds="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" + + else + ld_shlibs=no + fi + + ;; + + dgux*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2.*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | dragonfly*) + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + hpux9*) + if test yes = "$GCC"; then + archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' + else + archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' + fi + hardcode_libdir_flag_spec='$wl+b $wl$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L=yes + export_dynamic_flag_spec='$wl-E' + ;; + + hpux10*) + if test yes,no = "$GCC,$with_gnu_ld"; then + archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test no = "$with_gnu_ld"; then + hardcode_libdir_flag_spec='$wl+b $wl$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + hardcode_direct_absolute=yes + export_dynamic_flag_spec='$wl-E' + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + fi + ;; + + hpux11*) + if test yes,no = "$GCC,$with_gnu_ld"; then + case $host_cpu in + hppa*64*) + archive_cmds='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + archive_cmds='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + + # Older versions of the 11.00 compiler do not understand -b yet + # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5 +$as_echo_n "checking if $CC understands -b... " >&6; } +if ${lt_cv_prog_compiler__b+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler__b=no + save_LDFLAGS=$LDFLAGS + LDFLAGS="$LDFLAGS -b" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler__b=yes + fi + else + lt_cv_prog_compiler__b=yes + fi + fi + $RM -r conftest* + LDFLAGS=$save_LDFLAGS + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5 +$as_echo "$lt_cv_prog_compiler__b" >&6; } + +if test yes = "$lt_cv_prog_compiler__b"; then + archive_cmds='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' +else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' +fi + + ;; + esac + fi + if test no = "$with_gnu_ld"; then + hardcode_libdir_flag_spec='$wl+b $wl$libdir' + hardcode_libdir_separator=: + + case $host_cpu in + hppa*64*|ia64*) + hardcode_direct=no + hardcode_shlibpath_var=no + ;; + *) + hardcode_direct=yes + hardcode_direct_absolute=yes + export_dynamic_flag_spec='$wl-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ hardcode_minus_L=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test yes = "$GCC"; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. + # This should be the same for all languages, so no per-tag cache variable. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 +$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... " >&6; } +if ${lt_cv_irix_exported_symbol+:} false; then : + $as_echo_n "(cached) " >&6 +else + save_LDFLAGS=$LDFLAGS + LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +int foo (void) { return 0; } +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + lt_cv_irix_exported_symbol=yes +else + lt_cv_irix_exported_symbol=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 +$as_echo "$lt_cv_irix_exported_symbol" >&6; } + if test yes = "$lt_cv_irix_exported_symbol"; then + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib' + fi + link_all_deplibs=no + else + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -exports_file $export_symbols -o $lib' + fi + archive_cmds_need_lc='no' + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + hardcode_libdir_separator=: + inherit_rpath=yes + link_all_deplibs=yes + ;; + + linux*) + case $cc_basename in + tcc*) + # Fabrice Bellard et al's Tiny C Compiler + ld_shlibs=yes + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + newsos6) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + hardcode_libdir_separator=: + hardcode_shlibpath_var=no + ;; + + *nto* | *qnx*) + ;; + + openbsd* | bitrig*) + if test -f /usr/libexec/ld.so; then + hardcode_direct=yes + hardcode_shlibpath_var=no + hardcode_direct_absolute=yes + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs 
$compiler_flags $wl-retain-symbols-file,$export_symbols' + hardcode_libdir_flag_spec='$wl-rpath,$libdir' + export_dynamic_flag_spec='$wl-E' + else + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='$wl-rpath,$libdir' + fi + else + ld_shlibs=no + fi + ;; + + os2*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + allow_undefined_flag=unsupported + shrext_cmds=.dll + archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + prefix_cmds="$SED"~ + if test EXPORTS = "`$SED 1q $export_symbols`"; then + prefix_cmds="$prefix_cmds -e 1d"; + fi~ + prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ + cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' + enable_shared_with_static_runtimes=yes + ;; + + osf3*) + if test yes = "$GCC"; then + allow_undefined_flag=' $wl-expect_unresolved $wl\*' + archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + fi + archive_cmds_need_lc='no' + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + hardcode_libdir_separator=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test yes = "$GCC"; then + allow_undefined_flag=' $wl-expect_unresolved $wl\*' + archive_cmds='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO 
"-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~$RM $lib.exp' + + # Both c and cxx compiler support -rpath directly + hardcode_libdir_flag_spec='-rpath $libdir' + fi + archive_cmds_need_lc='no' + hardcode_libdir_separator=: + ;; + + solaris*) + no_undefined_flag=' -z defs' + if test yes = "$GCC"; then + wlarc='$wl' + archive_cmds='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) + wlarc='' + archive_cmds='$LD -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' + ;; + *) + wlarc='$wl' + archive_cmds='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + ;; + esac + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_shlibpath_var=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands '-z linker_flag'. GCC discards it without '$wl', + # but is careful enough not to reorder. + # Supported since Solaris 2.6 (maybe 2.5.1?) + if test yes = "$GCC"; then + whole_archive_flag_spec='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' + else + whole_archive_flag_spec='-z allextract$convenience -z defaultextract' + fi + ;; + esac + link_all_deplibs=yes + ;; + + sunos4*) + if test sequent = "$host_vendor"; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. + archive_cmds='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + hardcode_libdir_flag_spec='-L$libdir' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + sysv4) + case $host_vendor in + sni) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. 
+ archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' + reload_cmds='$CC -r -o $output$reload_objs' + hardcode_direct=no + ;; + motorola) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + hardcode_shlibpath_var=no + ;; + + sysv4.3*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + export_dynamic_flag_spec='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ld_shlibs=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) + no_undefined_flag='$wl-z,text' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + runpath_var='LD_RUN_PATH' + + if test yes = "$GCC"; then + archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We CANNOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. + no_undefined_flag='$wl-z,text' + allow_undefined_flag='$wl-z,nodefs' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='$wl-R,$libdir' + hardcode_libdir_separator=':' + link_all_deplibs=yes + export_dynamic_flag_spec='$wl-Bexport' + runpath_var='LD_RUN_PATH' + + if test yes = "$GCC"; then + archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + *) + ld_shlibs=no + ;; + esac + + if test sni = "$host_vendor"; then + case $host in + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + export_dynamic_flag_spec='$wl-Blargedynsym' + ;; + esac + fi + fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5 +$as_echo "$ld_shlibs" >&6; } +test no = "$ld_shlibs" && can_build_shared=no + +with_gnu_ld=$with_gnu_ld + + + + + + + + + + + + + + + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc=yes + + if test yes,yes = "$GCC,$enable_shared"; then + case $archive_cmds in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. 
If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 +$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } +if ${lt_cv_archive_cmds_need_lc+:} false; then : + $as_echo_n "(cached) " >&6 +else + $RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl + pic_flag=$lt_prog_compiler_pic + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag + allow_undefined_flag= + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 + (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + then + lt_cv_archive_cmds_need_lc=no + else + lt_cv_archive_cmds_need_lc=yes + fi + allow_undefined_flag=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5 +$as_echo "$lt_cv_archive_cmds_need_lc" >&6; } + archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc + ;; + esac + fi + ;; +esac + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 +$as_echo_n "checking dynamic linker characteristics... " >&6; } + +if test yes = "$GCC"; then + case $host_os in + darwin*) lt_awk_arg='/^libraries:/,/LR/' ;; + *) lt_awk_arg='/^libraries:/' ;; + esac + case $host_os in + mingw* | cegcc*) lt_sed_strip_eq='s|=\([A-Za-z]:\)|\1|g' ;; + *) lt_sed_strip_eq='s|=/|/|g' ;; + esac + lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` + case $lt_search_path_spec in + *\;*) + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` + ;; + *) + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` + ;; + esac + # Ok, now we have the path, separated by spaces, we can step through it + # and add multilib dir if necessary... + lt_tmp_lt_search_path_spec= + lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` + # ...but if some path component already ends with the multilib dir we assume + # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer). 
+ case "$lt_multi_os_dir; $lt_search_path_spec " in + "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*) + lt_multi_os_dir= + ;; + esac + for lt_sys_path in $lt_search_path_spec; do + if test -d "$lt_sys_path$lt_multi_os_dir"; then + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir" + elif test -n "$lt_multi_os_dir"; then + test -d "$lt_sys_path" && \ + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" + fi + done + lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' +BEGIN {RS = " "; FS = "/|\n";} { + lt_foo = ""; + lt_count = 0; + for (lt_i = NF; lt_i > 0; lt_i--) { + if ($lt_i != "" && $lt_i != ".") { + if ($lt_i == "..") { + lt_count++; + } else { + if (lt_count == 0) { + lt_foo = "/" $lt_i lt_foo; + } else { + lt_count--; + } + } + } + } + if (lt_foo != "") { lt_freq[lt_foo]++; } + if (lt_freq[lt_foo] == 1) { print lt_foo; } +}'` + # AWK program above erroneously prepends '/' to C:/dos/paths + # for these hosts. + case $host_os in + mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ + $SED 's|/\([A-Za-z]:\)|\1|g'` ;; + esac + sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=.so +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + + + +case $host_os in +aix3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='$libname$release$shared_ext$major' + ;; + +aix[4-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test ia64 = "$host_cpu"; then + # AIX 5 supports IA64 + library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line '#! .'. This would cause the generated library to + # depend on '.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # Using Import Files as archive members, it is possible to support + # filename-based versioning of shared library archives on AIX. While + # this would work for both with and without runtime linking, it will + # prevent static linking of such archives. So we do filename-based + # shared library versioning with .so extension only, which is used + # when both runtime linking and shared linking is enabled. 
+ # Unfortunately, runtime linking may impact performance, so we do + # not want this to be the default eventually. Also, we use the + # versioned .so libs for executables only if there is the -brtl + # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. + # To allow for filename-based versioning support, we need to create + # libNAME.so.V as an archive file, containing: + # *) an Import File, referring to the versioned filename of the + # archive as well as the shared archive member, telling the + # bitwidth (32 or 64) of that shared object, and providing the + # list of exported symbols of that shared object, eventually + # decorated with the 'weak' keyword + # *) the shared object with the F_LOADONLY flag set, to really avoid + # it being seen by the linker. + # At run time we better use the real file rather than another symlink, + # but for link time we create the symlink libNAME.so -> libNAME.so.V + + case $with_aix_soname,$aix_use_runtimelinking in + # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + aix,yes) # traditional libtool + dynamic_linker='AIX unversionable lib.so' + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + ;; + aix,no) # traditional AIX only + dynamic_linker='AIX lib.a(lib.so.V)' + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='$libname$release.a $libname.a' + soname_spec='$libname$release$shared_ext$major' + ;; + svr4,*) # full svr4 only + dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)" + library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' + # We do not specify a path in Import Files, so LIBPATH fires. + shlibpath_overrides_runpath=yes + ;; + *,yes) # both, prefer svr4 + dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)" + library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' + # unpreferred sharedlib libNAME.a needs extra handling + postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' + postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' + # We do not specify a path in Import Files, so LIBPATH fires. 
+ shlibpath_overrides_runpath=yes + ;; + *,no) # both, prefer aix + dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)" + library_names_spec='$libname$release.a $libname.a' + soname_spec='$libname$release$shared_ext$major' + # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling + postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' + postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' + ;; + esac + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. + # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + +beos*) + library_names_spec='$libname$shared_ext' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32* | cegcc*) + version_type=windows + shrext_cmds=.dll + need_version=no + need_lib_prefix=no + + case $GCC,$cc_basename in + yes,*) + # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo $libname | sed -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api" + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo $libname | sed -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + ;; + esac + dynamic_linker='Win32 ld.exe' + ;; + + *,cl*) + # Native MSVC + libname_spec='$name' + soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + library_names_spec='$libname.dll.lib' + + case $build_os in + mingw*) + sys_lib_search_path_spec= + lt_save_ifs=$IFS + IFS=';' + for lt_path in $LIB + do + IFS=$lt_save_ifs + # Let DOS variable expansion print the short 8.3 style file name. + lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` + sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" + done + IFS=$lt_save_ifs + # Convert to MSYS style. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` + ;; + cygwin*) + # Convert to unix form, then to dos form, then back to unix form + # but this time dos style (no spaces!) so that the unix form looks + # like /cygdrive/c/PROGRA~1:/cygdr... + sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` + sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` + sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + ;; + *) + sys_lib_search_path_spec=$LIB + if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # FIXME: find the short name or the path components, as spaces are + # common. (e.g. "Program Files" -> "PROGRA~1") + ;; + esac + + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + dynamic_linker='Win32 link.exe' + ;; + + *) + # Assume MSVC wrapper + library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib' + dynamic_linker='Win32 ld.exe' + ;; + esac + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' + soname_spec='$libname$release$major$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib" + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. + if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[23].*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2.*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + *) # from 4.6 on, and DragonFly + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +haiku*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + dynamic_linker="$host_os runtime_loader" + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=no + sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + if test 32 = "$HPUX_IA64_MODE"; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + sys_lib_dlsearch_path_spec=/usr/lib/hpux32 + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + sys_lib_dlsearch_path_spec=/usr/lib/hpux64 + fi + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555, ... + postinstall_cmds='chmod 555 $lib' + # or fails outright, so override atomically: + install_override_mode=555 + ;; + +interix[3-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test yes = "$lt_cv_prog_gnu_ld"; then + version_type=linux # correct to gnu/linux during the next big refactor + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='$libname$release$shared_ext$major' + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" + sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +linux*android*) + version_type=none # Android doesn't support versioned libraries. 
+ need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext' + soname_spec='$libname$release$shared_ext' + finish_cmds= + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + dynamic_linker='Android linker' + # Don't embed -rpath directories since the linker doesn't support them. + hardcode_libdir_flag_spec='-L$libdir' + ;; + +# This must be glibc/ELF. +linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + + # Some binutils ld are patched to set DT_RUNPATH + if ${lt_cv_shlibpath_overrides_runpath+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_shlibpath_overrides_runpath=no + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \ + LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\"" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : + lt_cv_shlibpath_overrides_runpath=yes +fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir + +fi + + shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Ideally, we could use ldconfig to report *all* directores which are + # searched for libraries, however this is still not possible. Aside from not + # being certain /sbin/ldconfig is available, command + # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, + # even though it is searched at run-time. Try to do the best guess by + # appending ld.so.conf contents (and includes) to the search path. + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. 
+ dynamic_linker='GNU/Linux ld.so' + ;; + +netbsdelf*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='NetBSD ld.elf_so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +*nto* | *qnx*) + version_type=qnx + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; + +openbsd* | bitrig*) + version_type=sunos + sys_lib_dlsearch_path_spec=/usr/lib + need_lib_prefix=no + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then + need_version=no + else + need_version=yes + fi + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +os2*) + libname_spec='$name' + version_type=windows + shrext_cmds=.dll + need_version=no + need_lib_prefix=no + # OS/2 can only load a DLL with a base name of 8 characters or less. + soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; + v=$($ECHO $release$versuffix | tr -d .-); + n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); + $ECHO $n$v`$shared_ext' + library_names_spec='${libname}_dll.$libext' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=BEGINLIBPATH + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; $ECHO \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='$libname$release$shared_ext$major' + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + +rdos*) + dynamic_linker=no + ;; + +solaris*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test yes = "$with_gnu_ld"; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec; then + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' + soname_spec='$libname$shared_ext.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=sco + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + if test yes = "$with_gnu_ld"; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + case $host_os in + sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
+ version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +uts4*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 +$as_echo "$dynamic_linker" >&6; } +test no = "$dynamic_linker" && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test yes = "$GCC"; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then + sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec +fi + +if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then + sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec +fi + +# remember unaugmented sys_lib_dlsearch_path content for libtool script decls... +configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec + +# ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code +func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" + +# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool +configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 +$as_echo_n "checking how to hardcode library paths into programs... " >&6; } +hardcode_action= +if test -n "$hardcode_libdir_flag_spec" || + test -n "$runpath_var" || + test yes = "$hardcode_automatic"; then + + # We can hardcode non-existent directories. + if test no != "$hardcode_direct" && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, )" && + test no != "$hardcode_minus_L"; then + # Linking always hardcodes the temporary library directory. + hardcode_action=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. 
+ hardcode_action=unsupported +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5 +$as_echo "$hardcode_action" >&6; } + +if test relink = "$hardcode_action" || + test yes = "$inherit_rpath"; then + # Fast installation is not supported + enable_fast_install=no +elif test yes = "$shlibpath_overrides_runpath" || + test no = "$enable_shared"; then + # Fast installation is not necessary + enable_fast_install=needless +fi + + + + + + + if test yes != "$enable_dlopen"; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen=load_add_on + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32* | cegcc*) + lt_cv_dlopen=LoadLibrary + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen=dlopen + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 +$as_echo_n "checking for dlopen in -ldl... " >&6; } +if ${ac_cv_lib_dl_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dl_dlopen=yes +else + ac_cv_lib_dl_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 +$as_echo "$ac_cv_lib_dl_dlopen" >&6; } +if test "x$ac_cv_lib_dl_dlopen" = xyes; then : + lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl +else + + lt_cv_dlopen=dyld + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + +fi + + ;; + + tpf*) + # Don't try to run any link tests for TPF. We know it's impossible + # because TPF is a cross-compiler, and we know how we open DSOs. + lt_cv_dlopen=dlopen + lt_cv_dlopen_libs= + lt_cv_dlopen_self=no + ;; + + *) + ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load" +if test "x$ac_cv_func_shl_load" = xyes; then : + lt_cv_dlopen=shl_load +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 +$as_echo_n "checking for shl_load in -ldld... " >&6; } +if ${ac_cv_lib_dld_shl_load+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char shl_load (); +int +main () +{ +return shl_load (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dld_shl_load=yes +else + ac_cv_lib_dld_shl_load=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 +$as_echo "$ac_cv_lib_dld_shl_load" >&6; } +if test "x$ac_cv_lib_dld_shl_load" = xyes; then : + lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld +else + ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen" +if test "x$ac_cv_func_dlopen" = xyes; then : + lt_cv_dlopen=dlopen +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 +$as_echo_n "checking for dlopen in -ldl... " >&6; } +if ${ac_cv_lib_dl_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dl_dlopen=yes +else + ac_cv_lib_dl_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 +$as_echo "$ac_cv_lib_dl_dlopen" >&6; } +if test "x$ac_cv_lib_dl_dlopen" = xyes; then : + lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 +$as_echo_n "checking for dlopen in -lsvld... " >&6; } +if ${ac_cv_lib_svld_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsvld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_svld_dlopen=yes +else + ac_cv_lib_svld_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 +$as_echo "$ac_cv_lib_svld_dlopen" >&6; } +if test "x$ac_cv_lib_svld_dlopen" = xyes; then : + lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 +$as_echo_n "checking for dld_link in -ldld... " >&6; } +if ${ac_cv_lib_dld_dld_link+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char dld_link (); +int +main () +{ +return dld_link (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dld_dld_link=yes +else + ac_cv_lib_dld_dld_link=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5 +$as_echo "$ac_cv_lib_dld_dld_link" >&6; } +if test "x$ac_cv_lib_dld_dld_link" = xyes; then : + lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld +fi + + +fi + + +fi + + +fi + + +fi + + +fi + + ;; + esac + + if test no = "$lt_cv_dlopen"; then + enable_dlopen=no + else + enable_dlopen=yes + fi + + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS=$CPPFLAGS + test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + + save_LDFLAGS=$LDFLAGS + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + + save_LIBS=$LIBS + LIBS="$lt_cv_dlopen_libs $LIBS" + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5 +$as_echo_n "checking whether a program can dlopen itself... " >&6; } +if ${lt_cv_dlopen_self+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test yes = "$cross_compiling"; then : + lt_cv_dlopen_self=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +#line $LINENO "configure" +#include "confdefs.h" + +#if HAVE_DLFCN_H +#include +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +/* When -fvisibility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ +#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +int fnord () __attribute__((visibility("default"))); +#endif + +int fnord () { return 42; } +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else + { + if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + else puts (dlerror ()); + } + /* dlclose (self); */ + } + else + puts (dlerror ()); + + return status; +} +_LT_EOF + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then + (./conftest; exit; ) >&5 2>/dev/null + lt_status=$? 
+ case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; + x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self=no + fi +fi +rm -fr conftest* + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5 +$as_echo "$lt_cv_dlopen_self" >&6; } + + if test yes = "$lt_cv_dlopen_self"; then + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5 +$as_echo_n "checking whether a statically linked program can dlopen itself... " >&6; } +if ${lt_cv_dlopen_self_static+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test yes = "$cross_compiling"; then : + lt_cv_dlopen_self_static=cross +else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +#line $LINENO "configure" +#include "confdefs.h" + +#if HAVE_DLFCN_H +#include +#endif + +#include + +#ifdef RTLD_GLOBAL +# define LT_DLGLOBAL RTLD_GLOBAL +#else +# ifdef DL_GLOBAL +# define LT_DLGLOBAL DL_GLOBAL +# else +# define LT_DLGLOBAL 0 +# endif +#endif + +/* We may have to define LT_DLLAZY_OR_NOW in the command line if we + find out it does not work in some platform. */ +#ifndef LT_DLLAZY_OR_NOW +# ifdef RTLD_LAZY +# define LT_DLLAZY_OR_NOW RTLD_LAZY +# else +# ifdef DL_LAZY +# define LT_DLLAZY_OR_NOW DL_LAZY +# else +# ifdef RTLD_NOW +# define LT_DLLAZY_OR_NOW RTLD_NOW +# else +# ifdef DL_NOW +# define LT_DLLAZY_OR_NOW DL_NOW +# else +# define LT_DLLAZY_OR_NOW 0 +# endif +# endif +# endif +# endif +#endif + +/* When -fvisibility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ +#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +int fnord () __attribute__((visibility("default"))); +#endif + +int fnord () { return 42; } +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else + { + if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + else puts (dlerror ()); + } + /* dlclose (self); */ + } + else + puts (dlerror ()); + + return status; +} +_LT_EOF + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then + (./conftest; exit; ) >&5 2>/dev/null + lt_status=$? 
+ case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self_static=no + fi +fi +rm -fr conftest* + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5 +$as_echo "$lt_cv_dlopen_self_static" >&6; } + fi + + CPPFLAGS=$save_CPPFLAGS + LDFLAGS=$save_LDFLAGS + LIBS=$save_LIBS + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi + + + + + + + + + + + + + + + + + +striplib= +old_striplib= +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5 +$as_echo_n "checking whether stripping libraries is possible... " >&6; } +if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP"; then + striplib="$STRIP -x" + old_striplib="$STRIP -S" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + ;; + *) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + ;; + esac +fi + + + + + + + + + + + + + # Report what library types will actually be built + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5 +$as_echo_n "checking if libtool supports shared libraries... " >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5 +$as_echo "$can_build_shared" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5 +$as_echo_n "checking whether to build shared libraries... " >&6; } + test no = "$can_build_shared" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test yes = "$enable_shared" && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + + aix[4-9]*) + if test ia64 != "$host_cpu"; then + case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in + yes,aix,yes) ;; # shared object as lib.so file only + yes,svr4,*) ;; # shared object as lib.so archive member only + yes,*) enable_static=no ;; # shared object in lib.a archive as well + esac + fi + ;; + esac + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5 +$as_echo "$enable_shared" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5 +$as_echo_n "checking whether to build static libraries... " >&6; } + # Make sure either enable_shared or enable_static is yes. 
+ test yes = "$enable_shared" || enable_static=yes + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5 +$as_echo "$enable_static" >&6; } + + + + +fi +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +CC=$lt_save_CC + + if test -n "$CXX" && ( test no != "$CXX" && + ( (test g++ = "$CXX" && `g++ -v >/dev/null 2>&1` ) || + (test g++ != "$CXX"))); then + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5 +$as_echo_n "checking how to run the C++ preprocessor... " >&6; } +if test -z "$CXXCPP"; then + if ${ac_cv_prog_CXXCPP+:} false; then : + $as_echo_n "(cached) " >&6 +else + # Double quotes because CXXCPP needs to be expanded + for CXXCPP in "$CXX -E" "/lib/cpp" + do + ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + break +fi + + done + ac_cv_prog_CXXCPP=$CXXCPP + +fi + CXXCPP=$ac_cv_prog_CXXCPP +else + ac_cv_prog_CXXCPP=$CXXCPP +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5 +$as_echo "$CXXCPP" >&6; } +ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + # Broken: success on invalid input. 
+continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check +See \`config.log' for more details" "$LINENO" 5; } +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +else + _lt_caught_CXX_error=yes +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +archive_cmds_need_lc_CXX=no +allow_undefined_flag_CXX= +always_export_symbols_CXX=no +archive_expsym_cmds_CXX= +compiler_needs_object_CXX=no +export_dynamic_flag_spec_CXX= +hardcode_direct_CXX=no +hardcode_direct_absolute_CXX=no +hardcode_libdir_flag_spec_CXX= +hardcode_libdir_separator_CXX= +hardcode_minus_L_CXX=no +hardcode_shlibpath_var_CXX=unsupported +hardcode_automatic_CXX=no +inherit_rpath_CXX=no +module_cmds_CXX= +module_expsym_cmds_CXX= +link_all_deplibs_CXX=unknown +old_archive_cmds_CXX=$old_archive_cmds +reload_flag_CXX=$reload_flag +reload_cmds_CXX=$reload_cmds +no_undefined_flag_CXX= +whole_archive_flag_spec_CXX= +enable_shared_with_static_runtimes_CXX=no + +# Source file extension for C++ test sources. +ac_ext=cpp + +# Object file extension for compiled C++ test sources. +objext=o +objext_CXX=$objext + +# No sense in running all these tests if we already determined that +# the CXX compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. +if test yes != "$_lt_caught_CXX_error"; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="int some_variable = 0;" + + # Code to be used in simple link tests + lt_simple_link_test_code='int main(int, char *[]) { return(0); }' + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + + + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + + # save warnings/boilerplate of simple test code + ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* + + ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* + + + # Allow CC to be a program name with arguments. 
+  lt_save_CC=$CC
+  lt_save_CFLAGS=$CFLAGS
+  lt_save_LD=$LD
+  lt_save_GCC=$GCC
+  GCC=$GXX
+  lt_save_with_gnu_ld=$with_gnu_ld
+  lt_save_path_LD=$lt_cv_path_LD
+  if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then
+    lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx
+  else
+    $as_unset lt_cv_prog_gnu_ld
+  fi
+  if test -n "${lt_cv_path_LDCXX+set}"; then
+    lt_cv_path_LD=$lt_cv_path_LDCXX
+  else
+    $as_unset lt_cv_path_LD
+  fi
+  test -z "${LDCXX+set}" || LD=$LDCXX
+  CC=${CXX-"c++"}
+  CFLAGS=$CXXFLAGS
+  compiler=$CC
+  compiler_CXX=$CC
+  func_cc_basename $compiler
+cc_basename=$func_cc_basename_result
+
+
+  if test -n "$compiler"; then
+    # We don't want -fno-exception when compiling C++ code, so set the
+    # no_builtin_flag separately
+    if test yes = "$GXX"; then
+      lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin'
+    else
+      lt_prog_compiler_no_builtin_flag_CXX=
+    fi
+
+    if test yes = "$GXX"; then
+      # Set up default GNU C++ configuration
+
+
+
+# Check whether --with-gnu-ld was given.
+if test "${with_gnu_ld+set}" = set; then :
+  withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes
+else
+  with_gnu_ld=no
+fi
+
+ac_prog=ld
+if test yes = "$GCC"; then
+  # Check if gcc -print-prog-name=ld gives a path.
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5
+$as_echo_n "checking for ld used by $CC... " >&6; }
+  case $host in
+  *-*-mingw*)
+    # gcc leaves a trailing carriage return, which upsets mingw
+    ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+  *)
+    ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+  esac
+  case $ac_prog in
+    # Accept absolute paths.
+    [\\/]* | ?:[\\/]*)
+      re_direlt='/[^/][^/]*/\.\./'
+      # Canonicalize the pathname of ld
+      ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
+      while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+        ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+      done
+      test -z "$LD" && LD=$ac_prog
+      ;;
+  "")
+    # If it fails, then pretend we aren't using GCC.
+    ac_prog=ld
+    ;;
+  *)
+    # If it is relative, then search for the first ld in PATH.
+    with_gnu_ld=unknown
+    ;;
+  esac
+elif test yes = "$with_gnu_ld"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5
+$as_echo_n "checking for GNU ld... " >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
+$as_echo_n "checking for non-GNU ld... " >&6; }
+fi
+if ${lt_cv_path_LD+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -z "$LD"; then
+  lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
+  for ac_dir in $PATH; do
+    IFS=$lt_save_ifs
+    test -z "$ac_dir" && ac_dir=.
+    if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+      lt_cv_path_LD=$ac_dir/$ac_prog
+      # Check to see if the program is GNU ld. I'd rather use --version,
+      # but apparently some variants of GNU ld only accept -v.
+      # Break only if it was the GNU/non-GNU ld that we prefer.
+      case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
+      *GNU* | *'with BFD'*)
+        test no != "$with_gnu_ld" && break
+        ;;
+      *)
+        test yes != "$with_gnu_ld" && break
+        ;;
+      esac
+    fi
+  done
+  IFS=$lt_save_ifs
+else
+  lt_cv_path_LD=$LD # Let the user override the test with a path.
+fi
+fi
+
+LD=$lt_cv_path_LD
+if test -n "$LD"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5
+$as_echo "$LD" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5
+$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; }
+if ${lt_cv_prog_gnu_ld+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  # I'd rather use --version here, but apparently some GNU lds only accept -v.
+case `$LD -v 2>&1 </dev/null` in
+*GNU* | *'with BFD'*)
+  lt_cv_prog_gnu_ld=yes
+  ;;
+*)
+  lt_cv_prog_gnu_ld=no
+  ;;
+esac
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_gnu_ld" >&5
+$as_echo "$lt_cv_prog_gnu_ld" >&6; }
+with_gnu_ld=$lt_cv_prog_gnu_ld
+
+
+
+
+
+
+
+      # Check if GNU C++ uses GNU ld as the underlying linker, since the
+      # archiving commands below assume that GNU ld is being used.
+      if test yes = "$with_gnu_ld"; then
+        archive_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
+        archive_expsym_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+
+        hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir'
+        export_dynamic_flag_spec_CXX='$wl--export-dynamic'
+
+        # If archive_cmds runs LD, not CC, wlarc should be empty
+        # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to
+        #     investigate it a little bit more. (MM)
+        wlarc='$wl'
+
+        # ancient GNU ld didn't support --whole-archive et al.
+        if eval "`$CC -print-prog-name=ld` --help 2>&1" |
+          $GREP 'no-whole-archive' > /dev/null; then
+          whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive'
+        else
+          whole_archive_flag_spec_CXX=
+        fi
+      else
+        with_gnu_ld=no
+        wlarc=
+
+        # A generic and very simple default shared library creation
+        # command for GNU C++ for the case where it uses the native
+        # linker, instead of GNU ld. If possible, this setting should
+        # be overridden to take advantage of the native linker features on
+        # the platform it is being used on.
+        archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
+      fi
+
+      # Commands to make compiler produce verbose output that lists
+      # what "hidden" libraries, object files and flags are used when
+      # linking a shared library.
+      output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP " \-L"'
+
+    else
+      GXX=no
+      with_gnu_ld=no
+      wlarc=
+    fi
+
+    # PORTME: fill in a description of your system's C++ link characteristics
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
+$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
+    ld_shlibs_CXX=yes
+    case $host_os in
+      aix3*)
+        # FIXME: insert proper C++ library support
+        ld_shlibs_CXX=no
+        ;;
+      aix[4-9]*)
+        if test ia64 = "$host_cpu"; then
+          # On IA64, the linker does run time linking by default, so we don't
+          # have to do anything special.
+          aix_use_runtimelinking=no
+          exp_sym_flag='-Bexport'
+          no_entry_flag=
+        else
+          aix_use_runtimelinking=no
+
+          # Test if we are trying to use run time linking or normal
+          # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+          # have runtime linking enabled, and use it for executables.
+ # For shared libraries, we enable/disable runtime linking + # depending on the kind of the shared library created - + # when "with_aix_soname,aix_use_runtimelinking" is: + # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables + # "aix,yes" lib.so shared, rtl:yes, for executables + # lib.a static archive + # "both,no" lib.so.V(shr.o) shared, rtl:yes + # lib.a(lib.so.V) shared, rtl:no, for executables + # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables + # lib.a(lib.so.V) shared, rtl:no + # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables + # lib.a static archive + case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) + for ld_flag in $LDFLAGS; do + case $ld_flag in + *-brtl*) + aix_use_runtimelinking=yes + break + ;; + esac + done + if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then + # With aix-soname=svr4, we create the lib.so.V shared archives only, + # so we don't have lib.a shared libs to link our executables. + # We have to force runtime linking in this case. + aix_use_runtimelinking=yes + LDFLAGS="$LDFLAGS -Wl,-brtl" + fi + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + archive_cmds_CXX='' + hardcode_direct_CXX=yes + hardcode_direct_absolute_CXX=yes + hardcode_libdir_separator_CXX=':' + link_all_deplibs_CXX=yes + file_list_spec_CXX='$wl-f,' + case $with_aix_soname,$aix_use_runtimelinking in + aix,*) ;; # no import file + svr4,* | *,yes) # use import file + # The Import File defines what to hardcode. + hardcode_direct_CXX=no + hardcode_direct_absolute_CXX=no + ;; + esac + + if test yes = "$GXX"; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`$CC -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + hardcode_direct_CXX=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L_CXX=yes + hardcode_libdir_flag_spec_CXX='-L$libdir' + hardcode_libdir_separator_CXX= + fi + esac + shared_flag='-shared' + if test yes = "$aix_use_runtimelinking"; then + shared_flag=$shared_flag' $wl-G' + fi + # Need to ensure runtime linking is disabled for the traditional + # shared library, or the linker may eventually find shared libraries + # /with/ Import File - we do not want to mix them. + shared_flag_aix='-shared' + shared_flag_svr4='-shared $wl-G' + else + # not using gcc + if test ia64 = "$host_cpu"; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. 
The following line is correct: + shared_flag='-G' + else + if test yes = "$aix_use_runtimelinking"; then + shared_flag='$wl-G' + else + shared_flag='$wl-bM:SRE' + fi + shared_flag_aix='$wl-bM:SRE' + shared_flag_svr4='$wl-G' + fi + fi + + export_dynamic_flag_spec_CXX='$wl-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to + # export. + always_export_symbols_CXX=yes + if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + # The "-G" linker flag allows undefined symbols. + no_undefined_flag_CXX='-bernotok' + # Determine the default libpath from the value encoded in an empty + # executable. + if test set = "${lt_cv_aix_libpath+set}"; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath__CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. + if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX=/usr/lib:/lib + fi + +fi + + aix_libpath=$lt_cv_aix_libpath__CXX +fi + + hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath" + + archive_expsym_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag + else + if test ia64 = "$host_cpu"; then + hardcode_libdir_flag_spec_CXX='$wl-R $libdir:/usr/lib:/lib' + allow_undefined_flag_CXX="-z nodefs" + archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + if test set = "${lt_cv_aix_libpath+set}"; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath__CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. 
+ if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX=/usr/lib:/lib + fi + +fi + + aix_libpath=$lt_cv_aix_libpath__CXX +fi + + hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag_CXX=' $wl-bernotok' + allow_undefined_flag_CXX=' $wl-berok' + if test yes = "$with_gnu_ld"; then + # We only use this code for GNU lds that support --whole-archive. + whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec_CXX='$convenience' + fi + archive_cmds_need_lc_CXX=yes + archive_expsym_cmds_CXX='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' + # -brtl affects multiple linker settings, -berok does not and is overridden later + compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`' + if test svr4 != "$with_aix_soname"; then + # This is similar to how AIX traditionally builds its shared + # libraries. Need -bnortl late, we may have -brtl in LDFLAGS. + archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' + fi + if test aix != "$with_aix_soname"; then + archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' + else + # used by -dlpreopen to get the symbols + archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$MV $output_objdir/$realname.d/$soname $output_objdir' + fi + archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$RM -r $output_objdir/$realname.d' + fi + fi + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag_CXX=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + else + ld_shlibs_CXX=no + fi + ;; + + chorus*) + case $cc_basename in + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + cygwin* | mingw* | pw32* | cegcc*) + case $GXX,$cc_basename in + ,cl* | no,cl*) + # Native MSVC + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. 
+ hardcode_libdir_flag_spec_CXX=' ' + allow_undefined_flag_CXX=unsupported + always_export_symbols_CXX=yes + file_list_spec_CXX='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=.dll + # FIXME: Setting linknames here is a bad hack. + archive_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' + archive_expsym_cmds_CXX='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then + cp "$export_symbols" "$output_objdir/$soname.def"; + echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; + else + $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. + # _LT_TAGVAR(old_archive_from_new_cmds, CXX)='true' + enable_shared_with_static_runtimes_CXX=yes + # Don't use ranlib + old_postinstall_cmds_CXX='chmod 644 $oldlib' + postlink_cmds_CXX='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile=$lt_outputfile.exe + lt_tool_outputfile=$lt_tool_outputfile.exe + ;; + esac~ + func_to_tool_file "$lt_outputfile"~ + if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # g++ + # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, + # as there is no search path for DLLs. + hardcode_libdir_flag_spec_CXX='-L$libdir' + export_dynamic_flag_spec_CXX='$wl--export-all-symbols' + allow_undefined_flag_CXX=unsupported + always_export_symbols_CXX=no + enable_shared_with_static_runtimes_CXX=yes + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file, use it as + # is; otherwise, prepend EXPORTS... 
+ archive_expsym_cmds_CXX='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs_CXX=no + fi + ;; + esac + ;; + darwin* | rhapsody*) + + + archive_cmds_need_lc_CXX=no + hardcode_direct_CXX=no + hardcode_automatic_CXX=yes + hardcode_shlibpath_var_CXX=unsupported + if test yes = "$lt_cv_ld_force_load"; then + whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' + + else + whole_archive_flag_spec_CXX='' + fi + link_all_deplibs_CXX=yes + allow_undefined_flag_CXX=$_lt_dar_allow_undefined + case $cc_basename in + ifort*|nagfor*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac + if test yes = "$_lt_dar_can_shared"; then + output_verbose_link_cmd=func_echo_all + archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" + module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" + archive_expsym_cmds_CXX="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" + module_expsym_cmds_CXX="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" + if test yes != "$lt_cv_apple_cc_single_mod"; then + archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dsymutil" + archive_expsym_cmds_CXX="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dar_export_syms$_lt_dsymutil" + fi + + else + ld_shlibs_CXX=no + fi + + ;; + + os2*) + hardcode_libdir_flag_spec_CXX='-L$libdir' + hardcode_minus_L_CXX=yes + allow_undefined_flag_CXX=unsupported + shrext_cmds=.dll + archive_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + archive_expsym_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > 
$output_objdir/$libname.def~ + $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ + $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ + $ECHO EXPORTS >> $output_objdir/$libname.def~ + prefix_cmds="$SED"~ + if test EXPORTS = "`$SED 1q $export_symbols`"; then + prefix_cmds="$prefix_cmds -e 1d"; + fi~ + prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ + cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ + $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ + emximp -o $lib $output_objdir/$libname.def' + old_archive_From_new_cmds_CXX='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' + enable_shared_with_static_runtimes_CXX=yes + ;; + + dgux*) + case $cc_basename in + ec++*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + ghcx*) + # Green Hills C++ Compiler + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + freebsd2.*) + # C++ shared libraries reported to be fairly broken before + # switch to ELF + ld_shlibs_CXX=no + ;; + + freebsd-elf*) + archive_cmds_need_lc_CXX=no + ;; + + freebsd* | dragonfly*) + # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF + # conventions + ld_shlibs_CXX=yes + ;; + + haiku*) + archive_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + link_all_deplibs_CXX=yes + ;; + + hpux9*) + hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir' + hardcode_libdir_separator_CXX=: + export_dynamic_flag_spec_CXX='$wl-E' + hardcode_direct_CXX=yes + hardcode_minus_L_CXX=yes # Not in the search PATH, + # but as the default + # location of the library. + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aCC*) + archive_cmds_CXX='$RM $output_objdir/$soname~$CC -b $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP " \-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test yes = "$GXX"; then + archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + + hpux10*|hpux11*) + if test no = "$with_gnu_ld"; then + hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir' + hardcode_libdir_separator_CXX=: + + case $host_cpu in + hppa*64*|ia64*) + ;; + *) + export_dynamic_flag_spec_CXX='$wl-E' + ;; + esac + fi + case $host_cpu in + hppa*64*|ia64*) + hardcode_direct_CXX=no + hardcode_shlibpath_var_CXX=no + ;; + *) + hardcode_direct_CXX=yes + hardcode_direct_absolute_CXX=yes + hardcode_minus_L_CXX=yes # Not in the search PATH, + # but as the default + # location of the library. + ;; + esac + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aCC*) + case $host_cpu in + hppa*64*) + archive_cmds_CXX='$CC -b $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + archive_cmds_CXX='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP " \-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test yes = "$GXX"; then + if test no = "$with_gnu_ld"; then + case $host_cpu in + hppa*64*) + archive_cmds_CXX='$CC -shared -nostdlib -fPIC $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + + interix[3-9]*) + hardcode_direct_CXX=no + hardcode_shlibpath_var_CXX=no + hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' + export_dynamic_flag_spec_CXX='$wl-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. 
To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. + archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds_CXX='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + irix5* | irix6*) + case $cc_basename in + CC*) + # SGI C++ + archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + + # Archives containing C++ object files must be created using + # "CC -ar", where "CC" is the IRIX C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs' + ;; + *) + if test yes = "$GXX"; then + if test no = "$with_gnu_ld"; then + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + else + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` -o $lib' + fi + fi + link_all_deplibs_CXX=yes + ;; + esac + hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' + hardcode_libdir_separator_CXX=: + inherit_rpath_CXX=yes + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib $wl-retain-symbols-file,$export_symbols; mv \$templib $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + + hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' + export_dynamic_flag_spec_CXX='$wl--export-dynamic' + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' + ;; + icpc* | ecpc* ) + # Intel C++ + with_gnu_ld=yes + # version 8.0 and above of icpc choke on multiply defined symbols + # if we add $predep_objects and $postdep_objects, however 7.1 and + # earlier do not add the objects themselves. + case `$CC -V 2>&1` in + *"Version 7."*) + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 8.0 or newer + tmp_idyn= + case $host_cpu in + ia64*) tmp_idyn=' -i_dynamic';; + esac + archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + archive_cmds_need_lc_CXX=no + hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' + export_dynamic_flag_spec_CXX='$wl--export-dynamic' + whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + case `$CC -V` in + *pgCC\ [1-5].* | *pgcpp\ [1-5].*) + prelink_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ + compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' + old_archive_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ + $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ + $RANLIB $oldlib' + archive_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 6 and above use weak symbols + archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + + hardcode_libdir_flag_spec_CXX='$wl--rpath $wl$libdir' + 
export_dynamic_flag_spec_CXX='$wl--export-dynamic' + whole_archive_flag_spec_CXX='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + ;; + cxx*) + # Compaq C++ + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib $wl-retain-symbols-file $wl$export_symbols' + + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec_CXX='-rpath $libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' + ;; + xl* | mpixl* | bgxl*) + # IBM XL 8.0 on PPC, with GNU ld + hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' + export_dynamic_flag_spec_CXX='$wl--export-dynamic' + archive_cmds_CXX='$CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' + if test yes = "$supports_anon_versioning"; then + archive_expsym_cmds_CXX='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' + fi + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + no_undefined_flag_CXX=' -zdefs' + archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + archive_expsym_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file $wl$export_symbols' + hardcode_libdir_flag_spec_CXX='-R$libdir' + whole_archive_flag_spec_CXX='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' + compiler_needs_object_CXX=yes + + # Not sure whether something based on + # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 + # would be better. + output_verbose_link_cmd='func_echo_all' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
+ old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' + ;; + esac + ;; + esac + ;; + + lynxos*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + m88k*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + mvs*) + case $cc_basename in + cxx*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds_CXX='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' + wlarc= + hardcode_libdir_flag_spec_CXX='-R$libdir' + hardcode_direct_CXX=yes + hardcode_shlibpath_var_CXX=no + fi + # Workaround some broken pre-1.5 toolchains + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' + ;; + + *nto* | *qnx*) + ld_shlibs_CXX=yes + ;; + + openbsd* | bitrig*) + if test -f /usr/libexec/ld.so; then + hardcode_direct_CXX=yes + hardcode_shlibpath_var_CXX=no + hardcode_direct_absolute_CXX=yes + archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`"; then + archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file,$export_symbols -o $lib' + export_dynamic_flag_spec_CXX='$wl-E' + whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' + fi + output_verbose_link_cmd=func_echo_all + else + ld_shlibs_CXX=no + fi + ;; + + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' + hardcode_libdir_separator_CXX=: + + # Archives containing C++ object files must be created using + # the KAI C++ compiler. 
+ case $host in + osf3*) old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;; + *) old_archive_cmds_CXX='$CC -o $oldlib $oldobjs' ;; + esac + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + cxx*) + case $host in + osf3*) + allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*' + archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $soname `test -n "$verstring" && func_echo_all "$wl-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' + ;; + *) + allow_undefined_flag_CXX=' -expect_unresolved \*' + archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' + archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ + echo "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname $wl-input $wl$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~ + $RM $lib.exp' + hardcode_libdir_flag_spec_CXX='-rpath $libdir' + ;; + esac + + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test yes,no = "$GXX,$with_gnu_ld"; then + allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*' + case $host in + osf3*) + archive_cmds_CXX='$CC -shared -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + ;; + *) + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' + ;; + esac + + hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP " \-L"' + + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + + psos*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + lcc*) + # Lucid + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + archive_cmds_need_lc_CXX=yes + no_undefined_flag_CXX=' -zdefs' + archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G$allow_undefined_flag $wl-M $wl$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + hardcode_libdir_flag_spec_CXX='-R$libdir' + hardcode_shlibpath_var_CXX=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands '-z linker_flag'. + # Supported since Solaris 2.6 (maybe 2.5.1?) + whole_archive_flag_spec_CXX='-z allextract$convenience -z defaultextract' + ;; + esac + link_all_deplibs_CXX=yes + + output_verbose_link_cmd='func_echo_all' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' + ;; + gcx*) + # Green Hills C++ Compiler + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' + + # The C++ compiler must be used to create the archive. + old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs' + ;; + *) + # GNU C++ compiler with Solaris linker + if test yes,no = "$GXX,$with_gnu_ld"; then + no_undefined_flag_CXX=' $wl-z ${wl}defs' + if $CC --version | $GREP -v '^2\.7' > /dev/null; then + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP " \-L"' + else + # g++ 2.7 appears to require '-G' NOT '-shared' on this + # platform. 
+ archive_cmds_CXX='$CC -G -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP " \-L"' + fi + + hardcode_libdir_flag_spec_CXX='$wl-R $wl$libdir' + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + whole_archive_flag_spec_CXX='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' + ;; + esac + fi + ;; + esac + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) + no_undefined_flag_CXX='$wl-z,text' + archive_cmds_need_lc_CXX=no + hardcode_shlibpath_var_CXX=no + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We CANNOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ no_undefined_flag_CXX='$wl-z,text' + allow_undefined_flag_CXX='$wl-z,nodefs' + archive_cmds_need_lc_CXX=no + hardcode_shlibpath_var_CXX=no + hardcode_libdir_flag_spec_CXX='$wl-R,$libdir' + hardcode_libdir_separator_CXX=':' + link_all_deplibs_CXX=yes + export_dynamic_flag_spec_CXX='$wl-Bexport' + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + old_archive_cmds_CXX='$CC -Tprelink_objects $oldobjs~ + '"$old_archive_cmds_CXX" + reload_cmds_CXX='$CC -Tprelink_objects $reload_objs~ + '"$reload_cmds_CXX" + ;; + *) + archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + vxworks*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 +$as_echo "$ld_shlibs_CXX" >&6; } + test no = "$ld_shlibs_CXX" && can_build_shared=no + + GCC_CXX=$GXX + LD_CXX=$LD + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + # Dependencies to place before and after the object being linked: +predep_objects_CXX= +postdep_objects_CXX= +predeps_CXX= +postdeps_CXX= +compiler_lib_search_path_CXX= + +cat > conftest.$ac_ext <<_LT_EOF +class Foo +{ +public: + Foo (void) { a = 0; } +private: + int a; +}; +_LT_EOF + + +_lt_libdeps_save_CFLAGS=$CFLAGS +case "$CC $CFLAGS " in #( +*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; +*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; +*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;; +esac + +if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + # Parse the compiler output and extract the necessary + # objects, libraries and library flags. + + # Sentinel used to keep track of whether or not we are before + # the conftest object file. + pre_test_object_deps_done=no + + for p in `eval "$output_verbose_link_cmd"`; do + case $prev$p in + + -L* | -R* | -l*) + # Some compilers place space between "-{L,R}" and the path. + # Remove the space. + if test x-L = "$p" || + test x-R = "$p"; then + prev=$p + continue + fi + + # Expand the sysroot to ease extracting the directories later. + if test -z "$prev"; then + case $p in + -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; + -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; + -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; + esac + fi + case $p in + =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; + esac + if test no = "$pre_test_object_deps_done"; then + case $prev in + -L | -R) + # Internal compiler library paths should come after those + # provided the user. 
The postdeps already come after the + # user supplied libs so there is no need to process them. + if test -z "$compiler_lib_search_path_CXX"; then + compiler_lib_search_path_CXX=$prev$p + else + compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} $prev$p" + fi + ;; + # The "-l" case would never come before the object being + # linked, so don't bother handling this case. + esac + else + if test -z "$postdeps_CXX"; then + postdeps_CXX=$prev$p + else + postdeps_CXX="${postdeps_CXX} $prev$p" + fi + fi + prev= + ;; + + *.lto.$objext) ;; # Ignore GCC LTO objects + *.$objext) + # This assumes that the test object file only shows up + # once in the compiler output. + if test "$p" = "conftest.$objext"; then + pre_test_object_deps_done=yes + continue + fi + + if test no = "$pre_test_object_deps_done"; then + if test -z "$predep_objects_CXX"; then + predep_objects_CXX=$p + else + predep_objects_CXX="$predep_objects_CXX $p" + fi + else + if test -z "$postdep_objects_CXX"; then + postdep_objects_CXX=$p + else + postdep_objects_CXX="$postdep_objects_CXX $p" + fi + fi + ;; + + *) ;; # Ignore the rest. + + esac + done + + # Clean up. + rm -f a.out a.exe +else + echo "libtool.m4: error: problem compiling CXX test program" +fi + +$RM -f confest.$objext +CFLAGS=$_lt_libdeps_save_CFLAGS + +# PORTME: override above test on systems where it is broken +case $host_os in +interix[3-9]*) + # Interix 3.5 installs completely hosed .la files for C++, so rather than + # hack all around it, let's just trust "g++" to DTRT. + predep_objects_CXX= + postdep_objects_CXX= + postdeps_CXX= + ;; +esac + + +case " $postdeps_CXX " in +*" -lc "*) archive_cmds_need_lc_CXX=no ;; +esac + compiler_lib_search_dirs_CXX= +if test -n "${compiler_lib_search_path_CXX}"; then + compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | $SED -e 's! -L! !g' -e 's!^ !!'` +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + lt_prog_compiler_wl_CXX= +lt_prog_compiler_pic_CXX= +lt_prog_compiler_static_CXX= + + + # C++ specific cases for pic, static, wl, etc. + if test yes = "$GXX"; then + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test ia64 = "$host_cpu"; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_CXX='-Bstatic' + fi + lt_prog_compiler_pic_CXX='-fPIC' + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + lt_prog_compiler_pic_CXX='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the '-m68020' flag to GCC prevents building anything better, + # like '-m68040'. + lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). 
+ # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + lt_prog_compiler_pic_CXX='-DDLL_EXPORT' + case $host_os in + os2*) + lt_prog_compiler_static_CXX='$wl-static' + ;; + esac + ;; + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic_CXX='-fno-common' + ;; + *djgpp*) + # DJGPP does not support shared libraries at all + lt_prog_compiler_pic_CXX= + ;; + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + lt_prog_compiler_static_CXX= + ;; + interix[3-9]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic_CXX=-Kconform_pic + fi + ;; + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + ;; + *) + lt_prog_compiler_pic_CXX='-fPIC' + ;; + esac + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic_CXX='-fPIC -shared' + ;; + *) + lt_prog_compiler_pic_CXX='-fPIC' + ;; + esac + else + case $host_os in + aix[4-9]*) + # All AIX code is PIC. + if test ia64 = "$host_cpu"; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_CXX='-Bstatic' + else + lt_prog_compiler_static_CXX='-bnso -bI:/lib/syscalls.exp' + fi + ;; + chorus*) + case $cc_basename in + cxch68*) + # Green Hills C++ Compiler + # _LT_TAGVAR(lt_prog_compiler_static, CXX)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" + ;; + esac + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic_CXX='-DDLL_EXPORT' + ;; + dgux*) + case $cc_basename in + ec++*) + lt_prog_compiler_pic_CXX='-KPIC' + ;; + ghcx*) + # Green Hills C++ Compiler + lt_prog_compiler_pic_CXX='-pic' + ;; + *) + ;; + esac + ;; + freebsd* | dragonfly*) + # FreeBSD uses GNU C++ + ;; + hpux9* | hpux10* | hpux11*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='$wl-a ${wl}archive' + if test ia64 != "$host_cpu"; then + lt_prog_compiler_pic_CXX='+Z' + fi + ;; + aCC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='$wl-a ${wl}archive' + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic_CXX='+Z' + ;; + esac + ;; + *) + ;; + esac + ;; + interix*) + # This is c89, which is MS Visual C++ (no shared libs) + # Anyone wants to do a port? + ;; + irix5* | irix6* | nonstopux*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='-non_shared' + # CC pic flag -KPIC is the default. + ;; + *) + ;; + esac + ;; + linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + case $cc_basename in + KCC*) + # KAI C++ Compiler + lt_prog_compiler_wl_CXX='--backend -Wl,' + lt_prog_compiler_pic_CXX='-fPIC' + ;; + ecpc* ) + # old Intel C++ for x86_64, which still supported -KPIC. + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-static' + ;; + icpc* ) + # Intel C++, used to be incompatible with GCC. 
+ # ICC 10 doesn't accept -KPIC any more. + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-fPIC' + lt_prog_compiler_static_CXX='-static' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-fpic' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + cxx*) + # Compaq C++ + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + lt_prog_compiler_pic_CXX= + lt_prog_compiler_static_CXX='-non_shared' + ;; + xlc* | xlC* | bgxl[cC]* | mpixl[cC]*) + # IBM XL 8.0, 9.0 on PPC and BlueGene + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-qpic' + lt_prog_compiler_static_CXX='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + lt_prog_compiler_wl_CXX='-Qoption ld ' + ;; + esac + ;; + esac + ;; + lynxos*) + ;; + m88k*) + ;; + mvs*) + case $cc_basename in + cxx*) + lt_prog_compiler_pic_CXX='-W c,exportall' + ;; + *) + ;; + esac + ;; + netbsd* | netbsdelf*-gnu) + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic_CXX='-fPIC -shared' + ;; + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + lt_prog_compiler_wl_CXX='--backend -Wl,' + ;; + RCC*) + # Rational C++ 2.4.1 + lt_prog_compiler_pic_CXX='-pic' + ;; + cxx*) + # Digital/Compaq C++ + lt_prog_compiler_wl_CXX='-Wl,' + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + lt_prog_compiler_pic_CXX= + lt_prog_compiler_static_CXX='-non_shared' + ;; + *) + ;; + esac + ;; + psos*) + ;; + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + lt_prog_compiler_wl_CXX='-Qoption ld ' + ;; + gcx*) + # Green Hills C++ Compiler + lt_prog_compiler_pic_CXX='-PIC' + ;; + *) + ;; + esac + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + lt_prog_compiler_pic_CXX='-pic' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + lcc*) + # Lucid + lt_prog_compiler_pic_CXX='-pic' + ;; + *) + ;; + esac + ;; + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + lt_prog_compiler_pic_CXX='-KPIC' + ;; + *) + ;; + esac + ;; + vxworks*) + ;; + *) + lt_prog_compiler_can_build_shared_CXX=no + ;; + esac + fi + +case $host_os in + # For platforms that do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic_CXX= + ;; + *) + lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC" + ;; +esac + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +$as_echo_n "checking for $compiler option to produce PIC... " >&6; } +if ${lt_cv_prog_compiler_pic_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic_CXX=$lt_prog_compiler_pic_CXX +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_pic_CXX" >&6; } +lt_prog_compiler_pic_CXX=$lt_cv_prog_compiler_pic_CXX + +# +# Check to make sure the PIC flag actually works. 
+# +if test -n "$lt_prog_compiler_pic_CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 +$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... " >&6; } +if ${lt_cv_prog_compiler_pic_works_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic_works_CXX=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC" ## exclude from sc_useless_quotes_in_assignment + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_pic_works_CXX=yes + fi + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_pic_works_CXX" >&6; } + +if test yes = "$lt_cv_prog_compiler_pic_works_CXX"; then + case $lt_prog_compiler_pic_CXX in + "" | " "*) ;; + *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;; + esac +else + lt_prog_compiler_pic_CXX= + lt_prog_compiler_can_build_shared_CXX=no +fi + +fi + + + + + +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\" +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } +if ${lt_cv_prog_compiler_static_works_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_static_works_CXX=no + save_LDFLAGS=$LDFLAGS + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. 
+ cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_static_works_CXX=yes + fi + else + lt_cv_prog_compiler_static_works_CXX=yes + fi + fi + $RM -r conftest* + LDFLAGS=$save_LDFLAGS + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_static_works_CXX" >&6; } + +if test yes = "$lt_cv_prog_compiler_static_works_CXX"; then + : +else + lt_prog_compiler_static_CXX= +fi + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o_CXX=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o_CXX=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o_CXX=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. 
+ lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o_CXX=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } + + + + +hard_links=nottested +if test no = "$lt_cv_prog_compiler_c_o_CXX" && test no != "$need_locks"; then + # do not overwrite the value of need_locks provided by the user + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 +$as_echo_n "checking if we can lock with hard links... " >&6; } + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 +$as_echo "$hard_links" >&6; } + if test no = "$hard_links"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5 +$as_echo "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } + + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' + case $host_os in + aix[4-9]*) + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to GNU nm, but means don't demangle to AIX nm. + # Without the "-l" option, or with the "-B" option, AIX nm treats + # weak defined symbols like other global defined symbols, whereas + # GNU nm marks them as "W". + # While the 'weak' keyword is ignored in the Export File, we need + # it in the Import File for the 'aix-soname' feature, so we have + # to replace the "-B" option with "-P" for AIX nm. 
+ if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds_CXX='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' + fi + ;; + pw32*) + export_symbols_cmds_CXX=$ltdll_cmds + ;; + cygwin* | mingw* | cegcc*) + case $cc_basename in + cl*) + exclude_expsyms_CXX='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' + ;; + *) + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' + exclude_expsyms_CXX='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + ;; + esac + ;; + linux* | k*bsd*-gnu | gnu*) + link_all_deplibs_CXX=no + ;; + *) + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + ;; + esac + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 +$as_echo "$ld_shlibs_CXX" >&6; } +test no = "$ld_shlibs_CXX" && can_build_shared=no + +with_gnu_ld_CXX=$with_gnu_ld + + + + + + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc_CXX" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc_CXX=yes + + if test yes,yes = "$GCC,$enable_shared"; then + case $archive_cmds_CXX in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 +$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } +if ${lt_cv_archive_cmds_need_lc_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + $RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl_CXX + pic_flag=$lt_prog_compiler_pic_CXX + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag_CXX + allow_undefined_flag_CXX= + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 + (eval $archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } + then + lt_cv_archive_cmds_need_lc_CXX=no + else + lt_cv_archive_cmds_need_lc_CXX=yes + fi + allow_undefined_flag_CXX=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc_CXX" >&5 +$as_echo "$lt_cv_archive_cmds_need_lc_CXX" >&6; } + archive_cmds_need_lc_CXX=$lt_cv_archive_cmds_need_lc_CXX + ;; + esac + fi + ;; +esac + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 +$as_echo_n "checking dynamic linker characteristics... " >&6; } + +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=.so +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + + + +case $host_os in +aix3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='$libname$release$shared_ext$major' + ;; + +aix[4-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test ia64 = "$host_cpu"; then + # AIX 5 supports IA64 + library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line '#! .'. This would cause the generated library to + # depend on '.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # Using Import Files as archive members, it is possible to support + # filename-based versioning of shared library archives on AIX. While + # this would work for both with and without runtime linking, it will + # prevent static linking of such archives. So we do filename-based + # shared library versioning with .so extension only, which is used + # when both runtime linking and shared linking is enabled. + # Unfortunately, runtime linking may impact performance, so we do + # not want this to be the default eventually. Also, we use the + # versioned .so libs for executables only if there is the -brtl + # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. 
+ # To allow for filename-based versioning support, we need to create + # libNAME.so.V as an archive file, containing: + # *) an Import File, referring to the versioned filename of the + # archive as well as the shared archive member, telling the + # bitwidth (32 or 64) of that shared object, and providing the + # list of exported symbols of that shared object, eventually + # decorated with the 'weak' keyword + # *) the shared object with the F_LOADONLY flag set, to really avoid + # it being seen by the linker. + # At run time we better use the real file rather than another symlink, + # but for link time we create the symlink libNAME.so -> libNAME.so.V + + case $with_aix_soname,$aix_use_runtimelinking in + # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + aix,yes) # traditional libtool + dynamic_linker='AIX unversionable lib.so' + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + ;; + aix,no) # traditional AIX only + dynamic_linker='AIX lib.a(lib.so.V)' + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='$libname$release.a $libname.a' + soname_spec='$libname$release$shared_ext$major' + ;; + svr4,*) # full svr4 only + dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)" + library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' + # We do not specify a path in Import Files, so LIBPATH fires. + shlibpath_overrides_runpath=yes + ;; + *,yes) # both, prefer svr4 + dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)" + library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' + # unpreferred sharedlib libNAME.a needs extra handling + postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' + postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' + # We do not specify a path in Import Files, so LIBPATH fires. 
+ shlibpath_overrides_runpath=yes + ;; + *,no) # both, prefer aix + dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)" + library_names_spec='$libname$release.a $libname.a' + soname_spec='$libname$release$shared_ext$major' + # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling + postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' + postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' + ;; + esac + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. + # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + +beos*) + library_names_spec='$libname$shared_ext' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32* | cegcc*) + version_type=windows + shrext_cmds=.dll + need_version=no + need_lib_prefix=no + + case $GCC,$cc_basename in + yes,*) + # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo $libname | sed -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo $libname | sed -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + ;; + esac + dynamic_linker='Win32 ld.exe' + ;; + + *,cl*) + # Native MSVC + libname_spec='$name' + soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' + library_names_spec='$libname.dll.lib' + + case $build_os in + mingw*) + sys_lib_search_path_spec= + lt_save_ifs=$IFS + IFS=';' + for lt_path in $LIB + do + IFS=$lt_save_ifs + # Let DOS variable expansion print the short 8.3 style file name. + lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` + sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" + done + IFS=$lt_save_ifs + # Convert to MSYS style. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` + ;; + cygwin*) + # Convert to unix form, then to dos form, then back to unix form + # but this time dos style (no spaces!) so that the unix form looks + # like /cygdrive/c/PROGRA~1:/cygdr... + sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` + sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` + sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + ;; + *) + sys_lib_search_path_spec=$LIB + if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # FIXME: find the short name or the path components, as spaces are + # common. (e.g. "Program Files" -> "PROGRA~1") + ;; + esac + + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + dynamic_linker='Win32 link.exe' + ;; + + *) + # Assume MSVC wrapper + library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib' + dynamic_linker='Win32 ld.exe' + ;; + esac + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' + soname_spec='$libname$release$major$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. + if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[23].*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2.*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + *) # from 4.6 on, and DragonFly + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +haiku*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + dynamic_linker="$host_os runtime_loader" + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=no + sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+ library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + if test 32 = "$HPUX_IA64_MODE"; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + sys_lib_dlsearch_path_spec=/usr/lib/hpux32 + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + sys_lib_dlsearch_path_spec=/usr/lib/hpux64 + fi + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555, ... + postinstall_cmds='chmod 555 $lib' + # or fails outright, so override atomically: + install_override_mode=555 + ;; + +interix[3-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test yes = "$lt_cv_prog_gnu_ld"; then + version_type=linux # correct to gnu/linux during the next big refactor + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='$libname$release$shared_ext$major' + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" + sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +linux*android*) + version_type=none # Android doesn't support versioned libraries. 
+ need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext' + soname_spec='$libname$release$shared_ext' + finish_cmds= + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + dynamic_linker='Android linker' + # Don't embed -rpath directories since the linker doesn't support them. + hardcode_libdir_flag_spec_CXX='-L$libdir' + ;; + +# This must be glibc/ELF. +linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + + # Some binutils ld are patched to set DT_RUNPATH + if ${lt_cv_shlibpath_overrides_runpath+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_shlibpath_overrides_runpath=no + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$lt_prog_compiler_wl_CXX\"; \ + LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec_CXX\"" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : + lt_cv_shlibpath_overrides_runpath=yes +fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir + +fi + + shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Ideally, we could use ldconfig to report *all* directores which are + # searched for libraries, however this is still not possible. Aside from not + # being certain /sbin/ldconfig is available, command + # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, + # even though it is searched at run-time. Try to do the best guess by + # appending ld.so.conf contents (and includes) to the search path. + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. 
+ dynamic_linker='GNU/Linux ld.so' + ;; + +netbsdelf*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='NetBSD ld.elf_so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +*nto* | *qnx*) + version_type=qnx + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; + +openbsd* | bitrig*) + version_type=sunos + sys_lib_dlsearch_path_spec=/usr/lib + need_lib_prefix=no + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then + need_version=no + else + need_version=yes + fi + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +os2*) + libname_spec='$name' + version_type=windows + shrext_cmds=.dll + need_version=no + need_lib_prefix=no + # OS/2 can only load a DLL with a base name of 8 characters or less. + soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; + v=$($ECHO $release$versuffix | tr -d .-); + n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); + $ECHO $n$v`$shared_ext' + library_names_spec='${libname}_dll.$libext' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=BEGINLIBPATH + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + postinstall_cmds='base_file=`basename \$file`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; $ECHO \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='$libname$release$shared_ext$major' + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + +rdos*) + dynamic_linker=no + ;; + +solaris*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test yes = "$with_gnu_ld"; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec; then + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' + soname_spec='$libname$shared_ext.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=sco + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + if test yes = "$with_gnu_ld"; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + case $host_os in + sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
+ version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +uts4*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' + soname_spec='$libname$release$shared_ext$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 +$as_echo "$dynamic_linker" >&6; } +test no = "$dynamic_linker" && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test yes = "$GCC"; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then + sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec +fi + +if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then + sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec +fi + +# remember unaugmented sys_lib_dlsearch_path content for libtool script decls... +configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec + +# ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code +func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" + +# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool +configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 +$as_echo_n "checking how to hardcode library paths into programs... " >&6; } +hardcode_action_CXX= +if test -n "$hardcode_libdir_flag_spec_CXX" || + test -n "$runpath_var_CXX" || + test yes = "$hardcode_automatic_CXX"; then + + # We can hardcode non-existent directories. + if test no != "$hardcode_direct_CXX" && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, CXX)" && + test no != "$hardcode_minus_L_CXX"; then + # Linking always hardcodes the temporary library directory. + hardcode_action_CXX=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action_CXX=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. 
+ hardcode_action_CXX=unsupported +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action_CXX" >&5 +$as_echo "$hardcode_action_CXX" >&6; } + +if test relink = "$hardcode_action_CXX" || + test yes = "$inherit_rpath_CXX"; then + # Fast installation is not supported + enable_fast_install=no +elif test yes = "$shlibpath_overrides_runpath" || + test no = "$enable_shared"; then + # Fast installation is not necessary + enable_fast_install=needless +fi + + + + + + + + fi # test -n "$compiler" + + CC=$lt_save_CC + CFLAGS=$lt_save_CFLAGS + LDCXX=$LD + LD=$lt_save_LD + GCC=$lt_save_GCC + with_gnu_ld=$lt_save_with_gnu_ld + lt_cv_path_LDCXX=$lt_cv_path_LD + lt_cv_path_LD=$lt_save_path_LD + lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld + lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld +fi # test yes != "$_lt_caught_CXX_error" + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + + + + + + + + + + + + + + + ac_config_commands="$ac_config_commands libtool" + + + + +# Only expand once: + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 +$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } +set x ${MAKE-make} +ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` +if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat >conftest.make <<\_ACEOF +SHELL = /bin/sh +all: + @echo '@@@%%%=$(MAKE)=@@@%%%' +_ACEOF +# GNU make sometimes prints "make[1]: Entering ...", which would confuse us. +case `${MAKE-make} -f conftest.make 2>/dev/null` in + *@@@%%%=?*=@@@%%%*) + eval ac_cv_prog_make_${ac_make}_set=yes;; + *) + eval ac_cv_prog_make_${ac_make}_set=no;; +esac +rm -f conftest.make +fi +if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + SET_MAKE= +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + SET_MAKE="MAKE=${MAKE-make}" +fi + +# Extract the first word of "mkdir", so it can be a program name with args. +set dummy mkdir; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_MKDIR+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $MKDIR in + [\\/]* | ?:[\\/]*) + ac_cv_path_MKDIR="$MKDIR" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_MKDIR="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +MKDIR=$ac_cv_path_MKDIR +if test -n "$MKDIR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR" >&5 +$as_echo "$MKDIR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + +# Documentation. +# Check whether --enable-documentation was given. 
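+# (For illustration: the reference documentation is only built when doxygen is
+# found; dot is detected separately for diagrams.  Hypothetical invocations to
+# force the choice either way:
+#     ./configure --enable-documentation
+#     ./configure --disable-documentation
+# With the default of "auto", the docs are built whenever doxygen is present.)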
+if test "${enable_documentation+set}" = set; then : + enableval=$enable_documentation; +else + enable_documentation=auto +fi + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}doxygen", so it can be a program name with args. +set dummy ${ac_tool_prefix}doxygen; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_DOXYGEN+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $DOXYGEN in + [\\/]* | ?:[\\/]*) + ac_cv_path_DOXYGEN="$DOXYGEN" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_DOXYGEN="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +DOXYGEN=$ac_cv_path_DOXYGEN +if test -n "$DOXYGEN"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DOXYGEN" >&5 +$as_echo "$DOXYGEN" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_path_DOXYGEN"; then + ac_pt_DOXYGEN=$DOXYGEN + # Extract the first word of "doxygen", so it can be a program name with args. +set dummy doxygen; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_ac_pt_DOXYGEN+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $ac_pt_DOXYGEN in + [\\/]* | ?:[\\/]*) + ac_cv_path_ac_pt_DOXYGEN="$ac_pt_DOXYGEN" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_ac_pt_DOXYGEN="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +ac_pt_DOXYGEN=$ac_cv_path_ac_pt_DOXYGEN +if test -n "$ac_pt_DOXYGEN"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_pt_DOXYGEN" >&5 +$as_echo "$ac_pt_DOXYGEN" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_pt_DOXYGEN" = x; then + DOXYGEN="nodoxygen" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DOXYGEN=$ac_pt_DOXYGEN + fi +else + DOXYGEN="$ac_cv_path_DOXYGEN" +fi + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dot", so it can be a program name with args. +set dummy ${ac_tool_prefix}dot; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_HAVE_DOT+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$HAVE_DOT"; then + ac_cv_prog_HAVE_DOT="$HAVE_DOT" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in NO +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_HAVE_DOT="${ac_tool_prefix}dot" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +HAVE_DOT=$ac_cv_prog_HAVE_DOT +if test -n "$HAVE_DOT"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $HAVE_DOT" >&5 +$as_echo "$HAVE_DOT" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_HAVE_DOT"; then + ac_ct_HAVE_DOT=$HAVE_DOT + # Extract the first word of "dot", so it can be a program name with args. +set dummy dot; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_HAVE_DOT+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_HAVE_DOT"; then + ac_cv_prog_ac_ct_HAVE_DOT="$ac_ct_HAVE_DOT" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in NO +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ac_ct_HAVE_DOT="dot" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_HAVE_DOT=$ac_cv_prog_ac_ct_HAVE_DOT +if test -n "$ac_ct_HAVE_DOT"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_HAVE_DOT" >&5 +$as_echo "$ac_ct_HAVE_DOT" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_HAVE_DOT" = x; then + HAVE_DOT="YES" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + HAVE_DOT=$ac_ct_HAVE_DOT + fi +else + HAVE_DOT="$ac_cv_prog_HAVE_DOT" +fi + +if test "$enable_documentation" = "yes" && test "$DOXYGEN" = "nodoxygen"; then : + as_fn_error $? "could not find tools necessary to build documentation" "$LINENO" 5 +fi + if test "$enable_documentation" != "no" -a "$DOXYGEN" != "nodoxygen"; then + BUILD_REFERENCE_TRUE= + BUILD_REFERENCE_FALSE='#' +else + BUILD_REFERENCE_TRUE='#' + BUILD_REFERENCE_FALSE= +fi + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable maintainer-specific portions of Makefiles" >&5 +$as_echo_n "checking whether to enable maintainer-specific portions of Makefiles... " >&6; } + # Check whether --enable-maintainer-mode was given. +if test "${enable_maintainer_mode+set}" = set; then : + enableval=$enable_maintainer_mode; USE_MAINTAINER_MODE=$enableval +else + USE_MAINTAINER_MODE=no +fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_MAINTAINER_MODE" >&5 +$as_echo "$USE_MAINTAINER_MODE" >&6; } + if test $USE_MAINTAINER_MODE = yes; then + MAINTAINER_MODE_TRUE= + MAINTAINER_MODE_FALSE='#' +else + MAINTAINER_MODE_TRUE='#' + MAINTAINER_MODE_FALSE= +fi + + MAINT=$MAINTAINER_MODE_TRUE + + + +# See if we want stricter compiler warnings. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking maintainer mode" >&5 +$as_echo_n "checking maintainer mode... " >&6; } +# Check whether --enable-maintainer-mode was given. 
+if test "${enable_maintainer_mode+set}" = set; then : + enableval=$enable_maintainer_mode; +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${enable_maintainer_mode}" >&5 +$as_echo "${enable_maintainer_mode}" >&6; } + +# See if we want runtime debug checking. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking audit" >&5 +$as_echo_n "checking audit... " >&6; } +# Check whether --enable-audit was given. +if test "${enable_audit+set}" = set; then : + enableval=$enable_audit; +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${enable_audit}" >&5 +$as_echo "${enable_audit}" >&6; } + +# See if we want "suggestions," such as "this class could be final." +# (The suggestions are often useful, but can also easily be wrong.) +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking suggest" >&5 +$as_echo_n "checking suggest... " >&6; } +# Check whether --enable-suggest was given. +if test "${enable_suggest+set}" = set; then : + enableval=$enable_suggest; +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${enable_suggest}" >&5 +$as_echo "${enable_suggest}" >&6; } + + +# Check whether --enable-shared was given. +if test "${enable_shared+set}" = set; then : + enableval=$enable_shared; +fi + +if test "${shared}" = "yes" ; then : + CPPFLAGS="$CPPFLAGS -DPQXX_SHARED" +fi + + +# Add options to compiler command line, if compiler accepts them. +add_compiler_opts_if_ok() { + for option in $* + do + ACO_SAVE_CXXFLAGS="$CXXFLAGS" + CXXFLAGS="$CXXFLAGS $option" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts $option" >&5 +$as_echo_n "checking whether $CXX accepts $option... " >&6; } + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + has_option=yes +else + has_option=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $has_option" >&5 +$as_echo "$has_option" >&6; } + if test "$has_option" = "no" ; then : + CXXFLAGS="$ACO_SAVE_CXXFLAGS" +fi + done +} + + +# Add options to compiler command line, unconditionally. +add_compiler_opts() { + CXXFLAGS="$CXXFLAGS $*" +} + + +# It's tempting to use Autoconf Archive's AX_CXX_COMPILE_STDCXX_17 for this, +# but it's 2022 and the C++20 equivalent isn't quite ready for use. +# Seems simpler and more reliable for the user to arrange for the desired +# language versions by setting the appropriate option for their compiler. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sufficient C++ language/library level" >&5 +$as_echo_n "checking for sufficient C++ language/library level... " >&6; } +sufficient_cxx=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #if __cplusplus < 201611L + #error "Need C++17 or better." + #endif + +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + sufficient_cxx=yes +else + sufficient_cxx=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $sufficient_cxx" >&5 +$as_echo "$sufficient_cxx" >&6; } +if test "$sufficient_cxx" != "yes" +then + as_fn_error $? "This libpqxx version needs at least C++17." "$LINENO" 5 +fi + + +# Let's try to get the compiler to be helpful. +# +# (Omit options -Weffc++ and -Wabi because they currently yield too many +# warnings in gcc's standard headers; omit -Wunreachable-code because it isn't +# always right) +if test "$GCC" = "yes" +then + # In maintainer mode, enable all the warning options we can. 
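+    # (The add_compiler_opts_if_ok helper defined above keeps a flag only if an
+    # empty translation unit still compiles with it.  For illustration, the same
+    # probe can be run by hand; the compiler name and flag below are examples
+    # taken from the list that follows, not prescriptions:
+    #     echo 'int main() { return 0; }' > probe.cxx
+    #     g++ -Wduplicated-cond -c probe.cxx && echo "flag accepted"
+    # A flag the compiler rejects is simply left out of CXXFLAGS.)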
+ if test "$enable_maintainer_mode" = "yes" + then + # "Eternal" (FLW) g++ options. These have been around for + # ages, and both g++ and clang++ support them. Don't bother + # checking for support; just add them to the compiler options. + add_compiler_opts \ + -fstrict-enums \ + -Werror \ + -Wall \ + -pedantic \ + -Wcast-align \ + -Wcast-qual \ + -Wconversion \ + -Wctor-dtor-privacy \ + -Wendif-labels \ + -Wextra \ + -Wfloat-equal \ + -Wformat=2 \ + -Wformat-security \ + -Wmissing-include-dirs \ + -Wno-div-by-zero \ + -Wnon-virtual-dtor \ + -Wold-style-cast \ + -Woverlength-strings \ + -Woverloaded-virtual \ + -Wpointer-arith \ + -Wredundant-decls \ + -Wshadow \ + -Wsign-promo \ + -Wundef \ + -Wunused \ + -Wwrite-strings + + # "Iffy" g++ options. Some reasonably current g++-like + # compilers may not support these. + add_compiler_opts_if_ok \ + -fnothrow-opt \ + -Wattribute-alias=2 \ + -Wextra-semi \ + -Wlogical-op \ + -Wmismatched-tags \ + -Wnoexcept \ + -Wredundant-tags \ + -Wrestrict \ + -Wstringop-overflow \ + -Wzero-as-null-pointer-constant \ + -Warray-bounds=2 \ + -Wduplicated-branches \ + -Wduplicated-cond \ + -Wsuggest-attribute=noreturn \ + -Wsuggest-override \ + -Wtrampolines + fi + + # In "audit," enable all runtime checks we can. + if test "$enable_audit" = "yes" + then + add_compiler_opts_if_ok \ + -D_FORTIFY_SOURCE=2 \ + -fsanitize=address \ + -fsanitize-address-use-after-scope \ + -fsanitize=alignment \ + -fsanitize=bool \ + -fsanitize=bounds \ + -fsanitize=bounds-strict \ + -fsanitize=builtin \ + -fsanitize=enum \ + -fsanitize=float-cast-overflow \ + -fsanitize=float-divide-by-zero \ + -fsanitize=integer-divide-by-zero \ + -fsanitize=leak \ + -fsanitize=nonnull-attribute \ + -fsanitize=null \ + -fsanitize=object-size \ + -fsanitize=pointer-compare \ + -fsanitize=pointer-overflow \ + -fsanitize=pointer-subtract \ + -fsanitize=return \ + -fsanitize=returns-nonnull-attribute \ + -fsanitize=shift \ + -fsanitize=shift-base \ + -fsanitize=shift-exponent \ + -fsanitize=signed-integer-overflow \ + -fsanitize=undefined \ + -fsanitize=unreachable \ + -fsanitize=vla-bound \ + -fsanitize=vptr \ + -fstack-protector-all + fi + + # In "suggest" mode, enable a bunch of code suggestions. + if test "$enable_suggest" = "yes" + then + add_compiler_opts_if_ok \ + -Wsuggest-attribute=cold \ + -Wsuggest-attribute=const \ + -Wsuggest-attribute=malloc \ + -Wsuggest-attribute=pure \ + -Wsuggest-final-types \ + -Wsuggest-final-methods + fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking g++ visibility attribute" >&5 +$as_echo_n "checking g++ visibility attribute... " >&6; } +gcc_visibility=yes +SAVE_CXXFLAGS="$CXXFLAGS" +CXXFLAGS="$CXXFLAGS -Werror" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +// Test for gcc-style "visibility" attribute. + +struct __attribute__((visibility("hidden"))) D + +{ + + D() {} + + int f() { return 0; } + +}; + + + +int main() + +{ + + D d; + + return d.f(); + +} + + +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +$as_echo "#define PQXX_HAVE_GCC_VISIBILITY 1" >>confdefs.h + +else + gcc_visibility=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_visibility" >&5 +$as_echo "$gcc_visibility" >&6; } +CXXFLAGS="$SAVE_CXXFLAGS" +if test "$gcc_visibility" = "yes" +then + # Make internal definitions accessible only to the library itself. + # Only definitions marked PQXX_LIBEXPORT will be accessible. 
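+    # (For illustration: with -fvisibility=hidden, symbols default to "hidden"
+    # and only declarations explicitly marked for export, using the visibility
+    # attribute test-compiled above, remain visible to users of the shared
+    # library.  A hypothetical way to observe the effect on an ELF system is
+    #     nm -D --defined-only libpqxx.so | wc -l
+    # where the exported-symbol count drops sharply once hidden visibility is
+    # the default.)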
+ add_compiler_opts -fvisibility=hidden + add_compiler_opts -fvisibility-inlines-hidden +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking g++ pure attribute" >&5 +$as_echo_n "checking g++ pure attribute... " >&6; } +gcc_pure=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +// Test for gcc-style "pure" attribute. + +int __attribute__((pure)) f() + +{ + + return 0; + +} + + + +int main() + +{ + + return f(); + +} + + +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +$as_echo "#define PQXX_HAVE_GCC_PURE 1" >>confdefs.h + +else + gcc_pure=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_pure" >&5 +$as_echo "$gcc_pure" >&6; } + +fi # End of gcc-specific part. + + +# Check for __cxa_demangle. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking __cxa_demangle" >&5 +$as_echo_n "checking __cxa_demangle... " >&6; } +cxa_demangle=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +// Test for cross-vendor C++ ABI's __cxa_demangle function. + +#include + +#include + +#include + +#include + + + +#include + + + +int main() + +{ + + int status = 0; + + char *name = + + abi::__cxa_demangle(typeid(10).name(), nullptr, nullptr, &status); + + if (status != 0) + + throw std::runtime_error("Demangle failed!"); + + int result = std::strcmp(name, "int"); + + std::free(name); + + return result; + +} + + +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +$as_echo "#define PQXX_HAVE_CXA_DEMANGLE 1" >>confdefs.h + +else + cxa_demangle=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cxa_demangle" >&5 +$as_echo "$cxa_demangle" >&6; } + + +# Check for sufficient Concepts support, introduced with C++20. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking concepts" >&5 +$as_echo_n "checking concepts... " >&6; } +concepts=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +// Test for concepts support. Not just the language feature; also headers. + +#include + +#include + +#include + + + + + +template + +concept Foo = std::ranges::input_range; + + + + + +template auto foo(F const &r) + +{ + + return std::distance(std::begin(r), std::end(r)); + +} + + + + + +int main() + +{ + + std::vector const v{1, 2, 3}; + + std::cout << foo(v) << '\n'; + +} + + +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +$as_echo "#define PQXX_HAVE_CONCEPTS 1" >>confdefs.h + +else + concepts=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $concepts" >&5 +$as_echo "$concepts" >&6; } + + +# Check for C++20 std::span. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking std::span" >&5 +$as_echo_n "checking std::span... " >&6; } +span=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +// Test for std::span. + +#include + + + +int main(int argc, char **argv) + +{ + + std::span args{argv, static_cast(argc)}; + + return static_cast(std::size(args) - 1u); + +} + + +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +$as_echo "#define PQXX_HAVE_SPAN 1" >>confdefs.h + +else + span=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $span" >&5 +$as_echo "$span" >&6; } + + +# Check for multidimensional subscript operator support. Proposed for C++23. 
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for multidimensional subscript operator support" >&5 +$as_echo_n "checking for multidimensional subscript operator support... " >&6; } +multidim_subscript=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +// Test for multidimensional subscript operator support. + +// Proposed for C++23: P2128R6. + +struct table + +{ + + int width = 100; + + + + int operator[](int x, int y) const { return x + width * y; } + +}; + + + + + +int main() + +{ + + return table{}[0, 0]; + +} + + +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +$as_echo "#define PQXX_HAVE_MULTIDIMENSIONAL_SUBSCRIPT 1" >>confdefs.h + +else + multidim_subscript=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $multidim_subscript" >&5 +$as_echo "$multidim_subscript" >&6; } + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for strerror_r()" >&5 +$as_echo_n "checking for strerror_r()... " >&6; } +strerror_r=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +// Check for strerror_r. + +// It can be either the POSIX version (which returns int) or the GNU version + +// (which returns char *). + + + +#include + +#include + + + +int main() + +{ + + char buffer[200]; + + auto res{strerror_r(1, buffer, 200)}; + + // Sidestep type differences. We don't really care what the value is. + + return not not res; + +} + + +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + +$as_echo "#define PQXX_HAVE_STRERROR_R 1" >>confdefs.h + +else + strerror_r=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $strerror_r" >&5 +$as_echo "$strerror_r" >&6; } + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for strerror_s()" >&5 +$as_echo_n "checking for strerror_s()... " >&6; } +strerror_s=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +// Test for strerror_s, as defined in Windows and C11. + +// Presumably this'll be part of the C++ standard some day. + + + +#include + + + +int main() + +{ + + using namespace std; + + char buf[200]; + + return strerror_s(buf, 200, 1); + +} + + +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + +$as_echo "#define PQXX_HAVE_STRERROR_S 1" >>confdefs.h + +else + strerror_s=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $strerror_s" >&5 +$as_echo "$strerror_s" >&6; } + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for std::chrono::year_month_day etc" >&5 +$as_echo_n "checking for std::chrono::year_month_day etc... " >&6; } +year_month_day=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +// Test for std::chrono::year_month_day etc. + +#include + + + +int main() + +{ + + return int(std::chrono::year{1}); + +} + + +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + +$as_echo "#define PQXX_HAVE_YEAR_MONTH_DAY 1" >>confdefs.h + +else + year_month_day=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $year_month_day" >&5 +$as_echo "$year_month_day" >&6; } + + +# Check for [[likely]] and [[unlikely]] attributes. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking attributes \"likely\" and \"unlikely\"." >&5 +$as_echo_n "checking attributes \"likely\" and \"unlikely\".... 
" >&6; } +likely=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +// Test for C++20 [[likely]] and [[unlikely]] attributes. + + + +int main(int argc, char **) + +{ + +#if __cplusplus < 202002L + + deliberately_fail(because, older, C++, standard); + +#endif + + + + int x = 0; + + if (argc == 1) [[likely]] + + x = 0; + + else + + x = 1; + + return x; + +} + + +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +$as_echo "#define PQXX_HAVE_LIKELY 1" >>confdefs.h + +else + likely=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $likely" >&5 +$as_echo "$likely" >&6; } + + +# It's mid-2019, and gcc's charconv supports integers but not yet floats. +# So for now, we test for int and float conversion... separately. +# +# It's worse for older clang versions, which compile the integer conversions +# but then fail at link time because of a missing symbol "__muloti4" with the +# "long long" version. I couldn't resolve that symbol by adding -lm either. +# So don't just compile these tests; link them as well. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C++17 charconv integer conversion" >&5 +$as_echo_n "checking for C++17 charconv integer conversion... " >&6; } +have_charconv_int=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +// Test for std::to_string/std::from_string for integral types. + +#include + +#include + + + +int main() + +{ + + char z[100]; + + auto rt = std::to_chars(std::begin(z), std::end(z), 9ULL); + + if (rt.ec != std::errc{}) + + return 1; + + unsigned long long n; + + auto rf = std::from_chars(std::cbegin(z), std::cend(z), n); + + if (rf.ec != std::errc{}) + + return 2; + + return (n == 9ULL) ? 0 : 1; + +} + + +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + +$as_echo "#define PQXX_HAVE_CHARCONV_INT 1" >>confdefs.h + +else + have_charconv_int=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_charconv_int" >&5 +$as_echo "$have_charconv_int" >&6; } + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C++17 charconv floating-point conversion" >&5 +$as_echo_n "checking for C++17 charconv floating-point conversion... " >&6; } +have_charconv_float=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +// Test for std::to_string/std::from_string for floating-point types. + +#include + +#include + + + +int main() + +{ + + char z[100]; + + auto rt = std::to_chars(std::begin(z), std::end(z), 3.14159L); + + if (rt.ec != std::errc{}) + + return 1; + + long double n; + + auto rf = std::from_chars(std::cbegin(z), std::cend(z), n); + + if (rf.ec != std::errc{}) + + return 2; + + return (n > 3 and n < 4) ? 0 : 1; + +} + + +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + +$as_echo "#define PQXX_HAVE_CHARCONV_FLOAT 1" >>confdefs.h + +else + have_charconv_float=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_charconv_float" >&5 +$as_echo "$have_charconv_float" >&6; } + +# As per #262, clang with libcxxrt does not support thread_local on non-POD +# objects. Luckily we can live without those, it's just less efficient. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for full thread_local support" >&5 +$as_echo_n "checking for full thread_local support... " >&6; } +have_thread_local=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +// Test for std::to_string/std::from_string for floating-point types. + +#include + +#include + + + +int main(int argc, char **) + +{ + +#if defined(__MINGW32__) && defined(__GNUC__) + +# if __GNUC__ < 11 || ((__GNUC__ == 11) && (__GNU_MINOR__ == 0)) + +# error "On MinGW before gcc 11.1, thread_local breaks at run time." + +# endif + +#endif + + thread_local std::stringstream s; + + s << argc; + + std::cout << s.str(); + +} + + +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + +$as_echo "#define PQXX_HAVE_THREAD_LOCAL 1" >>confdefs.h + +else + have_thread_local=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_thread_local" >&5 +$as_echo "$have_thread_local" >&6; } + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for std::this_thread::sleep_for" >&5 +$as_echo_n "checking for std::this_thread::sleep_for... " >&6; } +have_sleep_for=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +// Test for std::this_thread::sleep_for(). + +/* For some reason MinGW's header seems to be broken. + + * + + * But it gets worse. It looks as if we can include without problems + + * in this configuration test. Why does it break when MinGW users try to build + + * the library, but succeed when we try it here? + + * + + * To try and get close to the situation in the library code itself, we try + + * including some standard headers that we don't strictly need here. + + */ + + + +#if __has_include() + +# include + +#endif + + + +#include + +#include + +#include + +#include + +#include + + + +#include + +#include + + + +int main() + +{ + + std::this_thread::sleep_for(std::chrono::microseconds{10u}); + +} + + +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + +$as_echo "#define PQXX_HAVE_SLEEP_FOR 1" >>confdefs.h + +else + have_sleep_for=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_sleep_for" >&5 +$as_echo "$have_sleep_for" >&6; } + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for std::cmp_greater, std::cmp_less_equal, etc" >&5 +$as_echo_n "checking for std::cmp_greater, std::cmp_less_equal, etc... " >&6; } +have_cmp=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +// Test for C++20 std::cmp_greater etc. support. + +#include + + + + + +int main() + +{ + + return std::cmp_greater(-1, 2u) && std::cmp_less_equal(3, 0); + +} + + +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +$as_echo "#define PQXX_HAVE_CMP 1" >>confdefs.h + +else + have_cmp=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_cmp" >&5 +$as_echo "$have_cmp" >&6; } + + +# Doing my own check for poll(). There's one built into autoconf-archive, but +# it produces warnings in C++ (about unnecessarily using "struct", and using 0 +# as a null pointer constant). In maintainer mode, those warnings turn into +# errors. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for poll()" >&5 +$as_echo_n "checking for poll()... " >&6; } +ax_cv_have_poll=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +// Test for poll(). 
+ +#include + + + +int main() + +{ + + return poll(nullptr, 0, 0); + +} + + +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + +$as_echo "#define PQXX_HAVE_POLL 1" >>confdefs.h + +else + ax_cv_have_poll=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_have_poll" >&5 +$as_echo "$ax_cv_have_poll" >&6; } + +if test "$ax_cv_have_poll" != "yes" +then +# No poll(); we'll fall back to select(). + +# Some systems keep select() in a separate library which is not linked by +# default. See if we need one of those. +socklibok=no +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing select" >&5 +$as_echo_n "checking for library containing select... " >&6; } +if ${ac_cv_search_select+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char select (); +int +main () +{ +return select (); + ; + return 0; +} +_ACEOF +for ac_lib in '' socket nsl ws2_32 wsock32 winsock; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_cxx_try_link "$LINENO"; then : + ac_cv_search_select=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_select+:} false; then : + break +fi +done +if ${ac_cv_search_select+:} false; then : + +else + ac_cv_search_select=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_select" >&5 +$as_echo "$ac_cv_search_select" >&6; } +ac_res=$ac_cv_search_select +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + socklibok=yes +fi + + +# Microsoft proprietary libraries do not work with code that is generated with +# autoconf's SEARCH_LIBS macro, so we need to check manually and just use the +# first socket library available. +# We only do this if select() is not available by other means, to avoid picking +# up an unnecessary Windows compatibility library on a non-Windows system. +for l in ws2_32 wsock32 winsock +do + if test "${socklibok}" != "yes" + then + as_ac_Lib=`$as_echo "ac_cv_lib_$l''_main" | $as_tr_sh` +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for main in -l$l" >&5 +$as_echo_n "checking for main in -l$l... " >&6; } +if eval \${$as_ac_Lib+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-l$l $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + +int +main () +{ +return main (); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + eval "$as_ac_Lib=yes" +else + eval "$as_ac_Lib=no" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +eval ac_res=\$$as_ac_Lib + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +if eval test \"x\$"$as_ac_Lib"\" = x"yes"; then : + LIBS="$LIBS -l$l";socklibok=yes +fi + + fi +done + +if test "${socklibok}" != "yes" +then + as_fn_error $? " +Could not figure out how to link a simple sockets-based program. Please read +the config.log file for more clues as to why this failed. 
+" "$LINENO" 5 +fi + +fi # No poll() + + +# Find PostgreSQL includes and libraries +# Extract the first word of "pkg-config", so it can be a program name with args. +set dummy pkg-config; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_PKG_CONFIG+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $PKG_CONFIG in + [\\/]* | ?:[\\/]*) + ac_cv_path_PKG_CONFIG="$PKG_CONFIG" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_PKG_CONFIG="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +PKG_CONFIG=$ac_cv_path_PKG_CONFIG +if test -n "$PKG_CONFIG"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PKG_CONFIG" >&5 +$as_echo "$PKG_CONFIG" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +for ac_prog in pg_config +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_PG_CONFIG+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $PG_CONFIG in + [\\/]* | ?:[\\/]*) + ac_cv_path_PG_CONFIG="$PG_CONFIG" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_path_PG_CONFIG="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + ;; +esac +fi +PG_CONFIG=$ac_cv_path_PG_CONFIG +if test -n "$PG_CONFIG"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PG_CONFIG" >&5 +$as_echo "$PG_CONFIG" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$PG_CONFIG" && break +done + + + +# Check whether --with-postgres-include was given. +if test "${with_postgres_include+set}" = set; then : + withval=$with_postgres_include; if test "x$with_postgres_include" = "xyes"; then : + with_postgres_include="" +fi +fi + + +if test -n "$with_postgres_include" +then + POSTGRES_INCLUDE="-I$with_postgres_include" +else + if test -x "$PKG_CONFIG" || test -x "$PG_CONFIG" + then + # We should prefer pkg-config over pg_config, but there seems to be a + # problem in pkg-config 1.6.3. Until that's been resolved (#291), go + # with pg_config if we can. + if test -x "$PG_CONFIG" + then + # From pg_config we can either get the C compiler options used to + # compile postgres, which isn't quite what we want; or we can get + # the headers directory, without the full option. That's something + # we can work with. The compiler must support the "-I" option for + # that, but both scripts assume that anyway. + POSTGRES_INCLUDE="-I$($PG_CONFIG --includedir)" + else + # From pkg-config we can get the compiler options to extend the + # include path. We use that. 
+ POSTGRES_INCLUDE=$($PKG_CONFIG libpq --cflags-only-I) + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: finding PostgreSQL headers using $POSTGRES_INCLUDE" >&5 +$as_echo "$as_me: finding PostgreSQL headers using $POSTGRES_INCLUDE" >&6;} + else + POSTGRES_INCLUDE="" + + # We have nothing to tell us where the libpq headers are. That's fine + # if the compiler can find it, but if not, fail here. + +ac_fn_cxx_check_header_mongrel "$LINENO" "libpq-fe.h" "ac_cv_header_libpq_fe_h" "$ac_includes_default" +if test "x$ac_cv_header_libpq_fe_h" = xyes; then : + +else + as_fn_error $? " +Can't find the main PostgreSQL client header, libpq-fe.h. Make sure that it +is installed, and either use the --with-postgres-include option or install +pkg-config. +" "$LINENO" 5 +fi + + + fi +fi + + +# Add the compiler option so we can compile configure tests which rely on the +# libpq headers. +CPPFLAGS="$CPPFLAGS $POSTGRES_INCLUDE" + + + +# Check whether --with-postgres-lib was given. +if test "${with_postgres_lib+set}" = set; then : + withval=$with_postgres_lib; if test "x$with_postgres_lib" = "xyes"; then : + with_postgres_lib="" +fi +fi + + +# If no --with-postgres-lib was given, and we have pkg-config, use that. +if test -z "$with_postgres_lib" -a -x "$PKG_CONFIG"; then : + with_postgres_lib=$($PKG_CONFIG libpq --libs-only-L | sed 's/^-L//') +fi + +# pg_config is deprecated, but for some users it may still provide the only +# right answer. For instance, `pkg-config` may not know where `libpq` is +# installed. +if test -z "$with_postgres_lib" -a -x "$PG_CONFIG"; then : + with_postgres_lib=$($PG_CONFIG --libdir) +fi + +if test -n "$with_postgres_lib"; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: using PostgreSQL libraries at $with_postgres_lib" >&5 +$as_echo "$as_me: using PostgreSQL libraries at $with_postgres_lib" >&6;} +else + { $as_echo "$as_me:${as_lineno-$LINENO}: using PostgreSQL libraries in default location" >&5 +$as_echo "$as_me: using PostgreSQL libraries in default location" >&6;} +fi + + + + +ac_fn_cxx_check_header_mongrel "$LINENO" "libpq-fe.h" "ac_cv_header_libpq_fe_h" "$ac_includes_default" +if test "x$ac_cv_header_libpq_fe_h" = xyes; then : + +else + as_fn_error $? " +Can't find the main PostgreSQL client header, libpq-fe.h. Are you sure the +libpq headers are installed correctly, and that we've got the right path? +" "$LINENO" 5 +fi + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ability to compile source files using libpq" >&5 +$as_echo_n "checking for ability to compile source files using libpq... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +PQexec(nullptr,"") + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +else + as_fn_error $? " +Could not compile a call to a basic libpq function. There must be something +seriously wrong with the headers that \"pg_config --includedir\" or \"pkg-config +libpq --cflags\" pointed to; the contents of config.log may give you a clue +about the nature of the failure. +Source including the libpq header libpq-fe.h can be compiled, but a call to the +most basic libpq function PQexec() failed to compile successfully. This is the +litmus test for a working libpq. 
+" "$LINENO" 5 +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + + +if test "x${with_postgres_lib}" = "x"; then + with_postgres_libpath="" +else + with_postgres_libpath="-L${with_postgres_lib}" +fi +LDFLAGS="$LDFLAGS ${with_postgres_libpath}" + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for PQexec in -lpq" >&5 +$as_echo_n "checking for PQexec in -lpq... " >&6; } +if ${ac_cv_lib_pq_PQexec+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lpq ${with_postgres_libpath} $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char PQexec (); +int +main () +{ +return PQexec (); + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + ac_cv_lib_pq_PQexec=yes +else + ac_cv_lib_pq_PQexec=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pq_PQexec" >&5 +$as_echo "$ac_cv_lib_pq_PQexec" >&6; } +if test "x$ac_cv_lib_pq_PQexec" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBPQ 1 +_ACEOF + + LIBS="-lpq $LIBS" + +else + as_fn_error $? " +Did not find the PQexec() function in libpq. This is the litmus test for a +working libpq installation. + +A source file using the PQexec() function did compile without problems, and the +libpq library is available for linking, but for some reason a call to PQexec() +failed to link properly to the libpq library. This may be because the libpq +library file is damaged, or in some incorrect format, or if your libpq is much +more recent than libpqxx version $PQXX_ABI, perhaps libpq has undergone a +radical ABI change. + +The last parts of config.log may give you a clue as to what really went wrong, +but be warned that this is no easy reading. Look for the last error message +occurring in the file. +" "$LINENO" 5 +fi + + + +# PQencryptPasswordConn was added in postgres 10. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for PQencryptPasswordConn" >&5 +$as_echo_n "checking for PQencryptPasswordConn... " >&6; } +have_pqencryptpasswordconn=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ + + extern PGconn *conn; + PQencryptPasswordConn( + conn, "pwd", "user", "scram-sha-256") + + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +$as_echo "#define PQXX_HAVE_PQENCRYPTPASSWORDCONN 1" >>confdefs.h + +else + have_pqencryptpasswordconn=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_pqencryptpasswordconn" >&5 +$as_echo "$have_pqencryptpasswordconn" >&6; } + + +# "Pipeline mode" was introduced in libpq 14. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for libpq pipeline mode" >&5 +$as_echo_n "checking for libpq pipeline mode... " >&6; } +have_pq_pipeline=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +int +main () +{ + + extern PGconn *conn; + PQenterPipelineMode(conn) + + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +$as_echo "#define PQXX_HAVE_PQ_PIPELINE 1" >>confdefs.h + +else + have_pq_pipeline=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_pq_pipeline" >&5 +$as_echo "$have_pq_pipeline" >&6; } + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for usable std::filesystem::path" >&5 +$as_echo_n "checking for usable std::filesystem::path... " >&6; } +have_path=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +// Check for working std::filesystem support. + +#include + + + + + +int main() + +{ + + // Apparently some versions of MinGW lack this comparison operator. + + return std::filesystem::path{} != std::filesystem::path{}; + +} + + +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +$as_echo "#define PQXX_HAVE_PATH 1" >>confdefs.h + +else + have_path=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_path" >&5 +$as_echo "$have_path" >&6; } + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we need a link option for support" >&5 +$as_echo_n "checking whether we need a link option for support... " >&6; } +LIBS_SAVE="$LIBS" +found_fslib=no +for l in '' '-lstdc++fs' '-lc++fs' +do + if test "$found_fslib" != "yes" + then + LIBS="$LIBS $l" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +// Check whether we need to link to the stdc++fs library. + +// + +// We assume that the presence of the header means that we have + +// support for the basics of std::filesystem. This check will succeed if + +// either there is no header, or there is one and it works without + +// any special options. If the link fails, we assume that -lstdc++fs will fix + +// it for us. + + + +#include + + + +#if __has_include() + +# include + +#endif + + + + + +int main() + +{ + +#if __has_include() + + std::cout << std::filesystem::path{"foo.bar"}.c_str() << '\n'; + +#endif + +} + + +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + found_fslib=yes +else + LIBS="$LIBS_SAVE" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test "$found_fslib" = "yes" + then + result_msg="$l" + # (And keep our current value of $LIBS.) + fi + fi +done + +if test "$found_fslib" != "yes" +then + as_fn_error $? " +There seems to be support, but I could not figure out now to make +it work. You'll have to add set your own build options for this. + " "$LINENO" 5 +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $result_msg" >&5 +$as_echo "$result_msg" >&6; } + + +# Remove redundant occurrances of -lpq +LIBS=$(echo "$LIBS" | sed -e 's/-lpq * -lpq\>/-lpq/g') + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking that type of libpq's Oid is as expected" >&5 +$as_echo_n "checking that type of libpq's Oid is as expected... " >&6; } +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + + #include + #include"${srcdir}/include/pqxx/internal/libpq-forward.hxx" + extern void f(pqxx::oid&); + +int +main () +{ +Oid o;f(o) + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +else + as_fn_error $? " +The Oid typedef in libpq has changed. Please notify the libpqxx authors of the +change! 
+" "$LINENO" 5 +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 +$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } +set x ${MAKE-make} +ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` +if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat >conftest.make <<\_ACEOF +SHELL = /bin/sh +all: + @echo '@@@%%%=$(MAKE)=@@@%%%' +_ACEOF +# GNU make sometimes prints "make[1]: Entering ...", which would confuse us. +case `${MAKE-make} -f conftest.make 2>/dev/null` in + *@@@%%%=?*=@@@%%%*) + eval ac_cv_prog_make_${ac_make}_set=yes;; + *) + eval ac_cv_prog_make_${ac_make}_set=no;; +esac +rm -f conftest.make +fi +if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + SET_MAKE= +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + SET_MAKE="MAKE=${MAKE-make}" +fi + + +ac_config_files="$ac_config_files Makefile config/Makefile doc/Makefile doc/Doxyfile src/Makefile test/Makefile tools/Makefile include/Makefile include/pqxx/Makefile libpqxx.pc" + + + +ac_config_commands="$ac_config_commands configitems" + + +ac_config_files="$ac_config_files compile_flags" + +cat >confcache <<\_ACEOF +# This file is a shell script that caches the results of configure +# tests run on this system so they can be shared between configure +# scripts and configure runs, see configure's option --config-cache. +# It is not useful on other systems. If it contains results you don't +# want to keep, you may remove or edit it. +# +# config.status only pays attention to the cache file if you give it +# the --recheck option to rerun configure. +# +# `ac_cv_env_foo' variables (set or unset) will be overridden when +# loading this file, other *unset* `ac_cv_foo' will be assigned the +# following values. + +_ACEOF + +# The following way of writing the cache mishandles newlines in values, +# but we know of no workaround that is simple, portable, and efficient. +# So, we kill variables containing newlines. +# Ultrix sh set writes to stderr and can't be redirected directly, +# and sets the high bit in the cache file unless we assign to the vars. +( + for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + + (set) 2>&1 | + case $as_nl`(ac_space=' '; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + # `set' does not quote correctly, so add quotes: double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \. + sed -n \ + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + ;; #( + *) + # `set' quotes correctly as required by POSIX, so do not add quotes. 
+ sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) | + sed ' + /^ac_cv_env_/b end + t clear + :clear + s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + t end + s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + :end' >>confcache +if diff "$cache_file" confcache >/dev/null 2>&1; then :; else + if test -w "$cache_file"; then + if test "x$cache_file" != "x/dev/null"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 +$as_echo "$as_me: updating cache $cache_file" >&6;} + if test ! -f "$cache_file" || test -h "$cache_file"; then + cat confcache >"$cache_file" + else + case $cache_file in #( + */* | ?:*) + mv -f confcache "$cache_file"$$ && + mv -f "$cache_file"$$ "$cache_file" ;; #( + *) + mv -f confcache "$cache_file" ;; + esac + fi + fi + else + { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 +$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} + fi +fi +rm -f confcache + +test "x$prefix" = xNONE && prefix=$ac_default_prefix +# Let make expand exec_prefix. +test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' + +DEFS=-DHAVE_CONFIG_H + +ac_libobjs= +ac_ltlibobjs= +U= +for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. Remove the extension, and $U if already installed. + ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' + ac_i=`$as_echo "$ac_i" | sed "$ac_script"` + # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR + # will be set to the directory where LIBOBJS objects are built. + as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" + as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' +done +LIBOBJS=$ac_libobjs + +LTLIBOBJS=$ac_ltlibobjs + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5 +$as_echo_n "checking that generated files are newer than configure... " >&6; } + if test -n "$am_sleep_pid"; then + # Hide warnings about reused PIDs. + wait $am_sleep_pid 2>/dev/null + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: done" >&5 +$as_echo "done" >&6; } + if test -n "$EXEEXT"; then + am__EXEEXT_TRUE= + am__EXEEXT_FALSE='#' +else + am__EXEEXT_TRUE='#' + am__EXEEXT_FALSE= +fi + +if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then + as_fn_error $? "conditional \"AMDEP\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then + as_fn_error $? "conditional \"am__fastdepCXX\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then + as_fn_error $? "conditional \"am__fastdepCC\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${BUILD_REFERENCE_TRUE}" && test -z "${BUILD_REFERENCE_FALSE}"; then + as_fn_error $? "conditional \"BUILD_REFERENCE\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${MAINTAINER_MODE_TRUE}" && test -z "${MAINTAINER_MODE_FALSE}"; then + as_fn_error $? "conditional \"MAINTAINER_MODE\" was never defined. +Usually this means the macro was only invoked conditionally." 
"$LINENO" 5 +fi + +: "${CONFIG_STATUS=./config.status}" +ac_write_fail=0 +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files $CONFIG_STATUS" +{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 +$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} +as_write_fail=0 +cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 +#! $SHELL +# Generated by $as_me. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false + +SHELL=\${CONFIG_SHELL-$SHELL} +export SHELL +_ASEOF +cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. +if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. 
+if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. 
+as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -pR'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -pR' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -pR' + fi +else + as_ln_s='cp -pR' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p +if mkdir -p . 2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + + +# as_fn_executable_p FILE +# ----------------------- +# Test if FILE is an executable regular file. +as_fn_executable_p () +{ + test -f "$1" && test -x "$1" +} # as_fn_executable_p +as_test_x='test -x' +as_executable_p=as_fn_executable_p + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +exec 6>&1 +## ----------------------------------- ## +## Main body of $CONFIG_STATUS script. ## +## ----------------------------------- ## +_ASEOF +test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# Save the log message, to keep $0 and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. +ac_log=" +This file was extended by libpqxx $as_me 7.7.3, which was +generated by GNU Autoconf 2.69. 
Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" + +_ACEOF + +case $ac_config_files in *" +"*) set x $ac_config_files; shift; ac_config_files=$*;; +esac + +case $ac_config_headers in *" +"*) set x $ac_config_headers; shift; ac_config_headers=$*;; +esac + + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# Files that config.status was made for. +config_files="$ac_config_files" +config_headers="$ac_config_headers" +config_commands="$ac_config_commands" + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +ac_cs_usage="\ +\`$as_me' instantiates files and other configuration actions +from templates according to the current configuration. Unless the files +and actions are specified as TAGs, all are instantiated by default. + +Usage: $0 [OPTION]... [TAG]... + + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + --config print configuration, then exit + -q, --quiet, --silent + do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + --header=FILE[:TEMPLATE] + instantiate the configuration header FILE + +Configuration files: +$config_files + +Configuration headers: +$config_headers + +Configuration commands: +$config_commands + +Report bugs to ." + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" +ac_cs_version="\\ +libpqxx config.status 7.7.3 +configured by $0, generated by GNU Autoconf 2.69, + with options \\"\$ac_cs_config\\" + +Copyright (C) 2012 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." + +ac_pwd='$ac_pwd' +srcdir='$srcdir' +INSTALL='$INSTALL' +MKDIR_P='$MKDIR_P' +AWK='$AWK' +test -n "\$AWK" || AWK=awk +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# The default lists apply if the user does not specify any file. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=?*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + --*=) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg= + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + + case $ac_option in + # Handling of the options. + -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + $as_echo "$ac_cs_version"; exit ;; + --config | --confi | --conf | --con | --co | --c ) + $as_echo "$ac_cs_config"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + '') as_fn_error $? 
"missing file argument" ;; + esac + as_fn_append CONFIG_FILES " '$ac_optarg'" + ac_need_defaults=false;; + --header | --heade | --head | --hea ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + as_fn_append CONFIG_HEADERS " '$ac_optarg'" + ac_need_defaults=false;; + --he | --h) + # Conflict between --help and --header + as_fn_error $? "ambiguous option: \`$1' +Try \`$0 --help' for more information.";; + --help | --hel | -h ) + $as_echo "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) as_fn_error $? "unrecognized option: \`$1' +Try \`$0 --help' for more information." ;; + + *) as_fn_append ac_config_targets " $1" + ac_need_defaults=false ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +if \$ac_cs_recheck; then + set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion + shift + \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 + CONFIG_SHELL='$SHELL' + export CONFIG_SHELL + exec "\$@" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX + $as_echo "$ac_log" +} >&5 + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# +# INIT-COMMANDS +# +AMDEP_TRUE="$AMDEP_TRUE" MAKE="${MAKE-make}" + + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +sed_quote_subst='$sed_quote_subst' +double_quote_subst='$double_quote_subst' +delay_variable_subst='$delay_variable_subst' +enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`' +macro_version='`$ECHO "$macro_version" | $SED "$delay_single_quote_subst"`' +macro_revision='`$ECHO "$macro_revision" | $SED "$delay_single_quote_subst"`' +enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`' +pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`' +enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`' +shared_archive_member_spec='`$ECHO "$shared_archive_member_spec" | $SED "$delay_single_quote_subst"`' +SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`' +ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`' +PATH_SEPARATOR='`$ECHO "$PATH_SEPARATOR" | $SED "$delay_single_quote_subst"`' +host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`' +host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`' +host_os='`$ECHO "$host_os" | $SED "$delay_single_quote_subst"`' +build_alias='`$ECHO "$build_alias" | $SED "$delay_single_quote_subst"`' +build='`$ECHO "$build" | $SED "$delay_single_quote_subst"`' +build_os='`$ECHO "$build_os" | $SED "$delay_single_quote_subst"`' +SED='`$ECHO "$SED" | $SED "$delay_single_quote_subst"`' +Xsed='`$ECHO "$Xsed" | $SED "$delay_single_quote_subst"`' +GREP='`$ECHO "$GREP" | $SED "$delay_single_quote_subst"`' +EGREP='`$ECHO "$EGREP" | $SED "$delay_single_quote_subst"`' +FGREP='`$ECHO "$FGREP" | $SED "$delay_single_quote_subst"`' +LD='`$ECHO "$LD" | $SED "$delay_single_quote_subst"`' +NM='`$ECHO "$NM" | $SED "$delay_single_quote_subst"`' +LN_S='`$ECHO "$LN_S" | $SED "$delay_single_quote_subst"`' 
+max_cmd_len='`$ECHO "$max_cmd_len" | $SED "$delay_single_quote_subst"`' +ac_objext='`$ECHO "$ac_objext" | $SED "$delay_single_quote_subst"`' +exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' +lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' +lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' +lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' +lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' +lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' +reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' +reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' +OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' +deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' +file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' +file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' +want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' +DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' +sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' +AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' +AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' +archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' +STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' +RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' +old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' +old_postuninstall_cmds='`$ECHO "$old_postuninstall_cmds" | $SED "$delay_single_quote_subst"`' +old_archive_cmds='`$ECHO "$old_archive_cmds" | $SED "$delay_single_quote_subst"`' +lock_old_archive_extraction='`$ECHO "$lock_old_archive_extraction" | $SED "$delay_single_quote_subst"`' +CC='`$ECHO "$CC" | $SED "$delay_single_quote_subst"`' +CFLAGS='`$ECHO "$CFLAGS" | $SED "$delay_single_quote_subst"`' +compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`' +GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_import='`$ECHO "$lt_cv_sys_global_symbol_to_import" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' +lt_cv_nm_interface='`$ECHO "$lt_cv_nm_interface" | $SED "$delay_single_quote_subst"`' +nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' +lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' +lt_cv_truncate_bin='`$ECHO "$lt_cv_truncate_bin" | $SED "$delay_single_quote_subst"`' +objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' +MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' 
+lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' +lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' +need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' +MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' +DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' +NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' +LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' +OTOOL='`$ECHO "$OTOOL" | $SED "$delay_single_quote_subst"`' +OTOOL64='`$ECHO "$OTOOL64" | $SED "$delay_single_quote_subst"`' +libext='`$ECHO "$libext" | $SED "$delay_single_quote_subst"`' +shrext_cmds='`$ECHO "$shrext_cmds" | $SED "$delay_single_quote_subst"`' +extract_expsyms_cmds='`$ECHO "$extract_expsyms_cmds" | $SED "$delay_single_quote_subst"`' +archive_cmds_need_lc='`$ECHO "$archive_cmds_need_lc" | $SED "$delay_single_quote_subst"`' +enable_shared_with_static_runtimes='`$ECHO "$enable_shared_with_static_runtimes" | $SED "$delay_single_quote_subst"`' +export_dynamic_flag_spec='`$ECHO "$export_dynamic_flag_spec" | $SED "$delay_single_quote_subst"`' +whole_archive_flag_spec='`$ECHO "$whole_archive_flag_spec" | $SED "$delay_single_quote_subst"`' +compiler_needs_object='`$ECHO "$compiler_needs_object" | $SED "$delay_single_quote_subst"`' +old_archive_from_new_cmds='`$ECHO "$old_archive_from_new_cmds" | $SED "$delay_single_quote_subst"`' +old_archive_from_expsyms_cmds='`$ECHO "$old_archive_from_expsyms_cmds" | $SED "$delay_single_quote_subst"`' +archive_cmds='`$ECHO "$archive_cmds" | $SED "$delay_single_quote_subst"`' +archive_expsym_cmds='`$ECHO "$archive_expsym_cmds" | $SED "$delay_single_quote_subst"`' +module_cmds='`$ECHO "$module_cmds" | $SED "$delay_single_quote_subst"`' +module_expsym_cmds='`$ECHO "$module_expsym_cmds" | $SED "$delay_single_quote_subst"`' +with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED "$delay_single_quote_subst"`' +allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED "$delay_single_quote_subst"`' +no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`' +hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`' +hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`' +hardcode_minus_L='`$ECHO "$hardcode_minus_L" | $SED "$delay_single_quote_subst"`' +hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_quote_subst"`' +hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' +inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' +link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' +always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' +export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' +exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' +include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' +prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' +postlink_cmds='`$ECHO "$postlink_cmds" | $SED 
"$delay_single_quote_subst"`' +file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' +variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' +need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' +need_version='`$ECHO "$need_version" | $SED "$delay_single_quote_subst"`' +version_type='`$ECHO "$version_type" | $SED "$delay_single_quote_subst"`' +runpath_var='`$ECHO "$runpath_var" | $SED "$delay_single_quote_subst"`' +shlibpath_var='`$ECHO "$shlibpath_var" | $SED "$delay_single_quote_subst"`' +shlibpath_overrides_runpath='`$ECHO "$shlibpath_overrides_runpath" | $SED "$delay_single_quote_subst"`' +libname_spec='`$ECHO "$libname_spec" | $SED "$delay_single_quote_subst"`' +library_names_spec='`$ECHO "$library_names_spec" | $SED "$delay_single_quote_subst"`' +soname_spec='`$ECHO "$soname_spec" | $SED "$delay_single_quote_subst"`' +install_override_mode='`$ECHO "$install_override_mode" | $SED "$delay_single_quote_subst"`' +postinstall_cmds='`$ECHO "$postinstall_cmds" | $SED "$delay_single_quote_subst"`' +postuninstall_cmds='`$ECHO "$postuninstall_cmds" | $SED "$delay_single_quote_subst"`' +finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`' +finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`' +hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`' +sys_lib_search_path_spec='`$ECHO "$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`' +configure_time_dlsearch_path='`$ECHO "$configure_time_dlsearch_path" | $SED "$delay_single_quote_subst"`' +configure_time_lt_sys_library_path='`$ECHO "$configure_time_lt_sys_library_path" | $SED "$delay_single_quote_subst"`' +hardcode_action='`$ECHO "$hardcode_action" | $SED "$delay_single_quote_subst"`' +enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`' +enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`' +enable_dlopen_self_static='`$ECHO "$enable_dlopen_self_static" | $SED "$delay_single_quote_subst"`' +old_striplib='`$ECHO "$old_striplib" | $SED "$delay_single_quote_subst"`' +striplib='`$ECHO "$striplib" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_dirs='`$ECHO "$compiler_lib_search_dirs" | $SED "$delay_single_quote_subst"`' +predep_objects='`$ECHO "$predep_objects" | $SED "$delay_single_quote_subst"`' +postdep_objects='`$ECHO "$postdep_objects" | $SED "$delay_single_quote_subst"`' +predeps='`$ECHO "$predeps" | $SED "$delay_single_quote_subst"`' +postdeps='`$ECHO "$postdeps" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_path='`$ECHO "$compiler_lib_search_path" | $SED "$delay_single_quote_subst"`' +LD_CXX='`$ECHO "$LD_CXX" | $SED "$delay_single_quote_subst"`' +reload_flag_CXX='`$ECHO "$reload_flag_CXX" | $SED "$delay_single_quote_subst"`' +reload_cmds_CXX='`$ECHO "$reload_cmds_CXX" | $SED "$delay_single_quote_subst"`' +old_archive_cmds_CXX='`$ECHO "$old_archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' +compiler_CXX='`$ECHO "$compiler_CXX" | $SED "$delay_single_quote_subst"`' +GCC_CXX='`$ECHO "$GCC_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_no_builtin_flag_CXX='`$ECHO "$lt_prog_compiler_no_builtin_flag_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_static_CXX='`$ECHO 
"$lt_prog_compiler_static_CXX" | $SED "$delay_single_quote_subst"`' +lt_cv_prog_compiler_c_o_CXX='`$ECHO "$lt_cv_prog_compiler_c_o_CXX" | $SED "$delay_single_quote_subst"`' +archive_cmds_need_lc_CXX='`$ECHO "$archive_cmds_need_lc_CXX" | $SED "$delay_single_quote_subst"`' +enable_shared_with_static_runtimes_CXX='`$ECHO "$enable_shared_with_static_runtimes_CXX" | $SED "$delay_single_quote_subst"`' +export_dynamic_flag_spec_CXX='`$ECHO "$export_dynamic_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' +whole_archive_flag_spec_CXX='`$ECHO "$whole_archive_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' +compiler_needs_object_CXX='`$ECHO "$compiler_needs_object_CXX" | $SED "$delay_single_quote_subst"`' +old_archive_from_new_cmds_CXX='`$ECHO "$old_archive_from_new_cmds_CXX" | $SED "$delay_single_quote_subst"`' +old_archive_from_expsyms_cmds_CXX='`$ECHO "$old_archive_from_expsyms_cmds_CXX" | $SED "$delay_single_quote_subst"`' +archive_cmds_CXX='`$ECHO "$archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' +archive_expsym_cmds_CXX='`$ECHO "$archive_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' +module_cmds_CXX='`$ECHO "$module_cmds_CXX" | $SED "$delay_single_quote_subst"`' +module_expsym_cmds_CXX='`$ECHO "$module_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' +with_gnu_ld_CXX='`$ECHO "$with_gnu_ld_CXX" | $SED "$delay_single_quote_subst"`' +allow_undefined_flag_CXX='`$ECHO "$allow_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' +no_undefined_flag_CXX='`$ECHO "$no_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_flag_spec_CXX='`$ECHO "$hardcode_libdir_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_separator_CXX='`$ECHO "$hardcode_libdir_separator_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_direct_CXX='`$ECHO "$hardcode_direct_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_direct_absolute_CXX='`$ECHO "$hardcode_direct_absolute_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_minus_L_CXX='`$ECHO "$hardcode_minus_L_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_shlibpath_var_CXX='`$ECHO "$hardcode_shlibpath_var_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_automatic_CXX='`$ECHO "$hardcode_automatic_CXX" | $SED "$delay_single_quote_subst"`' +inherit_rpath_CXX='`$ECHO "$inherit_rpath_CXX" | $SED "$delay_single_quote_subst"`' +link_all_deplibs_CXX='`$ECHO "$link_all_deplibs_CXX" | $SED "$delay_single_quote_subst"`' +always_export_symbols_CXX='`$ECHO "$always_export_symbols_CXX" | $SED "$delay_single_quote_subst"`' +export_symbols_cmds_CXX='`$ECHO "$export_symbols_cmds_CXX" | $SED "$delay_single_quote_subst"`' +exclude_expsyms_CXX='`$ECHO "$exclude_expsyms_CXX" | $SED "$delay_single_quote_subst"`' +include_expsyms_CXX='`$ECHO "$include_expsyms_CXX" | $SED "$delay_single_quote_subst"`' +prelink_cmds_CXX='`$ECHO "$prelink_cmds_CXX" | $SED "$delay_single_quote_subst"`' +postlink_cmds_CXX='`$ECHO "$postlink_cmds_CXX" | $SED "$delay_single_quote_subst"`' +file_list_spec_CXX='`$ECHO "$file_list_spec_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_action_CXX='`$ECHO "$hardcode_action_CXX" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_dirs_CXX='`$ECHO "$compiler_lib_search_dirs_CXX" | $SED "$delay_single_quote_subst"`' +predep_objects_CXX='`$ECHO "$predep_objects_CXX" | $SED "$delay_single_quote_subst"`' +postdep_objects_CXX='`$ECHO "$postdep_objects_CXX" | $SED "$delay_single_quote_subst"`' +predeps_CXX='`$ECHO "$predeps_CXX" | $SED "$delay_single_quote_subst"`' +postdeps_CXX='`$ECHO 
"$postdeps_CXX" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_path_CXX='`$ECHO "$compiler_lib_search_path_CXX" | $SED "$delay_single_quote_subst"`' + +LTCC='$LTCC' +LTCFLAGS='$LTCFLAGS' +compiler='$compiler_DEFAULT' + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +\$1 +_LTECHO_EOF' +} + +# Quote evaled strings. +for var in SHELL \ +ECHO \ +PATH_SEPARATOR \ +SED \ +GREP \ +EGREP \ +FGREP \ +LD \ +NM \ +LN_S \ +lt_SP2NL \ +lt_NL2SP \ +reload_flag \ +OBJDUMP \ +deplibs_check_method \ +file_magic_cmd \ +file_magic_glob \ +want_nocaseglob \ +DLLTOOL \ +sharedlib_from_linklib_cmd \ +AR \ +AR_FLAGS \ +archiver_list_spec \ +STRIP \ +RANLIB \ +CC \ +CFLAGS \ +compiler \ +lt_cv_sys_global_symbol_pipe \ +lt_cv_sys_global_symbol_to_cdecl \ +lt_cv_sys_global_symbol_to_import \ +lt_cv_sys_global_symbol_to_c_name_address \ +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ +lt_cv_nm_interface \ +nm_file_list_spec \ +lt_cv_truncate_bin \ +lt_prog_compiler_no_builtin_flag \ +lt_prog_compiler_pic \ +lt_prog_compiler_wl \ +lt_prog_compiler_static \ +lt_cv_prog_compiler_c_o \ +need_locks \ +MANIFEST_TOOL \ +DSYMUTIL \ +NMEDIT \ +LIPO \ +OTOOL \ +OTOOL64 \ +shrext_cmds \ +export_dynamic_flag_spec \ +whole_archive_flag_spec \ +compiler_needs_object \ +with_gnu_ld \ +allow_undefined_flag \ +no_undefined_flag \ +hardcode_libdir_flag_spec \ +hardcode_libdir_separator \ +exclude_expsyms \ +include_expsyms \ +file_list_spec \ +variables_saved_for_relink \ +libname_spec \ +library_names_spec \ +soname_spec \ +install_override_mode \ +finish_eval \ +old_striplib \ +striplib \ +compiler_lib_search_dirs \ +predep_objects \ +postdep_objects \ +predeps \ +postdeps \ +compiler_lib_search_path \ +LD_CXX \ +reload_flag_CXX \ +compiler_CXX \ +lt_prog_compiler_no_builtin_flag_CXX \ +lt_prog_compiler_pic_CXX \ +lt_prog_compiler_wl_CXX \ +lt_prog_compiler_static_CXX \ +lt_cv_prog_compiler_c_o_CXX \ +export_dynamic_flag_spec_CXX \ +whole_archive_flag_spec_CXX \ +compiler_needs_object_CXX \ +with_gnu_ld_CXX \ +allow_undefined_flag_CXX \ +no_undefined_flag_CXX \ +hardcode_libdir_flag_spec_CXX \ +hardcode_libdir_separator_CXX \ +exclude_expsyms_CXX \ +include_expsyms_CXX \ +file_list_spec_CXX \ +compiler_lib_search_dirs_CXX \ +predep_objects_CXX \ +postdep_objects_CXX \ +predeps_CXX \ +postdeps_CXX \ +compiler_lib_search_path_CXX; do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[\\\\\\\`\\"\\\$]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +# Double-quote double-evaled strings. 
+for var in reload_cmds \ +old_postinstall_cmds \ +old_postuninstall_cmds \ +old_archive_cmds \ +extract_expsyms_cmds \ +old_archive_from_new_cmds \ +old_archive_from_expsyms_cmds \ +archive_cmds \ +archive_expsym_cmds \ +module_cmds \ +module_expsym_cmds \ +export_symbols_cmds \ +prelink_cmds \ +postlink_cmds \ +postinstall_cmds \ +postuninstall_cmds \ +finish_cmds \ +sys_lib_search_path_spec \ +configure_time_dlsearch_path \ +configure_time_lt_sys_library_path \ +reload_cmds_CXX \ +old_archive_cmds_CXX \ +old_archive_from_new_cmds_CXX \ +old_archive_from_expsyms_cmds_CXX \ +archive_cmds_CXX \ +archive_expsym_cmds_CXX \ +module_cmds_CXX \ +module_expsym_cmds_CXX \ +export_symbols_cmds_CXX \ +prelink_cmds_CXX \ +postlink_cmds_CXX; do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[\\\\\\\`\\"\\\$]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +ac_aux_dir='$ac_aux_dir' + +# See if we are running on zsh, and set the options that allow our +# commands through without removal of \ escapes INIT. +if test -n "\${ZSH_VERSION+set}"; then + setopt NO_GLOB_SUBST +fi + + + PACKAGE='$PACKAGE' + VERSION='$VERSION' + RM='$RM' + ofile='$ofile' + + + + + + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + +# Handling of arguments. +for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "include/pqxx/config.h") CONFIG_HEADERS="$CONFIG_HEADERS include/pqxx/config.h" ;; + "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; + "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; + "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "config/Makefile") CONFIG_FILES="$CONFIG_FILES config/Makefile" ;; + "doc/Makefile") CONFIG_FILES="$CONFIG_FILES doc/Makefile" ;; + "doc/Doxyfile") CONFIG_FILES="$CONFIG_FILES doc/Doxyfile" ;; + "src/Makefile") CONFIG_FILES="$CONFIG_FILES src/Makefile" ;; + "test/Makefile") CONFIG_FILES="$CONFIG_FILES test/Makefile" ;; + "tools/Makefile") CONFIG_FILES="$CONFIG_FILES tools/Makefile" ;; + "include/Makefile") CONFIG_FILES="$CONFIG_FILES include/Makefile" ;; + "include/pqxx/Makefile") CONFIG_FILES="$CONFIG_FILES include/pqxx/Makefile" ;; + "libpqxx.pc") CONFIG_FILES="$CONFIG_FILES libpqxx.pc" ;; + "configitems") CONFIG_COMMANDS="$CONFIG_COMMANDS configitems" ;; + "compile_flags") CONFIG_FILES="$CONFIG_FILES compile_flags" ;; + + *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; + esac +done + + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. +if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files + test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers + test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. 
+# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= ac_tmp= + trap 'exit_status=$? + : "${ac_tmp:=$tmp}" + { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status +' 0 + trap 'as_fn_exit 1' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. + +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 +ac_tmp=$tmp + +# Set up the scripts for CONFIG_FILES section. +# No need to generate them if there are no CONFIG_FILES. +# This happens for instance with `./config.status config.h'. +if test -n "$CONFIG_FILES"; then + + +ac_cr=`echo X | tr X '\015'` +# On cygwin, bash can eat \r inside `` if the user requested igncr. +# But we know of no other shell where ac_cr would be empty at this +# point, so we can use a bashism as a fallback. +if test "x$ac_cr" = x; then + eval ac_cr=\$\'\\r\' +fi +ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` +if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then + ac_cs_awk_cr='\\r' +else + ac_cs_awk_cr=$ac_cr +fi + +echo 'BEGIN {' >"$ac_tmp/subs1.awk" && +_ACEOF + + +{ + echo "cat >conf$$subs.awk <<_ACEOF" && + echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && + echo "_ACEOF" +} >conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 +ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` +ac_delim='%!_!# ' +for ac_last_try in false false false false false :; do + . ./conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + + ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` + if test $ac_delim_n = $ac_delim_num; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done +rm -f conf$$subs.sh + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && +_ACEOF +sed -n ' +h +s/^/S["/; s/!.*/"]=/ +p +g +s/^[^!]*!// +:repl +t repl +s/'"$ac_delim"'$// +t delim +:nl +h +s/\(.\{148\}\)..*/\1/ +t more1 +s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ +p +n +b repl +:more1 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t nl +:delim +h +s/\(.\{148\}\)..*/\1/ +t more2 +s/["\\]/\\&/g; s/^/"/; s/$/"/ +p +b +:more2 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t delim +' >$CONFIG_STATUS || ac_write_fail=1 +rm -f conf$$subs.awk +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +_ACAWK +cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && + for (key in S) S_is_set[key] = 1 + FS = "" + +} +{ + line = $ 0 + nfields = split(line, field, "@") + substed = 0 + len = length(field[1]) + for (i = 2; i < nfields; i++) { + key = field[i] + keylen = length(key) + if (S_is_set[key]) { + value = S[key] + line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) + len += length(value) + length(field[++i]) + substed = 1 + } else + len += 1 + keylen + } + + print line +} + +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then + sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" +else + cat +fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ + || as_fn_error $? 
"could not setup config files machinery" "$LINENO" 5 +_ACEOF + +# VPATH may cause trouble with some makes, so we remove sole $(srcdir), +# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and +# trailing colons and then remove the whole line if VPATH becomes empty +# (actually we leave an empty line to preserve line numbers). +if test "x$srcdir" = x.; then + ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ +h +s/// +s/^/:/ +s/[ ]*$/:/ +s/:\$(srcdir):/:/g +s/:\${srcdir}:/:/g +s/:@srcdir@:/:/g +s/^:*// +s/:*$// +x +s/\(=[ ]*\).*/\1/ +G +s/\n// +s/^[^=]*=[ ]*$// +}' +fi + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +fi # test -n "$CONFIG_FILES" + +# Set up the scripts for CONFIG_HEADERS section. +# No need to generate them if there are no CONFIG_HEADERS. +# This happens for instance with `./config.status Makefile'. +if test -n "$CONFIG_HEADERS"; then +cat >"$ac_tmp/defines.awk" <<\_ACAWK || +BEGIN { +_ACEOF + +# Transform confdefs.h into an awk script `defines.awk', embedded as +# here-document in config.status, that substitutes the proper values into +# config.h.in to produce config.h. + +# Create a delimiter string that does not exist in confdefs.h, to ease +# handling of long lines. +ac_delim='%!_!# ' +for ac_last_try in false false :; do + ac_tt=`sed -n "/$ac_delim/p" confdefs.h` + if test -z "$ac_tt"; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done + +# For the awk script, D is an array of macro values keyed by name, +# likewise P contains macro parameters if any. Preserve backslash +# newline sequences. + +ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* +sed -n ' +s/.\{148\}/&'"$ac_delim"'/g +t rset +:rset +s/^[ ]*#[ ]*define[ ][ ]*/ / +t def +d +:def +s/\\$// +t bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3"/p +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p +d +:bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3\\\\\\n"\\/p +t cont +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p +t cont +d +:cont +n +s/.\{148\}/&'"$ac_delim"'/g +t clear +:clear +s/\\$// +t bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/"/p +d +:bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p +b cont +' >$CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + for (key in D) D_is_set[key] = 1 + FS = "" +} +/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { + line = \$ 0 + split(line, arg, " ") + if (arg[1] == "#") { + defundef = arg[2] + mac1 = arg[3] + } else { + defundef = substr(arg[1], 2) + mac1 = arg[2] + } + split(mac1, mac2, "(") #) + macro = mac2[1] + prefix = substr(line, 1, index(line, defundef) - 1) + if (D_is_set[macro]) { + # Preserve the white space surrounding the "#". + print prefix "define", macro P[macro] D[macro] + next + } else { + # Replace #undef with comments. This is necessary, for example, + # in the case of _POSIX_SOURCE, which is predefined and required + # on some systems where configure will not decide to define it. + if (defundef == "undef") { + print "/*", prefix defundef, macro, "*/" + next + } + } +} +{ print } +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + as_fn_error $? 
"could not setup config headers machinery" "$LINENO" 5 +fi # test -n "$CONFIG_HEADERS" + + +eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" +shift +for ac_tag +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$ac_tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. + test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; + esac + case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + as_fn_append ac_file_inputs " '$ac_f'" + done + + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input='Generated from '` + $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + `' by configure.' + if test x"$ac_file" != x-; then + configure_input="$ac_file. $configure_input" + { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 +$as_echo "$as_me: creating $ac_file" >&6;} + fi + # Neutralize special characters interpreted by sed in replacement strings. + case $configure_input in #( + *\&* | *\|* | *\\* ) + ac_sed_conf_input=`$as_echo "$configure_input" | + sed 's/[\\\\&|]/\\\\&/g'`;; #( + *) ac_sed_conf_input=$configure_input;; + esac + + case $ac_tag in + *:-:* | *:-) cat >"$ac_tmp/stdin" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir="$ac_dir"; as_fn_mkdir_p + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. 
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + + case $ac_mode in + :F) + # + # CONFIG_FILE + # + + case $INSTALL in + [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; + *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; + esac + ac_MKDIR_P=$MKDIR_P + case $MKDIR_P in + [\\/$]* | ?:[\\/]* ) ;; + */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; + esac +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# If the template does not know about datarootdir, expand it. +# FIXME: This hack should be removed a few years after 2.60. +ac_datarootdir_hack=; ac_datarootdir_seen= +ac_sed_dataroot=' +/datarootdir/ { + p + q +} +/@datadir@/p +/@docdir@/p +/@infodir@/p +/@localedir@/p +/@mandir@/p' +case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in +*datarootdir*) ac_datarootdir_seen=yes;; +*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + ac_datarootdir_hack=' + s&@datadir@&$datadir&g + s&@docdir@&$docdir&g + s&@infodir@&$infodir&g + s&@localedir@&$localedir&g + s&@mandir@&$mandir&g + s&\\\${datarootdir}&$datarootdir&g' ;; +esac +_ACEOF + +# Neutralize VPATH when `$srcdir' = `.'. +# Shell code in configure.ac might set extrasub. +# FIXME: do we really want to maintain this feature? +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_sed_extra="$ac_vpsub +$extrasub +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s|@configure_input@|$ac_sed_conf_input|;t t +s&@top_builddir@&$ac_top_builddir_sub&;t t +s&@top_build_prefix@&$ac_top_build_prefix&;t t +s&@srcdir@&$ac_srcdir&;t t +s&@abs_srcdir@&$ac_abs_srcdir&;t t +s&@top_srcdir@&$ac_top_srcdir&;t t +s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t +s&@builddir@&$ac_builddir&;t t +s&@abs_builddir@&$ac_abs_builddir&;t t +s&@abs_top_builddir@&$ac_abs_top_builddir&;t t +s&@INSTALL@&$ac_INSTALL&;t t +s&@MKDIR_P@&$ac_MKDIR_P&;t t +$ac_datarootdir_hack +" +eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ + >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + +test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ + "$ac_tmp/out"`; test -z "$ac_out"; } && + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&5 +$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&2;} + + rm -f "$ac_tmp/stdin" + case $ac_file in + -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; + *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; + esac \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + ;; + :H) + # + # CONFIG_HEADER + # + if test x"$ac_file" != x-; then + { + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" + } >"$ac_tmp/config.h" \ + || as_fn_error $? 
"could not create $ac_file" "$LINENO" 5 + if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then + { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 +$as_echo "$as_me: $ac_file is unchanged" >&6;} + else + rm -f "$ac_file" + mv "$ac_tmp/config.h" "$ac_file" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + fi + else + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ + || as_fn_error $? "could not create -" "$LINENO" 5 + fi +# Compute "$ac_file"'s index in $config_headers. +_am_arg="$ac_file" +_am_stamp_count=1 +for _am_header in $config_headers :; do + case $_am_header in + $_am_arg | $_am_arg:* ) + break ;; + * ) + _am_stamp_count=`expr $_am_stamp_count + 1` ;; + esac +done +echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" || +$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$_am_arg" : 'X\(//\)[^/]' \| \ + X"$_am_arg" : 'X\(//\)$' \| \ + X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$_am_arg" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'`/stamp-h$_am_stamp_count + ;; + + :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 +$as_echo "$as_me: executing $ac_file commands" >&6;} + ;; + esac + + + case $ac_file$ac_mode in + "depfiles":C) test x"$AMDEP_TRUE" != x"" || { + # Older Autoconf quotes --file arguments for eval, but not when files + # are listed without --file. Let's play safe and only enable the eval + # if we detect the quoting. + # TODO: see whether this extra hack can be removed once we start + # requiring Autoconf 2.70 or later. + case $CONFIG_FILES in #( + *\'*) : + eval set x "$CONFIG_FILES" ;; #( + *) : + set x $CONFIG_FILES ;; #( + *) : + ;; +esac + shift + # Used to flag and report bootstrapping failures. + am_rc=0 + for am_mf + do + # Strip MF so we end up with the name of the file. + am_mf=`$as_echo "$am_mf" | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile which includes + # dependency-tracking related rules and includes. + # Grep'ing the whole file directly is not great: AIX grep has a line + # limit of 2048, but all sed's we know have understand at least 4000. + sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \ + || continue + am_dirpart=`$as_dirname -- "$am_mf" || +$as_expr X"$am_mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$am_mf" : 'X\(//\)[^/]' \| \ + X"$am_mf" : 'X\(//\)$' \| \ + X"$am_mf" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$am_mf" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + am_filepart=`$as_basename -- "$am_mf" || +$as_expr X/"$am_mf" : '.*/\([^/][^/]*\)/*$' \| \ + X"$am_mf" : 'X\(//\)$' \| \ + X"$am_mf" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$am_mf" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + { echo "$as_me:$LINENO: cd "$am_dirpart" \ + && sed -e '/# am--include-marker/d' "$am_filepart" \ + | $MAKE -f - am--depfiles" >&5 + (cd "$am_dirpart" \ + && sed -e '/# am--include-marker/d' "$am_filepart" \ + | $MAKE -f - am--depfiles) >&5 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } || am_rc=$? 
+ done
+ if test $am_rc -ne 0; then
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "Something went wrong bootstrapping makefile fragments
+ for automatic dependency tracking. If GNU make was not used, consider
+ re-running the configure script with MAKE=\"gmake\" (or whatever is
+ necessary). You can also try re-running configure with the
+ '--disable-dependency-tracking' option to at least be able to build
+ the package (albeit without support for automatic dependency tracking).
+See \`config.log' for more details" "$LINENO" 5; }
+ fi
+ { am_dirpart=; unset am_dirpart;}
+ { am_filepart=; unset am_filepart;}
+ { am_mf=; unset am_mf;}
+ { am_rc=; unset am_rc;}
+ rm -f conftest-deps.mk
+}
+ ;;
+ "libtool":C)
+
+ # See if we are running on zsh, and set the options that allow our
+ # commands through without removal of \ escapes.
+ if test -n "${ZSH_VERSION+set}"; then
+ setopt NO_GLOB_SUBST
+ fi
+
+ cfgfile=${ofile}T
+ trap "$RM \"$cfgfile\"; exit 1" 1 2 15
+ $RM "$cfgfile"
+
+ cat <<_LT_EOF >> "$cfgfile"
+#! $SHELL
+# Generated automatically by $as_me ($PACKAGE) $VERSION
+# NOTE: Changes made to this file will be lost: look at ltmain.sh.
+
+# Provide generalized library-building support services.
+# Written by Gordon Matzigkeit, 1996
+
+# Copyright (C) 2014 Free Software Foundation, Inc.
+# This is free software; see the source for copying conditions. There is NO
+# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+# GNU Libtool is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of of the License, or
+# (at your option) any later version.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program or library that is built
+# using GNU Libtool, you may include this file under the same
+# distribution terms that you use for the rest of that program.
+#
+# GNU Libtool is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+# The names of the tagged configurations supported by this script.
+available_tags='CXX '
+
+# Configured defaults for sys_lib_dlsearch_path munging.
+: \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"}
+
+# ### BEGIN LIBTOOL CONFIG
+
+# Whether or not to build shared libraries.
+build_libtool_libs=$enable_shared
+
+# Which release of libtool.m4 was used?
+macro_version=$macro_version
+macro_revision=$macro_revision
+
+# Whether or not to build static libraries.
+build_old_libs=$enable_static
+
+# What type of objects to build.
+pic_mode=$pic_mode
+
+# Whether or not to optimize for fast installation.
+fast_install=$enable_fast_install
+
+# Shared archive member basename,for filename based shared library versioning on AIX.
+shared_archive_member_spec=$shared_archive_member_spec
+
+# Shell to use when invoking shell scripts.
+SHELL=$lt_SHELL
+
+# An echo program that protects backslashes.
+ECHO=$lt_ECHO
+
+# The PATH separator for the build system.
+PATH_SEPARATOR=$lt_PATH_SEPARATOR
+
+# The host system.
+host_alias=$host_alias
+host=$host
+host_os=$host_os
+
+# The build system.
+build_alias=$build_alias +build=$build +build_os=$build_os + +# A sed program that does not truncate output. +SED=$lt_SED + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="\$SED -e 1s/^X//" + +# A grep program that handles long lines. +GREP=$lt_GREP + +# An ERE matcher. +EGREP=$lt_EGREP + +# A literal string matcher. +FGREP=$lt_FGREP + +# A BSD- or MS-compatible name lister. +NM=$lt_NM + +# Whether we need soft or hard links. +LN_S=$lt_LN_S + +# What is the maximum length of a command? +max_cmd_len=$max_cmd_len + +# Object file suffix (normally "o"). +objext=$ac_objext + +# Executable file suffix (normally ""). +exeext=$exeext + +# whether the shell understands "unset". +lt_unset=$lt_unset + +# turn spaces into newlines. +SP2NL=$lt_lt_SP2NL + +# turn newlines into spaces. +NL2SP=$lt_lt_NL2SP + +# convert \$build file names to \$host format. +to_host_file_cmd=$lt_cv_to_host_file_cmd + +# convert \$build files to toolchain format. +to_tool_file_cmd=$lt_cv_to_tool_file_cmd + +# An object symbol dumper. +OBJDUMP=$lt_OBJDUMP + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method = "file_magic". +file_magic_cmd=$lt_file_magic_cmd + +# How to find potential files when deplibs_check_method = "file_magic". +file_magic_glob=$lt_file_magic_glob + +# Find potential files using nocaseglob when deplibs_check_method = "file_magic". +want_nocaseglob=$lt_want_nocaseglob + +# DLL creation program. +DLLTOOL=$lt_DLLTOOL + +# Command to associate shared and link libraries. +sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd + +# The archiver. +AR=$lt_AR + +# Flags to create an archive. +AR_FLAGS=$lt_AR_FLAGS + +# How to feed a file listing to the archiver. +archiver_list_spec=$lt_archiver_list_spec + +# A symbol stripping program. +STRIP=$lt_STRIP + +# Commands used to install an old-style archive. +RANLIB=$lt_RANLIB +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Whether to use a lock for old archive extraction. +lock_old_archive_extraction=$lock_old_archive_extraction + +# A C compiler. +LTCC=$lt_CC + +# LTCC compiler flags. +LTCFLAGS=$lt_CFLAGS + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration. +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm into a list of symbols to manually relocate. +global_symbol_to_import=$lt_lt_cv_sys_global_symbol_to_import + +# Transform the output of nm in a C name address pair. +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# Transform the output of nm in a C name address pair when lib prefix is needed. +global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix + +# The name lister interface. +nm_interface=$lt_lt_cv_nm_interface + +# Specify filename containing input files for \$NM. +nm_file_list_spec=$lt_nm_file_list_spec + +# The root where to search for dependent libraries,and where our libraries should be installed. +lt_sysroot=$lt_sysroot + +# Command to truncate a binary pipe. +lt_truncate_bin=$lt_lt_cv_truncate_bin + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# Used to examine libraries when file_magic_cmd begins with "file". 
+MAGIC_CMD=$MAGIC_CMD + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Manifest tool. +MANIFEST_TOOL=$lt_MANIFEST_TOOL + +# Tool to manipulate archived DWARF debug symbol files on Mac OS X. +DSYMUTIL=$lt_DSYMUTIL + +# Tool to change global to local symbols on Mac OS X. +NMEDIT=$lt_NMEDIT + +# Tool to manipulate fat objects and archives on Mac OS X. +LIPO=$lt_LIPO + +# ldd/readelf like tool for Mach-O binaries on Mac OS X. +OTOOL=$lt_OTOOL + +# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. +OTOOL64=$lt_OTOOL64 + +# Old archive suffix (normally "a"). +libext=$libext + +# Shared library suffix (normally ".so"). +shrext_cmds=$lt_shrext_cmds + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at link time. +variables_saved_for_relink=$lt_variables_saved_for_relink + +# Do we need the "lib" prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Library versioning type. +version_type=$version_type + +# Shared library runtime path variable. +runpath_var=$runpath_var + +# Shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Permission mode override for installation of shared libraries. +install_override_mode=$lt_install_override_mode + +# Command to use after installation of a shared archive. +postinstall_cmds=$lt_postinstall_cmds + +# Command to use after uninstallation of a shared archive. +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# As "finish_cmds", except a single script fragment to be evaled but +# not shown. +finish_eval=$lt_finish_eval + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Compile-time system search path for libraries. +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Detected run-time system search path for libraries. +sys_lib_dlsearch_path_spec=$lt_configure_time_dlsearch_path + +# Explicit LT_SYS_LIBRARY_PATH set during ./configure time. +configure_time_lt_sys_library_path=$lt_configure_time_lt_sys_library_path + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + + +# The linker used to build libraries. +LD=$lt_LD + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# Commands used to build an old-style archive. +old_archive_cmds=$lt_old_archive_cmds + +# A language specific compiler. +CC=$lt_compiler + +# Is the compiler the GNU compiler? +with_gcc=$GCC + +# Compiler flag to turn off builtin functions. 
+no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object=$lt_compiler_needs_object + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds + +# Commands used to build a shared archive. +archive_cmds=$lt_archive_cmds +archive_expsym_cmds=$lt_archive_expsym_cmds + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds=$lt_module_cmds +module_expsym_cmds=$lt_module_expsym_cmds + +# Whether we are building with GNU ld or not. +with_gnu_ld=$lt_with_gnu_ld + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag + +# Flag that enforces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=$hardcode_direct + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting \$shlibpath_var if the +# library is relocated. +hardcode_direct_absolute=$hardcode_direct_absolute + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=$hardcode_minus_L + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=$hardcode_automatic + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=$inherit_rpath + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs + +# Set to "yes" if exported symbols are required. +always_export_symbols=$always_export_symbols + +# The commands to list exported symbols. 
+export_symbols_cmds=$lt_export_symbols_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds=$lt_prelink_cmds + +# Commands necessary for finishing linking programs. +postlink_cmds=$lt_postlink_cmds + +# Specify filename containing input files. +file_list_spec=$lt_file_list_spec + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects=$lt_predep_objects +postdep_objects=$lt_postdep_objects +predeps=$lt_predeps +postdeps=$lt_postdeps + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path + +# ### END LIBTOOL CONFIG + +_LT_EOF + + cat <<'_LT_EOF' >> "$cfgfile" + +# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE + +# func_munge_path_list VARIABLE PATH +# ----------------------------------- +# VARIABLE is name of variable containing _space_ separated list of +# directories to be munged by the contents of PATH, which is string +# having a format: +# "DIR[:DIR]:" +# string "DIR[ DIR]" will be prepended to VARIABLE +# ":DIR[:DIR]" +# string "DIR[ DIR]" will be appended to VARIABLE +# "DIRP[:DIRP]::[DIRA:]DIRA" +# string "DIRP[ DIRP]" will be prepended to VARIABLE and string +# "DIRA[ DIRA]" will be appended to VARIABLE +# "DIR[:DIR]" +# VARIABLE will be replaced by "DIR[ DIR]" +func_munge_path_list () +{ + case x$2 in + x) + ;; + *:) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" + ;; + x:*) + eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" + ;; + *::*) + eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" + eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" + ;; + *) + eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" + ;; + esac +} + + +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +func_cc_basename () +{ + for cc_temp in $*""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac + done + func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +} + + +# ### END FUNCTIONS SHARED WITH CONFIGURE + +_LT_EOF + + case $host_os in + aix3*) + cat <<\_LT_EOF >> "$cfgfile" +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test set != "${COLLECT_NAMES+set}"; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +_LT_EOF + ;; + esac + + +ltmain=$ac_aux_dir/ltmain.sh + + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? + sed '$q' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) + + mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + + + cat <<_LT_EOF >> "$ofile" + +# ### BEGIN LIBTOOL TAG CONFIG: CXX + +# The linker used to build libraries. 
+LD=$lt_LD_CXX + +# How to create reloadable object files. +reload_flag=$lt_reload_flag_CXX +reload_cmds=$lt_reload_cmds_CXX + +# Commands used to build an old-style archive. +old_archive_cmds=$lt_old_archive_cmds_CXX + +# A language specific compiler. +CC=$lt_compiler_CXX + +# Is the compiler the GNU compiler? +with_gcc=$GCC_CXX + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic_CXX + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl_CXX + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static_CXX + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc_CXX + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object=$lt_compiler_needs_object_CXX + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX + +# Commands used to build a shared archive. +archive_cmds=$lt_archive_cmds_CXX +archive_expsym_cmds=$lt_archive_expsym_cmds_CXX + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds=$lt_module_cmds_CXX +module_expsym_cmds=$lt_module_expsym_cmds_CXX + +# Whether we are building with GNU ld or not. +with_gnu_ld=$lt_with_gnu_ld_CXX + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag_CXX + +# Flag that enforces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag_CXX + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=$hardcode_direct_CXX + +# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting \$shlibpath_var if the +# library is relocated. +hardcode_direct_absolute=$hardcode_direct_absolute_CXX + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=$hardcode_minus_L_CXX + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. 
+hardcode_automatic=$hardcode_automatic_CXX + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=$inherit_rpath_CXX + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs_CXX + +# Set to "yes" if exported symbols are required. +always_export_symbols=$always_export_symbols_CXX + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds_CXX + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms_CXX + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms_CXX + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds=$lt_prelink_cmds_CXX + +# Commands necessary for finishing linking programs. +postlink_cmds=$lt_postlink_cmds_CXX + +# Specify filename containing input files. +file_list_spec=$lt_file_list_spec_CXX + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action_CXX + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_CXX + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects=$lt_predep_objects_CXX +postdep_objects=$lt_postdep_objects_CXX +predeps=$lt_predeps_CXX +postdeps=$lt_postdeps_CXX + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path_CXX + +# ### END LIBTOOL TAG CONFIG: CXX +_LT_EOF + + ;; + "configitems":C) "${srcdir}/tools/splitconfig" "${srcdir}" ;; + + esac +done # for ac_tag + + +as_fn_exit 0 +_ACEOF +ac_clean_files=$ac_clean_files_save + +test $ac_write_fail = 0 || + as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 + + +# configure is writing to config.log, and then calls config.status. +# config.status does its own redirection, appending to config.log. +# Unfortunately, on DOS this fails, as config.log is still kept open +# by configure, so config.status won't be able to write to it; its +# output is simply discarded. So we exec the FD to /dev/null, +# effectively closing config.log, so it can be properly (re)opened and +# appended to by config.status. When coming back to configure, we +# need to make the FD available again. +if test "$no_create" != yes; then + ac_cs_success=: + ac_config_status_args= + test "$silent" = yes && + ac_config_status_args="$ac_config_status_args --quiet" + exec 5>/dev/null + $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false + exec 5>>config.log + # Use ||, not &&, to avoid exiting from the if with $? = 1, which + # would make configure fail if this is the last instruction. + $ac_cs_success || as_fn_exit 1 +fi +if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 +$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} +fi + diff --git a/ext/libpqxx-7.7.3/configure.ac b/ext/libpqxx-7.7.3/configure.ac new file mode 100644 index 000000000..d6351faf2 --- /dev/null +++ b/ext/libpqxx-7.7.3/configure.ac @@ -0,0 +1,738 @@ +# Generate configure script for libpqxx. This needs the autoconf archive +# package installed. (The configure script itself does not require it though.) 
+ +AC_PREREQ(2.69) +AC_INIT( + libpqxx, + [m4_esyscmd_s([./tools/extract_version])], + [Jeroen T. Vermeulen]) +AC_LANG(C++) + +AC_CONFIG_SRCDIR([src/connection.cxx]) +AC_CONFIG_AUX_DIR(config) +AC_CONFIG_MACRO_DIR([config/m4]) +AM_INIT_AUTOMAKE([subdir-objects]) + +PQXX_ABI=m4_esyscmd_s([./tools/extract_version --abi]) +AC_SUBST(PQXXVERSION, $PACKAGE_VERSION) +AC_SUBST(PQXX_ABI) + +AC_CONFIG_HEADER([include/pqxx/config.h]) + +# Default prefix for installs. +AC_PREFIX_DEFAULT(/usr/local) + + +# Read test programme from config-test. +AC_DEFUN([read_test], [AC_LANG_SOURCE( + esyscmd(tools/m4esc.py --input=config-tests/$1))]) + + +# Checks for programs. +AC_PROG_CXX +AC_PROG_INSTALL +AC_DISABLE_SHARED +AC_PROG_LIBTOOL +AC_PROG_MAKE_SET +AC_PATH_PROG([MKDIR], [mkdir]) + +# Documentation. +AC_ARG_ENABLE( + documentation, + [AS_HELP_STRING([--enable-documentation], [Generate documentation])], + [], + [enable_documentation=auto]) +AC_ARG_VAR([DOXYGEN], + [Path to doxygen needed to build reference documentation]) +AC_PATH_TOOL([DOXYGEN], [doxygen], [nodoxygen]) +AC_ARG_VAR([HAVE_DOT], + [Variable used by doxygen to declare availibility of dot]) +AC_CHECK_TOOL([HAVE_DOT], [dot], [YES], [NO]) +AS_IF([test "$enable_documentation" = "yes" && test "$DOXYGEN" = "nodoxygen"], + [AC_MSG_ERROR([could not find tools necessary to build documentation])]) +AM_CONDITIONAL([BUILD_REFERENCE], + [test "$enable_documentation" != "no" -a "$DOXYGEN" != "nodoxygen"]) + +AM_MAINTAINER_MODE + +# See if we want stricter compiler warnings. +AC_MSG_CHECKING([maintainer mode]) +AC_ARG_ENABLE(maintainer-mode) +AC_MSG_RESULT(${enable_maintainer_mode}) + +# See if we want runtime debug checking. +AC_MSG_CHECKING([audit]) +AC_ARG_ENABLE(audit) +AC_MSG_RESULT(${enable_audit}) + +# See if we want "suggestions," such as "this class could be final." +# (The suggestions are often useful, but can also easily be wrong.) +AC_MSG_CHECKING([suggest]) +AC_ARG_ENABLE(suggest) +AC_MSG_RESULT(${enable_suggest}) + + +AC_ARG_ENABLE(shared) +AS_IF( + [test "${shared}" = "yes" ], + [CPPFLAGS="$CPPFLAGS -DPQXX_SHARED"]) + + +# Add options to compiler command line, if compiler accepts them. +add_compiler_opts_if_ok() { + for option in $* + do + ACO_SAVE_CXXFLAGS="$CXXFLAGS" + CXXFLAGS="$CXXFLAGS $option" + AC_MSG_CHECKING([whether $CXX accepts $option]) + AC_COMPILE_IFELSE( + [AC_LANG_PROGRAM([], [])], + has_option=yes, + has_option=no) + AC_MSG_RESULT($has_option) + AS_IF( + [test "$has_option" = "no" ], + [CXXFLAGS="$ACO_SAVE_CXXFLAGS"]) + done +} + + +# Add options to compiler command line, unconditionally. +add_compiler_opts() { + CXXFLAGS="$CXXFLAGS $*" +} + + +# It's tempting to use Autoconf Archive's AX_CXX_COMPILE_STDCXX_17 for this, +# but it's 2022 and the C++20 equivalent isn't quite ready for use. +# Seems simpler and more reliable for the user to arrange for the desired +# language versions by setting the appropriate option for their compiler. +AC_MSG_CHECKING([for sufficient C++ language/library level]) +sufficient_cxx=yes +AC_COMPILE_IFELSE( + [AC_LANG_SOURCE([ + #if __cplusplus < 201611L + #error "Need C++17 or better." + #endif + ])], + sufficient_cxx=yes, + sufficient_cxx=no) +AC_MSG_RESULT($sufficient_cxx) +if test "$sufficient_cxx" != "yes" +then + AC_MSG_ERROR([This libpqxx version needs at least C++17.]) +fi + + +# Let's try to get the compiler to be helpful. 
+# +# (Omit options -Weffc++ and -Wabi because they currently yield too many +# warnings in gcc's standard headers; omit -Wunreachable-code because it isn't +# always right) +if test "$GCC" = "yes" +then + # In maintainer mode, enable all the warning options we can. + if test "$enable_maintainer_mode" = "yes" + then + # "Eternal" (FLW) g++ options. These have been around for + # ages, and both g++ and clang++ support them. Don't bother + # checking for support; just add them to the compiler options. + add_compiler_opts \ + -fstrict-enums \ + -Werror \ + -Wall \ + -pedantic \ + -Wcast-align \ + -Wcast-qual \ + -Wconversion \ + -Wctor-dtor-privacy \ + -Wendif-labels \ + -Wextra \ + -Wfloat-equal \ + -Wformat=2 \ + -Wformat-security \ + -Wmissing-include-dirs \ + -Wno-div-by-zero \ + -Wnon-virtual-dtor \ + -Wold-style-cast \ + -Woverlength-strings \ + -Woverloaded-virtual \ + -Wpointer-arith \ + -Wredundant-decls \ + -Wshadow \ + -Wsign-promo \ + -Wundef \ + -Wunused \ + -Wwrite-strings + + # "Iffy" g++ options. Some reasonably current g++-like + # compilers may not support these. + add_compiler_opts_if_ok \ + -fnothrow-opt \ + -Wattribute-alias=2 \ + -Wextra-semi \ + -Wlogical-op \ + -Wmismatched-tags \ + -Wnoexcept \ + -Wredundant-tags \ + -Wrestrict \ + -Wstringop-overflow \ + -Wzero-as-null-pointer-constant \ + -Warray-bounds=2 \ + -Wduplicated-branches \ + -Wduplicated-cond \ + -Wsuggest-attribute=noreturn \ + -Wsuggest-override \ + -Wtrampolines + fi + + # In "audit," enable all runtime checks we can. + if test "$enable_audit" = "yes" + then + add_compiler_opts_if_ok \ + -D_FORTIFY_SOURCE=2 \ + -fsanitize=address \ + -fsanitize-address-use-after-scope \ + -fsanitize=alignment \ + -fsanitize=bool \ + -fsanitize=bounds \ + -fsanitize=bounds-strict \ + -fsanitize=builtin \ + -fsanitize=enum \ + -fsanitize=float-cast-overflow \ + -fsanitize=float-divide-by-zero \ + -fsanitize=integer-divide-by-zero \ + -fsanitize=leak \ + -fsanitize=nonnull-attribute \ + -fsanitize=null \ + -fsanitize=object-size \ + -fsanitize=pointer-compare \ + -fsanitize=pointer-overflow \ + -fsanitize=pointer-subtract \ + -fsanitize=return \ + -fsanitize=returns-nonnull-attribute \ + -fsanitize=shift \ + -fsanitize=shift-base \ + -fsanitize=shift-exponent \ + -fsanitize=signed-integer-overflow \ + -fsanitize=undefined \ + -fsanitize=unreachable \ + -fsanitize=vla-bound \ + -fsanitize=vptr \ + -fstack-protector-all + fi + + # In "suggest" mode, enable a bunch of code suggestions. + if test "$enable_suggest" = "yes" + then + add_compiler_opts_if_ok \ + -Wsuggest-attribute=cold \ + -Wsuggest-attribute=const \ + -Wsuggest-attribute=malloc \ + -Wsuggest-attribute=pure \ + -Wsuggest-final-types \ + -Wsuggest-final-methods + fi + +AC_MSG_CHECKING([g++ visibility attribute]) +gcc_visibility=yes +SAVE_CXXFLAGS="$CXXFLAGS" +CXXFLAGS="$CXXFLAGS -Werror" +AC_COMPILE_IFELSE( + [read_test(gcc_visibility.cxx)], + AC_DEFINE( + [PQXX_HAVE_GCC_VISIBILITY], + 1, + [Define if g++ supports visibility attribute.]), + gcc_visibility=no) +AC_MSG_RESULT($gcc_visibility) +CXXFLAGS="$SAVE_CXXFLAGS" +if test "$gcc_visibility" = "yes" +then + # Make internal definitions accessible only to the library itself. + # Only definitions marked PQXX_LIBEXPORT will be accessible. 
+ add_compiler_opts -fvisibility=hidden + add_compiler_opts -fvisibility-inlines-hidden +fi + +AC_MSG_CHECKING([g++ pure attribute]) +gcc_pure=yes +AC_COMPILE_IFELSE( + [read_test(gcc_pure.cxx)], + AC_DEFINE( + [PQXX_HAVE_GCC_PURE], + 1, + [Define if g++ supports pure attribute]), + gcc_pure=no) +AC_MSG_RESULT($gcc_pure) + +fi # End of gcc-specific part. + + +# Check for __cxa_demangle. +AC_MSG_CHECKING([__cxa_demangle]) +cxa_demangle=yes +AC_COMPILE_IFELSE( + [read_test(cxa_demangle.cxx)], + AC_DEFINE( + [PQXX_HAVE_CXA_DEMANGLE], + 1, + [Define if compiler supports __cxa_demangle]), + cxa_demangle=no) +AC_MSG_RESULT($cxa_demangle) + + +# Check for sufficient Concepts support, introduced with C++20. +AC_MSG_CHECKING([concepts]) +concepts=yes +AC_COMPILE_IFELSE( + [read_test(concepts.cxx)], + AC_DEFINE( + [PQXX_HAVE_CONCEPTS], + 1, + [Define if compiler supports Concepts and header.]), + concepts=no) +AC_MSG_RESULT($concepts) + + +# Check for C++20 std::span. +AC_MSG_CHECKING([std::span]) +span=yes +AC_COMPILE_IFELSE( + [read_test(span.cxx)], + AC_DEFINE([PQXX_HAVE_SPAN], 1, [Define if compiler has std::span.]), + span=no) +AC_MSG_RESULT($span) + + +# Check for multidimensional subscript operator support. Proposed for C++23. +AC_MSG_CHECKING([for multidimensional subscript operator support]) +multidim_subscript=yes +AC_COMPILE_IFELSE( + [read_test(multidim-subscript.cxx)], + AC_DEFINE( + [PQXX_HAVE_MULTIDIMENSIONAL_SUBSCRIPT], 1, + [Define if operator[] can take multiple arguments.]), + multidim_subscript=no) +AC_MSG_RESULT($multidim_subscript) + + +AC_MSG_CHECKING([for strerror_r()]) +strerror_r=yes +AC_LINK_IFELSE( + [read_test(strerror_r.cxx)], + AC_DEFINE( + [PQXX_HAVE_STRERROR_R], + 1, + [Define if strerror_r() is available.]), + strerror_r=no) +AC_MSG_RESULT($strerror_r) + + + +AC_MSG_CHECKING([for strerror_s()]) +strerror_s=yes +AC_LINK_IFELSE( + [read_test(strerror_s.cxx)], + AC_DEFINE( + [PQXX_HAVE_STRERROR_S], + 1, + [Define if strerror_s() is available.]), + strerror_s=no) +AC_MSG_RESULT($strerror_s) + + +AC_MSG_CHECKING([for std::chrono::year_month_day etc]) +year_month_day=yes +AC_LINK_IFELSE( + [read_test(year_month_day.cxx)], + AC_DEFINE( + [PQXX_HAVE_YEAR_MONTH_DAY], + 1, + [Define if std::chrono has year_month_day etc.]), + year_month_day=no) +AC_MSG_RESULT($year_month_day) + + +# Check for [[likely]] and [[unlikely]] attributes. +AC_MSG_CHECKING([attributes "likely" and "unlikely".]) +likely=yes +AC_COMPILE_IFELSE( + [read_test(likely.cxx)], + AC_DEFINE([PQXX_HAVE_LIKELY], 1, [Define if likely & unlikely work.]), + likely=no) +AC_MSG_RESULT($likely) + + +# It's mid-2019, and gcc's charconv supports integers but not yet floats. +# So for now, we test for int and float conversion... separately. +# +# It's worse for older clang versions, which compile the integer conversions +# but then fail at link time because of a missing symbol "__muloti4" with the +# "long long" version. I couldn't resolve that symbol by adding -lm either. +# So don't just compile these tests; link them as well. 
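+# (Illustrative sketch only -- the probe sources referenced by read_test()
+# live in config-tests/ and are not included in this diff, so the real
+# charconv_int.cxx may differ.  A link probe of this kind boils down to
+# roughly:
+#     #include <charconv>
+#     int main()
+#     {
+#         char buf[32];
+#         int i = 0;
+#         std::to_chars(buf, buf + sizeof buf, 42);
+#         std::from_chars(buf, buf + sizeof buf, i);
+#     }
+# with the floating-point variant passing a double instead of an int.)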
+AC_MSG_CHECKING([for C++17 charconv integer conversion])
+have_charconv_int=yes
+AC_LINK_IFELSE(
+ [read_test(charconv_int.cxx)],
+ AC_DEFINE(
+ [PQXX_HAVE_CHARCONV_INT],
+ 1,
+ [Define if <charconv> supports integer conversion.]),
+ have_charconv_int=no)
+AC_MSG_RESULT($have_charconv_int)
+
+AC_MSG_CHECKING([for C++17 charconv floating-point conversion])
+have_charconv_float=yes
+AC_LINK_IFELSE(
+ [read_test(charconv_float.cxx)],
+ AC_DEFINE(
+ [PQXX_HAVE_CHARCONV_FLOAT],
+ 1,
+ [Define if <charconv> supports floating-point conversion.]),
+ have_charconv_float=no)
+AC_MSG_RESULT($have_charconv_float)
+
+# As per #262, clang with libcxxrt does not support thread_local on non-POD
+# objects. Luckily we can live without those, it's just less efficient.
+AC_MSG_CHECKING([for full thread_local support])
+have_thread_local=yes
+AC_LINK_IFELSE(
+ [read_test(thread_local.cxx)],
+ AC_DEFINE(
+ [PQXX_HAVE_THREAD_LOCAL],
+ 1,
+ [Define if thread_local is fully supported.]),
+ have_thread_local=no)
+AC_MSG_RESULT($have_thread_local)
+
+AC_MSG_CHECKING([for std::this_thread::sleep_for])
+have_sleep_for=yes
+AC_LINK_IFELSE(
+ [read_test(sleep_for.cxx)],
+ AC_DEFINE(
+ [PQXX_HAVE_SLEEP_FOR],
+ 1,
+ [Define if std::this_thread::sleep_for works.]),
+ have_sleep_for=no)
+AC_MSG_RESULT($have_sleep_for)
+
+
+AC_MSG_CHECKING([for std::cmp_greater, std::cmp_less_equal, etc])
+have_cmp=yes
+AC_COMPILE_IFELSE(
+ [read_test(cmp.cxx)],
+ AC_DEFINE(
+ [PQXX_HAVE_CMP],
+ 1,
+ [Define if compiler has C++20 std::cmp_greater etc.]),
+ have_cmp=no)
+AC_MSG_RESULT($have_cmp)
+
+
+# Doing my own check for poll(). There's one built into autoconf-archive, but
+# it produces warnings in C++ (about unnecessarily using "struct", and using 0
+# as a null pointer constant). In maintainer mode, those warnings turn into
+# errors.
+AC_MSG_CHECKING([for poll()])
+ax_cv_have_poll=yes
+AC_LINK_IFELSE(
+ [read_test(poll.cxx)],
+ AC_DEFINE(
+ [PQXX_HAVE_POLL],
+ 1,
+ [Define if poll() is available.]),
+ ax_cv_have_poll=no)
+AC_MSG_RESULT($ax_cv_have_poll)
+
+if test "$ax_cv_have_poll" != "yes"
+then
+# No poll(); we'll fall back to select().
+
+# Some systems keep select() in a separate library which is not linked by
+# default. See if we need one of those.
+socklibok=no
+AC_SEARCH_LIBS(select, socket nsl ws2_32 wsock32 winsock, [socklibok=yes])
+
+# Microsoft proprietary libraries do not work with code that is generated with
+# autoconf's SEARCH_LIBS macro, so we need to check manually and just use the
+# first socket library available.
+# We only do this if select() is not available by other means, to avoid picking
+# up an unnecessary Windows compatibility library on a non-Windows system.
+for l in ws2_32 wsock32 winsock
+do
+ if test "${socklibok}" != "yes"
+ then
+ AC_CHECK_LIB($l,main,LIBS="$LIBS -l$l";[socklibok=yes])
+ fi
+done
+
+if test "${socklibok}" != "yes"
+then
+ AC_MSG_ERROR([
+Could not figure out how to link a simple sockets-based program. Please read
+the config.log file for more clues as to why this failed.
+])
+fi
+
+fi # No poll()
+
+
+# Find PostgreSQL includes and libraries
+AC_PATH_PROG([PKG_CONFIG], [pkg-config])
+AC_PATH_PROGS(PG_CONFIG, pg_config)
+
+AC_ARG_WITH(
+ [postgres-include],
+ [AS_HELP_STRING(
+ [--with-postgres-include=DIR],
+ [Use PostgreSQL includes from DIR.
Defaults to querying pg_config or pkg-config, whichever is available.])],
+ AS_IF(
+ [test "x$with_postgres_include" = "xyes"],
+ [with_postgres_include=""]))
+
+if test -n "$with_postgres_include"
+then
+ POSTGRES_INCLUDE="-I$with_postgres_include"
+else
+ if test -x "$PKG_CONFIG" || test -x "$PG_CONFIG"
+ then
+ # We should prefer pkg-config over pg_config, but there seems to be a
+ # problem in pkg-config 1.6.3. Until that's been resolved (#291), go
+ # with pg_config if we can.
+ if test -x "$PG_CONFIG"
+ then
+ # From pg_config we can either get the C compiler options used to
+ # compile postgres, which isn't quite what we want; or we can get
+ # the headers directory, without the full option. That's something
+ # we can work with. The compiler must support the "-I" option for
+ # that, but both scripts assume that anyway.
+ POSTGRES_INCLUDE="-I$($PG_CONFIG --includedir)"
+ else
+ # From pkg-config we can get the compiler options to extend the
+ # include path. We use that.
+ POSTGRES_INCLUDE=$($PKG_CONFIG libpq --cflags-only-I)
+ fi
+ AC_MSG_NOTICE([finding PostgreSQL headers using $POSTGRES_INCLUDE])
+ else
+ POSTGRES_INCLUDE=""
+
+ # We have nothing to tell us where the libpq headers are. That's fine
+ # if the compiler can find it, but if not, fail here.
+ AC_CHECK_HEADER(
+ [libpq-fe.h],
+ [],
+ [AC_MSG_ERROR([
+Can't find the main PostgreSQL client header, libpq-fe.h. Make sure that it
+is installed, and either use the --with-postgres-include option or install
+pkg-config.
+])])
+ fi
+fi
+AC_SUBST(POSTGRES_INCLUDE)
+
+# Add the compiler option so we can compile configure tests which rely on the
+# libpq headers.
+CPPFLAGS="$CPPFLAGS $POSTGRES_INCLUDE"
+
+
+AC_ARG_WITH(
+ [postgres-lib],
+ [AS_HELP_STRING(
+ [--with-postgres-lib=DIR],
+ [Use PostgreSQL libraries from DIR. Defaults to querying pg_config.])],
+ AS_IF(
+ [test "x$with_postgres_lib" = "xyes"],
+ [with_postgres_lib=""]))
+
+# If no --with-postgres-lib was given, and we have pkg-config, use that.
+AS_IF(
+ [test -z "$with_postgres_lib" -a -x "$PKG_CONFIG"],
+ [with_postgres_lib=$($PKG_CONFIG libpq --libs-only-L | sed 's/^-L//')])
+
+# pg_config is deprecated, but for some users it may still provide the only
+# right answer. For instance, `pkg-config` may not know where `libpq` is
+# installed.
+AS_IF(
+ [test -z "$with_postgres_lib" -a -x "$PG_CONFIG"],
+ [with_postgres_lib=$($PG_CONFIG --libdir)])
+
+AS_IF(
+ [test -n "$with_postgres_lib"],
+ [AC_MSG_NOTICE([using PostgreSQL libraries at $with_postgres_lib])],
+ [AC_MSG_NOTICE([using PostgreSQL libraries in default location])])
+
+AC_SUBST(with_postgres_lib)
+
+
+AC_CHECK_HEADER(
+ [libpq-fe.h],
+ [],
+ [AC_MSG_ERROR([
+Can't find the main PostgreSQL client header, libpq-fe.h. Are you sure the
+libpq headers are installed correctly, and that we've got the right path?
+])])
+
+
+AC_MSG_CHECKING([for ability to compile source files using libpq])
+AC_COMPILE_IFELSE(
+ [AC_LANG_PROGRAM(
+ [[#include<libpq-fe.h>]],
+ [[PQexec(nullptr,"")]]
+ )],
+ [],
+ [AC_MSG_ERROR([
+Could not compile a call to a basic libpq function. There must be something
+seriously wrong with the headers that "pg_config --includedir" or "pkg-config
+libpq --cflags" pointed to; the contents of config.log may give you a clue
+about the nature of the failure.
+Source including the libpq header libpq-fe.h can be compiled, but a call to the
+most basic libpq function PQexec() failed to compile successfully. This is the
+litmus test for a working libpq.
+])])
+AC_MSG_RESULT(yes)
+
+
+if test "x${with_postgres_lib}" = "x"; then
+ with_postgres_libpath=""
+else
+ with_postgres_libpath="-L${with_postgres_lib}"
+fi
+LDFLAGS="$LDFLAGS ${with_postgres_libpath}"
+
+AC_CHECK_LIB(
+ [pq],
+ [PQexec],
+ [],
+ [AC_MSG_ERROR([
+Did not find the PQexec() function in libpq. This is the litmus test for a
+working libpq installation.
+
+A source file using the PQexec() function did compile without problems, and the
+libpq library is available for linking, but for some reason a call to PQexec()
+failed to link properly to the libpq library. This may be because the libpq
+library file is damaged, or in some incorrect format, or if your libpq is much
+more recent than libpqxx version $PQXX_ABI, perhaps libpq has undergone a
+radical ABI change.
+
+The last parts of config.log may give you a clue as to what really went wrong,
+but be warned that this is no easy reading. Look for the last error message
+occurring in the file.
+])],
+ ${with_postgres_libpath})
+
+
+# PQencryptPasswordConn was added in postgres 10.
+AC_MSG_CHECKING([for PQencryptPasswordConn])
+have_pqencryptpasswordconn=yes
+AC_COMPILE_IFELSE(
+ [AC_LANG_PROGRAM(
+ [#include<libpq-fe.h>],
+ [
+ extern PGconn *conn;
+ PQencryptPasswordConn(
+ conn, "pwd", "user", "scram-sha-256")
+ ]
+ )],
+ AC_DEFINE(
+ [PQXX_HAVE_PQENCRYPTPASSWORDCONN],
+ 1,
+ [Define if libpq has PQencryptPasswordConn (since pg 10).]),
+ [have_pqencryptpasswordconn=no])
+AC_MSG_RESULT($have_pqencryptpasswordconn)
+
+
+# "Pipeline mode" was introduced in libpq 14.
+AC_MSG_CHECKING([for libpq pipeline mode])
+have_pq_pipeline=yes
+AC_COMPILE_IFELSE(
+ [AC_LANG_PROGRAM(
+ [#include<libpq-fe.h>],
+ [
+ extern PGconn *conn;
+ PQenterPipelineMode(conn)
+ ]
+ )],
+ AC_DEFINE(
+ [PQXX_HAVE_PQ_PIPELINE],
+ 1,
+ [Define if libpq has pipeline mode (since pg 14).]),
+ [have_pq_pipeline=no])
+AC_MSG_RESULT($have_pq_pipeline)
+
+
+AC_MSG_CHECKING([for usable std::filesystem::path])
+have_path=yes
+AC_COMPILE_IFELSE(
+ [read_test(fs.cxx)],
+ AC_DEFINE(
+ [PQXX_HAVE_PATH],
+ 1,
+ [Define if compiler has usable std::filesystem::path.]),
+ have_path=no)
+AC_MSG_RESULT($have_path)
+
+
+AC_MSG_CHECKING([whether we need a link option for <filesystem> support])
+LIBS_SAVE="$LIBS"
+found_fslib=no
+for l in '' '-lstdc++fs' '-lc++fs'
+do
+ if test "$found_fslib" != "yes"
+ then
+ LIBS="$LIBS $l"
+ AC_LINK_IFELSE(
+ [read_test(need_fslib.cxx)],
+ [found_fslib=yes],
+ [LIBS="$LIBS_SAVE"])
+ if test "$found_fslib" = "yes"
+ then
+ result_msg="$l"
+ # (And keep our current value of $LIBS.)
+ fi
+ fi
+done
+
+if test "$found_fslib" != "yes"
+then
+ AC_MSG_ERROR([
+There seems to be <filesystem> support, but I could not figure out how to make
+it work. You'll have to add set your own build options for this.
+ ])
+fi
+AC_MSG_RESULT($result_msg)
+
+
+# Remove redundant occurrances of -lpq
+LIBS=[$(echo "$LIBS" | sed -e 's/-lpq * -lpq\>/-lpq/g')]
+
+
+AC_MSG_CHECKING([that type of libpq's Oid is as expected])
+AC_COMPILE_IFELSE(
+ [AC_LANG_PROGRAM(
+ [
+ #include<libpq-fe.h>
+ #include"${srcdir}/include/pqxx/internal/libpq-forward.hxx"
+ extern void f(pqxx::oid&);
+ ],
+ [Oid o;f(o)],
+ )],
+ [],
+ [AC_MSG_ERROR([
+The Oid typedef in libpq has changed. Please notify the libpqxx authors of the change!
+])]) +AC_MSG_RESULT(yes) + + +AC_PROG_MAKE_SET + +AC_CONFIG_FILES([ + Makefile config/Makefile doc/Makefile doc/Doxyfile src/Makefile + test/Makefile tools/Makefile include/Makefile include/pqxx/Makefile + libpqxx.pc]) + + +AC_CONFIG_COMMANDS([configitems], ["${srcdir}/tools/splitconfig" "${srcdir}"]) + +AC_OUTPUT(compile_flags) diff --git a/ext/libpqxx-7.7.3/doc/CMakeLists.txt b/ext/libpqxx-7.7.3/doc/CMakeLists.txt new file mode 100644 index 000000000..d48d2a6f0 --- /dev/null +++ b/ext/libpqxx-7.7.3/doc/CMakeLists.txt @@ -0,0 +1,50 @@ +find_program(HAVE_DOXYGEN doxygen) + +if(NOT HAVE_DOXYGEN) + message(FATAL_ERROR "***************************************************** +Doxygen not found. +Install it, or configure with -DBUILD_DOC=OFF +*****************************************************" + ) +endif() + +set(PQXXVERSION "${CMAKE_PROJECT_VERSION}") +set(top_srcdir "${PROJECT_SOURCE_DIR}") +set(PQXX_ABI "${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}") +set(PQXX_MAJOR "${PROJECT_VERSION_MAJOR}") +set(PQXX_MINOR "${PROJECT_VERSION_MINOR}") + +find_program(HAVE_DOT dot) +if(HAVE_DOT) + set(HAVE_DOT YES) +else() + set(HAVE_DOT NO) +endif() + +configure_file(Doxyfile.in Doxyfile) + +if(HAVE_DOXYGEN) + file( + GLOB DOXYGEN_SOURCES + "${PROJECT_SOURCE_DIR}/include/pqxx/*.hxx" + "${PROJECT_SOURCE_DIR}/include/pqxx/doc/*.md" + "${PROJECT_SOURCE_DIR}/*.cxx" + ) + set(DOXYGEN_STAMP_FILE "${CMAKE_CURRENT_BINARY_DIR}/doxygen.stamp") + add_custom_command(OUTPUT ${DOXYGEN_STAMP_FILE} + COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/html + COMMAND doxygen Doxyfile + COMMAND ${CMAKE_COMMAND} -E touch ${DOXYGEN_STAMP_FILE} + DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile ${DOXYGEN_SOURCES} + COMMENT "Generate API documentation" + VERBATIM + ) + add_custom_target(doxygen ALL + DEPENDS ${DOXYGEN_STAMP_FILE} + SOURCES ${DOXYGEN_SOURCES} + ) + install( + DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/html + DESTINATION ${CMAKE_INSTALL_DOCDIR}/html + ) +endif() diff --git a/ext/libpqxx-7.7.3/doc/Doxyfile.in b/ext/libpqxx-7.7.3/doc/Doxyfile.in new file mode 100644 index 000000000..f349fe80c --- /dev/null +++ b/ext/libpqxx-7.7.3/doc/Doxyfile.in @@ -0,0 +1,1280 @@ +# Doxyfile 1.5.5 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project +# +# All text after a hash (#) is considered a comment and will be ignored +# The format is: +# TAG = value [value, ...] +# For lists items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (" ") + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# http://www.gnu.org/software/libiconv for the list of possible encodings. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded +# by quotes) that should identify the project. + +PROJECT_NAME = libpqxx + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. +# This could be handy for archiving the generated documentation or +# if some version control system is used. 
+ +PROJECT_NUMBER = @PQXXVERSION@ + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) +# base path where the generated documentation will be put. +# If a relative path is entered, it will be relative to the location +# where doxygen was started. If left blank the current directory will be used. + +OUTPUT_DIRECTORY = html + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create +# 4096 sub-directories (in 2 levels) under the output directory of each output +# format and will distribute the generated files over these directories. +# Enabling this option can be useful when feeding doxygen a huge amount of +# source files, where putting all generated files in the same directory would +# otherwise cause performance problems for the file system. + +CREATE_SUBDIRS = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# The default language is English, other supported languages are: +# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, +# Croatian, Czech, Danish, Dutch, Farsi, Finnish, French, German, Greek, +# Hungarian, Italian, Japanese, Japanese-en (Japanese with English messages), +# Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, Polish, +# Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, Swedish, +# and Ukrainian. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). +# Set to NO to disable this. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator +# that is used to form the text in various listings. Each string +# in this list, if found as the leading text of the brief description, will be +# stripped from the text and the result after processing the whole list, is +# used as the annotated text. Otherwise, the brief description is used as-is. +# If left blank, the following values are used ("$name" is automatically +# replaced with the name of the entity): "The $name class" "The $name widget" +# "The $name file" "is" "provides" "specifies" "contains" +# "represents" "a" "an" "the" + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. 
+ +FULL_PATH_NAMES = NO + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the +# path to strip. + +STRIP_FROM_PATH = ../ + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of +# the path mentioned in the documentation of a class, which tells +# the reader which header file to include in order to use a class. +# If left blank only the name of the header file containing the class +# definition is used. Otherwise one should specify the include paths that +# are normally passed to the compiler using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. This can be useful is your file systems +# doesn't support long names like on DOS, Mac, or CD-ROM. + +SHORT_NAMES = YES + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like regular Qt-style comments +# (thus requiring an explicit @brief command for a brief description.) + +JAVADOC_AUTOBRIEF = NO + +# If the QT_AUTOBRIEF tag is set to YES then Doxygen will +# interpret the first line (until the first dot) of a Qt-style +# comment as the brief description. If set to NO, the comments +# will behave just like regular Qt-style comments (thus requiring +# an explicit \brief command for a brief description.) + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# re-implements. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce +# a new page for each member. If set to NO, the documentation of a member will +# be part of the file/class/namespace that contains it. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. + +TAB_SIZE = 8 + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". +# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should +# set this tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. 
func(std::string); v.s. +# func(std::string) {}). This also make the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. + +BUILTIN_STL_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. + +SUBGROUPING = YES + + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. + +EXTRACT_LOCAL_CLASSES = NO + +# This flag is only useful for Objective-C code. When set to YES local +# methods, which are defined in the implementation section but not in +# the interface are included in the documentation. +# If set to NO (the default) only methods in the interface are included. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base +# name of the file that contains the anonymous namespace. By default +# anonymous namespace are hidden. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. 
+ +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. +# Set it to YES to include the internal documentation. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = YES + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = NO + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in +# declaration order. + +SORT_BRIEF_DOCS = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the +# hierarchy of group names into alphabetical order. If set to NO (the default) +# the group names will appear in their defined order. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the +# alphabetical list. + +SORT_BY_SCOPE_NAME = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. This list is created by putting \todo +# commands in the documentation. + +GENERATE_TODOLIST = NO + +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test +# commands in the documentation. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. This list is created by putting \bug +# commands in the documentation. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting +# \deprecated commands in the documentation. 
+ +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if sectionname ... \endif. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or define consists of for it to appear in +# the documentation. If the initializer consists of more lines than specified +# here it will be hidden. Use a value of 0 to hide initializers completely. +# The appearance of the initializer of individual variables and defines in the +# documentation can be controlled using \showinitializer or \hideinitializer +# command in the documentation regardless of this setting. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated +# at the bottom of the documentation of classes and structs. If set to YES the +# list will mention the files that were used to generate the documentation. + +SHOW_USED_FILES = YES + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. +# This will remove the Files entry from the Quick Index and from the +# Folder Tree View (if specified). The default is YES. + +SHOW_FILES = NO + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) a command consisting of the value of the FILE_VERSION_FILTER tag +# followed by the name of an input file provided by doxygen. Whatever the program +# writes to standard output is used as the file version. See the manual for examples. + +FILE_VERSION_FILTER = + +#--------------------------------------------------------------------------- +# configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated +# by doxygen. Possible values are YES and NO. If left blank NO is used. + +QUIET = YES + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank +# NO is used. + +WARNINGS = YES + +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. If EXTRACT_ALL is set to YES then this flag will +# automatically be disabled. + +WARN_IF_UNDOCUMENTED = NO + +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that +# don't exist or using markup commands wrongly. + +WARN_IF_DOC_ERROR = YES + +# This WARN_NO_PARAMDOC option can be enabled to get warnings for +# functions that are documented, but have no documentation for their parameters +# or return value. If set to NO (the default) doxygen will only warn about +# wrong or incomplete parameter documentation, but not about the absence of +# documentation. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce. The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the +# warning originated and the warning text.
Optionally the format may contain +# $version, which will be replaced by the version of the file (if it could +# be obtained via FILE_VERSION_FILTER) + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written +# to stderr. + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. + +INPUT = @top_srcdir@/include/pqxx \ + @top_srcdir@/include/pqxx/doc \ + @top_srcdir@/src + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is +# also the default input encoding. Doxygen uses libiconv (or the iconv built +# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for +# the list of possible encodings. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx +# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 + +FILE_PATTERNS = *.cxx \ + *.hxx \ + *.h \ + *.md + +# The RECURSIVE tag can be used to turn specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. +# If left blank NO is used. + +RECURSIVE = NO + +# The EXCLUDE tag can be used to specify files and/or directories that should +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used select whether or not files or +# directories that are symbolic links (a Unix filesystem feature) are excluded +# from the input. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. Note that the wildcards are matched +# against the file with absolute path, so to exclude all test directories +# for example use the pattern */test/* + +EXCLUDE_PATTERNS = *.o \ + *.lo \ + *.a \ + .cvsignore + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). 
+ +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank all files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. +# Possible values are YES and NO. If left blank NO is used. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain images that are included in the documentation (see +# the \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) a command consisting of the value of the +# INPUT_FILTER tag followed by the name of an +# input file. Doxygen will then use the output that the filter program writes +# to standard output. If FILTER_PATTERNS is specified, this tag will be +# ignored. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. The filters are a list of the form: +# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further +# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER +# is applied to all files. + +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source +# files to browse (i.e. when SOURCE_BROWSER is set to YES). + +FILTER_SOURCE_FILES = NO + +#--------------------------------------------------------------------------- +# configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will +# be generated. Documented entities will be cross-referenced with these sources. +# Note: To get rid of all source code in the generated output, make sure also +# VERBATIM_HEADERS is set to NO. + +SOURCE_BROWSER = NO + +# Setting the INLINE_SOURCES tag to YES will include the body +# of functions and classes directly in the documentation. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct +# doxygen to hide any special comment blocks from generated source code +# fragments. Normal C and C++ comments will always remain visible. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES (the default) +# then for each documented function all documented +# functions referencing it will be listed. + +REFERENCED_BY_RELATION = YES + +# If the REFERENCES_RELATION tag is set to YES (the default) +# then for each documented function all documented entities +# called/used by that function will be listed. + +REFERENCES_RELATION = YES + +# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) +# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from +# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will +# link to the source code. Otherwise they will link to the documentation.
+ +REFERENCES_LINK_SOURCE = YES + +# If the USE_HTAGS tag is set to YES then the references to source code +# will point to the HTML generated by the htags(1) tool instead of doxygen +# built-in source browser. The htags tool is part of GNU's global source +# tagging system (see http://www.gnu.org/software/global/global.html). You +# will need version 4.8.6 or higher. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen +# will generate a verbatim copy of the header file for each class for +# which an include is specified. Set to NO to disable this. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index +# of all compounds will be generated. Enable this if the project +# contains a lot of classes, structs, unions or interfaces. + +ALPHABETICAL_INDEX = YES + +# In case all classes in a project start with a common prefix, all +# classes will be put under the same header in the alphabetical index. +# The IGNORE_PREFIX tag can be used to specify one or more prefixes that +# should be ignored while generating the index headers. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = . + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a +# standard header. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. If the tag is left blank doxygen +# will generate a default style sheet. Note that doxygen will try to copy +# the style sheet file to the HTML output directory, so don't put your own +# stylesheet in the HTML output directory as well, or it will be erased! + +HTML_STYLESHEET = + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO + +# If the GENERATE_DOCSET tag is set to YES, additional index files +# will be generated that can be used as input for Apple's Xcode 3 +# integrated development environment, introduced with OSX 10.5 (Leopard). 
+# To create a documentation set, doxygen will generate a Makefile in the +# HTML output directory. Running make will produce the docset in that +# directory and running "make install" will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find +# it at startup. + +GENERATE_DOCSET = NO + +# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the +# feed. A documentation feed provides an umbrella under which multiple +# documentation sets from a single provider (such as a company or product suite) +# can be grouped. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that +# should uniquely identify the documentation set bundle. This should be a +# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen +# will append .docset to the name. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. For this to work a browser that supports +# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox +# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). + +HTML_DYNAMIC_SECTIONS = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be +# written to the html output directory. + +CHM_FILE = + +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# the HTML help compiler on the generated index.hhp. + +HHC_LOCATION = + +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that +# it should be included in the master .chm file (NO). + +GENERATE_CHI = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a +# normal table of contents (NO) in the .chm file. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members +# to the contents of the HTML help documentation and to the tree view. + +TOC_EXPAND = NO + +# The DISABLE_INDEX tag can be used to turn on/off the condensed index at +# top of each HTML page. The value NO (the default) enables the index and +# the value YES disables it. + +DISABLE_INDEX = NO + +# This tag can be used to set the number of enum values (range [1..20]) +# that doxygen will group on one line in the generated HTML documentation. + +ENUM_VALUES_PER_LINE = 4 + +# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be +# generated containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, +# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are +# probably better off using the HTML help feature. + +GENERATE_TREEVIEW = YES + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree +# is shown. 
+ +TREEVIEW_WIDTH = 250 + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate Latex output. + +GENERATE_LATEX = NO + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, a4wide, letter, legal and +# executive. If left blank a4wide will be used. + +PAPER_TYPE = + +# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX +# packages that should be included in the LaTeX output. + +EXTRA_PACKAGES = + +# The LATEX_HEADER tag can be used to specify a personal LaTeX header for +# the generated latex document. The header should contain everything until +# the first chapter. If it is left blank doxygen will generate a +# standard header. Notice: only use this tag if you know what you are doing! + +LATEX_HEADER = + +# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated +# is prepared for conversion to pdf (using ps2pdf). The pdf file will +# contain links (just like the HTML output) instead of page references +# This makes the output suitable for online browsing using a pdf viewer. + +PDF_HYPERLINKS = YES + +# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of +# plain latex in the generated Makefile. Set this option to YES to get a +# higher quality PDF documentation. + +USE_PDFLATEX = YES + +# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. +# command to the generated LaTeX files. This will instruct LaTeX to keep +# running if errors occur, instead of asking the user for help. +# This option is also used when generating formulas in HTML. + +LATEX_BATCHMODE = YES + +# If LATEX_HIDE_INDICES is set to YES then doxygen will not +# include the index chapters (such as File Index, Compound Index, etc.) +# in the output. + +LATEX_HIDE_INDICES = NO + +#--------------------------------------------------------------------------- +# configuration options related to the RTF output +#--------------------------------------------------------------------------- + +# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output +# The RTF output is optimized for Word 97 and may not look very pretty with +# other RTF readers or editors. + +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `rtf' will be used as the default path. 
+ +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES Doxygen generates more compact +# RTF documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated +# will contain hyperlink fields. The RTF file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. +# Note: wordpad (write) and others do not support links. + +RTF_HYPERLINKS = NO + +# Load stylesheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. You only have to provide +# replacements, missing definitions are set to their default value. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an rtf document. +# Syntax is similar to doxygen's config file. + +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = NO + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `man' will be used as the default path. + +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. + +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of +# the code including all documentation. + +GENERATE_XML = NO + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `xml' will be used as the default path. + +XML_OUTPUT = xml + +# If the XML_PROGRAMLISTING tag is set to YES Doxygen will +# dump the program listings (including syntax highlighting +# and cross-referencing information) to the XML output. Note that +# enabling this will significantly increase the size of the XML output. + +XML_PROGRAMLISTING = YES + +#--------------------------------------------------------------------------- +# configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental +# and incomplete at the moment. 
+ +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the +# moment. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# to generate PDF and DVI output from the Perl module output. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. This is useful +# if you want to understand what is going on. On the other hand, if this +# tag is set to NO the size of the Perl module output will be much smaller +# and Perl will parse it just the same. + +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file +# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. +# This is useful so different doxyrules.make files included by the same +# Makefile don't overwrite each other's variables. + +PERLMOD_MAKEVAR_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will +# evaluate all C-preprocessor directives found in the sources and include +# files. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro +# names in the source code. If set to NO (the default) only conditional +# compilation will be performed. Macro expansion can be done in a controlled +# way by setting EXPAND_ONLY_PREDEF to YES. + +MACRO_EXPANSION = YES + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES +# then the macro expansion is limited to the macros specified with the +# PREDEFINED and EXPAND_AS_DEFINED tags. + +EXPAND_ONLY_PREDEF = YES + +# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files +# in the INCLUDE_PATH (see below) will be search if a #include is found. + +SEARCH_INCLUDES = YES + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by +# the preprocessor. + +INCLUDE_PATH = "../include" "@top_srcdir@/include" + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will +# be used. + +INCLUDE_FILE_PATTERNS = *.h *.hxx + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are +# omitted =1 is assumed. To prevent a macro definition from being +# undefined via #undef or recursively expanded use the := operator +# instead of the = operator. 
+ +PREDEFINED = PQXX_ABI="@PQXX_ABI@" \ + PQXX_LIBEXPORT= \ + PQXX_NOVTABLE= \ + PQXX_PRIVATE= \ + PQXX_TYPENAME=typename \ + PQXX_VERSION="@PQXXVERSION@" \ + PQXX_MAJOR=@PQXX_MAJOR@ \ + PQXX_MINOR=@PQXX_MINOR@ + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. +# The macro definition that is found in the sources will be used. +# Use the PREDEFINED tag if you want to use a different macro definition. + +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then +# doxygen's preprocessor will remove all function-like macros that are alone +# on a line, have an all uppercase name, and do not end with a semicolon. Such +# function macros are typically used for boiler-plate code, and will confuse +# the parser if not removed. + +SKIP_FUNCTION_MACROS = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to external references +#--------------------------------------------------------------------------- + +# The TAGFILES option can be used to specify one or more tagfiles. +# Optionally an initial location of the external documentation +# can be added for each tagfile. The format of a tag file without +# this location is as follows: +# TAGFILES = file1 file2 ... +# Adding location for the tag files is done as follows: +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where "loc1" and "loc2" can be relative or absolute paths or +# URLs. If a location is present for each tag, the installdox tool +# does not have to be run to correct the links. +# Note that each tag file must have a unique name +# (where the name does NOT include the path) +# If a tag file is not located in the directory in which doxygen +# is run, you must also specify the path to the tagfile here. + +TAGFILES = + +# When a file name is specified after GENERATE_TAGFILE, doxygen will create +# a tag file that is based on the input files it reads. + +GENERATE_TAGFILE = + +# If the ALLEXTERNALS tag is set to YES all external classes will be listed +# in the class index. If set to NO only the inherited external classes +# will be listed. + +ALLEXTERNALS = NO + +# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will +# be listed. + +EXTERNAL_GROUPS = YES + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will +# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base +# or super classes. Setting the tag to NO turns the diagrams off. Note that +# this option is superseded by the HAVE_DOT option below. This is only a +# fallback. It is recommended to install and use dot, since it yields more +# powerful graphs. + +CLASS_DIAGRAMS = YES + +# If set to YES, the inheritance and collaboration graphs will hide +# inheritance and usage relations if the target is undocumented +# or is not a class. + +HIDE_UNDOC_RELATIONS = YES + +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path. This tool is part of Graphviz, a graph visualization +# toolkit from AT&T and Lucent Bell Labs. 
The other options in this section +# have no effect if this option is set to NO (the default) + +HAVE_DOT = @HAVE_DOT@ + +# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect inheritance relations. Setting this tag to YES will force the +# the CLASS_DIAGRAMS tag to NO. + +CLASS_GRAPH = YES + +# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect implementation dependencies (inheritance, containment, and +# class references variables) of the class with other documented classes. + +COLLABORATION_GRAPH = NO + +# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for groups, showing the direct groups dependencies + +GROUP_GRAPHS = YES + +# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# collaboration diagrams in a style similar to the OMG's Unified Modeling +# Language. + +UML_LOOK = NO + +# If set to YES, the inheritance and collaboration graphs will show the +# relations between templates and their instances. + +TEMPLATE_RELATIONS = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT +# tags are set to YES then doxygen will generate a graph for each documented +# file showing the direct and indirect include dependencies of the file with +# other documented files. + +INCLUDE_GRAPH = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and +# HAVE_DOT tags are set to YES then doxygen will generate a graph for each +# documented header file showing the documented files that directly or +# indirectly include this file. + +INCLUDED_BY_GRAPH = YES + +# If the CALL_GRAPH and HAVE_DOT options are set to YES then +# doxygen will generate a call dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable call graphs +# for selected functions only using the \callgraph command. + +CALL_GRAPH = NO + +# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then +# doxygen will generate a caller dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable caller +# graphs for selected functions only using the \callergraph command. + +CALLER_GRAPH = NO + +# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen +# will graphical hierarchy of all classes instead of a textual one. + +GRAPHICAL_HIERARCHY = YES + +# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES +# then doxygen will show the dependencies a directory has on other directories +# in a graphical way. The dependency relations are determined by the #include +# relations between the files in the directories. + +DIRECTORY_GRAPH = YES + +# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images +# generated by dot. Possible values are png, jpg, or gif +# If left blank png will be used. + +DOT_IMAGE_FORMAT = png + +# The tag DOT_PATH can be used to specify the path where the dot tool can be +# found. If left blank, it is assumed the dot tool can be found in the path. 
+ +DOT_PATH = + +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the +# \dotfile command). + +DOTFILE_DIRS = + +# The MAX_DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of +# nodes that will be shown in the graph. If the number of nodes in a graph +# becomes larger than this value, doxygen will truncate the graph, which is +# visualized by representing a node as a red box. Note that doxygen if the +# number of direct children of the root node in a graph is already larger than +# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note +# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. + +DOT_GRAPH_MAX_NODES = 50 + +# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the +# graphs generated by dot. A depth value of 3 means that only nodes reachable +# from the root by following a path via at most 3 edges will be shown. Nodes +# that lay further from the root node will be omitted. Note that setting this +# option to 1 or 2 may greatly reduce the computation time needed for large +# code bases. Also note that the size of a graph can be further restricted by +# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. + +MAX_DOT_GRAPH_DEPTH = 0 + +# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent +# background. This is enabled by default, which results in a transparent +# background. Warning: Depending on the platform used, enabling this option +# may lead to badly anti-aliased labels on the edges of a graph (i.e. they +# become hard to read). + +DOT_TRANSPARENT = NO + +# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output +# files in one run (i.e. multiple -o and -T options on the command line). This +# makes dot run faster, but since only newer versions of dot (>1.8.10) +# support this, this feature is disabled by default. + +DOT_MULTI_TARGETS = NO + +# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will +# generate a legend page explaining the meaning of the various boxes and +# arrows in the dot generated graphs. + +GENERATE_LEGEND = YES + +# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will +# remove the intermediate dot files that are used to generate +# the various graphs. + +DOT_CLEANUP = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to the search engine +#--------------------------------------------------------------------------- + +# The SEARCHENGINE tag specifies whether or not a search engine should be +# used. If set to NO the values of all tags below this one will be ignored. 
+ +SEARCHENGINE = NO diff --git a/ext/libpqxx-7.7.3/doc/Makefile.am b/ext/libpqxx-7.7.3/doc/Makefile.am new file mode 100644 index 000000000..38caa9181 --- /dev/null +++ b/ext/libpqxx-7.7.3/doc/Makefile.am @@ -0,0 +1,51 @@ +MAINTAINERCLEANFILES = Makefile.in + +maintainer-clean-local: + $(RM) -rf html + $(RM) reference-stamp + $(MKDIR) html + +EXTRA_DIST= Doxyfile.in libpqxx.xml reference-stamp + +all-local: docs + +if BUILD_REFERENCE +DOCS = reference +else +DOCS = +endif + +if MAINTAINER_MODE +REFERENCE_STAMP_DEP = ../src/libpqxx.la +else +REFERENCE_STAMP_DEP = +endif + +docs: $(DOCS) + +reference: reference-stamp +reference-stamp: Doxyfile.in $(REFERENCE_STAMP_DEP) + if [ -x "$(DOXYGEN)" ]; then \ + $(MKDIR_P) html; \ + $(DOXYGEN) Doxyfile; \ + touch $@; \ + else \ + echo >&2; \ + echo >&2 "*****************************************************"; \ + echo >&2; \ + echo >&2 "Doxygen not found."; \ + echo >&2 "Install it, or configure with --disable-documentation"; \ + echo >&2; \ + echo >&2 "*****************************************************"; \ + exit 1; \ + fi + +../src/libpqxx.la: + cd ../src; \ + $(MAKE) libpqxx.la + + +dist-hook: reference + if [ -d $(srcdir)/html ]; then \ + cp -pR html $(distdir)/ ; \ + fi diff --git a/ext/libpqxx-7.7.3/doc/Makefile.in b/ext/libpqxx-7.7.3/doc/Makefile.in new file mode 100644 index 000000000..835845bf7 --- /dev/null +++ b/ext/libpqxx-7.7.3/doc/Makefile.in @@ -0,0 +1,507 @@ +# Makefile.in generated by automake 1.16.4 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2021 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = doc +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/config/m4/libtool.m4 \ + $(top_srcdir)/config/m4/ltoptions.m4 \ + $(top_srcdir)/config/m4/ltsugar.m4 \ + $(top_srcdir)/config/m4/ltversion.m4 \ + $(top_srcdir)/config/m4/lt~obsolete.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) +mkinstalldirs = $(SHELL) $(top_srcdir)/config/mkinstalldirs +CONFIG_HEADER = $(top_builddir)/include/pqxx/config.h +CONFIG_CLEAN_FILES = Doxyfile +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +am__DIST_COMMON = $(srcdir)/Doxyfile.in $(srcdir)/Makefile.in \ + $(top_srcdir)/config/mkinstalldirs +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CSCOPE = @CSCOPE@ +CTAGS = @CTAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ 
+DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DOXYGEN = @DOXYGEN@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ETAGS = @ETAGS@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GREP = @GREP@ +HAVE_DOT = @HAVE_DOT@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MKDIR = @MKDIR@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PG_CONFIG = @PG_CONFIG@ +PKG_CONFIG = @PKG_CONFIG@ +POSTGRES_INCLUDE = @POSTGRES_INCLUDE@ +PQXXVERSION = @PQXXVERSION@ +PQXX_ABI = @PQXX_ABI@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +with_postgres_lib = @with_postgres_lib@ +MAINTAINERCLEANFILES = Makefile.in +EXTRA_DIST = Doxyfile.in libpqxx.xml reference-stamp +@BUILD_REFERENCE_FALSE@DOCS = +@BUILD_REFERENCE_TRUE@DOCS = reference +@MAINTAINER_MODE_FALSE@REFERENCE_STAMP_DEP = +@MAINTAINER_MODE_TRUE@REFERENCE_STAMP_DEP = ../src/libpqxx.la +all: all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu doc/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + 
$(AUTOMAKE) --gnu doc/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): +Doxyfile: $(top_builddir)/config.status $(srcdir)/Doxyfile.in + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +tags TAGS: + +ctags CTAGS: + +cscope cscopelist: + +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am + +distdir-am: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$(top_distdir)" distdir="$(distdir)" \ + dist-hook +check-am: all-am +check: check-am +all-am: Makefile all-local +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic \ + maintainer-clean-local + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: all all-am all-local check check-am clean clean-generic \ + clean-libtool cscopelist-am ctags-am dist-hook distclean \ + distclean-generic distclean-libtool distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic maintainer-clean-local mostlyclean \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags-am uninstall uninstall-am + +.PRECIOUS: Makefile + + +maintainer-clean-local: + $(RM) -rf html + $(RM) reference-stamp + $(MKDIR) html + +all-local: docs + +docs: $(DOCS) + +reference: reference-stamp +reference-stamp: Doxyfile.in $(REFERENCE_STAMP_DEP) + if [ -x "$(DOXYGEN)" ]; then \ + $(MKDIR_P) html; \ + $(DOXYGEN) Doxyfile; \ + touch $@; \ + else \ + echo >&2; \ + echo >&2 "*****************************************************"; \ + echo >&2; \ + echo >&2 "Doxygen not found."; \ + echo >&2 "Install it, or configure with --disable-documentation"; \ + echo >&2; \ + echo >&2 "*****************************************************"; \ + exit 1; \ + fi + +../src/libpqxx.la: + cd ../src; \ + $(MAKE) libpqxx.la + +dist-hook: reference + if [ -d $(srcdir)/html ]; then \ + cp -pR html $(distdir)/ ; \ + fi + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/ext/libpqxx-7.7.3/doc/conf.py b/ext/libpqxx-7.7.3/doc/conf.py new file mode 100644 index 000000000..1cac1c2ca --- /dev/null +++ b/ext/libpqxx-7.7.3/doc/conf.py @@ -0,0 +1,199 @@ +# -*- coding: utf-8 -*- +""" +libpqxx documentation build configuration file, created by +sphinx-quickstart on Sun Dec 3 00:43:33 2017. + +This file is execfile()d with the current directory set to its containing dir. + +All configuration values have a default; values that are commented out serve +to show the default. +""" + +import codecs +import os +from subprocess import check_call + + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. 
+# +# import sys +# sys.path.insert(0, os.path.abspath(os.path.curdir)) + + +read_the_docs_build = os.environ.get('READTHEDOCS') == 'True' + +if read_the_docs_build: + check_call( + [os.path.join(os.path.curdir, 'configure'), 'CXXFLAGS=-std=c++17 -O0'], + cwd=os.path.pardir) + check_call('doxygen', cwd=os.path.join(os.path.pardir, 'doc')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.doctest', + ] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'libpqxx' +copyright = u'2000-2022, Jeroen T. Vermeulen' +author = u'Jeroen T. Vermeulen' + + +def read_version(): + """Return version number as specified in the VERSION file.""" + version_file = os.path.join( + os.path.dirname(__file__), os.path.pardir, 'VERSION') + with codecs.open(version_file, encoding='ascii') as stream: + return stream.read().strip() + + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The full version, including alpha/beta/rc tags. +release = read_version() + +# The short X.Y version. +version = '.'.join(release.split('.')[:2]) + +html_title = "libpqxx %s" % release +html_short_title = "libpqxx" + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'alabaster' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. 
+# +# This is required for the alabaster theme +# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars +html_sidebars = { + '**': [ + 'relations.html', # needs 'show_related': True theme option to display + 'searchbox.html', + ] +} + +# Looks like the setup is that our build generates the HTML itself, and then +# has readthedocs copy the full generated HTML tree to the output directory. +# +# Problem is, that doesn't seem to be working now. This needs debugging. +html_extra_path = ["html"] + +# -- Options for HTMLHelp output ------------------------------------------ + +# Output file base name for HTML help builder. +htmlhelp_basename = 'libpqxxdoc' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + master_doc, + 'libpqxx.tex', + u'libpqxx Documentation', + u'Jeroen T. Vermeulen', + 'manual', + ), +] + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'libpqxx', u'libpqxx Documentation', [author], 1) +] + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'libpqxx', u'libpqxx Documentation', + author, 'libpqxx', "C++ client API for PostgreSQL.", + 'Miscellaneous'), +] diff --git a/ext/libpqxx-7.7.3/doc/index.rst b/ext/libpqxx-7.7.3/doc/index.rst new file mode 100644 index 000000000..30f2cfdc1 --- /dev/null +++ b/ext/libpqxx-7.7.3/doc/index.rst @@ -0,0 +1,20 @@ +.. x documentation master file, created by + sphinx-quickstart on Sun Dec 3 01:30:12 2017. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +libpqxx +======= + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/ext/libpqxx-7.7.3/include/CMakeLists.txt b/ext/libpqxx-7.7.3/include/CMakeLists.txt new file mode 100644 index 000000000..263dd2e9e --- /dev/null +++ b/ext/libpqxx-7.7.3/include/CMakeLists.txt @@ -0,0 +1,68 @@ +# ############################################################################## +# AUTOMATICALLY GENERATED FILE -- DO NOT EDIT. +# +# This file is generated automatically by libpqxx's template2mak.py script, and +# will be rewritten from time to time. +# +# If you modify this file, chances are your modifications will be lost. +# +# The template2mak.py script should be available in the tools directory of the +# libpqxx source archive. +# +# Generated from template './include/CMakeLists.txt.template'. 
+# ############################################################################## +install( + DIRECTORY pqxx "${PROJECT_BINARY_DIR}/include/pqxx" + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} + FILES_MATCHING + # For each X.hxx, install both X.hxx itself and plain X. + PATTERN *.hxx + # TODO: Is there any way to do this with CMake's globbing? + PATTERN array + PATTERN binarystring + PATTERN blob + PATTERN composite + PATTERN connection + PATTERN cursor + PATTERN dbtransaction + PATTERN errorhandler + PATTERN except + PATTERN field + PATTERN isolation + PATTERN largeobject + PATTERN nontransaction + PATTERN notification + PATTERN params + PATTERN pipeline + PATTERN prepared_statement + PATTERN range + PATTERN result + PATTERN robusttransaction + PATTERN row + PATTERN separated_list + PATTERN strconv + PATTERN stream_from + PATTERN stream_to + PATTERN subtransaction + PATTERN time + PATTERN transaction + PATTERN transaction_base + PATTERN transaction_focus + PATTERN transactor + PATTERN types + PATTERN util + PATTERN version + PATTERN zview + PATTERN internal/*.hxx + PATTERN internal/gates/*.hxx + PATTERN config-public-compiler.h + PATTERN pqxx + PATTERN doc EXCLUDE +) + +install( + DIRECTORY pqxx/doc/ + DESTINATION ${CMAKE_INSTALL_DOCDIR} + FILES_MATCHING + PATTERN *.md +) diff --git a/ext/libpqxx-7.7.3/include/CMakeLists.txt.template b/ext/libpqxx-7.7.3/include/CMakeLists.txt.template new file mode 100644 index 000000000..5ebc6664e --- /dev/null +++ b/ext/libpqxx-7.7.3/include/CMakeLists.txt.template @@ -0,0 +1,23 @@ +install( + DIRECTORY pqxx "${PROJECT_BINARY_DIR}/include/pqxx" + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} + FILES_MATCHING + # For each X.hxx, install both X.hxx itself and plain X. + PATTERN *.hxx + # TODO: Is there any way to do this with CMake's globbing? 
+###MAKTEMPLATE:FOREACH include/pqxx/*.hxx + PATTERN ###BASENAME### +###MAKTEMPLATE:ENDFOREACH + PATTERN internal/*.hxx + PATTERN internal/gates/*.hxx + PATTERN config-public-compiler.h + PATTERN pqxx + PATTERN doc EXCLUDE +) + +install( + DIRECTORY pqxx/doc/ + DESTINATION ${CMAKE_INSTALL_DOCDIR} + FILES_MATCHING + PATTERN *.md +) diff --git a/ext/libpqxx-7.7.3/include/Makefile.am b/ext/libpqxx-7.7.3/include/Makefile.am new file mode 100644 index 000000000..83d2b1970 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/Makefile.am @@ -0,0 +1,79 @@ +SUBDIRS = pqxx + +nobase_include_HEADERS= pqxx/pqxx \ + pqxx/array pqxx/array.hxx \ + pqxx/binarystring pqxx/binarystring.hxx \ + pqxx/blob pqxx/blob.hxx \ + pqxx/composite pqxx/composite.hxx \ + pqxx/connection pqxx/connection.hxx \ + pqxx/cursor pqxx/cursor.hxx \ + pqxx/dbtransaction pqxx/dbtransaction.hxx \ + pqxx/errorhandler pqxx/errorhandler.hxx \ + pqxx/except pqxx/except.hxx \ + pqxx/field pqxx/field.hxx \ + pqxx/isolation pqxx/isolation.hxx \ + pqxx/largeobject pqxx/largeobject.hxx \ + pqxx/nontransaction pqxx/nontransaction.hxx \ + pqxx/notification pqxx/notification.hxx \ + pqxx/params pqxx/params.hxx \ + pqxx/pipeline pqxx/pipeline.hxx \ + pqxx/prepared_statement pqxx/prepared_statement.hxx \ + pqxx/range pqxx/range.hxx \ + pqxx/result pqxx/result.hxx \ + pqxx/robusttransaction pqxx/robusttransaction.hxx \ + pqxx/separated_list pqxx/separated_list.hxx \ + pqxx/strconv pqxx/strconv.hxx \ + pqxx/stream_from pqxx/stream_from.hxx \ + pqxx/stream_to pqxx/stream_to.hxx \ + pqxx/subtransaction pqxx/subtransaction.hxx \ + pqxx/time pqxx/time.hxx \ + pqxx/transaction pqxx/transaction.hxx \ + pqxx/transaction_base pqxx/transaction_base.hxx \ + pqxx/transaction_focus pqxx/transaction_focus.hxx \ + pqxx/transactor pqxx/transactor.hxx \ + pqxx/row pqxx/row.hxx \ + pqxx/util pqxx/util.hxx \ + pqxx/types pqxx/types.hxx \ + pqxx/zview pqxx/zview.hxx \ + pqxx/version pqxx/version.hxx \ + pqxx/internal/array-composite.hxx \ + pqxx/internal/callgate.hxx \ + pqxx/internal/concat.hxx \ + pqxx/internal/conversions.hxx \ + pqxx/internal/encoding_group.hxx \ + pqxx/internal/encodings.hxx \ + pqxx/internal/header-pre.hxx \ + pqxx/internal/header-post.hxx \ + pqxx/internal/ignore-deprecated-post.hxx \ + pqxx/internal/ignore-deprecated-pre.hxx \ + pqxx/internal/libpq-forward.hxx \ + pqxx/internal/result_iter.hxx \ + pqxx/internal/result_iterator.hxx \ + pqxx/internal/sql_cursor.hxx \ + pqxx/internal/statement_parameters.hxx \ + pqxx/internal/stream_iterator.hxx \ + pqxx/internal/wait.hxx \ + pqxx/internal/gates/connection-errorhandler.hxx \ + pqxx/internal/gates/connection-largeobject.hxx \ + pqxx/internal/gates/connection-notification_receiver.hxx \ + pqxx/internal/gates/connection-pipeline.hxx \ + pqxx/internal/gates/connection-sql_cursor.hxx \ + pqxx/internal/gates/connection-transaction.hxx \ + pqxx/internal/gates/errorhandler-connection.hxx \ + pqxx/internal/gates/icursorstream-icursor_iterator.hxx \ + pqxx/internal/gates/icursor_iterator-icursorstream.hxx \ + pqxx/internal/gates/result-connection.hxx \ + pqxx/internal/gates/result-creation.hxx \ + pqxx/internal/gates/result-pipeline.hxx \ + pqxx/internal/gates/result-sql_cursor.hxx \ + pqxx/internal/gates/transaction-sql_cursor.hxx \ + pqxx/internal/gates/transaction-transaction_focus.hxx + + +nobase_nodist_include_HEADERS = \ + pqxx/config-public-compiler.h + +EXTRA_DIST = \ + pqxx/doc/*.md \ + pqxx/doc/mainpage.md.template \ + pqxx/version.hxx.template diff --git a/ext/libpqxx-7.7.3/include/Makefile.in 
b/ext/libpqxx-7.7.3/include/Makefile.in new file mode 100644 index 000000000..6b9eac914 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/Makefile.in @@ -0,0 +1,802 @@ +# Makefile.in generated by automake 1.16.4 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2021 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = include +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/config/m4/libtool.m4 \ + $(top_srcdir)/config/m4/ltoptions.m4 \ + $(top_srcdir)/config/m4/ltsugar.m4 \ + $(top_srcdir)/config/m4/ltversion.m4 \ + $(top_srcdir)/config/m4/lt~obsolete.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(nobase_include_HEADERS) \ + $(am__DIST_COMMON) +mkinstalldirs = $(SHELL) $(top_srcdir)/config/mkinstalldirs +CONFIG_HEADER = $(top_builddir)/include/pqxx/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = 
$(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \ + ctags-recursive dvi-recursive html-recursive info-recursive \ + install-data-recursive install-dvi-recursive \ + install-exec-recursive install-html-recursive \ + install-info-recursive install-pdf-recursive \ + install-ps-recursive install-recursive installcheck-recursive \ + installdirs-recursive pdf-recursive ps-recursive \ + tags-recursive uninstall-recursive +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(includedir)" "$(DESTDIR)$(includedir)" +HEADERS = $(nobase_include_HEADERS) $(nobase_nodist_include_HEADERS) +RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \ + distclean-recursive maintainer-clean-recursive +am__recursive_targets = \ + $(RECURSIVE_TARGETS) \ + $(RECURSIVE_CLEAN_TARGETS) \ + $(am__extra_recursive_targets) +AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \ + distdir distdir-am +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. 
+am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +DIST_SUBDIRS = $(SUBDIRS) +am__DIST_COMMON = $(srcdir)/Makefile.in \ + $(top_srcdir)/config/mkinstalldirs +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +am__relativize = \ + dir0=`pwd`; \ + sed_first='s,^\([^/]*\)/.*$$,\1,'; \ + sed_rest='s,^[^/]*/*,,'; \ + sed_last='s,^.*/\([^/]*\)$$,\1,'; \ + sed_butlast='s,/*[^/]*$$,,'; \ + while test -n "$$dir1"; do \ + first=`echo "$$dir1" | sed -e "$$sed_first"`; \ + if test "$$first" != "."; then \ + if test "$$first" = ".."; then \ + dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \ + dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \ + else \ + first2=`echo "$$dir2" | sed -e "$$sed_first"`; \ + if test "$$first2" = "$$first"; then \ + dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \ + else \ + dir2="../$$dir2"; \ + fi; \ + dir0="$$dir0"/"$$first"; \ + fi; \ + fi; \ + dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \ + done; \ + reldir="$$dir2" +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CSCOPE = @CSCOPE@ +CTAGS = @CTAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DOXYGEN = @DOXYGEN@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ETAGS = @ETAGS@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GREP = @GREP@ +HAVE_DOT = @HAVE_DOT@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MKDIR = @MKDIR@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PG_CONFIG = @PG_CONFIG@ +PKG_CONFIG = @PKG_CONFIG@ +POSTGRES_INCLUDE = @POSTGRES_INCLUDE@ +PQXXVERSION = @PQXXVERSION@ +PQXX_ABI = @PQXX_ABI@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ 
+exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +with_postgres_lib = @with_postgres_lib@ +SUBDIRS = pqxx +nobase_include_HEADERS = pqxx/pqxx \ + pqxx/array pqxx/array.hxx \ + pqxx/binarystring pqxx/binarystring.hxx \ + pqxx/blob pqxx/blob.hxx \ + pqxx/composite pqxx/composite.hxx \ + pqxx/connection pqxx/connection.hxx \ + pqxx/cursor pqxx/cursor.hxx \ + pqxx/dbtransaction pqxx/dbtransaction.hxx \ + pqxx/errorhandler pqxx/errorhandler.hxx \ + pqxx/except pqxx/except.hxx \ + pqxx/field pqxx/field.hxx \ + pqxx/isolation pqxx/isolation.hxx \ + pqxx/largeobject pqxx/largeobject.hxx \ + pqxx/nontransaction pqxx/nontransaction.hxx \ + pqxx/notification pqxx/notification.hxx \ + pqxx/params pqxx/params.hxx \ + pqxx/pipeline pqxx/pipeline.hxx \ + pqxx/prepared_statement pqxx/prepared_statement.hxx \ + pqxx/range pqxx/range.hxx \ + pqxx/result pqxx/result.hxx \ + pqxx/robusttransaction pqxx/robusttransaction.hxx \ + pqxx/separated_list pqxx/separated_list.hxx \ + pqxx/strconv pqxx/strconv.hxx \ + pqxx/stream_from pqxx/stream_from.hxx \ + pqxx/stream_to pqxx/stream_to.hxx \ + pqxx/subtransaction pqxx/subtransaction.hxx \ + pqxx/time pqxx/time.hxx \ + pqxx/transaction pqxx/transaction.hxx \ + pqxx/transaction_base pqxx/transaction_base.hxx \ + pqxx/transaction_focus pqxx/transaction_focus.hxx \ + pqxx/transactor pqxx/transactor.hxx \ + pqxx/row pqxx/row.hxx \ + pqxx/util pqxx/util.hxx \ + pqxx/types pqxx/types.hxx \ + pqxx/zview pqxx/zview.hxx \ + pqxx/version pqxx/version.hxx \ + pqxx/internal/array-composite.hxx \ + pqxx/internal/callgate.hxx \ + pqxx/internal/concat.hxx \ + pqxx/internal/conversions.hxx \ + pqxx/internal/encoding_group.hxx \ + pqxx/internal/encodings.hxx \ + pqxx/internal/header-pre.hxx \ + pqxx/internal/header-post.hxx \ + pqxx/internal/ignore-deprecated-post.hxx \ + pqxx/internal/ignore-deprecated-pre.hxx \ + pqxx/internal/libpq-forward.hxx \ + pqxx/internal/result_iter.hxx \ + pqxx/internal/result_iterator.hxx \ + pqxx/internal/sql_cursor.hxx \ + pqxx/internal/statement_parameters.hxx \ + pqxx/internal/stream_iterator.hxx \ + pqxx/internal/wait.hxx \ + pqxx/internal/gates/connection-errorhandler.hxx \ + pqxx/internal/gates/connection-largeobject.hxx \ + pqxx/internal/gates/connection-notification_receiver.hxx \ + pqxx/internal/gates/connection-pipeline.hxx \ + pqxx/internal/gates/connection-sql_cursor.hxx \ + pqxx/internal/gates/connection-transaction.hxx \ + pqxx/internal/gates/errorhandler-connection.hxx \ + pqxx/internal/gates/icursorstream-icursor_iterator.hxx \ + pqxx/internal/gates/icursor_iterator-icursorstream.hxx \ + pqxx/internal/gates/result-connection.hxx \ + pqxx/internal/gates/result-creation.hxx \ + pqxx/internal/gates/result-pipeline.hxx \ + pqxx/internal/gates/result-sql_cursor.hxx \ + pqxx/internal/gates/transaction-sql_cursor.hxx \ + 
pqxx/internal/gates/transaction-transaction_focus.hxx + +nobase_nodist_include_HEADERS = \ + pqxx/config-public-compiler.h + +EXTRA_DIST = \ + pqxx/doc/*.md \ + pqxx/doc/mainpage.md.template \ + pqxx/version.hxx.template + +all: all-recursive + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu include/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu include/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs +install-nobase_includeHEADERS: $(nobase_include_HEADERS) + @$(NORMAL_INSTALL) + @list='$(nobase_include_HEADERS)'; test -n "$(includedir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(includedir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(includedir)" || exit 1; \ + fi; \ + $(am__nobase_list) | while read dir files; do \ + xfiles=; for file in $$files; do \ + if test -f "$$file"; then xfiles="$$xfiles $$file"; \ + else xfiles="$$xfiles $(srcdir)/$$file"; fi; done; \ + test -z "$$xfiles" || { \ + test "x$$dir" = x. || { \ + echo " $(MKDIR_P) '$(DESTDIR)$(includedir)/$$dir'"; \ + $(MKDIR_P) "$(DESTDIR)$(includedir)/$$dir"; }; \ + echo " $(INSTALL_HEADER) $$xfiles '$(DESTDIR)$(includedir)/$$dir'"; \ + $(INSTALL_HEADER) $$xfiles "$(DESTDIR)$(includedir)/$$dir" || exit $$?; }; \ + done + +uninstall-nobase_includeHEADERS: + @$(NORMAL_UNINSTALL) + @list='$(nobase_include_HEADERS)'; test -n "$(includedir)" || list=; \ + $(am__nobase_strip_setup); files=`$(am__nobase_strip)`; \ + dir='$(DESTDIR)$(includedir)'; $(am__uninstall_files_from_dir) +install-nobase_nodist_includeHEADERS: $(nobase_nodist_include_HEADERS) + @$(NORMAL_INSTALL) + @list='$(nobase_nodist_include_HEADERS)'; test -n "$(includedir)" || list=; \ + if test -n "$$list"; then \ + echo " $(MKDIR_P) '$(DESTDIR)$(includedir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(includedir)" || exit 1; \ + fi; \ + $(am__nobase_list) | while read dir files; do \ + xfiles=; for file in $$files; do \ + if test -f "$$file"; then xfiles="$$xfiles $$file"; \ + else xfiles="$$xfiles $(srcdir)/$$file"; fi; done; \ + test -z "$$xfiles" || { \ + test "x$$dir" = x. 
|| { \ + echo " $(MKDIR_P) '$(DESTDIR)$(includedir)/$$dir'"; \ + $(MKDIR_P) "$(DESTDIR)$(includedir)/$$dir"; }; \ + echo " $(INSTALL_HEADER) $$xfiles '$(DESTDIR)$(includedir)/$$dir'"; \ + $(INSTALL_HEADER) $$xfiles "$(DESTDIR)$(includedir)/$$dir" || exit $$?; }; \ + done + +uninstall-nobase_nodist_includeHEADERS: + @$(NORMAL_UNINSTALL) + @list='$(nobase_nodist_include_HEADERS)'; test -n "$(includedir)" || list=; \ + $(am__nobase_strip_setup); files=`$(am__nobase_strip)`; \ + dir='$(DESTDIR)$(includedir)'; $(am__uninstall_files_from_dir) + +# This directory's subdirectories are mostly independent; you can cd +# into them and run 'make' without going through this Makefile. +# To change the values of 'make' variables: instead of editing Makefiles, +# (1) if the variable is set in 'config.status', edit 'config.status' +# (which will cause the Makefiles to be regenerated when you run 'make'); +# (2) otherwise, pass the desired values on the 'make' command line. +$(am__recursive_targets): + @fail=; \ + if $(am__make_keepgoing); then \ + failcom='fail=yes'; \ + else \ + failcom='exit 1'; \ + fi; \ + dot_seen=no; \ + target=`echo $@ | sed s/-recursive//`; \ + case "$@" in \ + distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \ + *) list='$(SUBDIRS)' ;; \ + esac; \ + for subdir in $$list; do \ + echo "Making $$target in $$subdir"; \ + if test "$$subdir" = "."; then \ + dot_seen=yes; \ + local_target="$$target-am"; \ + else \ + local_target="$$target"; \ + fi; \ + ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \ + || eval $$failcom; \ + done; \ + if test "$$dot_seen" = "no"; then \ + $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \ + fi; test -z "$$fail" + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-recursive +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \ + include_option=--etags-include; \ + empty_fix=.; \ + else \ + include_option=--include; \ + empty_fix=; \ + fi; \ + list='$(SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + test ! 
-f $$subdir/TAGS || \ + set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \ + fi; \ + done; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-recursive + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-recursive + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am + +distdir-am: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + @list='$(DIST_SUBDIRS)'; for subdir in $$list; do \ + if test "$$subdir" = .; then :; else \ + $(am__make_dryrun) \ + || test -d "$(distdir)/$$subdir" \ + || $(MKDIR_P) "$(distdir)/$$subdir" \ + || exit 1; \ + dir1=$$subdir; dir2="$(distdir)/$$subdir"; \ + $(am__relativize); \ + new_distdir=$$reldir; \ + dir1=$$subdir; dir2="$(top_distdir)"; \ + $(am__relativize); \ + new_top_distdir=$$reldir; \ + echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \ + echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \ + ($(am__cd) $$subdir && \ + $(MAKE) $(AM_MAKEFLAGS) \ + top_distdir="$$new_top_distdir" \ + distdir="$$new_distdir" \ + am__remove_distdir=: \ + am__skip_length_check=: \ + am__skip_mode_fix=: \ + distdir) \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-recursive +all-am: Makefile $(HEADERS) +installdirs: installdirs-recursive +installdirs-am: + for dir in "$(DESTDIR)$(includedir)" "$(DESTDIR)$(includedir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-recursive +install-exec: install-exec-recursive +install-data: install-data-recursive +uninstall: uninstall-recursive + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-recursive +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+clean: clean-recursive + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-recursive + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-tags + +dvi: dvi-recursive + +dvi-am: + +html: html-recursive + +html-am: + +info: info-recursive + +info-am: + +install-data-am: install-nobase_includeHEADERS \ + install-nobase_nodist_includeHEADERS + +install-dvi: install-dvi-recursive + +install-dvi-am: + +install-exec-am: + +install-html: install-html-recursive + +install-html-am: + +install-info: install-info-recursive + +install-info-am: + +install-man: + +install-pdf: install-pdf-recursive + +install-pdf-am: + +install-ps: install-ps-recursive + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-recursive + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-recursive + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-recursive + +pdf-am: + +ps: ps-recursive + +ps-am: + +uninstall-am: uninstall-nobase_includeHEADERS \ + uninstall-nobase_nodist_includeHEADERS + +.MAKE: $(am__recursive_targets) install-am install-strip + +.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \ + check-am clean clean-generic clean-libtool cscopelist-am ctags \ + ctags-am distclean distclean-generic distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-nobase_includeHEADERS \ + install-nobase_nodist_includeHEADERS install-pdf \ + install-pdf-am install-ps install-ps-am install-strip \ + installcheck installcheck-am installdirs installdirs-am \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags tags-am uninstall uninstall-am \ + uninstall-nobase_includeHEADERS \ + uninstall-nobase_nodist_includeHEADERS + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/ext/libpqxx-7.7.3/include/pqxx/Makefile.am b/ext/libpqxx-7.7.3/include/pqxx/Makefile.am new file mode 100644 index 000000000..c7f68813d --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/Makefile.am @@ -0,0 +1,13 @@ +MAINTAINERCLEANFILES=Makefile.in stamp-h.in + +noinst_HEADERS = \ + config-internal-autotools.h + +nodist_noinst_HEADERS = \ + config.h \ + config-internal-compiler.h + +DISTCLEANFILES = \ + config-internal-autotools.h \ + config-internal-compiler.h \ + config-public-compiler.h diff --git a/ext/libpqxx-7.7.3/include/pqxx/Makefile.in b/ext/libpqxx-7.7.3/include/pqxx/Makefile.in new file mode 100644 index 000000000..4b02fb771 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/Makefile.in @@ -0,0 +1,556 @@ +# Makefile.in generated by automake 1.16.4 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2021 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
+ +@SET_MAKE@ + +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = include/pqxx +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/config/m4/libtool.m4 \ + $(top_srcdir)/config/m4/ltoptions.m4 \ + $(top_srcdir)/config/m4/ltsugar.m4 \ + $(top_srcdir)/config/m4/ltversion.m4 \ + $(top_srcdir)/config/m4/lt~obsolete.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(noinst_HEADERS) \ + $(am__DIST_COMMON) +mkinstalldirs = $(SHELL) $(top_srcdir)/config/mkinstalldirs +CONFIG_HEADER = config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +SOURCES = +DIST_SOURCES = +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +HEADERS = $(nodist_noinst_HEADERS) $(noinst_HEADERS) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) \ + config.h.in +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. 
+am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. +am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/config.h.in \ + $(top_srcdir)/config/mkinstalldirs +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CSCOPE = @CSCOPE@ +CTAGS = @CTAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DOXYGEN = @DOXYGEN@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ETAGS = @ETAGS@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GREP = @GREP@ +HAVE_DOT = @HAVE_DOT@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MKDIR = @MKDIR@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PG_CONFIG = @PG_CONFIG@ +PKG_CONFIG = @PKG_CONFIG@ +POSTGRES_INCLUDE = @POSTGRES_INCLUDE@ +PQXXVERSION = @PQXXVERSION@ +PQXX_ABI = @PQXX_ABI@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ 
+pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +with_postgres_lib = @with_postgres_lib@ +MAINTAINERCLEANFILES = Makefile.in stamp-h.in +noinst_HEADERS = \ + config-internal-autotools.h + +nodist_noinst_HEADERS = \ + config.h \ + config-internal-compiler.h + +DISTCLEANFILES = \ + config-internal-autotools.h \ + config-internal-compiler.h \ + config-public-compiler.h + +all: config.h + $(MAKE) $(AM_MAKEFLAGS) all-am + +.SUFFIXES: +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu include/pqxx/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu include/pqxx/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +config.h: stamp-h1 + @test -f $@ || rm -f stamp-h1 + @test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h1 + +stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status + @rm -f stamp-h1 + cd $(top_builddir) && $(SHELL) ./config.status include/pqxx/config.h +$(srcdir)/config.h.in: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + ($(am__cd) $(top_srcdir) && $(AUTOHEADER)) + rm -f stamp-h1 + touch $@ + +distclean-hdr: + -rm -f config.h stamp-h1 + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-am +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-am + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-am + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ 
+ [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am + +distdir-am: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(HEADERS) config.h +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + -test -z "$(DISTCLEANFILES)" || rm -f $(DISTCLEANFILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+ -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic clean-libtool mostlyclean-am + +distclean: distclean-am + -rm -f Makefile +distclean-am: clean-am distclean-generic distclean-hdr distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-generic mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: all install-am install-strip + +.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \ + clean-libtool cscopelist-am ctags ctags-am distclean \ + distclean-generic distclean-hdr distclean-libtool \ + distclean-tags distdir dvi dvi-am html html-am info info-am \ + install install-am install-data install-data-am install-dvi \ + install-dvi-am install-exec install-exec-am install-html \ + install-html-am install-info install-info-am install-man \ + install-pdf install-pdf-am install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic mostlyclean \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags tags-am uninstall uninstall-am + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/ext/libpqxx-7.7.3/include/pqxx/array b/ext/libpqxx-7.7.3/include/pqxx/array new file mode 100644 index 000000000..689f5b27b --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/array @@ -0,0 +1,6 @@ +/** Handling of SQL arrays. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/array.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/array.hxx b/ext/libpqxx-7.7.3/include/pqxx/array.hxx new file mode 100644 index 000000000..8440a244f --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/array.hxx @@ -0,0 +1,103 @@ +/* Handling of SQL arrays. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/field instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ARRAY +#define PQXX_H_ARRAY + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include + +#include "pqxx/internal/encoding_group.hxx" +#include "pqxx/internal/encodings.hxx" + + +namespace pqxx +{ +/// Low-level array parser. +/** Use this to read an array field retrieved from the database. + * + * This parser will only work reliably if your client encoding is UTF-8, ASCII, + * or a single-byte encoding which is a superset of ASCII (such as Latin-1). 
+ * + * Also, the parser only supports array element types which use either a comma + * or a semicolon ("," or ";") as the separator between array elements. All + * built-in types use comma, except for one which uses semicolon, but some + * custom types may not work. + * + * The input is a C-style string containing the textual representation of an + * array, as returned by the database. The parser reads this representation + * on the fly. The string must remain in memory until parsing is done. + * + * Parse the array by making calls to @ref get_next until it returns a + * @ref juncture of "done". The @ref juncture tells you what the parser found + * in that step: did the array "nest" to a deeper level, or "un-nest" back up? + */ +class PQXX_LIBEXPORT array_parser +{ +public: + /// What's the latest thing found in the array? + enum class juncture + { + /// Starting a new row. + row_start, + /// Ending the current row. + row_end, + /// Found a NULL value. + null_value, + /// Found a string value. + string_value, + /// Parsing has completed. + done, + }; + + // TODO: constexpr noexcept. Breaks ABI. + /// Constructor. You don't need this; use @ref field::as_array instead. + /** The parser only remains valid while the data underlying the @ref result + * remains valid. Once all `result` objects referring to that data have been + * destroyed, the parser will no longer refer to valid memory. + */ + explicit array_parser( + std::string_view input, + internal::encoding_group = internal::encoding_group::MONOBYTE); + + /// Parse the next step in the array. + /** Returns what it found. If the juncture is @ref juncture::string_value, + * the string will contain the value. Otherwise, it will be empty. + * + * Call this until the @ref array_parser::juncture it returns is + * @ref juncture::done. + */ + std::pair get_next(); + +private: + std::string_view m_input; + internal::glyph_scanner_func *const m_scan; + + /// Current parsing position in the input. + std::string::size_type m_pos = 0u; + + std::string::size_type scan_single_quoted_string() const; + std::string parse_single_quoted_string(std::string::size_type end) const; + std::string::size_type scan_double_quoted_string() const; + std::string parse_double_quoted_string(std::string::size_type end) const; + std::string::size_type scan_unquoted_string() const; + std::string parse_unquoted_string(std::string::size_type end) const; + + std::string::size_type scan_glyph(std::string::size_type pos) const; + std::string::size_type + scan_glyph(std::string::size_type pos, std::string::size_type end) const; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/binarystring b/ext/libpqxx-7.7.3/include/pqxx/binarystring new file mode 100644 index 000000000..77551d9f7 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/binarystring @@ -0,0 +1,6 @@ +/** BYTEA (binary string) conversions. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/binarystring.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/binarystring.hxx b/ext/libpqxx-7.7.3/include/pqxx/binarystring.hxx new file mode 100644 index 000000000..47c82a035 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/binarystring.hxx @@ -0,0 +1,236 @@ +/* Deprecated representation for raw, binary data. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/binarystring instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. 
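(Aside, not part of the vendored file: a minimal, hedged usage sketch for the array_parser interface documented above. The helper name print_array_elements and the way the pqxx::field is obtained are assumptions for illustration; field::as_array(), the juncture values, and get_next() returning a (juncture, string) pair come from the header itself.)

#include <iostream>
#include <pqxx/pqxx>

// Illustrative sketch: walk one SQL array value element by element.
// Assumes "f" is a pqxx::field holding an array value, e.g. the single field
// of tx.exec1("SELECT ARRAY['a','b','c']").
void print_array_elements(pqxx::field const &f)
{
  auto parser = f.as_array();  // pqxx::array_parser over the field's text
  for (;;)
  {
    auto [junc, value] = parser.get_next();
    if (junc == pqxx::array_parser::juncture::done)
      break;  // the whole array has been consumed
    if (junc == pqxx::array_parser::juncture::string_value)
      std::cout << value << '\n';  // element text; empty for other junctures
    // row_start / row_end report nesting into and out of sub-arrays,
    // and null_value reports a NULL element.
  }
}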
+ * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_BINARYSTRING +#define PQXX_H_BINARYSTRING + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include + +#include "pqxx/result.hxx" +#include "pqxx/strconv.hxx" + +namespace pqxx +{ +class binarystring; +template<> struct string_traits; + + +/// Binary data corresponding to PostgreSQL's "BYTEA" binary-string type. +/** @ingroup escaping-functions + * @deprecated Use @c std::basic_string and + * @c std::basic_string_view for binary data. In C++20 or better, + * any @c contiguous_range of @c std::byte will do. + * + * This class represents a binary string as stored in a field of type @c bytea. + * + * Internally a binarystring is zero-terminated, but it may also contain null + * bytes, they're just like any other byte value. So don't assume that it's + * safe to treat the contents as a C-style string. + * + * The binarystring retains its value even if the result it was obtained from + * is destroyed, but it cannot be copied or assigned. + * + * \relatesalso transaction_base::quote_raw + * + * To include a @c binarystring value in an SQL query, escape and quote it + * using the transaction's @c quote_raw function. + * + * @warning This class is implemented as a reference-counting smart pointer. + * Copying, swapping, and destroying binarystring objects that refer to the + * same underlying data block is not thread-safe. If you wish to pass + * binarystrings around between threads, make sure that each of these + * operations is protected against concurrency with similar operations on the + * same object, or other objects pointing to the same data block. + */ +class PQXX_LIBEXPORT binarystring +{ +public: + using char_type = unsigned char; + using value_type = std::char_traits::char_type; + using size_type = std::size_t; + using difference_type = long; + using const_reference = value_type const &; + using const_pointer = value_type const *; + using const_iterator = const_pointer; + using const_reverse_iterator = std::reverse_iterator; + + [[deprecated("Use std::byte for binary data.")]] binarystring( + binarystring const &) = default; + + /// Read and unescape bytea field. + /** The field will be zero-terminated, even if the original bytea field + * isn't. + * @param F the field to read; must be a bytea field + */ + [[deprecated("Use std::byte for binary data.")]] explicit binarystring( + field const &); + + /// Copy binary data from std::string_view on binary data. + /** This is inefficient in that it copies the data to a buffer allocated on + * the heap. + */ + [[deprecated("Use std::byte for binary data.")]] explicit binarystring( + std::string_view); + + /// Copy binary data of given length straight out of memory. + [[deprecated("Use std::byte for binary data.")]] binarystring( + void const *, std::size_t); + + /// Efficiently wrap a buffer of binary data in a @c binarystring. + [[deprecated("Use std::byte for binary data.")]] binarystring( + std::shared_ptr ptr, size_type size) : + m_buf{std::move(ptr)}, m_size{size} + {} + + /// Size of converted string in bytes. + [[nodiscard]] size_type size() const noexcept { return m_size; } + /// Size of converted string in bytes. 
+ [[nodiscard]] size_type length() const noexcept { return size(); } + [[nodiscard]] bool empty() const noexcept { return size() == 0; } + + [[nodiscard]] const_iterator begin() const noexcept { return data(); } + [[nodiscard]] const_iterator cbegin() const noexcept { return begin(); } + [[nodiscard]] const_iterator end() const noexcept { return data() + m_size; } + [[nodiscard]] const_iterator cend() const noexcept { return end(); } + + [[nodiscard]] const_reference front() const noexcept { return *begin(); } + [[nodiscard]] const_reference back() const noexcept + { + return *(data() + m_size - 1); + } + + [[nodiscard]] const_reverse_iterator rbegin() const + { + return const_reverse_iterator{end()}; + } + [[nodiscard]] const_reverse_iterator crbegin() const { return rbegin(); } + [[nodiscard]] const_reverse_iterator rend() const + { + return const_reverse_iterator{begin()}; + } + [[nodiscard]] const_reverse_iterator crend() const { return rend(); } + + /// Unescaped field contents. + [[nodiscard]] value_type const *data() const noexcept { return m_buf.get(); } + + [[nodiscard]] const_reference operator[](size_type i) const noexcept + { + return data()[i]; + } + + [[nodiscard]] PQXX_PURE bool operator==(binarystring const &) const noexcept; + [[nodiscard]] bool operator!=(binarystring const &rhs) const noexcept + { + return not operator==(rhs); + } + + binarystring &operator=(binarystring const &); + + /// Index contained string, checking for valid index. + const_reference at(size_type) const; + + /// Swap contents with other binarystring. + void swap(binarystring &); + + /// Raw character buffer (no terminating zero is added). + /** @warning No terminating zero is added! If the binary data did not end in + * a null character, you will not find one here. + */ + [[nodiscard]] char const *get() const noexcept + { + return reinterpret_cast(m_buf.get()); + } + + /// Read contents as a std::string_view. + [[nodiscard]] std::string_view view() const noexcept + { + return std::string_view(get(), size()); + } + + /// Read as regular C++ string (may include null characters). + /** This creates and returns a new string object. Don't call this + * repeatedly; retrieve your string once and keep it in a local variable. + * Also, do not expect to be able to compare the string's address to that of + * an earlier invocation. + */ + [[nodiscard]] std::string str() const; + + /// Access data as a pointer to @c std::byte. + [[nodiscard]] std::byte const *bytes() const + { + return reinterpret_cast(get()); + } + + /// Read data as a @c std::basic_string_view. + [[nodiscard]] std::basic_string_view bytes_view() const + { + return std::basic_string_view{bytes(), size()}; + } + +private: + std::shared_ptr m_buf; + size_type m_size{0}; +}; + + +template<> struct nullness : no_null +{}; + + +/// String conversion traits for @c binarystring. +/** Defines the conversions between a @c binarystring and its PostgreSQL + * textual format, for communication with the database. + * + * These conversions rely on the "hex" format which was introduced in + * PostgreSQL 9.0. Both your libpq and the server must be recent enough to + * speak this format. 
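As a rough illustration of the accessors above (and of why the class is deprecated in favour of `std::byte` strings), this hypothetical snippet reads a `bytea` column into a `binarystring`; the table and column names are invented, and the deprecated constructor will emit a compiler warning:

```cxx
#include <string>
#include <pqxx/binarystring>
#include <pqxx/pqxx>

std::string fetch_logo(pqxx::connection &cx)
{
  pqxx::work tx{cx};
  pqxx::row const r{tx.exec1("SELECT logo FROM brand LIMIT 1")};
  pqxx::binarystring const logo{r[0]};  // unescapes the bytea field (deprecated API)
  // view() and bytes_view() give non-owning views; str() copies the data out.
  return logo.str();
}
```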
+ */ +template<> struct string_traits +{ + static std::size_t size_buffer(binarystring const &value) noexcept + { + return internal::size_esc_bin(std::size(value)); + } + + static zview to_buf(char *begin, char *end, binarystring const &value) + { + return generic_to_buf(begin, end, value); + } + + static char *into_buf(char *begin, char *end, binarystring const &value) + { + auto const budget{size_buffer(value)}; + if (internal::cmp_less(end - begin, budget)) + throw conversion_overrun{ + "Not enough buffer space to escape binary data."}; + std::string_view text{value.view()}; + internal::esc_bin(binary_cast(text), begin); + return begin + budget; + } + + static binarystring from_string(std::string_view text) + { + auto const size{pqxx::internal::size_unesc_bin(std::size(text))}; + std::shared_ptr buf{ + new unsigned char[size], [](unsigned char const *x) { delete[] x; }}; + pqxx::internal::unesc_bin(text, reinterpret_cast(buf.get())); +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return binarystring{std::move(buf), size}; +#include "pqxx/internal/ignore-deprecated-post.hxx" + } +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/blob b/ext/libpqxx-7.7.3/include/pqxx/blob new file mode 100644 index 000000000..3fd0afac9 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/blob @@ -0,0 +1,6 @@ +/** Binary Large Objects interface. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/blob.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/blob.hxx b/ext/libpqxx-7.7.3/include/pqxx/blob.hxx new file mode 100644 index 000000000..6d77be724 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/blob.hxx @@ -0,0 +1,351 @@ +/* Binary Large Objects interface. + * + * Read or write large objects, stored in their own storage on the server. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/largeobject instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_BLOB +#define PQXX_H_BLOB + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include + +#if defined(PQXX_HAVE_PATH) +# include +#endif + +#if defined(PQXX_HAVE_RANGES) && __has_include() +# include +#endif + +#if defined(PQXX_HAVE_SPAN) && __has_include() +# include +#endif + +#include "pqxx/dbtransaction.hxx" + + +namespace pqxx +{ +/** Binary large object. + * + * This is how you store data that may be too large for the `BYTEA` type. + * Access operations are similar to those for a file: you can read, write, + * query or set the current reading/writing position, and so on. + * + * These large objects live in their own storage on the server, indexed by an + * integer object identifier ("oid"). + * + * Two `blob` objects may refer to the same actual large object in the + * database at the same time. Each will have its own reading/writing position, + * but writes to the one will of course affect what the other sees. + */ +class PQXX_LIBEXPORT blob +{ +public: + /// Create a new, empty large object. + /** You may optionally specify an oid for the new blob. If you do, then + * the new object will have that oid -- or creation will fail if there + * already is an object with that oid. 
+ */ + [[nodiscard]] static oid create(dbtransaction &, oid = 0); + + /// Delete a large object, or fail if it does not exist. + static void remove(dbtransaction &, oid); + + /// Open blob for reading. Any attempt to write to it will fail. + [[nodiscard]] static blob open_r(dbtransaction &, oid); + // Open blob for writing. Any attempt to read from it will fail. + [[nodiscard]] static blob open_w(dbtransaction &, oid); + // Open blob for reading and/or writing. + [[nodiscard]] static blob open_rw(dbtransaction &, oid); + + /// You can default-construct a blob, but it won't do anything useful. + /** Most operations on a default-constructed blob will throw @ref + * usage_error. + */ + blob() = default; + + /// You can move a blob, but not copy it. The original becomes unusable. + blob(blob &&); + /// You can move a blob, but not copy it. The original becomes unusable. + blob &operator=(blob &&); + + blob(blob const &) = delete; + blob &operator=(blob const &) = delete; + ~blob(); + + /// Maximum number of bytes that can be read or written at a time. + /** The underlying protocol only supports reads and writes up to 2 GB + * exclusive. + * + * If you need to read or write more data to or from a binary large object, + * you'll have to break it up into chunks. + */ + static constexpr std::size_t chunk_limit = 0x7fffffff; + + /// Read up to `size` bytes of the object into `buf`. + /** Uses a buffer that you provide, resizing it as needed. If it suits you, + * this lets you allocate the buffer once and then re-use it multiple times. + * + * Resizes `buf` as needed. + * + * @warning The underlying protocol only supports reads up to 2GB at a time. + * If you need to read more, try making repeated calls to @ref append_to_buf. + */ + std::size_t read(std::basic_string &buf, std::size_t size); + +#if defined(PQXX_HAVE_SPAN) + /// Read up to `std::size(buf)` bytes from the object. + /** Retrieves bytes from the blob, at the current position, until `buf` is + * full or there are no more bytes to read, whichever comes first. + * + * Returns the filled portion of `buf`. This may be empty. + */ + template + std::span read(std::span buf) + { + return buf.subspan(0, raw_read(std::data(buf), std::size(buf))); + } +#endif // PQXX_HAVE_SPAN + +#if defined(PQXX_HAVE_CONCEPTS) && defined(PQXX_HAVE_SPAN) + /// Read up to `std::size(buf)` bytes from the object. + /** Retrieves bytes from the blob, at the current position, until `buf` is + * full or there are no more bytes to read, whichever comes first. + * + * Returns the filled portion of `buf`. This may be empty. + */ + template std::span read(DATA &buf) + { + return {std::data(buf), raw_read(std::data(buf), std::size(buf))}; + } +#else // PQXX_HAVE_CONCEPTS && PQXX_HAVE_SPAN + /// Read up to `std::size(buf)` bytes from the object. + /** @deprecated As libpqxx moves to C++20 as its baseline language version, + * this will take and return `std::span`. + * + * Retrieves bytes from the blob, at the current position, until `buf` is + * full (i.e. its current size is reached), or there are no more bytes to + * read, whichever comes first. + * + * This function will not change either the size or the capacity of `buf`, + * only its contents. + * + * Returns the filled portion of `buf`. This may be empty. 
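Under the reading declarations above, pulling a large object in fixed-size pieces might look like this sketch; the oid is assumed to name an existing large object:

```cxx
#include <cstddef>
#include <pqxx/pqxx>

std::size_t count_blob_bytes(pqxx::dbtransaction &tx, pqxx::oid id)
{
  pqxx::blob b{pqxx::blob::open_r(tx, id)};
  std::basic_string<std::byte> buf;
  std::size_t total{0};
  // read() resizes buf as needed and returns how many bytes it actually got;
  // zero means we have reached the end of the object.
  while (std::size_t const got = b.read(buf, 8192))
    total += got;
  return total;
}
```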
+ */ + template + std::basic_string_view read(std::vector &buf) + { + return {std::data(buf), raw_read(std::data(buf), std::size(buf))}; + } +#endif // PQXX_HAVE_CONCEPTS && PQXX_HAVE_SPAN + +#if defined(PQXX_HAVE_CONCEPTS) + /// Write `data` to large object, at the current position. + /** If the writing position is at the end of the object, this will append + * `data` to the object's contents and move the writing position so that + * it's still at the end. + * + * If the writing position was not at the end, writing will overwrite the + * prior data, but it will not remove data that follows the part where you + * wrote your new data. + * + * @warning This is a big difference from writing to a file. You can + * overwrite some data in a large object, but this does not truncate the + * data that was already there. For example, if the object contained binary + * data "abc", and you write "12" at the starting position, the object will + * contain "12c". + * + * @warning The underlying protocol only supports writes up to 2 GB at a + * time. If you need to write more, try making repeated calls to + * @ref append_from_buf. + */ + template void write(DATA const &data) + { + raw_write(std::data(data), std::size(data)); + } +#else + /// Write `data` large object, at the current position. + /** If the writing position is at the end of the object, this will append + * `data` to the object's contents and move the writing position so that + * it's still at the end. + * + * If the writing position was not at the end, writing will overwrite the + * prior data, but it will not remove data that follows the part where you + * wrote your new data. + * + * @warning This is a big difference from writing to a file. You can + * overwrite some data in a large object, but this does not truncate the + * data that was already there. For example, if the object contained binary + * data "abc", and you write "12" at the starting position, the object will + * contain "12c". + * + * @warning The underlying protocol only supports writes up to 2 GB at a + * time. If you need to write more, try making repeated calls to + * @ref append_from_buf. + */ + template void write(DATA const &data) + { + raw_write(std::data(data), std::size(data)); + } +#endif + + /// Resize large object to `size` bytes. + /** If the blob is more than `size` bytes long, this removes the end so as + * to make the blob the desired length. + * + * If the blob is less than `size` bytes long, it adds enough zero bytes to + * make it the desired length. + */ + void resize(std::int64_t size); + + /// Return the current reading/writing position in the large object. + [[nodiscard]] std::int64_t tell() const; + + /// Set the current reading/writing position to an absolute offset. + /** Returns the new file offset. */ + std::int64_t seek_abs(std::int64_t offset = 0); + /// Move the current reading/writing position forwards by an offset. + /** To move backwards, pass a negative offset. + * + * Returns the new file offset. + */ + std::int64_t seek_rel(std::int64_t offset = 0); + /// Set the current position to an offset relative to the end of the blob. + /** You'll probably want an offset of zero or less. + * + * Returns the new file offset. + */ + std::int64_t seek_end(std::int64_t offset = 0); + + /// Create a binary large object containing given `data`. + /** You may optionally specify an oid for the new object. If you do, and an + * object with that oid already exists, creation will fail. 
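A small sketch of the writing side described above: open an existing large object, move the position to the end, and append a buffer. The oid is assumed to exist already:

```cxx
#include <cstddef>
#include <pqxx/pqxx>

void append_bytes(
  pqxx::dbtransaction &tx, pqxx::oid id,
  std::basic_string_view<std::byte> data)
{
  pqxx::blob b{pqxx::blob::open_w(tx, id)};
  b.seek_end(0);  // put the read/write position at the end of the object
  b.write(data);  // writes at the current position, i.e. appends here
  b.close();      // optional; the destructor would also close it
}
```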
+ */ + static oid from_buf( + dbtransaction &tx, std::basic_string_view data, oid id = 0); + + /// Append `data` to binary large object. + /** The underlying protocol only supports appending blocks up to 2 GB. + */ + static void append_from_buf( + dbtransaction &tx, std::basic_string_view data, oid id); + + /// Read client-side file and store it server-side as a binary large object. + [[nodiscard]] static oid from_file(dbtransaction &, char const path[]); + +#if defined(PQXX_HAVE_PATH) && !defined(_WIN32) + /// Read client-side file and store it server-side as a binary large object. + /** This overload is not available on Windows, where `std::filesystem::path` + * converts to a `wchar_t` string rather than a `char` string. + */ + [[nodiscard]] static oid + from_file(dbtransaction &tx, std::filesystem::path const &path) + { + return from_file(tx, path.c_str()); + } +#endif + + /// Read client-side file and store it server-side as a binary large object. + /** In this version, you specify the binary large object's oid. If that oid + * is already in use, the operation will fail. + */ + static oid from_file(dbtransaction &, char const path[], oid); + +#if defined(PQXX_HAVE_PATH) && !defined(_WIN32) + /// Read client-side file and store it server-side as a binary large object. + /** In this version, you specify the binary large object's oid. If that oid + * is already in use, the operation will fail. + * + * This overload is not available on Windows, where `std::filesystem::path` + * converts to a `wchar_t` string rather than a `char` string. + */ + static oid + from_file(dbtransaction &tx, std::filesystem::path const &path, oid id) + { + return from_file(tx, path.c_str(), id); + } +#endif + + /// Convenience function: Read up to `max_size` bytes from blob with `id`. + /** You could easily do this yourself using the @ref open_r and @ref read + * functions, but it can save you a bit of code to do it this way. + */ + static void to_buf( + dbtransaction &, oid, std::basic_string &, + std::size_t max_size); + + /// Read part of the binary large object with `id`, and append it to `buf`. + /** Use this to break up a large read from one binary large object into one + * massive buffer. Just keep calling this function until it returns zero. + * + * The `offset` is how far into the large object your desired chunk is, and + * `append_max` says how much to try and read in one go. + */ + static std::size_t append_to_buf( + dbtransaction &tx, oid id, std::int64_t offset, + std::basic_string &buf, std::size_t append_max); + + /// Write a binary large object's contents to a client-side file. + static void to_file(dbtransaction &, oid, char const path[]); + +#if defined(PQXX_HAVE_PATH) && !defined(_WIN32) + /// Write a binary large object's contents to a client-side file. + /** This overload is not available on Windows, where `std::filesystem::path` + * converts to a `wchar_t` string rather than a `char` string. + */ + static void + to_file(dbtransaction &tx, oid id, std::filesystem::path const &path) + { + to_file(tx, id, path.c_str()); + } +#endif + + /// Close this blob. + /** This does not delete the blob from the database; it only terminates your + * local object for accessing the blob. + * + * Resets the blob to a useless state similar to one that was + * default-constructed. + * + * The destructor will do this for you automatically. Still, there is a + * reason to `close()` objects explicitly where possible: if an error should + * occur while closing, `close()` can throw an exception. 
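Putting the convenience functions above together, a round trip through a large object could look roughly like this sketch:

```cxx
#include <cstddef>
#include <pqxx/pqxx>

void blob_roundtrip(pqxx::connection &cx)
{
  std::basic_string<std::byte> const payload{std::byte{0x01}, std::byte{0x02}};

  pqxx::work tx{cx};

  // Create a new large object holding `payload`; the server picks the oid.
  pqxx::oid const id{pqxx::blob::from_buf(tx, payload)};

  // Read it back, up to a generous maximum size.
  std::basic_string<std::byte> readback;
  pqxx::blob::to_buf(tx, id, readback, 1024 * 1024);

  pqxx::blob::remove(tx, id);  // clean up again
  tx.commit();
}
```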
A destructor + * cannot. + */ + void close(); + +private: + PQXX_PRIVATE blob(connection &conn, int fd) noexcept : + m_conn{&conn}, m_fd{fd} + {} + static PQXX_PRIVATE blob open_internal(dbtransaction &, oid, int); + static PQXX_PRIVATE pqxx::internal::pq::PGconn * + raw_conn(pqxx::connection *) noexcept; + static PQXX_PRIVATE pqxx::internal::pq::PGconn * + raw_conn(pqxx::dbtransaction const &) noexcept; + static PQXX_PRIVATE std::string errmsg(connection const *); + static PQXX_PRIVATE std::string errmsg(dbtransaction const &tx) + { + return errmsg(&tx.conn()); + } + PQXX_PRIVATE std::string errmsg() const { return errmsg(m_conn); } + PQXX_PRIVATE std::int64_t seek(std::int64_t offset, int whence); + std::size_t raw_read(std::byte buf[], std::size_t size); + void raw_write(std::byte const buf[], std::size_t size); + + connection *m_conn = nullptr; + int m_fd = -1; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/composite b/ext/libpqxx-7.7.3/include/pqxx/composite new file mode 100644 index 000000000..2bfa7ade9 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/composite @@ -0,0 +1,6 @@ +/** Handling of SQL "composite types." + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/composite.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/composite.hxx b/ext/libpqxx-7.7.3/include/pqxx/composite.hxx new file mode 100644 index 000000000..439b133a8 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/composite.hxx @@ -0,0 +1,149 @@ +#ifndef PQXX_H_COMPOSITE +#define PQXX_H_COMPOSITE + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/internal/array-composite.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/util.hxx" + +namespace pqxx +{ +/// Parse a string representation of a value of a composite type. +/** @warning This code is still experimental. Use with care. + * + * You may use this as a helper while implementing your own @ref string_traits + * for a composite type. + * + * This function interprets `text` as the string representation of a value of + * some composite type, and sets each of `fields` to the respective values of + * its fields. The field types must be copy-assignable. + * + * The number of fields must match the number of fields in the composite type, + * and there must not be any other text in the input. The function is meant to + * handle any value string that the backend can produce, but not necessarily + * every valid alternative spelling. + * + * Fields in composite types can be null. When this happens, the C++ type of + * the corresponding field reference must be of a type that can handle nulls. + * If you are working with a type that does not have an inherent null value, + * such as e.g. `int`, consider using `std::optional`. 
+ */ +template +inline void parse_composite( + pqxx::internal::encoding_group enc, std::string_view text, T &...fields) +{ + static_assert(sizeof...(fields) > 0); + + auto const scan{pqxx::internal::get_glyph_scanner(enc)}; + auto const data{std::data(text)}; + auto const size{std::size(text)}; + if (size == 0) + throw conversion_error{"Cannot parse composite value from empty string."}; + + std::size_t here{0}, next{scan(data, size, here)}; + if (next != 1 or data[here] != '(') + throw conversion_error{ + internal::concat("Invalid composite value string: ", text)}; + + here = next; + + constexpr auto num_fields{sizeof...(fields)}; + std::size_t index{0}; + (pqxx::internal::parse_composite_field( + index, text, here, fields, scan, num_fields - 1), + ...); + if (here != std::size(text)) + throw conversion_error{internal::concat( + "Composite value did not end at the closing parenthesis: '", text, + "'.")}; + if (text[here - 1] != ')') + throw conversion_error{internal::concat( + "Composive value did not end in parenthesis: '", text, "'")}; +} + + +/// Parse a string representation of a value of a composite type. +/** @warning This version only works for UTF-8 and single-byte encodings. + * + * For proper encoding support, use the composite-type support in the + * `field` class. + */ +template +inline void parse_composite(std::string_view text, T &...fields) +{ + parse_composite(pqxx::internal::encoding_group::MONOBYTE, text, fields...); +} +} // namespace pqxx + + +namespace pqxx::internal +{ +constexpr char empty_composite_str[]{"()"}; +} // namespace pqxx::internal + + +namespace pqxx +{ +/// Estimate the buffer size needed to represent a value of a composite type. +/** Returns a conservative estimate. + */ +template +[[nodiscard]] inline std::size_t +composite_size_buffer(T const &...fields) noexcept +{ + constexpr auto num{sizeof...(fields)}; + + // Size for a multi-field composite includes room for... + // + opening parenthesis + // + field budgets + // + separating comma per field + // - comma after final field + // + closing parenthesis + // + terminating zero + + if constexpr (num == 0) + return std::size(pqxx::internal::empty_composite_str); + else + return 1 + (pqxx::internal::size_composite_field_buffer(fields) + ...) + + num + 1; +} + + +/// Render a series of values as a single composite SQL value. +/** @warning This code is still experimental. Use with care. + * + * You may use this as a helper while implementing your own `string_traits` + * for a composite type. + */ +template +inline char *composite_into_buf(char *begin, char *end, T const &...fields) +{ + if (std::size_t(end - begin) < composite_size_buffer(fields...)) + throw conversion_error{ + "Buffer space may not be enough to represent composite value."}; + + constexpr auto num_fields{sizeof...(fields)}; + if constexpr (num_fields == 0) + { + constexpr char empty[]{"()"}; + std::memcpy(begin, empty, std::size(empty)); + return begin + std::size(empty); + } + + char *pos{begin}; + *pos++ = '('; + + (pqxx::internal::write_composite_field(pos, end, fields), ...); + + // If we've got multiple fields, "backspace" that last comma. + if constexpr (num_fields > 1) + --pos; + *pos++ = ')'; + *pos++ = '\0'; + return pos; +} +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/config.h.in b/ext/libpqxx-7.7.3/include/pqxx/config.h.in new file mode 100644 index 000000000..743579967 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/config.h.in @@ -0,0 +1,121 @@ +/* include/pqxx/config.h.in. 
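The two experimental composite helpers above compose as in this sketch, which parses a two-field value and renders the fields back into a buffer. It assumes a single-byte or UTF-8 client encoding (the simple `parse_composite` overload) and uses `std::optional` for the nullable field, as the documentation suggests:

```cxx
#include <cstddef>
#include <optional>
#include <string>
#include <pqxx/composite>
#include <pqxx/pqxx>

void composite_demo()
{
  int id{};
  std::optional<std::string> label;

  // Interpret the backend's textual form of a composite value.
  pqxx::parse_composite("(42,widget)", id, label);

  // Render the same fields back into a composite literal.
  char buf[64];
  char *const end{pqxx::composite_into_buf(buf, buf + sizeof buf, id, label)};
  std::string const text{buf, static_cast<std::size_t>(end - buf - 1)};  // drop trailing '\0'
}
```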
Generated from configure.ac by autoheader. */ + +/* Define to 1 if you have the header file. */ +#undef HAVE_DLFCN_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_INTTYPES_H + +/* Define to 1 if you have the `pq' library (-lpq). */ +#undef HAVE_LIBPQ + +/* Define to 1 if you have the header file. */ +#undef HAVE_MEMORY_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDINT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDLIB_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRINGS_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRING_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_STAT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_TYPES_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_UNISTD_H + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#undef LT_OBJDIR + +/* Name of package */ +#undef PACKAGE + +/* Define to the address where bug reports for this package should be sent. */ +#undef PACKAGE_BUGREPORT + +/* Define to the full name of this package. */ +#undef PACKAGE_NAME + +/* Define to the full name and version of this package. */ +#undef PACKAGE_STRING + +/* Define to the one symbol short name of this package. */ +#undef PACKAGE_TARNAME + +/* Define to the home page for this package. */ +#undef PACKAGE_URL + +/* Define to the version of this package. */ +#undef PACKAGE_VERSION + +/* Define if supports floating-point conversion. */ +#undef PQXX_HAVE_CHARCONV_FLOAT + +/* Define if supports integer conversion. */ +#undef PQXX_HAVE_CHARCONV_INT + +/* Define if compiler has C++20 std::cmp_greater etc. */ +#undef PQXX_HAVE_CMP + +/* Define if compiler supports Concepts and header. */ +#undef PQXX_HAVE_CONCEPTS + +/* Define if compiler supports __cxa_demangle */ +#undef PQXX_HAVE_CXA_DEMANGLE + +/* Define if g++ supports pure attribute */ +#undef PQXX_HAVE_GCC_PURE + +/* Define if g++ supports visibility attribute. */ +#undef PQXX_HAVE_GCC_VISIBILITY + +/* Define if likely & unlikely work. */ +#undef PQXX_HAVE_LIKELY + +/* Define if operator[] can take multiple arguments. */ +#undef PQXX_HAVE_MULTIDIMENSIONAL_SUBSCRIPT + +/* Define if compiler has usable std::filesystem::path. */ +#undef PQXX_HAVE_PATH + +/* Define if poll() is available. */ +#undef PQXX_HAVE_POLL + +/* Define if libpq has PQencryptPasswordConn (since pg 10). */ +#undef PQXX_HAVE_PQENCRYPTPASSWORDCONN + +/* Define if libpq has pipeline mode (since pg 14). */ +#undef PQXX_HAVE_PQ_PIPELINE + +/* Define if std::this_thread::sleep_for works. */ +#undef PQXX_HAVE_SLEEP_FOR + +/* Define if compiler has std::span. */ +#undef PQXX_HAVE_SPAN + +/* Define if strerror_r() is available. */ +#undef PQXX_HAVE_STRERROR_R + +/* Define if strerror_s() is available. */ +#undef PQXX_HAVE_STRERROR_S + +/* Define if thread_local is fully supported. */ +#undef PQXX_HAVE_THREAD_LOCAL + +/* Define if std::chrono has year_month_day etc. */ +#undef PQXX_HAVE_YEAR_MONTH_DAY + +/* Define to 1 if you have the ANSI C header files. */ +#undef STDC_HEADERS + +/* Version number of package */ +#undef VERSION diff --git a/ext/libpqxx-7.7.3/include/pqxx/connection b/ext/libpqxx-7.7.3/include/pqxx/connection new file mode 100644 index 000000000..82ff43aa5 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/connection @@ -0,0 +1,8 @@ +/** pqxx::connection class. + * + * pqxx::connection encapsulates a connection to a database. 
+ */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/connection.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/connection.hxx b/ext/libpqxx-7.7.3/include/pqxx/connection.hxx new file mode 100644 index 000000000..92454bb47 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/connection.hxx @@ -0,0 +1,1261 @@ +/* Definition of the connection class. + * + * pqxx::connection encapsulates a connection to a database. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/connection instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_CONNECTION +#define PQXX_H_CONNECTION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Double-check in order to suppress an overzealous Visual C++ warning (#418). +#if defined(PQXX_HAVE_CONCEPTS) && __has_include() +# include +#endif + +#include "pqxx/errorhandler.hxx" +#include "pqxx/except.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/params.hxx" +#include "pqxx/separated_list.hxx" +#include "pqxx/strconv.hxx" +#include "pqxx/types.hxx" +#include "pqxx/util.hxx" +#include "pqxx/zview.hxx" + + +/** + * @addtogroup connections + * + * Use of the libpqxx library starts here. + * + * Everything that can be done with a database through libpqxx must go through + * a @ref pqxx::connection object. It connects to a database when you create + * it, and it terminates that communication during destruction. + * + * Many things come together in this class. Handling of error and warning + * messages, for example, is defined by @ref pqxx::errorhandler objects in the + * context of a connection. Prepared statements are also defined here. + * + * When you connect to a database, you pass a connection string containing any + * parameters and options, such as the server address and the database name. + * + * These are identical to the ones in libpq, the C language binding upon which + * libpqxx itself is built: + * + * https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING + * + * There are also environment variables you can set to provide defaults, again + * as defined by libpq: + * + * https://www.postgresql.org/docs/current/libpq-envars.html + * + * You can also create a database connection _asynchronously_ using an + * intermediate @ref pqxx::connecting object. + */ + +namespace pqxx::internal +{ +class sql_cursor; + +#if defined(PQXX_HAVE_CONCEPTS) +/// Concept: T is a range of pairs of zero-terminated strings. 
+template +concept ZKey_ZValues = std::ranges::input_range and requires(T t) +{ + {std::cbegin(t)}; + { + std::get<0>(*std::cbegin(t)) + } -> ZString; + { + std::get<1>(*std::cbegin(t)) + } -> ZString; +} and std::tuple_size_v::value_type> +== 2; +#endif // PQXX_HAVE_CONCEPTS +} // namespace pqxx::internal + + +namespace pqxx::internal::gate +{ +class connection_dbtransaction; +class connection_errorhandler; +class connection_largeobject; +class connection_notification_receiver; +class connection_pipeline; +class connection_sql_cursor; +class connection_stream_from; +class connection_stream_to; +class connection_transaction; +class const_connection_largeobject; +} // namespace pqxx::internal::gate + + +namespace pqxx +{ +/// Representation of a PostgreSQL table path. +/** A "table path" consists of a table name, optionally prefixed by a schema + * name, which in turn is optionally prefixed by a database name. + * + * A minimal example of a table path would be `{mytable}`. But a table path + * may also take the forms `{myschema,mytable}` or + * `{mydb,myschema,mytable}`. + */ +using table_path = std::initializer_list; + + +/// Encrypt a password. @deprecated Use connection::encrypt_password instead. +[[nodiscard, + deprecated("Use connection::encrypt_password instead.")]] std::string + PQXX_LIBEXPORT + encrypt_password(char const user[], char const password[]); + +/// Encrypt password. @deprecated Use connection::encrypt_password instead. +[[nodiscard, + deprecated("Use connection::encrypt_password instead.")]] inline std::string +encrypt_password(zview user, zview password) +{ +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return encrypt_password(user.c_str(), password.c_str()); +#include "pqxx/internal/ignore-deprecated-post.hxx" +} + + +/// Error verbosity levels. +enum class error_verbosity : int +{ + // These values must match those in libpq's PGVerbosity enum. + terse = 0, + normal = 1, + verbose = 2 +}; + + +/// Connection to a database. +/** This is the first class to look at when you wish to work with a database + * through libpqxx. The connection opens during construction, and closes upon + * destruction. + * + * When creating a connection, you can pass a connection URI or a postgres + * connection string, to specify the database server's address, a login + * username, and so on. If you don't, the connection will try to obtain them + * from certain environment variables. If those are not set either, the + * default is to try and connect to the local system's port 5432. + * + * Find more about connection strings here: + * + * https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING + * + * The variables are documented here: + * + * https://www.postgresql.org/docs/current/libpq-envars.html + * + * To query or manipulate the database once connected, use one of the + * transaction classes (see pqxx/transaction_base.hxx) and perhaps also the + * transactor framework (see pqxx/transactor.hxx). + * + * When a connection breaks, you will typically get a @ref broken_connection + * exception. This can happen at almost any point. + * + * @warning On Unix-like systems, including GNU and BSD systems, your program + * may receive the SIGPIPE signal when the connection to the backend breaks. By + * default this signal will abort your program. Use "signal(SIGPIPE, SIG_IGN)" + * if you want your program to continue running after a connection fails. 
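A minimal sketch of the flow described above. The connection string is a placeholder; as noted, the usual libpq environment variables can supply any parameters you leave out:

```cxx
#include <iostream>
#include <pqxx/pqxx>

int main()
{
  // Hypothetical options; an empty string would rely entirely on defaults.
  pqxx::connection cx{"host=localhost dbname=mydb"};

  pqxx::work tx{cx};
  pqxx::row const r{tx.exec1("SELECT version()")};
  tx.commit();

  std::cout << cx.dbname() << ": " << r[0].as<std::string>() << '\n';
}
```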
+ */ +class PQXX_LIBEXPORT connection +{ +public: + connection() : connection{""} {} + + /// Connect to a database, using `options` string. + explicit connection(char const options[]) + { + check_version(); + init(options); + } + + /// Connect to a database, using `options` string. + explicit connection(zview options) : connection{options.c_str()} + { + // (Delegates to other constructor which calls check_version for us.) + } + + /// Move constructor. + /** Moving a connection is not allowed if it has an open transaction, or has + * error handlers or notification receivers registered on it. In those + * situations, other objects may hold references to the old object which + * would become invalid and might produce hard-to-diagnose bugs. + */ + connection(connection &&rhs); + +#if defined(PQXX_HAVE_CONCEPTS) + /// Connect to a database, passing options as a range of key/value pairs. + /** @warning Experimental. Requires C++20 "concepts" support. Define + * `PQXX_HAVE_CONCEPTS` to enable it. + * + * There's no need to escape the parameter values. + * + * See the PostgreSQL libpq documentation for the full list of possible + * options: + * + * https://postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS + * + * The options can be anything that can be iterated as a series of pairs of + * zero-terminated strings: `std::pair`, or + * `std::tuple`, or + * `std::map`, and so on. + */ + template + inline connection(MAPPING const ¶ms); +#endif // PQXX_HAVE_CONCEPTS + + ~connection() + { + try + { + close(); + } + catch (std::exception const &) + {} + } + + /// Move assignment. + /** Neither connection can have an open transaction, registered error + * handlers, or registered notification receivers. + */ + connection &operator=(connection &&rhs); + + connection(connection const &) = delete; + connection &operator=(connection const &) = delete; + + /// Is this connection open at the moment? + /** @warning This function is **not** needed in most code. Resist the + * temptation to check it after opening a connection. The `connection` + * constructor will throw a @ref broken_connection exception if can't connect + * to the database. + */ + [[nodiscard]] bool PQXX_PURE is_open() const noexcept; + + /// Invoke notice processor function. The message should end in newline. + void process_notice(char const[]) noexcept; + /// Invoke notice processor function. Newline at end is recommended. + /** The zview variant, with a message ending in newline, is the most + * efficient way to call process_notice. + */ + void process_notice(zview) noexcept; + + /// Enable tracing to a given output stream, or nullptr to disable. + void trace(std::FILE *) noexcept; + + /** + * @name Connection properties + * + * These are probably not of great interest, since most are derived from + * information supplied by the client program itself, but they are included + * for completeness. + * + * The connection needs to be currently active for these to work. + */ + //@{ + /// Name of database we're connected to, if any. + [[nodiscard]] char const *dbname() const; + + /// Database user ID we're connected under, if any. + [[nodiscard]] char const *username() const; + + /// Address of server, or nullptr if none specified (i.e. default or local) + [[nodiscard]] char const *hostname() const; + + /// Server port number we're connected to. + [[nodiscard]] char const *port() const; + + /// Process ID for backend process, or 0 if inactive. 
+ [[nodiscard]] int PQXX_PURE backendpid() const &noexcept; + + /// Socket currently used for connection, or -1 for none. Use with care! + /** Query the current socket number. This is intended for event loops based + * on functions such as select() or poll(), where you're waiting for any of + * multiple file descriptors to become ready for communication. + * + * Please try to stay away from this function. It is really only meant for + * event loops that need to wait on more than one file descriptor. If all + * you need is to block until a notification arrives, for instance, use + * await_notification(). If you want to issue queries and retrieve results + * in nonblocking fashion, check out the pipeline class. + */ + [[nodiscard]] int PQXX_PURE sock() const &noexcept; + + /// What version of the PostgreSQL protocol is this connection using? + /** The answer can be 0 (when there is no connection); 3 for protocol 3.0; or + * possibly higher values as newer protocol versions come into use. + */ + [[nodiscard]] int PQXX_PURE protocol_version() const noexcept; + + /// What version of the PostgreSQL server are we connected to? + /** The result is a bit complicated: each of the major, medium, and minor + * release numbers is written as a two-digit decimal number, and the three + * are then concatenated. Thus server version 9.4.2 will be returned as the + * decimal number 90402. If there is no connection to the server, this + * returns zero. + * + * @warning When writing version numbers in your code, don't add zero at the + * beginning! Numbers beginning with zero are interpreted as octal (base-8) + * in C++. Thus, 070402 is not the same as 70402, and 080000 is not a number + * at all because there is no digit "8" in octal notation. Use strictly + * decimal notation when it comes to these version numbers. + */ + [[nodiscard]] int PQXX_PURE server_version() const noexcept; + //@} + + /// @name Text encoding + /** + * Each connection is governed by a "client encoding," which dictates how + * strings and other text is represented in bytes. The database server will + * send text data to you in this encoding, and you should use it for the + * queries and data which you send to the server. + * + * Search the PostgreSQL documentation for "character set encodings" to find + * out more about the available encodings, how to extend them, and how to use + * them. Not all server-side encodings are compatible with all client-side + * encodings or vice versa. + * + * Encoding names are case-insensitive, so e.g. "UTF8" is equivalent to + * "utf8". + * + * You can change the client encoding, but this may not work when the + * connection is in a special state, such as when streaming a table. It's + * not clear what happens if you change the encoding during a transaction, + * and then abort the transaction. + */ + //@{ + /// Get client-side character encoding, by name. + [[nodiscard]] std::string get_client_encoding() const; + + /// Set client-side character encoding, by name. + /** + * @param encoding Name of the character set encoding to use. + */ + void set_client_encoding(zview encoding) & + { + set_client_encoding(encoding.c_str()); + } + + /// Set client-side character encoding, by name. + /** + * @param encoding Name of the character set encoding to use. + */ + void set_client_encoding(char const encoding[]) &; + + /// Get the connection's encoding, as a PostgreSQL-defined code. + [[nodiscard]] int PQXX_PRIVATE encoding_id() const; + + //@} + + /// Set session variable, using SQL's `SET` command. 
+ /** @deprecated To set a session variable, use @ref set_session_var. To set + * a transaction-local variable, execute an SQL `SET` command. + * + * @warning When setting a string value, you must escape and quote it first. + * Use the @ref quote() function to do that. + * + * @warning This executes an SQL query, so do not get or set variables while + * a table stream or pipeline is active on the same connection. + * + * @param var Variable to set. + * @param value New value for Var. This can be any SQL expression. If it's + * a string, be sure that it's properly escaped and quoted. + */ + [[deprecated("To set session variables, use set_session_var.")]] void + set_variable(std::string_view var, std::string_view value) &; + + /// Set one of the session variables to a new value. + /** This executes SQL, so do not do it while a pipeline or stream is active + * on the connection. + * + * The value you set here will last for the rest of the connection's + * duration, or until you set a new value. + * + * If you set the value while in a @ref dbtransaction (i.e. any transaction + * that is not a @ref nontransaction), then rolling back the transaction will + * undo the change. + * + * All applies to setting _session_ variables. You can also set the same + * variables as _local_ variables, in which case they will always revert to + * their previous value when the transaction ends (or when you overwrite them + * of course). To set a local variable, simply execute an SQL statement + * along the lines of "`SET LOCAL var = 'value'`" inside your transaction. + * + * @param var The variable to set. + * @param value The new value for the variable. + * @throw @ref variable_set_to_null if the value is null; this is not + * allowed. + */ + template + void set_session_var(std::string_view var, TYPE const &value) & + { + if constexpr (nullness::has_null) + { + if (nullness::is_null(value)) + throw variable_set_to_null{ + internal::concat("Attempted to set variable ", var, " to null.")}; + } + exec(internal::concat("SET ", quote_name(var), "=", quote(value))); + } + + /// Read session variable, using SQL's `SHOW` command. + /** @warning This executes an SQL query, so do not get or set variables while + * a table stream or pipeline is active on the same connection. + */ + [[deprecated("Use get_var instead.")]] std::string + get_variable(std::string_view); + + /// Read currently applicable value of a variable. + /** This function executes an SQL statement, so it won't work while a + * @ref pipeline or query stream is active on the connection. + * + * @return a blank `std::optional` if the variable's value is null, or its + * string value otherwise. + */ + std::string get_var(std::string_view var); + + /// Read currently applicable value of a variable. + /** This function executes an SQL statement, so it won't work while a + * @ref pipeline or query stream is active on the connection. + * + * If there is any possibility that the variable is null, ensure that `TYPE` + * can represent null values. + */ + template TYPE get_var_as(std::string_view var) + { + return from_string(get_var(var)); + } + + /** + * @name Notifications and Receivers + */ + //@{ + /// Check for pending notifications and take appropriate action. + /** This does not block. To wait for incoming notifications, either call + * await_notification() (it calls this function); or wait for incoming data + * on the connection's socket (i.e. wait to read), and then call this + * function repeatedly until it returns zero. 
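For instance, setting and reading back a session variable with the calls above might look like this (using `application_name` purely as an example variable):

```cxx
#include <string>
#include <pqxx/pqxx>

void tag_session(pqxx::connection &cx)
{
  // Lasts for the rest of the session (or until set again); rolled back if
  // set inside a dbtransaction that later aborts.
  cx.set_session_var("application_name", std::string{"my_tool"});

  std::string const name{cx.get_var("application_name")};
}
```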
After that, there are no more + * pending notifications so you may want to wait again. + * + * If any notifications are pending when you call this function, it + * processes them by finding any receivers that match the notification string + * and invoking those. If no receivers match, there is nothing to invoke but + * we do consider the notification processed. + * + * If any of the client-registered receivers throws an exception, the + * function will report it using the connection's errorhandlers. It does not + * re-throw the exceptions. + * + * @return Number of notifications processed. + */ + int get_notifs(); + + /// Wait for a notification to come in. + /** There are other events that will also terminate the wait, such as the + * backend failing. It will also wake up periodically. + * + * If a notification comes in, the call will process it, along with any other + * notifications that may have been pending. + * + * To wait for notifications into your own event loop instead, wait until + * there is incoming data on the connection's socket to be read, then call + * @ref get_notifs() repeatedly until it returns zero. + * + * @return Number of notifications processed. + */ + int await_notification(); + + /// Wait for a notification to come in, or for given timeout to pass. + /** There are other events that will also terminate the wait, such as the + * backend failing, or timeout expiring. + * + * If a notification comes in, the call will process it, along with any other + * notifications that may have been pending. + * + * To wait for notifications into your own event loop instead, wait until + * there is incoming data on the connection's socket to be read, then call + * @ref get_notifs repeatedly until it returns zero. + * + * @return Number of notifications processed + */ + int await_notification(std::time_t seconds, long microseconds); + //@} + + /** + * @name Password encryption + * + * Use this when setting a new password for the user if password encryption + * is enabled. Inputs are the SQL name for the user for whom you with to + * encrypt a password; the plaintext password; and the hash algorithm. + * + * The algorithm must be one of "md5", "scram-sha-256" (introduced in + * PostgreSQL 10), or `nullptr`. If the pointer is null, this will query + * the `password_encryption setting` from the server, and use the default + * algorithm as defined there. + * + * @return encrypted version of the password, suitable for encrypted + * PostgreSQL authentication. + * + * Thus you can change a user's password with: + * ```cxx + * void setpw(transaction_base &t, string const &user, string const &pw) + * { + * t.exec0("ALTER USER " + user + " " + * "PASSWORD '" + t.conn().encrypt_password(user,pw) + "'"); + * } + * ``` + * + * When building this against a libpq older than version 10, this will use + * an older function which only supports md5. In that case, requesting a + * different algorithm than md5 will result in a @ref feature_not_supported + * exception. + */ + //@{ + /// Encrypt a password for a given user. + [[nodiscard]] std::string + encrypt_password(zview user, zview password, zview algorithm) + { + return encrypt_password(user.c_str(), password.c_str(), algorithm.c_str()); + } + /// Encrypt a password for a given user. + [[nodiscard]] std::string encrypt_password( + char const user[], char const password[], char const *algorithm = nullptr); + //@} + + /** + * @name Prepared statements + * + * PostgreSQL supports prepared SQL statements, i.e. 
statements that you can + * register under a name you choose, optimized once by the backend, and + * executed any number of times under the given name. + * + * Prepared statement definitions are not sensitive to transaction + * boundaries. A statement defined inside a transaction will remain defined + * outside that transaction, even if the transaction itself is subsequently + * aborted. Once a statement has been prepared, it will only go away if you + * close the connection or explicitly "unprepare" the statement. + * + * Use the `pqxx::transaction_base::exec_prepared` functions to execute a + * prepared statement. See @ref prepared for a full discussion. + * + * @warning Using prepared statements can save time, but if your statement + * takes parameters, it may also make your application significantly slower! + * The reason is that the server works out a plan for executing the query + * when you prepare it. At that time, of course it does not know the values + * for the parameters that you will pass. If you execute a query without + * preparing it, then the server works out the plan on the spot, with full + * knowledge of the parameter values. + * + * A statement's definition can refer to its parameters as `$1`, `$2`, etc. + * The first parameter you pass to the call provides a value for `$1`, and + * so on. + * + * Here's an example of how to use prepared statements. + * + * ```cxx + * using namespace pqxx; + * void foo(connection &c) + * { + * c.prepare("findtable", "select * from pg_tables where name=$1"); + * work tx{c}; + * result r = tx.exec_prepared("findtable", "mytable"); + * if (std::empty(r)) throw runtime_error{"mytable not found!"}; + * } + * ``` + */ + //@{ + + /// Define a prepared statement. + /** + * @param name unique name for the new prepared statement. + * @param definition SQL statement to prepare. + */ + void prepare(zview name, zview definition) & + { + prepare(name.c_str(), definition.c_str()); + } + + /** + * @param name unique name for the new prepared statement. + * @param definition SQL statement to prepare. + */ + void prepare(char const name[], char const definition[]) &; + + /// Define a nameless prepared statement. + /** + * This can be useful if you merely want to pass large binary parameters to a + * statement without otherwise wishing to prepare it. If you use this + * feature, always keep the definition and the use close together to avoid + * the nameless statement being redefined unexpectedly by code somewhere + * else. + */ + void prepare(char const definition[]) &; + void prepare(zview definition) & { return prepare(definition.c_str()); } + + /// Drop prepared statement. + void unprepare(std::string_view name); + + //@} + + // C++20: constexpr. Breaks ABI. + /// Suffix unique number to name to make it unique within session context. + /** Used internally to generate identifiers for SQL objects (such as cursors + * and nested transactions) based on a given human-readable base name. + */ + [[nodiscard]] std::string adorn_name(std::string_view); + + /** + * @defgroup escaping-functions String-escaping functions + */ + //@{ + + /// Escape string for use as SQL string literal on this connection. + /** @warning This accepts a length, and it does not require a terminating + * zero byte. But if there is a zero byte, escaping stops there even if + * it's not at the end of the string! 
+ */ + [[deprecated("Use std::string_view or pqxx:zview.")]] std::string + esc(char const text[], std::size_t maxlen) const + { + return esc(std::string_view{text, maxlen}); + } + + /// Escape string for use as SQL string literal on this connection. + [[nodiscard]] std::string esc(char const text[]) const + { + return esc(std::string_view{text}); + } + +#if defined(PQXX_HAVE_SPAN) + /// Escape string for use as SQL string literal, into `buffer`. + /** Use this variant when you want to re-use the same buffer across multiple + * calls. If that's not the case, or convenience and simplicity are more + * important, use the single-argument variant. + * + * For every byte in `text`, there must be at least 2 bytes of space in + * `buffer`; plus there must be one byte of space for a trailing zero. + * Throws @ref range_error if this space is not available. + * + * Returns a reference to the escaped string, which is actually stored in + * `buffer`. + */ + [[nodiscard]] std::string_view + esc(std::string_view text, std::span buffer) + { + auto const size{std::size(text)}, space{std::size(buffer)}; + auto const needed{2 * size + 1}; + if (space < needed) + throw range_error{internal::concat( + "Not enough room to escape string of ", size, " byte(s): need ", + needed, " bytes of buffer space, but buffer size is ", space, ".")}; + auto const data{buffer.data()}; + return {data, esc_to_buf(text, data)}; + } +#endif + + /// Escape string for use as SQL string literal on this connection. + /** @warning This is meant for text strings only. It cannot contain bytes + * whose value is zero ("nul bytes"). + */ + [[nodiscard]] std::string esc(std::string_view text) const; + +#if defined(PQXX_HAVE_CONCEPTS) + /// Escape binary string for use as SQL string literal on this connection. + /** This is identical to `esc_raw(data)`. */ + template [[nodiscard]] std::string esc(DATA const &data) const + { + return esc_raw(data); + } +#endif + +#if defined(PQXX_HAVE_CONCEPTS) && defined(PQXX_HAVE_SPAN) + /// Escape binary string for use as SQL string literal, into `buffer`. + /** Use this variant when you want to re-use the same buffer across multiple + * calls. If that's not the case, or convenience and simplicity are more + * important, use the single-argument variant. + * + * For every byte in `data`, there must be at least two bytes of space in + * `buffer`; plus there must be two bytes of space for a header and one for + * a trailing zero. Throws @ref range_error if this space is not available. + * + * Returns a reference to the escaped string, which is actually stored in + * `buffer`. + */ + template + [[nodiscard]] zview esc(DATA const &data, std::span buffer) const + { + auto const size{std::size(data)}, space{std::size(buffer)}; + auto const needed{internal::size_esc_bin(std::size(data))}; + if (space < needed) + throw range_error{internal::concat( + "Not enough room to escape binary string of ", size, " byte(s): need ", + needed, " bytes of buffer space, but buffer size is ", space, ".")}; + + std::basic_string_view view{std::data(data), std::size(data)}; + auto const out{std::data(buffer)}; + // Actually, in the modern format, we know beforehand exactly how many + // bytes we're going to fill. Just leave out the trailing zero. + internal::esc_bin(view, out); + return zview{out, needed - 1}; + } +#endif + + /// Escape binary string for use as SQL string literal on this connection. 
+ [[deprecated("Use std::byte for binary data.")]] std::string + esc_raw(unsigned char const bin[], std::size_t len) const; + + /// Escape binary string for use as SQL string literal on this connection. + /** You can also just use @ref esc with a binary string. */ + [[nodiscard]] std::string esc_raw(std::basic_string_view) const; + +#if defined(PQXX_HAVE_SPAN) + /// Escape binary string for use as SQL string literal, into `buffer`. + /** You can also just use @ref esc with a binary string. */ + [[nodiscard]] std::string + esc_raw(std::basic_string_view, std::span buffer) const; +#endif + +#if defined(PQXX_HAVE_CONCEPTS) + /// Escape binary string for use as SQL string literal on this connection. + /** You can also just use @ref esc with a binary string. */ + template + [[nodiscard]] std::string esc_raw(DATA const &data) const + { + return esc_raw( + std::basic_string_view{std::data(data), std::size(data)}); + } +#endif + +#if defined(PQXX_HAVE_CONCEPTS) && defined(PQXX_HAVE_SPAN) + /// Escape binary string for use as SQL string literal, into `buffer`. + template + [[nodiscard]] zview esc_raw(DATA const &data, std::span buffer) const + { + return this->esc(binary_cast(data), buffer); + } +#endif + + /// Unescape binary data, e.g. from a table field or notification payload. + /** Takes a binary string as escaped by PostgreSQL, and returns a restored + * copy of the original binary data. + */ + [[nodiscard, deprecated("Use unesc_bin() instead.")]] std::string + unesc_raw(zview text) const + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return unesc_raw(text.c_str()); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Unescape binary data, e.g. from a table field or notification payload. + /** Takes a binary string as escaped by PostgreSQL, and returns a restored + * copy of the original binary data. + */ + [[nodiscard, deprecated("Use unesc_bin() instead.")]] std::string + unesc_raw(char const text[]) const; + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Unescape binary data, e.g. from a table field or notification payload. + /** Takes a binary string as escaped by PostgreSQL, and returns a restored + * copy of the original binary data. + * + * (The data must be encoded in PostgreSQL's "hex" format. The legacy + * "bytea" escape format, used prior to PostgreSQL 9.0, is no longer + * supported.) + */ + [[nodiscard]] std::basic_string + unesc_bin(std::string_view text) const + { + std::basic_string buf; + buf.resize(pqxx::internal::size_unesc_bin(std::size(text))); + pqxx::internal::unesc_bin(text, buf.data()); + return buf; + } + + /// Escape and quote a string of binary data. + [[deprecated("Use quote(std::basic_string_view).")]] std::string + quote_raw(unsigned char const bin[], std::size_t len) const; + + /// Escape and quote a string of binary data. + std::string quote_raw(std::basic_string_view) const; + +#if defined(PQXX_HAVE_CONCEPTS) + /// Escape and quote a string of binary data. + /** You can also just use @ref quote with binary data. */ + template + [[nodiscard]] std::string quote_raw(DATA const &data) const + { + return quote_raw( + std::basic_string_view{std::data(data), std::size(data)}); + } +#endif + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Escape and quote an SQL identifier for use in a query. + [[nodiscard]] std::string quote_name(std::string_view identifier) const; + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Escape and quote a table name. 
+ /** When passing just a table name, this is just another name for + * @ref quote_name. + */ + [[nodiscard]] std::string quote_table(std::string_view name) const; + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Escape and quote a table path. + /** A table path consists of a table name, optionally prefixed by a schema + * name; and if both are given, they are in turn optionally prefixed by a + * database name. + * + * Each portion of the path (database name, schema name, table name) will be + * quoted separately, and they will be joined together by dots. So for + * example, `myschema.mytable` will become `"myschema"."mytable"`. + */ + [[nodiscard]] std::string quote_table(table_path) const; + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Quote and comma-separate a series of column names. + /** Use this to save a bit of work in cases where you repeatedly need to pass + * the same list of column names, e.g. with @ref stream_to and @ref + * stream_from. Some functions that need to quote the columns list + * internally, will have a "raw" alternative which let you do the quoting + * yourself. It's a bit of extra work, but it can in rare cases let you + * eliminate some duplicate work in quoting them repeatedly. + */ + template + inline std::string quote_columns(STRINGS const &columns) const; + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Represent object as SQL string, including quoting & escaping. + /** + * Recognises nulls and represents them as SQL nulls. They get no quotes. + */ + template + [[nodiscard]] inline std::string quote(T const &t) const; + + [[deprecated("Use std::byte for binary data.")]] std::string + quote(binarystring const &) const; + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Escape and quote binary data for use as a BYTEA value in SQL statement. + [[nodiscard]] std::string + quote(std::basic_string_view bytes) const; + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Escape string for literal LIKE match. + /** Use this when part of an SQL "LIKE" pattern should match only as a + * literal string, not as a pattern, even if it contains "%" or "_" + * characters that would normally act as wildcards. + * + * The string does not get string-escaped or quoted. You do that later. + * + * For instance, let's say you have a string `name` entered by the user, + * and you're searching a `file` column for items that match `name` + * followed by a dot and three letters. Even if `name` contains wildcard + * characters "%" or "_", you only want those to match literally, so "_" + * only matches "_" and "%" only matches a single "%". + * + * You do that by "like-escaping" `name`, appending the wildcard pattern + * `".___"`, and finally, escaping and quoting the result for inclusion in + * your query: + * + * ```cxx + * tx.exec( + * "SELECT file FROM item WHERE file LIKE " + + * tx.quote(tx.esc_like(name) + ".___")); + * ``` + * + * The SQL "LIKE" operator also lets you choose your own escape character. + * This is supported, but must be a single-byte character. + */ + [[nodiscard]] std::string + esc_like(std::string_view text, char escape_char = '\\') const; + //@} + + /// Attempt to cancel the ongoing query, if any. + /** You can use this from another thread, and/or while a query is executing + * in a pipeline, but it's up to you to ensure that you're not canceling the + * wrong query. This may involve locking. 
+ */ + void cancel_query(); + +#if defined(_WIN32) || __has_include() + /// Set socket to blocking (true) or nonblocking (false). + /** @warning Do not use this unless you _really_ know what you're doing. + * @warning This function is available on most systems, but not necessarily + * all. + */ + void set_blocking(bool block) &; +#endif // defined(_WIN32) || __has_include() + + /// Set session verbosity. + /** Set the verbosity of error messages to "terse", "normal" (the default), + * or "verbose." + * + * If "terse", returned messages include severity, primary text, and + * position only; this will normally fit on a single line. "normal" produces + * messages that include the above plus any detail, hint, or context fields + * (these might span multiple lines). "verbose" includes all available + * fields. + */ + void set_verbosity(error_verbosity verbosity) &noexcept; + + /// Return pointers to the active errorhandlers. + /** The entries are ordered from oldest to newest handler. + * + * You may use this to find errorhandlers that your application wants to + * delete when destroying the connection. Be aware, however, that libpqxx + * may also add errorhandlers of its own, and those will be included in the + * list. If this is a problem for you, derive your errorhandlers from a + * custom base class derived from pqxx::errorhandler. Then use dynamic_cast + * to find which of the error handlers are yours. + * + * The pointers point to the real errorhandlers. The container it returns + * however is a copy of the one internal to the connection, not a reference. + */ + [[nodiscard]] std::vector get_errorhandlers() const; + + /// Return a connection string encapsulating this connection's options. + /** The connection must be currently open for this to work. + * + * Returns a reconstruction of this connection's connection string. It may + * not exactly match the connection string you passed in when creating this + * connection. + */ + [[nodiscard]] std::string connection_string() const; + + /// Explicitly close the connection. + /** The destructor will do this for you automatically. Still, there is a + * reason to `close()` objects explicitly where possible: if an error should + * occur while closing, `close()` can throw an exception. A destructor + * cannot. + * + * Closing a connection is idempotent. Closing a connection that's already + * closed does nothing. + */ + void close(); + + /// Seize control of a raw libpq connection. + /** @warning Do not do this. Please. It's for very rare, very specific + * use-cases. The mechanism may change (or break) in unexpected ways in + * future versions. + * + * @param raw_conn a raw libpq `PQconn` pointer. + */ + static connection seize_raw_connection(internal::pq::PGconn *raw_conn) + { + return connection{raw_conn}; + } + + /// Release the raw connection without closing it. + /** @warning Do not do this. It's for very rare, very specific use-cases. + * The mechanism may change (or break) in unexpected ways in future versions. + * + * The `connection` object becomes unusable after this. + */ + internal::pq::PGconn *release_raw_connection() && + { + return std::exchange(m_conn, nullptr); + } + +private: + friend class connecting; + enum connect_mode + { + connect_nonblocking + }; + connection(connect_mode, zview connection_string); + + /// For use by @ref seize_raw_connection. + explicit connection(internal::pq::PGconn *raw_conn) : m_conn{raw_conn} {} + + /// Poll for ongoing connection, try to progress towards completion. 
+ /** Returns a pair of "now please wait to read data from socket" and "now + * please wait to write data to socket." Both will be false when done. + * + * Throws an exception if polling indicates that the connection has failed. + */ + std::pair poll_connect(); + + // Initialise based on connection string. + void init(char const options[]); + // Initialise based on parameter names and values. + void init(char const *params[], char const *values[]); + void complete_init(); + + result make_result( + internal::pq::PGresult *pgr, std::shared_ptr const &query, + std::string_view desc = ""sv); + + void PQXX_PRIVATE set_up_state(); + + int PQXX_PRIVATE PQXX_PURE status() const noexcept; + + /// Escape a string, into a buffer allocated by the caller. + /** The buffer must have room for at least `2*std::size(text) + 1` bytes. + * + * Returns the number of bytes written, including the trailing zero. + */ + std::size_t esc_to_buf(std::string_view text, char *buf) const; + + friend class internal::gate::const_connection_largeobject; + char const *PQXX_PURE err_msg() const noexcept; + + void PQXX_PRIVATE process_notice_raw(char const msg[]) noexcept; + + result exec_prepared(std::string_view statement, internal::c_params const &); + + /// Throw @ref usage_error if this connection is not in a movable state. + void check_movable() const; + /// Throw @ref usage_error if not in a state where it can be move-assigned. + void check_overwritable() const; + + friend class internal::gate::connection_errorhandler; + void PQXX_PRIVATE register_errorhandler(errorhandler *); + void PQXX_PRIVATE unregister_errorhandler(errorhandler *) noexcept; + + friend class internal::gate::connection_transaction; + result exec(std::string_view, std::string_view = ""sv); + result + PQXX_PRIVATE exec(std::shared_ptr, std::string_view = ""sv); + void PQXX_PRIVATE register_transaction(transaction_base *); + void PQXX_PRIVATE unregister_transaction(transaction_base *) noexcept; + + friend class internal::gate::connection_stream_from; + std::pair>, std::size_t> + PQXX_PRIVATE read_copy_line(); + + friend class internal::gate::connection_stream_to; + void PQXX_PRIVATE write_copy_line(std::string_view); + void PQXX_PRIVATE end_copy_write(); + + friend class internal::gate::connection_largeobject; + internal::pq::PGconn *raw_connection() const { return m_conn; } + + friend class internal::gate::connection_notification_receiver; + void add_receiver(notification_receiver *); + void remove_receiver(notification_receiver *) noexcept; + + friend class internal::gate::connection_pipeline; + void PQXX_PRIVATE start_exec(char const query[]); + bool PQXX_PRIVATE consume_input() noexcept; + bool PQXX_PRIVATE is_busy() const noexcept; + internal::pq::PGresult *get_result(); + + friend class internal::gate::connection_dbtransaction; + friend class internal::gate::connection_sql_cursor; + + result exec_params(std::string_view query, internal::c_params const &args); + + /// Connection handle. + internal::pq::PGconn *m_conn = nullptr; + + /// Active transaction on connection, if any. + /** We don't use this for anything, except to check for open transactions + * when we close the connection or start a new transaction. + * + * We also don't allow move construction or move assignment while there's a + * transaction, since moving the connection in that case would leave one or + * more pointers back from the transaction to the connection dangling. 
+ */ + transaction_base const *m_trans = nullptr; + + std::list m_errorhandlers; + + using receiver_list = + std::multimap; + /// Notification receivers. + receiver_list m_receivers; + + /// Unique number to use as suffix for identifiers (see adorn_name()). + int m_unique_id = 0; +}; + + +/// @deprecated Old base class for connection. They are now the same class. +using connection_base = connection; + + +/// An ongoing, non-blocking stepping stone to a connection. +/** Use this when you want to create a connection to the database, but without + * blocking your whole thread. It is only available on systems that have + * the `` header, and Windows. + * + * Connecting in this way is probably not "faster" (it's more complicated and + * has some extra overhead), but in some situations you can use it to make your + * application as a whole faster. It all depends on having other useful work + * to do in the same thread, and being able to wait on a socket. If you have + * other I/O going on at the same time, your event loop can wait for both the + * libpqxx socket and your own sockets, and wake up whenever any of them is + * ready to do work. + * + * Connecting in this way is not properly "asynchronous;" it's merely + * "nonblocking." This means it's not a super-high-performance mechanism like + * you might get with e.g. `io_uring`. In particular, if we need to look up + * the database hostname in DNS, that will happen synchronously. + * + * To use this, create the `connecting` object, passing a connection string. + * Then loop: If @ref wait_to_read returns true, wait for the socket to have + * incoming data on it. If @ref wait_to_write returns true, wait for the + * socket to be ready for writing. Then call @ref process to process any + * incoming or outgoing data. Do all of this until @ref done returns true (or + * there is an exception). Finally, call @ref produce to get the completed + * connection. + * + * For example: + * + * ```cxx + * pqxx::connecting cg{}; + * + * // Loop until we're done connecting. + * while (!cg.done()) + * { + * wait_for_fd(cg.sock(), cg.wait_to_read(), cg.wait_to_write()); + * cg.process(); + * } + * + * pqxx::connection conn = std::move(cg).produce(); + * + * // At this point, conn is a working connection. You can no longer use + * // cg at all. + * ``` + */ +class PQXX_LIBEXPORT connecting +{ +public: + /// Start connecting. + connecting(zview connection_string = ""_zv); + + connecting(connecting const &) = delete; + connecting(connecting &&) = default; + connecting &operator=(connecting const &) = delete; + connecting &operator=(connecting &&) = default; + + /// Get the socket. The socket may change during the connection process. + [[nodiscard]] int sock() const &noexcept { return m_conn.sock(); } + + /// Should we currently wait to be able to _read_ from the socket? + [[nodiscard]] constexpr bool wait_to_read() const &noexcept + { + return m_reading; + } + + /// Should we currently wait to be able to _write_ to the socket? + [[nodiscard]] constexpr bool wait_to_write() const &noexcept + { + return m_writing; + } + + /// Progress towards completion (but don't block). + void process() &; + + /// Is our connection finished? + [[nodiscard]] constexpr bool done() const &noexcept + { + return not m_reading and not m_writing; + } + + /// Produce the completed connection object. + /** Use this only once, after @ref done returned `true`. Once you have + * called this, the `connecting` instance has no more use or meaning. 
You + * can't call any of its member functions afterwards. + * + * This member function is rvalue-qualified, meaning that you can only call + * it on an rvalue instance of the class. If what you have is not an rvalue, + * turn it into one by wrapping it in `std::move()`. + */ + [[nodiscard]] connection produce() &&; + +private: + connection m_conn; + bool m_reading{false}; + bool m_writing{true}; +}; + + +template inline std::string connection::quote(T const &t) const +{ + if constexpr (nullness::always_null) + { + return "NULL"; + } + else + { + if (is_null(t)) + return "NULL"; + auto const text{to_string(t)}; + + // Okay, there's an easy way to do this and there's a hard way. The easy + // way was "quote, esc(to_string(t)), quote". I'm going with the hard way + // because it's going to save some string manipulation that will probably + // incur some unnecessary memory allocations and deallocations. + std::string buf{'\''}; + buf.resize(2 + 2 * std::size(text) + 1); + auto const content_bytes{esc_to_buf(text, buf.data() + 1)}; + auto const closing_quote{1 + content_bytes}; + buf[closing_quote] = '\''; + auto const end{closing_quote + 1}; + buf.resize(end); + return buf; + } +} + + +template +inline std::string connection::quote_columns(STRINGS const &columns) const +{ + return separated_list( + ","sv, std::cbegin(columns), std::cend(columns), + [this](auto col) { return this->quote_name(*col); }); +} + + +#if defined(PQXX_HAVE_CONCEPTS) +template +inline connection::connection(MAPPING const ¶ms) +{ + check_version(); + + std::vector keys, values; + if constexpr (std::ranges::sized_range) + { + auto const size{std::ranges::size(params) + 1}; + keys.reserve(size); + values.reserve(size); + } + for (auto const &[key, value] : params) + { + keys.push_back(internal::as_c_string(key)); + values.push_back(internal::as_c_string(value)); + } + keys.push_back(nullptr); + values.push_back(nullptr); + init(std::data(keys), std::data(values)); +} +#endif // PQXX_HAVE_CONCEPTS +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/cursor b/ext/libpqxx-7.7.3/include/pqxx/cursor new file mode 100644 index 000000000..e20b3a4fa --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/cursor @@ -0,0 +1,8 @@ +/** Definition of the iterator/container-style cursor classes. + * + * C++-style wrappers for SQL cursors + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/cursor.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/cursor.hxx b/ext/libpqxx-7.7.3/include/pqxx/cursor.hxx new file mode 100644 index 000000000..b392e2407 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/cursor.hxx @@ -0,0 +1,483 @@ +/* Definition of the iterator/container-style cursor classes. + * + * C++-style wrappers for SQL cursors. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/cursor instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_CURSOR +#define PQXX_H_CURSOR + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." 
+#endif + +#include +#include + +#include "pqxx/result.hxx" +#include "pqxx/transaction_base.hxx" + + +namespace pqxx +{ +/// Common definitions for cursor types +/** In C++ terms, fetches are always done in pre-increment or pre-decrement + * fashion--i.e. the result does not include the row the cursor is on at the + * beginning of the fetch, and the cursor ends up being positioned on the last + * row in the result. + * + * There are singular positions akin to `end()` at both the beginning and the + * end of the cursor's range of movement, although these fit in so naturally + * with the semantics that one rarely notices them. The cursor begins at the + * first of these, but any fetch in the forward direction will move the cursor + * off this position and onto the first row before returning anything. + */ +class PQXX_LIBEXPORT cursor_base +{ +public: + using size_type = result_size_type; + using difference_type = result_difference_type; + + /// Cursor access-pattern policy + /** Allowing a cursor to move forward only can result in better performance, + * so use this access policy whenever possible. + */ + enum access_policy + { + /// Cursor can move forward only + forward_only, + /// Cursor can move back and forth + random_access + }; + + /// Cursor update policy + /** + * @warning Not all PostgreSQL versions support updatable cursors. + */ + enum update_policy + { + /// Cursor can be used to read data but not to write + read_only, + /// Cursor can be used to update data as well as read it + update + }; + + /// Cursor destruction policy + /** The normal thing to do is to make a cursor object the owner of the SQL + * cursor it represents. There may be cases, however, where a cursor needs + * to persist beyond the end of the current transaction (and thus also beyond + * the lifetime of the cursor object that created it!), where it can be + * "adopted" into a new cursor object. See the basic_cursor documentation + * for an explanation of cursor adoption. + * + * If a cursor is created with "loose" ownership policy, the object + * representing the underlying SQL cursor will not take the latter with it + * when its own lifetime ends, nor will its originating transaction. + * + * @warning Use this feature with care and moderation. Only one cursor + * object should be responsible for any one underlying SQL cursor at any + * given time. + */ + enum ownership_policy + { + /// Destroy SQL cursor when cursor object is closed at end of transaction + owned, + /// Leave SQL cursor in existence after close of object and transaction + loose + }; + + cursor_base() = delete; + cursor_base(cursor_base const &) = delete; + cursor_base &operator=(cursor_base const &) = delete; + + /** + * @name Special movement distances. + */ + //@{ + + // TODO: Make constexpr inline (but breaks ABI). + /// Special value: read until end. + /** @return Maximum value for result::difference_type, so the cursor will + * attempt to read the largest possible result set. + */ + [[nodiscard]] static difference_type all() noexcept; + + /// Special value: read one row only. + /** @return Unsurprisingly, 1. + */ + [[nodiscard]] static constexpr difference_type next() noexcept { return 1; } + + /// Special value: read backwards, one row only. + /** @return Unsurprisingly, -1. + */ + [[nodiscard]] static constexpr difference_type prior() noexcept + { + return -1; + } + + // TODO: Make constexpr inline (but breaks ABI). + /// Special value: read backwards from current position back to origin. 
+ /** @return Minimum value for result::difference_type. + */ + [[nodiscard]] static difference_type backward_all() noexcept; + + //@} + + /// Name of underlying SQL cursor + /** + * @returns Name of SQL cursor, which may differ from original given name. + * @warning Don't use this to access the SQL cursor directly without going + * through the provided wrapper classes! + */ + [[nodiscard]] constexpr std::string const &name() const noexcept + { + return m_name; + } + +protected: + cursor_base(connection &, std::string_view Name, bool embellish_name = true); + + std::string const m_name; +}; +} // namespace pqxx + + +#include + + +namespace pqxx +{ +/// "Stateless cursor" class: easy API for retrieving parts of result sets +/** This is a front-end for SQL cursors, but with a more C++-like API. + * + * Actually, stateless_cursor feels entirely different from SQL cursors. You + * don't keep track of positions, fetches, and moves; you just say which rows + * you want. See the retrieve() member function. + */ +template +class stateless_cursor +{ +public: + using size_type = result_size_type; + using difference_type = result_difference_type; + + /// Create cursor. + /** + * @param tx The transaction within which you want to create the cursor. + * @param query The SQL query whose results the cursor should traverse. + * @param cname A hint for the cursor's name. The actual SQL cursor's name + * will be based on this (though not necessarily identical). + * @param hold Create a `WITH HOLD` cursor? Such cursors stay alive after + * the transaction has ended, so you can continue to use it. + */ + stateless_cursor( + transaction_base &tx, std::string_view query, std::string_view cname, + bool hold) : + m_cur{tx, query, cname, cursor_base::random_access, up, op, hold} + {} + + /// Adopt an existing scrolling SQL cursor. + /** This lets you define a cursor yourself, and then wrap it in a + * libpqxx-managed `stateless_cursor` object. + * + * @param tx The transaction within which you want to manage the cursor. + * @param adopted_cursor Your cursor's SQL name. + */ + stateless_cursor(transaction_base &tx, std::string_view adopted_cursor) : + m_cur{tx, adopted_cursor, op} + { + // Put cursor in known position + m_cur.move(cursor_base::backward_all()); + } + + /// Close this cursor. + /** The destructor will do this for you automatically. + * + * Closing a cursor is idempotent. Closing a cursor that's already closed + * does nothing. + */ + void close() noexcept { m_cur.close(); } + + /// Number of rows in cursor's result set + /** @note This function is not const; it may need to scroll to find the size + * of the result set. + */ + [[nodiscard]] size_type size() + { + return internal::obtain_stateless_cursor_size(m_cur); + } + + /// Retrieve rows from begin_pos (inclusive) to end_pos (exclusive) + /** Rows are numbered starting from 0 to size()-1. + * + * @param begin_pos First row to retrieve. May be one row beyond the end of + * the result set, to avoid errors for empty result sets. Otherwise, must be + * a valid row number in the result set. + * @param end_pos Row up to which to fetch. Rows are returned ordered from + * begin_pos to end_pos, i.e. in ascending order if begin_pos < end_pos but + * in descending order if begin_pos > end_pos. The end_pos may be + * arbitrarily inside or outside the result set; only existing rows are + * included in the result. 
+ */ + result retrieve(difference_type begin_pos, difference_type end_pos) + { + return internal::stateless_cursor_retrieve( + m_cur, result::difference_type(size()), begin_pos, end_pos); + } + + /// Return this cursor's name. + [[nodiscard]] constexpr std::string const &name() const noexcept + { + return m_cur.name(); + } + +private: + internal::sql_cursor m_cur; +}; + + +class icursor_iterator; +} // namespace pqxx + + +namespace pqxx::internal::gate +{ +class icursor_iterator_icursorstream; +class icursorstream_icursor_iterator; +} // namespace pqxx::internal::gate + + +namespace pqxx +{ +/// Simple read-only cursor represented as a stream of results +/** SQL cursors can be tricky, especially in C++ since the two languages seem + * to have been designed on different planets. An SQL cursor has two singular + * positions akin to `end()` on either side of the underlying result set. + * + * These cultural differences are hidden from view somewhat by libpqxx, which + * tries to make SQL cursors behave more like familiar C++ entities such as + * iterators, sequences, streams, and containers. + * + * Data is fetched from the cursor as a sequence of result objects. Each of + * these will contain the number of rows defined as the stream's stride, except + * of course the last block of data which may contain fewer rows. + * + * This class can create or adopt cursors that live outside any backend + * transaction, which your backend version may not support. + */ +class PQXX_LIBEXPORT icursorstream +{ +public: + using size_type = cursor_base::size_type; + using difference_type = cursor_base::difference_type; + + /// Set up a read-only, forward-only cursor. + /** Roughly equivalent to a C++ Standard Library istream, this cursor type + * supports only two operations: reading a block of rows while moving + * forward, and moving forward without reading any data. + * + * @param context Transaction context in which this cursor will be active. + * @param query SQL query whose results this cursor shall iterate. + * @param basename Suggested name for the SQL cursor; the library will append + * a unique code to ensure its uniqueness. + * @param sstride Number of rows to fetch per read operation; must be a + * positive number. + */ + icursorstream( + transaction_base &context, std::string_view query, + std::string_view basename, difference_type sstride = 1); + + /// Adopt existing SQL cursor. Use with care. + /** Forms a cursor stream around an existing SQL cursor, as returned by e.g. + * a server-side function. The SQL cursor will be cleaned up by the stream's + * destructor as if it had been created by the stream; cleaning it up by hand + * or adopting the same cursor twice is an error. + * + * Passing the name of the cursor as a string is not allowed, both to avoid + * confusion with the other constructor and to discourage unnecessary use of + * adopted cursors. + * + * @warning It is technically possible to adopt a "WITH HOLD" cursor, i.e. a + * cursor that stays alive outside its creating transaction. However, any + * cursor stream (including the underlying SQL cursor, naturally) must be + * destroyed before its transaction context object is destroyed. Therefore + * the only way to use SQL's WITH HOLD feature is to adopt the cursor, but + * defer doing so until after entering the transaction context that will + * eventually destroy it. + * + * @param context Transaction context in which this cursor will be active. + * @param cname Result field containing the name of the SQL cursor to adopt. 
+ * @param sstride Number of rows to fetch per read operation; must be a + * positive number. + * @param op Ownership policy. Determines whether the cursor underlying this + * stream will be destroyed when the stream is closed. + */ + icursorstream( + transaction_base &context, field const &cname, difference_type sstride = 1, + cursor_base::ownership_policy op = cursor_base::owned); + + /// Return `true` if this stream may still return more data. + constexpr operator bool() const &noexcept { return not m_done; } + + /// Read new value into given result object; same as operator `>>`. + /** The result set may continue any number of rows from zero to the chosen + * stride, inclusive. An empty result will only be returned if there are no + * more rows to retrieve. + * + * @param res Write the retrieved data into this result object. + * @return Reference to this very stream, to facilitate "chained" invocations + * ("C.get(r1).get(r2);") + */ + icursorstream &get(result &res) + { + res = fetchblock(); + return *this; + } + /// Read new value into given result object; same as `get(result&)`. + /** The result set may continue any number of rows from zero to the chosen + * stride, inclusive. An empty result will only be returned if there are no + * more rows to retrieve. + * + * @param res Write the retrieved data into this result object. + * @return Reference to this very stream, to facilitate "chained" invocations + * ("C >> r1 >> r2;") + */ + icursorstream &operator>>(result &res) { return get(res); } + + /// Move given number of rows forward without reading data. + /** Ignores any stride that you may have set. It moves by a given number of + * rows, not a number of strides. + * + * @return Reference to this stream itself, to facilitate "chained" + * invocations. + */ + icursorstream &ignore(std::streamsize n = 1) &; + + /// Change stride, i.e. the number of rows to fetch per read operation. + /** + * @param stride Must be a positive number. + */ + void set_stride(difference_type stride) &; + [[nodiscard]] constexpr difference_type stride() const noexcept + { + return m_stride; + } + +private: + result fetchblock(); + + friend class internal::gate::icursorstream_icursor_iterator; + size_type forward(size_type n = 1); + void insert_iterator(icursor_iterator *) noexcept; + void remove_iterator(icursor_iterator *) const noexcept; + + void service_iterators(difference_type); + + internal::sql_cursor m_cur; + + difference_type m_stride; + difference_type m_realpos, m_reqpos; + + mutable icursor_iterator *m_iterators; + + bool m_done; +}; + + +/// Approximate istream_iterator for icursorstream. +/** Intended as an implementation of an input_iterator (as defined by the C++ + * Standard Library), this class supports only two basic operations: reading + * the current element, and moving forward. In addition to the minimal + * guarantees for istream_iterators, this class supports multiple successive + * reads of the same position (the current result set is cached in the + * iterator) even after copying and even after new data have been read from the + * stream. This appears to be a requirement for input_iterators. Comparisons + * are also supported in the general case. + * + * The iterator does not care about its own position, however. Moving an + * iterator forward moves the underlying stream forward and reads the data from + * the new stream position, regardless of the iterator's old position in the + * stream. 
+ * + * The stream's stride defines the granularity for all iterator movement or + * access operations, i.e. "ici += 1" advances the stream by one stride's worth + * of rows, and "*ici++" reads one stride's worth of rows from the stream. + * + * @warning Do not read from the underlying stream or its cursor, move its read + * position, or change its stride, between the time the first icursor_iterator + * on it is created and the time its last icursor_iterator is destroyed. + * + * @warning Manipulating these iterators within the context of a single cursor + * stream is not thread-safe. Creating a new iterator, copying one, + * or destroying one affects the stream as a whole. + */ +class PQXX_LIBEXPORT icursor_iterator +{ +public: + using iterator_category = std::input_iterator_tag; + using value_type = result; + using pointer = result const *; + using reference = result const &; + using istream_type = icursorstream; + using size_type = istream_type::size_type; + using difference_type = istream_type::difference_type; + + icursor_iterator() noexcept; + explicit icursor_iterator(istream_type &) noexcept; + icursor_iterator(icursor_iterator const &) noexcept; + ~icursor_iterator() noexcept; + + result const &operator*() const + { + refresh(); + return m_here; + } + result const *operator->() const + { + refresh(); + return &m_here; + } + icursor_iterator &operator++(); + icursor_iterator operator++(int); + icursor_iterator &operator+=(difference_type); + icursor_iterator &operator=(icursor_iterator const &) noexcept; + + [[nodiscard]] bool operator==(icursor_iterator const &rhs) const; + [[nodiscard]] bool operator!=(icursor_iterator const &rhs) const noexcept + { + return not operator==(rhs); + } + [[nodiscard]] bool operator<(icursor_iterator const &rhs) const; + [[nodiscard]] bool operator>(icursor_iterator const &rhs) const + { + return rhs < *this; + } + [[nodiscard]] bool operator<=(icursor_iterator const &rhs) const + { + return not(*this > rhs); + } + [[nodiscard]] bool operator>=(icursor_iterator const &rhs) const + { + return not(*this < rhs); + } + +private: + void refresh() const; + + friend class internal::gate::icursor_iterator_icursorstream; + difference_type pos() const noexcept { return m_pos; } + void fill(result const &); + + icursorstream *m_stream{nullptr}; + result m_here; + difference_type m_pos; + icursor_iterator *m_prev{nullptr}, *m_next{nullptr}; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/dbtransaction b/ext/libpqxx-7.7.3/include/pqxx/dbtransaction new file mode 100644 index 000000000..fa8d26476 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/dbtransaction @@ -0,0 +1,8 @@ +/** pqxx::dbtransaction abstract base class. + * + * pqxx::dbransaction defines a real transaction on the database. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/dbtransaction.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/dbtransaction.hxx b/ext/libpqxx-7.7.3/include/pqxx/dbtransaction.hxx new file mode 100644 index 000000000..d85cb170f --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/dbtransaction.hxx @@ -0,0 +1,70 @@ +/* Definition of the pqxx::dbtransaction abstract base class. + * + * pqxx::dbransaction defines a real transaction on the database. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/dbtransaction instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. 
If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_DBTRANSACTION +#define PQXX_H_DBTRANSACTION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/transaction_base.hxx" + +namespace pqxx +{ +/// Abstract transaction base class: bracket transactions on the database. +/** + * @ingroup transactions + * + * Use a dbtransaction-derived object such as "work" (transaction<>) to enclose + * operations on a database in a single "unit of work." This ensures that the + * whole series of operations either succeeds as a whole or fails completely. + * In no case will it leave half-finished work behind in the database. + * + * Once processing on a transaction has succeeded and any changes should be + * allowed to become permanent in the database, call commit(). If something + * has gone wrong and the changes should be forgotten, call abort() instead. + * If you do neither, an implicit abort() is executed at destruction time. + * + * It is an error to abort a transaction that has already been committed, or to + * commit a transaction that has already been aborted. Aborting an already + * aborted transaction or committing an already committed one is allowed, to + * make error handling easier. Repeated aborts or commits have no effect after + * the first one. + * + * Database transactions are not suitable for guarding long-running processes. + * If your transaction code becomes too long or too complex, consider ways to + * break it up into smaller ones. Unfortunately there is no universal recipe + * for this. + * + * The actual operations for committing/aborting the backend transaction are + * implemented by a derived class. The implementing concrete class must also + * call @ref close from its destructor. + */ +class PQXX_LIBEXPORT PQXX_NOVTABLE dbtransaction : public transaction_base +{ +protected: + /// Begin transaction. + explicit dbtransaction(connection &c) : transaction_base{c} {} + /// Begin transaction. + dbtransaction(connection &c, std::string_view tname) : + transaction_base{c, tname} + {} + /// Begin transaction. + dbtransaction( + connection &c, std::string_view tname, + std::shared_ptr rollback_cmd) : + transaction_base{c, tname, rollback_cmd} + {} +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/doc/accessing-results.md b/ext/libpqxx-7.7.3/include/pqxx/doc/accessing-results.md new file mode 100644 index 000000000..920fb6f3b --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/doc/accessing-results.md @@ -0,0 +1,157 @@ +Accessing results and result rows {#accessing-results} +--------------------------------- + +When you execute a query using one of the transaction `exec` functions, you +normally get a `result` object back. A `result` is a container of `row`s. + +(There are exceptions. The `exec1` functions expect exactly one row of data, +so they return just a `row`, not a full `result`.) + +Result objects are an all-or-nothing affair. The `exec` function waits until +it's received all the result data, and then gives it to you in the form of the +`result`. _(There is a faster, easier way of executing simple queries, so see +"streaming rows" below as well.)_ + +For example, your code might do: + +```cxx + pqxx::result r = tx.exec("SELECT * FROM mytable"); +``` + +Now, how do you access the data inside `r`? + +Result sets act as standard C++ containers of rows. 
Rows act as standard
+C++ containers of fields. So the easiest way to go through them is:
+
+```cxx
+    for (auto const &row: r)
+    {
+      for (auto const &field: row) std::cout << field.c_str() << '\t';
+      std::cout << '\n';
+    }
+```
+
+But results and rows also support other kinds of access. Array-style
+indexing, for instance, such as `r[rownum]`:
+
+```cxx
+    std::size_t const num_rows = std::size(r);
+    for (std::size_t rownum=0u; rownum < num_rows; ++rownum)
+    {
+      pqxx::row const row = r[rownum];
+      std::size_t const num_cols = std::size(row);
+      for (std::size_t colnum=0u; colnum < num_cols; ++colnum)
+      {
+        pqxx::field const field = row[colnum];
+        std::cout << field.c_str() << '\t';
+      }
+
+      std::cout << '\n';
+    }
+```
+
+Every row in the result has the same number of columns, so you don't need to
+look up the number of fields again for each one:
+
+```cxx
+    std::size_t const num_rows = std::size(r);
+    std::size_t const num_cols = r.columns();
+    for (std::size_t rownum=0u; rownum < num_rows; ++rownum)
+    {
+      pqxx::row const row = r[rownum];
+      for (std::size_t colnum=0u; colnum < num_cols; ++colnum)
+      {
+        pqxx::field const field = row[colnum];
+        std::cout << field.c_str() << '\t';
+      }
+
+      std::cout << '\n';
+    }
+```
+
+You can even address a field by indexing the `row` using the field's _name:_
+
+```cxx
+    std::cout << row["salary"] << '\n';
+```
+
+But try not to do that if speed matters, because looking up the column by name
+takes time. At least you'd want to look up the column index before your loop
+and then use numerical indexes inside the loop.
+
+For C++23 or better, there's also a two-dimensional array access operator:
+
+```cxx
+    for (std::size_t rownum=0u; rownum < num_rows; ++rownum)
+    {
+      for (std::size_t colnum=0u; colnum < num_cols; ++colnum)
+        std::cout << r[rownum, colnum].c_str() << '\t';
+      std::cout << '\n';
+    }
+```
+
+And of course you can use classic "begin/end" loops:
+
+```cxx
+    for (auto row = std::begin(r); row != std::end(r); row++)
+    {
+      for (auto field = std::begin(row); field != std::end(row); field++)
+        std::cout << field->c_str() << '\t';
+      std::cout << '\n';
+    }
+```
+
+Result sets are immutable, so all iterators on results and rows are actually
+`const_iterator`s. There are also `const_reverse_iterator` types, which
+iterate backwards from `rbegin()` to `rend()` exclusive.
+
+All these iterator types provide one extra bit of convenience that you won't
+normally find in C++ iterators: referential transparency. You don't need to
+dereference them to get to the row or field they refer to. That is, instead
+of `row->end()` you can also choose to say `row.end()`. Similarly, you
+may prefer `field.c_str()` over `field->c_str()`.
+
+This becomes really helpful with the array-indexing operator. With regular
+C++ iterators you would need ugly expressions like `(*row)[0]` or
+`row->operator[](0)`. With the iterator types defined by the result and
+row classes you can simply say `row[0]`.
+
+
+Streaming rows
+--------------
+
+There's another way to go through the rows coming out of a query. It's
+usually easier and faster, but there are drawbacks.
+
+**One,** you start getting rows before all the data has come in from the
+database. That speeds things up, but what happens if you lose your network
+connection while transferring the data? Your application may already have
+processed some of the data before finding out that the rest isn't coming. If
+that is a problem for your application, streaming may not be the right choice.
+
+**Two,** streaming only works for some types of query. The `stream()` function
+wraps your query in a PostgreSQL `COPY` command, and `COPY` only supports a few
+commands: `SELECT`, `VALUES`, or an `INSERT`, `UPDATE`, or `DELETE` with a
+`RETURNING` clause. See the `COPY` documentation here:
+https://www.postgresql.org/docs/current/sql-copy.html
+
+**Three,** when you convert a field to a "view" type (such as
+`std::string_view` or `std::basic_string_view<std::byte>`), the view points to
+underlying data which only stays valid until you iterate to the next row or
+exit the loop. So if you want to use that data for longer than a single
+iteration of the streaming loop, you'll have to store it somewhere yourself.
+
+Now for the good news. Streaming does make it very easy to query data and loop
+over it:
+
+```cxx
+    for (auto [id, name, x, y] :
+        tx.stream<int, std::string_view, float, float>(
+            "SELECT id, name, x, y FROM point"))
+      process(id + 1, "point-" + name, x * 10.0, y * 10.0);
+```
+
+The conversion to C++ types (here `int`, `std::string_view`, and two `float`s)
+is built into the function. You never even see `row` objects, `field` objects,
+iterators, or conversion methods. You just put in your query and you receive
+your data.
diff --git a/ext/libpqxx-7.7.3/include/pqxx/doc/binary-data.md b/ext/libpqxx-7.7.3/include/pqxx/doc/binary-data.md
new file mode 100644
index 000000000..20da8dc0c
--- /dev/null
+++ b/ext/libpqxx-7.7.3/include/pqxx/doc/binary-data.md
@@ -0,0 +1,56 @@
+Binary data {#binary}
+===========
+
+The database has two ways of storing binary data: `BYTEA` is like a string, but
+containing bytes rather than text characters. And _large objects_ are more
+like a separate table containing binary objects.
+
+Generally you'll want to use `BYTEA` for reasonably-sized values, and large
+objects for very large values.
+
+That's the database side. On the C++ side, in libpqxx, all binary data must be
+either `std::basic_string<std::byte>` or `std::basic_string_view<std::byte>`;
+or if you're building in C++20 or better, anything that's a block of
+contiguous `std::byte` in memory.
+
+So for example, if you want to write a large object, you'd create a
+`pqxx::blob` object. And you might use that to write data in the form of
+`std::basic_string_view<std::byte>`.
+
+Your particular binary data may look different though. You may have it in a
+`std::string`, or a `std::vector<unsigned char>`, or a pointer to `char`
+accompanied by a size (which could be signed or unsigned, and of any of a few
+different widths). Sometimes that's your choice, or sometimes some other
+library will dictate what form it takes.
+
+So long as it's _basically_ still a block of bytes though, you can use
+`pqxx::binary_cast` to construct a `std::basic_string_view<std::byte>` from it.
+
+There are two forms of `binary_cast`. One takes a single argument that must
+support `std::data()` and `std::size()`:
+
+    std::string hi{"Hello binary world"};
+    my_blob.write(pqxx::binary_cast(hi));
+
+The other takes a pointer and a size:
+
+    char const greeting[] = "Hello binary world";
+    char const *hi = greeting;
+    my_blob.write(pqxx::binary_cast(hi, sizeof(greeting)));
+
+
+Caveats
+-------
+
+There are some restrictions on `binary_cast` that you must be aware of.
+
+First, your data must be of a type that gives us _bytes._ So: `char`,
+`unsigned char`, `signed char`, `int8_t`, `uint8_t`, or of course `std::byte`.
+You can't feed in a vector of `double`, or anything like that.
+
+Second, the data must be laid out as a contiguous block in memory. If there's
+no `std::data()` implementation for your type, it's not suitable.
+
+Third, `binary_cast` only constructs something like a `std::string_view`. It
+does not make a copy of your actual data. So, make sure that your data remains
+alive and in the same place while you're using it.
diff --git a/ext/libpqxx-7.7.3/include/pqxx/doc/datatypes.md b/ext/libpqxx-7.7.3/include/pqxx/doc/datatypes.md
new file mode 100644
index 000000000..bc14c8b90
--- /dev/null
+++ b/ext/libpqxx-7.7.3/include/pqxx/doc/datatypes.md
@@ -0,0 +1,373 @@
+Supporting additional data types {#datatypes}
+================================
+
+Communication with the database mostly happens in a text format. When you
+include an integer value in a query, you use `to_string` to convert it to that
+text format. When you get a query result field "as a float," it converts from
+the text format to a floating-point type. These conversions are everywhere in
+libpqxx.
+
+The conversion system supports many built-in types, but it is also extensible.
+You can "teach" libpqxx (in the scope of your own application) to convert
+additional types of values to and from PostgreSQL's string format.
+
+This is massively useful, but it's not for the faint of heart. You'll need to
+specialise some templates. And, **the API for doing this can change with any
+major libpqxx release.**
+
+
+Converting types
+----------------
+
+In your application, a conversion is driven entirely by a C++ type you specify.
+The value's SQL type has nothing to do with it, nor is there anything in the
+string that would identify its type.
+
+So, if you've SELECTed a 64-bit integer from the database, and you try to
+convert it to a C++ "short," one of two things will happen: either the number
+is small enough to fit in your `short` (and it just works), or else it throws a
+conversion exception.
+
+Or, your database table might have a text column, but a given field may contain
+a string that _looks_ just like a number. You can convert that value to an
+integer type just fine. Or to a floating-point type. All that matters to the
+conversion is the actual value, and the type.
+
+In some cases the templates for these conversions can tell the type from the
+arguments you pass them:
+
+    auto x = to_string(99);
+
+In other cases you may need to instantiate the template explicitly:
+
+    auto y = from_string<int>("99");
+
+
+Supporting a new type
+---------------------
+
+Let's say you have some other SQL type which you want to be able to store in,
+or retrieve from, the database. What would it take to support that?
+
+Sometimes you do not need _complete_ support. You might need a conversion _to_
+a string but not _from_ a string, for example. The conversion is defined at
+compile time, so don't be too afraid to be incomplete. If you leave out one of
+these steps, it's not going to crash at run time or mess up your data. The
+worst that can happen is that your code won't build.
+
+So what do you need for a complete conversion?
+
+First off, of course, you need a C++ type. It may be your own, but it
+doesn't have to be. It could be a type from a third-party library, or even one
+from the standard library that libpqxx does not yet support.
+
+You also specialise the `pqxx::type_name` variable to specify the type's name.
+This is important for all code which mentions your type in human-readable text,
+such as error messages.
+
+Then, does your type have a built-in null value? You specialise the
+`pqxx::nullness` template to specify the details.
+
+Finally, you specialise the `pqxx::string_traits` template. This is where you
+define the actual conversions.
+
+Let's go through these steps one by one.
+
+
+Your type
+---------
+
+You'll need a type for which the conversions are not yet defined, because the
+C++ type is what determines the right conversion. One type, one set of
+conversions.
+
+The type doesn't have to be one that you create. The conversion logic was
+designed such that you can build it around any type. So you can just as
+easily build a conversion for a type that's defined somewhere else. There's
+no need to include any special methods or other members inside it. That's also
+how libpqxx can support converting built-in types like `int`.
+
+By the way, if the type is an enum, you don't need to do any of this. Just
+invoke the preprocessor macro `PQXX_DECLARE_ENUM_CONVERSION`, from the global
+namespace near the top of your translation unit, and pass the type as an
+argument.
+
+The library also provides specialisations for `std::optional<T>`,
+`std::shared_ptr<T>`, and `std::unique_ptr<T>`. If you have conversions for
+`T`, you'll also have conversions for those.
+
+
+Specialise `type_name`
+----------------------
+
+When errors happen during conversion, libpqxx will compose error messages for
+the user. Sometimes these will include the name of the type that's being
+converted.
+
+To tell libpqxx the name of each type, there's a template variable called
+`pqxx::type_name`. For any given type `T`, it should have a specialisation
+that provides that `T`'s human-readable name:
+
+    namespace pqxx
+    {
+    template<> std::string const type_name<T>{"T"};
+    }
+
+(Yes, this means that you need to define something inside the pqxx namespace.
+Future versions of libpqxx may move this into a separate namespace.)
+
+Define this early on in your translation unit, before any code that might cause
+libpqxx to need the name. That way, the libpqxx code which needs to know the
+type's name can see your definition.
+
+
+Specialise `nullness`
+---------------------
+
+A struct template `pqxx::nullness` defines whether your type has a natural
+"null value" built in. If so, it also provides member functions for producing
+and recognising null values.
+
+The simplest scenario is also the most common: most types don't have a null
+value built in. In that case, derive your nullness traits from
+`pqxx::no_null`:
+
+    namespace pqxx
+    {
+    template<> struct nullness<T> : pqxx::no_null<T> {};
+    }
+
+(Here again you're defining this in the pqxx namespace.)
+
+If your type does have a natural null value, the definition gets a little more
+complex:
+
+    namespace pqxx
+    {
+    template<> struct nullness<T>
+    {
+      static constexpr bool has_null{true};
+      static constexpr bool always_null{false};
+
+      static bool is_null(T const &value)
+      {
+        // Return whether "value" is null.
+        return ...;
+      }
+
+      [[nodiscard]] static T null()
+      {
+        // Return a null value.
+        return ...;
+      }
+    };
+    }
+
+You may be wondering why there's a function to produce a null value, but also a
+function to check whether a value is null. Why not just compare the value to
+the result of `null()`? Because two null values may not be equal. `T` may
+have several different null values. Or it may override the comparison
+operator, similar to SQL where NULL is not equal to NULL.
+
+As a third case, your type may be one that _always_ represents a null value.
+This is the case for `std::nullptr_t` and `std::nullopt_t`. In that case, you
+set `nullness<T>::always_null` to `true` (as well as `has_null` of course),
+and you won't need to define any actual conversions.
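+
+To make that third case concrete, here is a minimal sketch of what the
+specialisations could look like. The `my_null_t` type and its name are
+invented for this example; they are not part of libpqxx, and the exact members
+simply mirror the skeleton shown above:
+
+    #include <pqxx/pqxx>
+
+    // Hypothetical tag type whose values always represent SQL NULL.
+    struct my_null_t
+    {};
+
+    namespace pqxx
+    {
+    template<> std::string const type_name<my_null_t>{"my_null_t"};
+
+    template<> struct nullness<my_null_t>
+    {
+      static constexpr bool has_null{true};
+      static constexpr bool always_null{true};
+
+      // Every my_null_t value is null by definition.
+      static constexpr bool is_null(my_null_t const &) noexcept { return true; }
+
+      [[nodiscard]] static constexpr my_null_t null() noexcept { return {}; }
+    };
+    } // namespace pqxx
+
+Because `always_null` is `true`, such a value can simply be rendered as SQL
+`NULL` without ever invoking a string conversion, which is why no
+`string_traits` specialisation is needed in this case.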
+
+
+Specialise `string_traits`
+--------------------------
+
+This part is more work. (You can skip it for types that are _always_ null,
+but those will be rare.) Specialise the `pqxx::string_traits` template:
+
+    namespace pqxx
+    {
+    template<> struct string_traits<T>
+    {
+      static T from_string(std::string_view text);
+      static zview to_buf(char *begin, char *end, T const &value);
+      static char *into_buf(char *begin, char *end, T const &value);
+      static std::size_t size_buffer(T const &value) noexcept;
+    };
+    }
+
+You'll also need to write those member functions, or as many of them as needed
+to get your code to build.
+
+
+### `from_string`
+
+We start off simple: `from_string` parses a string as a value of `T`, and
+returns that value.
+
+The string may not be zero-terminated; it's just the `string_view` from
+beginning to end (exclusive). In your tests, cover cases where the string
+does not end in a zero byte.
+
+It's perfectly possible that the string isn't actually a `T` value. Mistakes
+happen. In that case, throw a `pqxx::conversion_error`.
+
+(Of course it's also possible that you run into some other error, so it's fine
+to throw different exceptions as well. But when it's definitely "this is not
+the right format for a `T`," throw `conversion_error`.)
+
+
+### `to_buf`
+
+In this function, you convert a value of `T` into a string that the postgres
+server will understand.
+
+The caller will provide you with a buffer where you can write the string, if
+you need it: from `begin` to `end` exclusive. It's a half-open interval, so
+don't access `*end`.
+
+If the buffer is insufficient for you to do the conversion, throw a
+`pqxx::conversion_overrun`. It doesn't have to be exact: you can be a little
+pessimistic and demand a bit more space than you need. Just be sure to throw
+the exception if there's any risk of overrunning the buffer.
+
+You don't _have_ to use the buffer for this function though. For example,
+`pqxx::string_traits<bool>::to_buf` returns a compile-time constant string and
+ignores the buffer.
+
+Even if you do use the buffer, your string does not _have_ to start at the
+beginning of the buffer. For example, the integer conversions start by writing
+the _least_ significant digit to the _end_ of the buffer, and then write the
+more significant digits before it. It was just more convenient.
+
+Return a `pqxx::zview`. This is basically a `std::string_view`, but with one
+difference: a `zview` guarantees that there will be a valid zero byte right
+after the `string_view`. The zero byte is not counted as part of its size, but
+it will be there.
+
+Expressed in code, this rule must hold:
+
+    void invariant(zview z)
+    {
+      assert(z[std::size(z)] == 0);
+    }
+
+Make sure you write your trailing zero _before_ the `end`. If the trailing
+zero doesn't fit in the buffer, then there's just not enough room to perform
+the conversion.
+
+Beware of locales when converting. If you use standard library features like
+`sprintf`, they may obey whatever locale is currently set on the system. That
+means that a simple integer like 1000000 may come out as "1000000" on your
+system, but as "1,000,000" on mine, or as "1.000.000" for somebody else, and on
+an Indian system it may be "1,00,000". Values coming from or going to the
+database should be in non-localised formats. You can use libpqxx functions for
+those conversions: `pqxx::from_string`, `pqxx::to_string`, `pqxx::to_buf`.
+
+
+### `into_buf`
+
+This is a stricter version of `to_buf`.
All the same requirements apply, but +in addition you must write your string into the buffer provided, starting +_exactly_ at `begin`. + +That's why this function returns just a simple pointer: the address right +behind the trailing zero. If the caller wants to use the string, they can +find it at `begin`. If they want to write a different value into the rest of +the buffer, they can start at the location you returned. + + +### `size_buffer` + +Here you estimate how much buffer space you need for converting a `T` to a +string. Be precise if you can, but pessimistic if you must. It's usually +better to waste a few unnecessary bytes than to spend a lot of time computing +the exact buffer space you need. And failing the conversion because you +under-budgeted the buffer is worst of all. + +Include the trailing zero in the buffer size. If your `to_buf` takes more +space than just what's needed to store the result, include that too. + +Make `size_buffer` a `constexpr` function if you can. It can allow the caller +to allocate the buffer on the stack, with a size known at compile time. + + +Optional: Specialise `is_unquoted_safe` +--------------------------------------- + +When converting arrays or composite values to strings, libpqxx may need to +quote values and escape any special characters. This takes time. + +Some types though, such as integral or floating-point types, can never have +any special characters such as quotes, commas, or backslashes in their string +representations. In such cases, there's no need to quote or escape such values +in arrays or composite types. + +If your type is like that, you can tell libpqxx about this by defining: + + namespace pqxx + { + template<> inline constexpr bool is_unquoted_safe{true}; + } + +The code that converts this type of field to strings in an array or a composite +type can then use a simpler, more efficient variant of the code. It's always +safe to leave this out; it's _just_ an optimisation for when you're completely +sure that it's safe. + +Do not do this if a string representation of your type may contain a comma; +semicolon; parenthesis; brace; quote; backslash; newline; or any other +character that might need escaping. + + +Optional: Specialise `param_format` +----------------------------------- + +This one you don't generally need to worry about. Read on if you're writing a +type which represents raw binary data, or if you're writing a template where +_some specialisations_ may contain raw binary data. + +When you call parameterised statements, or prepared statements with parameters, +libpqxx needs to your parameters on to libpq, the underlying C-level PostgreSQL +client library. + +There are two formats for doing that: _text_ and _binary._ In the first, we +represent all values as strings, and the server then converts them into its own +internal binary representation. That's what the string conversions are all +about, and it's what we do for almost all types of parameters. + +But we do it differently when the parameter is a contiguous series of raw bytes +and the corresponding SQL type is `BYTEA`. There is a text format for those, +but we bypass it for efficiency. The server can use the binary data in the +exact same form, without any conversion or extra processing. The binary data +is also twice as compact during transport. + +(People sometimes ask why we can't just treat all types as binary. However the +general case isn't so clear-cut. 
The binary formats are not documented, there +are no guarantees that they will be platform-independent or that they will +remain stable, and there's no really solid way to detect when we might get the +format wrong. But also, the conversions aren't necessarily as straightforward +and efficient as they sound. So, for the general case, libpqxx sticks with the +text formats. Raw binary data alone stands out as a clear win.) + +Long story short, the machinery for passing parameters needs to know: is this +parameter a binary string, or not? In the normal case it can assume "no," and +that's what it does. The text format is always a safe choice; we just try to +use the binary format where it's faster. + +The `param_format` function template is what makes the decision. We specialise +it for types which may be binary strings, and use the default for all other +types. + +"Types which _may_ be binary"? You might think we know whether a type is a +binary type or not. But there are some complications with generic types. + +Templates like `std::shared_ptr`, `std::optional`, and so on act like +"wrappers" for another type. A `std::optional` is binary if `T` is binary. +Otherwise, it's not. If you're building support for a template of this nature, +you'll probably want to implement `param_format` for it. + +The decision to use binary format is made based on a given object, not +necessarily based on the type in general. Look at `std::variant`. If you have +a `std::variant` type which can hold an `int` or a binary string, is that a +binary parameter? We can't decide without knowing the individual object. + +Containers are another hard case. Should we pass `std::vector` in binary? +Even when `T` is a binary type, we don't currently have any way to pass an +array in binary format, so we always pass it as text. diff --git a/ext/libpqxx-7.7.3/include/pqxx/doc/escaping.md b/ext/libpqxx-7.7.3/include/pqxx/doc/escaping.md new file mode 100644 index 000000000..2ad9fe3db --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/doc/escaping.md @@ -0,0 +1,74 @@ +String escaping {#escaping} +=============== + +Writing queries as strings is easy. But sometimes you need a variable in +there: `"SELECT id FROM user WHERE name = '" + name + "'"`. + +This is dangerous. See the bug? If `name` can contain quotes, you may have +an SQL injection vulnerability there, where users can enter nasty stuff like +"`.'; DROP TABLE user`". Or if you're lucky, it's just a nasty bug that you +discover when `name` happens to be "d'Arcy". + +So, you'll need to _escape_ the `name` before you insert it. This is where +quotes and other problematic characters are marked as "this is just a character +in the string, not the end of the string." There are +[several functions](@ref escaping-functions) in libpqxx to do this for you. + + +SQL injection +------------- + +To understand what SQL injection vulnerabilities are and why they should be +prevented, imagine you use the following SQL statement somewhere in your +program: + + TX.exec( + "SELECT number,amount " + "FROM accounts " + "WHERE allowed_to_see('" + userid + "','" + password + "')"); + +This shows a logged-in user important information on all accounts he is +authorized to view. The userid and password strings are variables entered +by the user himself. + +Now, if the user is actually an attacker who knows (or can guess) the +general shape of this SQL statement, imagine he enters the following +password: + + x') OR ('x' = 'x + +Does that make sense to you? Probably not. 
But if this is inserted into +the SQL string by the C++ code above, the query becomes: + + SELECT number,amount + FROM accounts + WHERE allowed_to_see('user','x') OR ('x' = 'x') + +Is this what you wanted to happen? Probably not! The neat `allowed_to_see()` +clause is completely circumvented by the "`OR ('x' = 'x')`" clause, which is +always `true`. Therefore, the attacker will get to see all accounts in the +database! + + +Using the esc functions +----------------------- + +Here's how you can fix the problem in the example above: + + TX.exec( + "SELECT number,amount " + "FROM accounts " + "WHERE allowed_to_see('" + TX.esc(userid) + "', " + "'" + TX.esc(password) + "')"); + +Now, the quotes embedded in the attacker's string will be neatly escaped so +they can't "break out" of the quoted SQL string they were meant to go into: + + SELECT number,amount + FROM accounts + WHERE allowed_to_see('user', 'x'') OR (''x'' = ''x') + +If you look carefully, you'll see that thanks to the added escape characters +(a single-quote is escaped in SQL by doubling it) all we get is a very +strange-looking password string--but not a change in the SQL statement. + diff --git a/ext/libpqxx-7.7.3/include/pqxx/doc/getting-started.md b/ext/libpqxx-7.7.3/include/pqxx/doc/getting-started.md new file mode 100644 index 000000000..1b87b881f --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/doc/getting-started.md @@ -0,0 +1,142 @@ +Getting started {#getting-started} +=============== + +The most basic three types in libpqxx are the _connection_, the _transaction_, +and the _result_. + +They fit together as follows: +* You connect to the database by creating a `pqxx::connection` object (see + @ref connections). + +* You create a transaction object (see @ref transactions) operating on that + connection. You'll usually want the `pqxx::work` variety. + + Once you're done you call the transaction's `commit` function to make its + work final. If you don't call this, the work will be rolled back when the + transaction object is destroyed. + +* Until then, use the transaction's `exec`, `query_value`, and `stream` + functions (and variants) to execute SQL statements. You pass the statements + themselves in as simple strings. (See @ref streams for more about data + streaming). + +* Most of the `exec` functions return a `pqxx::result` object, which acts + as a standard container of rows: `pqxx::row`. + + Each row in a result, in turn, acts as a container of fields: `pqxx::field`. + See @ref accessing-results for more about results, rows, and fields. + +* Each field's data is stored internally as a text string, in a format defined + by PostgreSQL. You can convert field or row values using their `as()` and + `to()` member functions. + +* After you've closed the transaction, the connection is free to run a next + transaction. + +Here's a very basic example. It connects to the default database (you'll +need to have one set up), queries it for a very simple result, converts it to +an `int`, and prints it out. It also contains some basic error handling. + + #include + #include + + int main() + { + try + { + // Connect to the database. In practice we may have to pass some + // arguments to say where the database server is, and so on. + // The constructor parses options exactly like libpq's + // PQconnectdb/PQconnect, see: + // https://www.postgresql.org/docs/10/static/libpq-connect.html + pqxx::connection c; + + // Start a transaction. In libpqxx, you always work in one. 
+ pqxx::work w(c); + + // work::exec1() executes a query returning a single row of data. + // We'll just ask the database to return the number 1 to us. + pqxx::row r = w.exec1("SELECT 1"); + + // Commit your transaction. If an exception occurred before this + // point, execution will have left the block, and the transaction will + // have been destroyed along the way. In that case, the failed + // transaction would implicitly abort instead of getting to this point. + w.commit(); + + // Look at the first and only field in the row, parse it as an integer, + // and print it. + // + // "r[0]" returns the first field, which has an "as<...>()" member + // function template to convert its contents from their string format + // to a type of your choice. + std::cout << r[0].as() << std::endl; + } + catch (std::exception const &e) + { + std::cerr << e.what() << std::endl; + return 1; + } + } + +This prints the number 1. Notice that you can keep the result object around +after you've closed the transaction or even the connection. There are +situations where you can't do it, but generally it's fine. If you're +interested: you can install your own callbacks for receiving error messages +from the database, and in that case you'll have to keep the connection object +alive. But otherwise, it's nice to be able to "fire and forget" your +connection and deal with the data. + +You can also convert an entire row to a series of C++-side types in one go, +using the @c as member function on the row: + + pqxx::connection c; + pqxx::work w(c); + pqxx::row r = w.exec1("SELECT 1, 2, 'Hello'"); + auto [one, two, hello] = r.as(); + std::cout << (one + two) << ' ' << std::strlen(hello) << std::endl; + +Here's a slightly more complicated example. It takes an argument from the +command line and retrieves a string with that value. The interesting part is +that it uses the escaping-and-quoting function `quote` to embed this +string value in SQL safely. It also reads the result field's value as a +plain C-style string using its `c_str` function. + + #include + #include + #include + + int main(int argc, char *argv[]) + { + try + { + if (!argv[1]) throw std::runtime_error("Give me a string!"); + + pqxx::connection c; + pqxx::work w(c); + + // work::exec() returns a full result set, which can consist of any + // number of rows. + pqxx::result r = w.exec("SELECT " + w.quote(argv[1])); + + // End our transaction here. We can still use the result afterwards. + w.commit(); + + // Print the first field of the first row. Read it as a C string, + // just like std::string::c_str() does. + std::cout << r[0][0].c_str() << std::endl; + } + catch (std::exception const &e) + { + std::cerr << e.what() << std::endl; + return 1; + } + } + +You can find more about converting field values to native types, or +converting values to strings for use with libpqxx, under +@ref stringconversion. More about getting to the rows and fields of a +result is under @ref accessing-results. + +If you want to handle exceptions thrown by libpqxx in more detail, for +example to print the SQL contents of a query that failed, see @ref exception. diff --git a/ext/libpqxx-7.7.3/include/pqxx/doc/mainpage.md b/ext/libpqxx-7.7.3/include/pqxx/doc/mainpage.md new file mode 100644 index 000000000..5d4b8f9b2 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/doc/mainpage.md @@ -0,0 +1,28 @@ +libpqxx {#mainpage} +======= + +@version 7.7.3 +@author Jeroen T. 
Vermeulen +@see http://pqxx.org +@see https://github.com/jtv/libpqxx + +Welcome to libpqxx, the C++ API to the PostgreSQL database management system. + +Compiling this package requires PostgreSQL to be installed -- including the +C headers for client development. The library builds on top of PostgreSQL's +standard C API, libpq. The libpq headers are not needed to compile client +programs, however. + +For a quick introduction to installing and using libpqxx, see the README.md +file. The latest information can be found at http://pqxx.org/ + + +Some links that should help you find your bearings: +* @ref getting-started +* @ref thread-safety +* @ref connections +* @ref transactions +* @ref escaping +* @ref performance +* @ref transactor +* @ref datatypes diff --git a/ext/libpqxx-7.7.3/include/pqxx/doc/mainpage.md.template b/ext/libpqxx-7.7.3/include/pqxx/doc/mainpage.md.template new file mode 100644 index 000000000..d5afb2427 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/doc/mainpage.md.template @@ -0,0 +1,28 @@ +libpqxx {#mainpage} +======= + +@version @PQXXVERSION@ +@author Jeroen T. Vermeulen +@see http://pqxx.org +@see https://github.com/jtv/libpqxx + +Welcome to libpqxx, the C++ API to the PostgreSQL database management system. + +Compiling this package requires PostgreSQL to be installed -- including the +C headers for client development. The library builds on top of PostgreSQL's +standard C API, libpq. The libpq headers are not needed to compile client +programs, however. + +For a quick introduction to installing and using libpqxx, see the README.md +file. The latest information can be found at http://pqxx.org/ + + +Some links that should help you find your bearings: +* @ref getting-started +* @ref thread-safety +* @ref connections +* @ref transactions +* @ref escaping +* @ref performance +* @ref transactor +* @ref datatypes diff --git a/ext/libpqxx-7.7.3/include/pqxx/doc/parameters.md b/ext/libpqxx-7.7.3/include/pqxx/doc/parameters.md new file mode 100644 index 000000000..7ac792025 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/doc/parameters.md @@ -0,0 +1,90 @@ +Statement parameters {#parameters} +==================== + +When you execute a prepared statement (see @ref prepared), or a parameterised +statement (using functions like `pqxx::connection::exec_params`), you may write +special _placeholders_ in the query text. They look like `$1`, `$2`, and so +on. + +If you execute the query and pass parameter values, the call will respectively +substitute the first where it finds `$1`, the second where it finds `$2`, et +cetera. + +Doing this saves you work. If you don't use statement parameters, you'll need +to quote and escape your values (see `connection::quote()` and friends) as you +insert them into your query as literal values. + +Or if you forget to do that, you leave yourself open to horrible +[SQL injection attacks](https://xkcd.com/327/). Trust me, I was born in a town +whose name started with an apostrophe! + +Statement parameters save you this work. With these parameters you can pass +your values as-is, and they will go across the wire to the database in a safe +format. + +In some cases it may even be faster! When a parameter represents binary data +(as in the SQL `BYTEA` type), libpqxx will send it directly as binary, which is +a bit more efficient. If you insert the binary data directly in your query +text, your CPU will have some extra work to do, converting the data into a text +format, escaping it, and adding quotes. 
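
For example (the table, columns, and variables here are made up), a
parameterised call through a transaction might look like this:

    pqxx::work tx{cx};
    pqxx::result r = tx.exec_params(
      "SELECT id FROM employee WHERE name = $1 AND salary > $2",
      name, min_salary);
    tx.commit();

The `name` value can contain quotes, backslashes, or anything else; it travels
to the server as a parameter value, never as part of the SQL text.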
+ + +Dynamic parameter lists +----------------------- + +In rare cases you may just not know how many parameters you'll pass into your +statement when you call it. + +For these situations, have a look at `params`. It lets you compose your +parameters list on the fly, even add whole ranges of parameters at a time. + +You can pass a `params` into your statement as a normal parameter. It will +fill in all the parameter values it contains into that position of the +statement's overall parameter list. + +So if you call your statement passing a regular parameter `a`, a +`params` containing just a parameter `b`, and another regular parameter `c`, +then your call will pass parameters `a`, `b`, and `c`. Or if the params object +is empty, it will pass just `a` and `c`. If the params object contains `x` and +`y`, your call will pass `a, x, y, c`. + +You can mix static and dynamic parameters freely. Don't go overboard though: +complexity is where bugs happen! + + +Generating placeholders +----------------------- + +If your code gets particularly complex, it may sometimes happen that it becomes +hard to track which parameter value belongs with which placeholder. Did you +intend to pass this numeric value as `$7`, or as `$8`? The answer may depend +on an `if` that happened earlier in a different function. + +(Generally if things get that complex, it's a good idea to look for simpler +solutions. But especially when performance matters, sometimes you can't avoid +complexity like that.) + +There's a little helper class called `placeholders`. You can use it as a +counter which produces those placeholder strings, `$1`, `$2`, `$3`, et cetera. +When you start generating a complex statement, you can create both a `params` +and a `placeholders`: + + pqxx::params values; + pqxx::placeholders name; + +Let's say you've got some complex code to generate the conditions for an SQL +"WHERE" clause. You'll generally want to do these things close together in +your, so that you don't accidentally update one part and forget another: + + if (extra_clause) + { + // Extend the query text, using the current placeholder. + query += " AND x = " + name.get(); + // Add the parameter value. + values.append(my_x); + // Move on to the next placeholder value. + name.next(); + } + +Depending on the starting value of `name`, this might add to `query` a fragment +like "` AND x = $3`" or "` AND x = $5`". diff --git a/ext/libpqxx-7.7.3/include/pqxx/doc/performance.md b/ext/libpqxx-7.7.3/include/pqxx/doc/performance.md new file mode 100644 index 000000000..6c403684f --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/doc/performance.md @@ -0,0 +1,24 @@ +Performance features {#performance} +==================== + +If your program's database interaction is not as efficient as it needs to be, +the first place to look is usually the SQL you're executing. But libpqxx +has a few specialized features to help you squeeze more performance out +of how you issue commands and retrieve data: + +* @ref streams. Use these as a faster way to transfer data between your + code and the database. +* `std::string_view` and `pqxx::zview`. In places where traditional C++ worked + with `std::string`, see whether `std::string_view` or `pqxx::zview` will + do. Of course that means that you'll have to look at the data's lifetime + more carefully, but it'll save the computer a lot of copying. +* @ref prepared. These can be executed many times without the server + parsing and planning them anew each time. They also save you having to + escape string parameters. 
+* `pqxx::pipeline` lets you send queries to the database in batches, and + continue other processing while they are executing. +* `pqxx::connecting` lets you start setting up a database connection, but + without blocking the thread. + +As always of course, don't risk the quality of your code for optimizations +that you don't need! diff --git a/ext/libpqxx-7.7.3/include/pqxx/doc/prepared-statement.md b/ext/libpqxx-7.7.3/include/pqxx/doc/prepared-statement.md new file mode 100644 index 000000000..5193866a6 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/doc/prepared-statement.md @@ -0,0 +1,125 @@ +Prepared statements {#prepared} +=================== + +Prepared statements are SQL queries that you define once and then invoke +as many times as you like, typically with varying parameters. It's basically +a function that you can define ad hoc. + +If you have an SQL statement that you're going to execute many times in +quick succession, it may be more efficient to prepare it once and reuse it. +This saves the database backend the effort of parsing complex SQL and +figuring out an efficient execution plan. Another nice side effect is that +you don't need to worry about escaping parameters. Some corporate coding +standards require all SQL parameters to be passed in this way, to reduce the +risk of programmer mistakes leaving room for SQL injections. + + +Preparing a statement +--------------------- + +You create a prepared statement by preparing it on the connection (using the +`pqxx::connection::prepare` functions), passing an identifier and its SQL text. + +The identifier is the name by which the prepared statement will be known; it +should consist of ASCII letters, digits, and underscores only, and start with +an ASCII letter. The name is case-sensitive. + +```cxx + void prepare_my_statement(pqxx::connection &c) + { + c.prepare( + "my_statement", + "SELECT * FROM Employee WHERE name = 'Xavier'"); + } +``` + +Once you've done this, you'll be able to call `my_statement` from any +transaction you execute on the same connection. For this, use the +`pqxx::transaction_base::exec_prepared` functions. + +```cxx + pqxx::result execute_my_statement(pqxx::transaction_base &t) + { + return t.exec_prepared("my_statement"); + } +``` + + +Parameters +---------- + +Did I mention that prepared statements can have parameters? The query text +can contain `$1`, `$2` etc. as placeholders for parameter values that you +will provide when you invoke the prepared satement. + +See @ref parameters for more about this. And here's a simple example of +preparing a statement and invoking it with parameters: + +```cxx + void prepare_find(pqxx::connection &c) + { + // Prepare a statement called "find" that looks for employees with a + // given name (parameter 1) whose salary exceeds a given number + // (parameter 2). + c.prepare( + "find", + "SELECT * FROM Employee WHERE name = $1 AND salary > $2"); + } +``` + +This example looks up the prepared statement "find," passes `name` and +`min_salary` as parameters, and invokes the statement with those values: + +```cxx + pqxx::result execute_find( + pqxx::transaction_base &t, std::string name, int min_salary) + { + return t.exec_prepared("find", name, min_salary); + } +``` + + +A special prepared statement +---------------------------- + +There is one special case: the _nameless_ prepared statement. You may prepare +a statement without a name, i.e. whose name is an empty string. The unnamed +statement can be redefined at any time, without un-preparing it first. 
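
Here's a minimal sketch of how that can look.  It assumes the single-argument
`prepare()` overload, which defines the nameless statement, and invokes the
statement by passing an empty name:

```cxx
    void nameless_demo(pqxx::connection &c)
    {
      pqxx::work tx{c};

      // Define the nameless prepared statement...
      c.prepare("SELECT name FROM Employee WHERE salary > $1");
      pqxx::result high{tx.exec_prepared("", 100000)};

      // ...and later redefine it, without un-preparing it first.
      c.prepare("SELECT name FROM Employee WHERE salary < $1");
      pqxx::result low{tx.exec_prepared("", 20000)};

      tx.commit();
    }
```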
+ + +Performance note +---------------- + +Don't assume that using prepared statements will speed up your application. +There are cases where prepared statements are actually slower than plain SQL. + +The reason is that the backend can often produce a better execution plan when +it knows the statement's actual parameter values. + +For example, say you've got a web application and you're querying for users +with status "inactive" who have email addresses in a given domain name X. If +X is a very popular provider, the best way for the database engine to plan the +query may be to list the inactive users first and then filter for the email +addresses you're looking for. But in other cases, it may be much faster to +find matching email addresses first and then see which of their owners are +"inactive." A prepared statement must be planned to fit either case, but a +direct query will be optimised based on table statistics, partial indexes, etc. + + +Zero bytes +---------- + +@warning Beware of "nul" bytes! + +Any string you pass as a parameter will end at the _first char with value +zero._ If you pass a string that contains a zero byte, the last byte in the +value will be the one just before the zero. + +So, if you need a zero byte in a string, consider that it's really a _binary +string,_ which is not the same thing as a text string. SQL represents binary +data as the `BYTEA` type, or in binary large objects ("blobs"). + +In libpqxx, you represent binary data as a range of `std::byte`. They must be +contiguous in memory, so that libpqxx can pass pointers to the underlying C +library. So you might use `std::basic_string`, or +`std::basic_string_view`, or `std::vector`. diff --git a/ext/libpqxx-7.7.3/include/pqxx/doc/streams.md b/ext/libpqxx-7.7.3/include/pqxx/doc/streams.md new file mode 100644 index 000000000..3df4d6126 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/doc/streams.md @@ -0,0 +1,107 @@ +Streams {#streams} +======= + +Most of the time it's fine to retrieve data from the database using `SELECT` +queries, and store data using `INSERT`. But for those cases where efficiency +matters, there are two classes to help you do this better: `stream_from` and +`stream_to`. They're less flexible than SQL queries, and there's the risk of +losing your connection while you're in mid-stream, but you get some speed and +memory efficiencies in return. + +Both stream classes do data conversion for you: `stream_from` receives values +from the database in PostgreSQL's text format, and converts them to the C++ +types you specify. Likewise, `stream_to` converts C++ values you provide to +PostgreSQL's text format for transfer. (On its end, the database of course +converts values to and from their SQL types.) + + +Null values +----------- + +So how do you deal with nulls? It depends on the C++ type you're using. Some +types may have a built-in null value. For instance, if you have a +`char const *` value and you convert it to an SQL string, then converting a +`nullptr` will produce a NULL SQL value. + +But what do you do about C++ types which don't have a built-in null value, such +as `int`? The trick is to wrap it in `std::optional`. The difference between +`int` and `std::optional` is that the former always has an `int` value, +and the latter doesn't have to. + +Actually it's not just `std::optional`. You can do the same thing with +`std::unique_ptr` or `std::shared_ptr`. 
A smart pointer is less efficient than
`std::optional` in most situations because they allocate their value on the
heap, but sometimes that's what you want in order to save moving or copying
large values around.

This part is not generic though.  It won't work with just any smart-pointer
type, just the ones which are explicitly supported: `shared_ptr` and
`unique_ptr`.  If you really need to, you can build support for additional
wrappers and smart pointers by copying the implementation patterns from the
existing smart-pointer support.


stream\_from
------------

Use `stream_from` to read data directly from the database.  It's faster than
the transaction's `exec` functions if the result contains enough rows.  But
also, you won't need to keep your full result set in memory.  That can really
matter with larger data sets.

And, you can start processing your data right after the first row of data comes
in from the server.  With `exec()` you need to wait to receive all data, and
then you begin processing.  With `stream_from` you can be processing data on
the client side while the server is still sending you the rest.

You don't actually need to create a `stream_from` object yourself, though you
can.  Two shorthand functions, @ref pqxx::transaction_base::stream
and @ref pqxx::transaction_base::for_each, can create the streams for you with
a minimum of overhead.

Not all kinds of queries will work in a stream.  Internally the streams make
use of PostgreSQL's `COPY` command, so see the PostgreSQL documentation for
`COPY` for the exact limitations.  Basic `SELECT` and `UPDATE ... RETURNING`
queries should just work.

As you read a row, the stream converts its fields to a tuple type containing
the value types you ask for:

    auto stream = pqxx::stream_from::query(
        tx, "SELECT name, points FROM score");
    std::tuple<std::string, int> row;
    while (stream >> row)
      process(row);
    stream.complete();

As the stream reads each row, it converts that row's data into your tuple,
goes through your loop body, and then promptly forgets that row's data.  This
means you can easily process more data than will fit in memory.


stream\_to
----------

Use `stream_to` to write data directly to a database table.  This saves you
having to perform an `INSERT` for every row, and so it can be significantly
faster if you want to insert more than just one or two rows at a time.

As with `stream_from`, you can specify the table and the columns, and not much
else.  You insert tuple-like objects of your choice:

    pqxx::stream_to stream{
      tx,
      "score",
      std::vector<std::string>{"name", "points"}};
    for (auto const &entry: scores)
      stream << entry;
    stream.complete();

Each row is processed as you provide it, and not retained in memory after that.

The call to `complete()` is more important here than it is for `stream_from`.
It's a lot like a "commit" or "abort" at the end of a transaction.  If you omit
it, it will be done automatically during the stream's destructor.  But since
destructors can't throw exceptions, any failures at that stage won't be visible
in your code.  So, always call `complete()` on a `stream_to` to close it off
properly!
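
In practice that means calling `complete()` before you commit the enclosing
transaction.  A short sketch, with `stream` and `tx` as in the example above:

    // Close off the stream explicitly.  If something went wrong, this throws,
    // and the commit below never happens.
    stream.complete();
    tx.commit();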
diff --git a/ext/libpqxx-7.7.3/include/pqxx/doc/thread-safety.md b/ext/libpqxx-7.7.3/include/pqxx/doc/thread-safety.md new file mode 100644 index 000000000..07c7f9984 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/doc/thread-safety.md @@ -0,0 +1,29 @@ +Thread safety {#thread-safety} +============= + +This library does not contain any locking code to protect objects against +simultaneous modification in multi-threaded programs. Therefore it is up +to you, the user of the library, to ensure that your threaded client +programs perform no conflicting operations concurrently. + +Most of the time this isn't hard. Result sets are immutable, so you can +share them between threads without problem. The main rule is: + +@li Treat a connection, together with any and all objects related to it, as +a "world" of its own. You should generally make sure that the same "world" +is never accessed by another thread while you're doing anything non-const +in there. + +That means: don't issue a query on a transaction while you're also opening +a subtransaction, don't access a cursor while you may also be committing, +and so on. + +In particular, cursors are tricky. It's easy to perform a non-const +operation without noticing. So, if you're going to share cursors or +cursor-related objects between threads, lock very conservatively! + +Use `pqxx::describe_thread_safety` to find out at runtime what level of +thread safety is implemented in your build and version of libpqxx. It +returns a `pqxx::thread_safety_model` describing what you can and cannot rely +on. A command-line utility `tools/pqxxthreadsafety` prints out the same +information. diff --git a/ext/libpqxx-7.7.3/include/pqxx/errorhandler b/ext/libpqxx-7.7.3/include/pqxx/errorhandler new file mode 100644 index 000000000..ea572ee79 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/errorhandler @@ -0,0 +1,8 @@ +/** pqxx::errorhandler class. + * + * Callbacks for handling errors and warnings. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/errorhandler.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/errorhandler.hxx b/ext/libpqxx-7.7.3/include/pqxx/errorhandler.hxx new file mode 100644 index 000000000..2ffb5703c --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/errorhandler.hxx @@ -0,0 +1,92 @@ +/* Definition of the pqxx::errorhandler class. + * + * pqxx::errorhandler handlers errors and warnings in a database session. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/errorhandler instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ERRORHANDLER +#define PQXX_H_ERRORHANDLER + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/types.hxx" + + +namespace pqxx::internal::gate +{ +class errorhandler_connection; +} + + +namespace pqxx +{ +/** + * @addtogroup errorhandler + */ +//@{ + +/// Base class for error-handler callbacks. +/** To receive errors and warnings from a connection, subclass this with your + * own error-handler functor, and instantiate it for the connection. Destroying + * the handler un-registers it. + * + * A connection can have multiple error handlers at the same time. 
When the + * database connection emits an error or warning message, it passes the message + * to each error handler, starting with the most recently registered one and + * progressing towards the oldest one. However an error handler may also + * instruct the connection not to pass the message to further handlers by + * returning "false." + * + * @warning Strange things happen when a result object outlives its parent + * connection. If you register an error handler on a connection, then you must + * not access the result after destroying the connection. This applies even if + * you destroy the error handler first! + */ +class PQXX_LIBEXPORT errorhandler +{ +public: + explicit errorhandler(connection &); + virtual ~errorhandler(); + + /// Define in subclass: receive an error or warning message from the + /// database. + /** + * @return Whether the same error message should also be passed to the + * remaining, older errorhandlers. + */ + virtual bool operator()(char const msg[]) noexcept = 0; + + errorhandler() = delete; + errorhandler(errorhandler const &) = delete; + errorhandler &operator=(errorhandler const &) = delete; + +private: + connection *m_home; + + friend class internal::gate::errorhandler_connection; + void unregister() noexcept; +}; + + +/// An error handler that suppresses any previously registered error handlers. +class quiet_errorhandler : public errorhandler +{ +public: + /// Suppress error notices. + quiet_errorhandler(connection &conn) : errorhandler{conn} {} + + /// Revert to previous handling of error notices. + virtual bool operator()(char const[]) noexcept override { return false; } +}; + +//@} +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/except b/ext/libpqxx-7.7.3/include/pqxx/except new file mode 100644 index 000000000..e5dd508bf --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/except @@ -0,0 +1,8 @@ +/** libpqxx exception classes. + * + * pqxx::sql_error, pqxx::broken_connection, pqxx::in_doubt_error, ... + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/except.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/except.hxx b/ext/libpqxx-7.7.3/include/pqxx/except.hxx new file mode 100644 index 000000000..24f959437 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/except.hxx @@ -0,0 +1,447 @@ +/* Definition of libpqxx exception classes. + * + * pqxx::sql_error, pqxx::broken_connection, pqxx::in_doubt_error, ... + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/except instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_EXCEPT +#define PQXX_H_EXCEPT + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include + + +namespace pqxx +{ +/** + * @addtogroup exception Exception classes + * + * These exception classes follow, roughly, the two-level hierarchy defined by + * the PostgreSQL SQLSTATE error codes (see Appendix A of the PostgreSQL + * documentation corresponding to your server version). This is not a complete + * mapping though. There are other differences as well, e.g. 
the error code + * for `statement_completion_unknown` has a separate status in libpqxx as + * @ref in_doubt_error, and `too_many_connections` is classified as a + * `broken_connection` rather than a subtype of `insufficient_resources`. + * + * @see http://www.postgresql.org/docs/9.4/interactive/errcodes-appendix.html + * + * @{ + */ + +/// Run-time failure encountered by libpqxx, similar to std::runtime_error. +struct PQXX_LIBEXPORT failure : std::runtime_error +{ + explicit failure(std::string const &); +}; + + +/// Exception class for lost or failed backend connection. +/** + * @warning When this happens on Unix-like systems, you may also get a SIGPIPE + * signal. That signal aborts the program by default, so if you wish to be + * able to continue after a connection breaks, be sure to disarm this signal. + * + * If you're working on a Unix-like system, see the manual page for + * `signal` (2) on how to deal with SIGPIPE. The easiest way to make this + * signal harmless is to make your program ignore it: + * + * ```cxx + * #include + * + * int main() + * { + * signal(SIGPIPE, SIG_IGN); + * // ... + * ``` + */ +struct PQXX_LIBEXPORT broken_connection : failure +{ + broken_connection(); + explicit broken_connection(std::string const &); +}; + + +/// The caller attempted to set a variable to null, which is not allowed. +struct PQXX_LIBEXPORT variable_set_to_null : failure +{ + variable_set_to_null(); + explicit variable_set_to_null(std::string const &); +}; + + +/// Exception class for failed queries. +/** Carries, in addition to a regular error message, a copy of the failed query + * and (if available) the SQLSTATE value accompanying the error. + */ +class PQXX_LIBEXPORT sql_error : public failure +{ + /// Query string. Empty if unknown. + std::string const m_query; + /// SQLSTATE string describing the error type, if known; or empty string. + std::string const m_sqlstate; + +public: + explicit sql_error( + std::string const &whatarg = "", std::string const &Q = "", + char const sqlstate[] = nullptr); + virtual ~sql_error() noexcept override; + + /// The query whose execution triggered the exception + [[nodiscard]] PQXX_PURE std::string const &query() const noexcept; + + /// SQLSTATE error code if known, or empty string otherwise. + [[nodiscard]] PQXX_PURE std::string const &sqlstate() const noexcept; +}; + + +/// "Help, I don't know whether transaction was committed successfully!" +/** Exception that might be thrown in rare cases where the connection to the + * database is lost while finishing a database transaction, and there's no way + * of telling whether it was actually executed by the backend. In this case + * the database is left in an indeterminate (but consistent) state, and only + * manual inspection will tell which is the case. + */ +struct PQXX_LIBEXPORT in_doubt_error : failure +{ + explicit in_doubt_error(std::string const &); +}; + + +/// The backend saw itself forced to roll back the ongoing transaction. +struct PQXX_LIBEXPORT transaction_rollback : sql_error +{ + explicit transaction_rollback( + std::string const &whatarg, std::string const &q = "", + char const sqlstate[] = nullptr); +}; + + +/// Transaction failed to serialize. Please retry it. +/** Can only happen at transaction isolation levels REPEATABLE READ and + * SERIALIZABLE. + * + * The current transaction cannot be committed without violating the guarantees + * made by its isolation level. This is the effect of a conflict with another + * ongoing transaction. 
The transaction may still succeed if you try to + * perform it again. + */ +struct PQXX_LIBEXPORT serialization_failure : transaction_rollback +{ + explicit serialization_failure( + std::string const &whatarg, std::string const &q, + char const sqlstate[] = nullptr); +}; + + +/// We can't tell whether our last statement succeeded. +struct PQXX_LIBEXPORT statement_completion_unknown : transaction_rollback +{ + explicit statement_completion_unknown( + std::string const &whatarg, std::string const &q, + char const sqlstate[] = nullptr); +}; + + +/// The ongoing transaction has deadlocked. Retrying it may help. +struct PQXX_LIBEXPORT deadlock_detected : transaction_rollback +{ + explicit deadlock_detected( + std::string const &whatarg, std::string const &q, + char const sqlstate[] = nullptr); +}; + + +/// Internal error in libpqxx library +struct PQXX_LIBEXPORT internal_error : std::logic_error +{ + explicit internal_error(std::string const &); +}; + + +/// Error in usage of libpqxx library, similar to std::logic_error +struct PQXX_LIBEXPORT usage_error : std::logic_error +{ + explicit usage_error(std::string const &); +}; + + +/// Invalid argument passed to libpqxx, similar to std::invalid_argument +struct PQXX_LIBEXPORT argument_error : std::invalid_argument +{ + explicit argument_error(std::string const &); +}; + + +/// Value conversion failed, e.g. when converting "Hello" to int. +struct PQXX_LIBEXPORT conversion_error : std::domain_error +{ + explicit conversion_error(std::string const &); +}; + + +/// Could not convert value to string: not enough buffer space. +struct PQXX_LIBEXPORT conversion_overrun : conversion_error +{ + explicit conversion_overrun(std::string const &); +}; + + +/// Something is out of range, similar to std::out_of_range +struct PQXX_LIBEXPORT range_error : std::out_of_range +{ + explicit range_error(std::string const &); +}; + + +/// Query returned an unexpected number of rows. +struct PQXX_LIBEXPORT unexpected_rows : public range_error +{ + explicit unexpected_rows(std::string const &msg) : range_error{msg} {} +}; + + +/// Database feature not supported in current setup. +struct PQXX_LIBEXPORT feature_not_supported : sql_error +{ + explicit feature_not_supported( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +/// Error in data provided to SQL statement. 
+struct PQXX_LIBEXPORT data_exception : sql_error +{ + explicit data_exception( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT integrity_constraint_violation : sql_error +{ + explicit integrity_constraint_violation( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT restrict_violation : integrity_constraint_violation +{ + explicit restrict_violation( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + integrity_constraint_violation{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT not_null_violation : integrity_constraint_violation +{ + explicit not_null_violation( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + integrity_constraint_violation{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT foreign_key_violation : integrity_constraint_violation +{ + explicit foreign_key_violation( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + integrity_constraint_violation{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT unique_violation : integrity_constraint_violation +{ + explicit unique_violation( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + integrity_constraint_violation{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT check_violation : integrity_constraint_violation +{ + explicit check_violation( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + integrity_constraint_violation{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT invalid_cursor_state : sql_error +{ + explicit invalid_cursor_state( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT invalid_sql_statement_name : sql_error +{ + explicit invalid_sql_statement_name( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT invalid_cursor_name : sql_error +{ + explicit invalid_cursor_name( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT syntax_error : sql_error +{ + /// Approximate position in string where error occurred, or -1 if unknown. 
+ int const error_position; + + explicit syntax_error( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr, int pos = -1) : + sql_error{err, Q, sqlstate}, error_position{pos} + {} +}; + +struct PQXX_LIBEXPORT undefined_column : syntax_error +{ + explicit undefined_column( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + syntax_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT undefined_function : syntax_error +{ + explicit undefined_function( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + syntax_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT undefined_table : syntax_error +{ + explicit undefined_table( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + syntax_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT insufficient_privilege : sql_error +{ + explicit insufficient_privilege( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +/// Resource shortage on the server +struct PQXX_LIBEXPORT insufficient_resources : sql_error +{ + explicit insufficient_resources( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT disk_full : insufficient_resources +{ + explicit disk_full( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + insufficient_resources{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT out_of_memory : insufficient_resources +{ + explicit out_of_memory( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + insufficient_resources{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT too_many_connections : broken_connection +{ + explicit too_many_connections(std::string const &err) : + broken_connection{err} + {} +}; + +/// PL/pgSQL error +/** Exceptions derived from this class are errors from PL/pgSQL procedures. + */ +struct PQXX_LIBEXPORT plpgsql_error : sql_error +{ + explicit plpgsql_error( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +/// Exception raised in PL/pgSQL procedure +struct PQXX_LIBEXPORT plpgsql_raise : plpgsql_error +{ + explicit plpgsql_raise( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + plpgsql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT plpgsql_no_data_found : plpgsql_error +{ + explicit plpgsql_no_data_found( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + plpgsql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT plpgsql_too_many_rows : plpgsql_error +{ + explicit plpgsql_too_many_rows( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + plpgsql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT blob_already_exists : failure +{ + explicit blob_already_exists(std::string const &); +}; + +/** + * @} + */ +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/field b/ext/libpqxx-7.7.3/include/pqxx/field new file mode 100644 index 000000000..37cb69e84 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/field @@ -0,0 +1,8 @@ +/** pqxx::field class. + * + * pqxx::field refers to a field in a query result. 
+ */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/field.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/field.hxx b/ext/libpqxx-7.7.3/include/pqxx/field.hxx new file mode 100644 index 000000000..b8b869fe4 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/field.hxx @@ -0,0 +1,542 @@ +/* Definitions for the pqxx::field class. + * + * pqxx::field refers to a field in a query result. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/field instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_FIELD +#define PQXX_H_FIELD + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include + +#include "pqxx/array.hxx" +#include "pqxx/composite.hxx" +#include "pqxx/result.hxx" +#include "pqxx/strconv.hxx" +#include "pqxx/types.hxx" + +namespace pqxx +{ +/// Reference to a field in a result set. +/** A field represents one entry in a row. It represents an actual value + * in the result set, and can be converted to various types. + */ +class PQXX_LIBEXPORT field +{ +public: + using size_type = field_size_type; + + /// Constructor. Do not call this yourself; libpqxx will do it for you. + /** Create field as reference to a field in a result set. + * @param r Row that this field is part of. + * @param c Column number of this field. + */ + [[deprecated( + "Do not construct fields yourself. Get them from the row.")]] field(row const &r, row_size_type c) noexcept; + + /// Constructor. Do not call this yourself; libpqxx will do it for you. + [[deprecated( + "Do not construct fields yourself. Get them from the " + "row.")]] field() noexcept = default; + + /** + * @name Comparison + */ + //@{ + // TODO: noexcept. Breaks ABI. + /// Byte-by-byte comparison of two fields (all nulls are considered equal) + /** @warning null handling is still open to discussion and change! + * + * Handling of null values differs from that in SQL where a comparison + * involving a null value yields null, so nulls are never considered equal + * to one another or even to themselves. + * + * Null handling also probably differs from the closest equivalent in C++, + * which is the NaN (Not-a-Number) value, a singularity comparable to + * SQL's null. This is because the builtin == operator demands that a == a. + * + * The usefulness of this operator is questionable. No interpretation + * whatsoever is imposed on the data; 0 and 0.0 are considered different, + * as are null vs. the empty string, or even different (but possibly + * equivalent and equally valid) encodings of the same Unicode character + * etc. + */ + [[nodiscard]] PQXX_PURE bool operator==(field const &) const; + + /// Byte-by-byte comparison (all nulls are considered equal) + /** @warning See operator==() for important information about this operator + */ + [[nodiscard]] PQXX_PURE bool operator!=(field const &rhs) const noexcept + { + return not operator==(rhs); + } + //@} + + /** + * @name Column information + */ + //@{ + /// Column name. + [[nodiscard]] PQXX_PURE char const *name() const &; + + /// Column type. + [[nodiscard]] oid PQXX_PURE type() const; + + /// What table did this column come from? + [[nodiscard]] PQXX_PURE oid table() const; + + /// Return row number. 
The first row is row 0, the second is row 1, etc. + PQXX_PURE constexpr row_size_type num() const noexcept { return col(); } + + /// What column number in its originating table did this column come from? + [[nodiscard]] PQXX_PURE row_size_type table_column() const; + //@} + + /** + * @name Content access + */ + //@{ + /// Read as `string_view`, or an empty one if null. + /** The result only remains usable while the data for the underlying + * @ref result exists. Once all `result` objects referring to that data have + * been destroyed, the `string_view` will no longer point to valid memory. + */ + [[nodiscard]] PQXX_PURE std::string_view view() const & + { + return std::string_view(c_str(), size()); + } + + /// Read as plain C string. + /** Since the field's data is stored internally in the form of a + * zero-terminated C string, this is the fastest way to read it. Use the + * to() or as() functions to convert the string to other types such as + * `int`, or to C++ strings. + * + * Do not use this for BYTEA values, or other binary values. To read those, + * convert the value to your desired type using `to()` or `as()`. For + * example: `f.as>()`. + */ + [[nodiscard]] PQXX_PURE char const *c_str() const &; + + /// Is this field's value null? + [[nodiscard]] PQXX_PURE bool is_null() const noexcept; + + /// Return number of bytes taken up by the field's value. + [[nodiscard]] PQXX_PURE size_type size() const noexcept; + + /// Read value into obj; or if null, leave obj untouched and return `false`. + /** This can be used with optional types (except pointers other than C-style + * strings). + */ + template + auto to(T &obj) const -> typename std::enable_if_t< + (not std::is_pointer::value or std::is_same::value), + bool> + { + if (is_null()) + { + return false; + } + else + { + auto const bytes{c_str()}; + from_string(bytes, obj); + return true; + } + } + + /// Read field as a composite value, write its components into `fields`. + /** @warning This is still experimental. It may change or be replaced. + * + * Returns whether the field was null. If it was, it will not touch the + * values in `fields`. + */ + template bool composite_to(T &...fields) const + { + if (is_null()) + { + return false; + } + else + { + parse_composite(m_home.m_encoding, view(), fields...); + return true; + } + } + + /// Read value into obj; or leave obj untouched and return `false` if null. + template bool operator>>(T &obj) const { return to(obj); } + + /// Read value into obj; or if null, use default value and return `false`. + /** This can be used with `std::optional`, as well as with standard smart + * pointer types, but not with raw pointers. If the conversion from a + * PostgreSQL string representation allocates a pointer (e.g. using `new`), + * then the object's later deallocation should be baked in as well, right + * from the point where the object is created. So if you want a pointer, use + * a smart pointer, not a raw pointer. + * + * There is one exception, of course: C-style strings. Those are just + * pointers to the field's internal text data. + */ + template + auto to(T &obj, T const &default_value) const -> typename std::enable_if_t< + (not std::is_pointer::value or std::is_same::value), + bool> + { + bool const null{is_null()}; + if (null) + obj = default_value; + else + obj = from_string(this->view()); + return not null; + } + + /// Return value as object of given type, or default value if null. 
+ /** Note that unless the function is instantiated with an explicit template + * argument, the Default value's type also determines the result type. + */ + template T as(T const &default_value) const + { + if (is_null()) + return default_value; + else + return from_string(this->view()); + } + + /// Return value as object of given type, or throw exception if null. + /** Use as `as>()` or `as()` as + * an alternative to `get()`; this is disabled for use with raw pointers + * (other than C-strings) because storage for the value can't safely be + * allocated here + */ + template T as() const + { + if (is_null()) + { + if constexpr (not nullness::has_null) + internal::throw_null_conversion(type_name); + else + return nullness::null(); + } + else + { + return from_string(this->view()); + } + } + + /// Return value wrapped in some optional type (empty for nulls). + /** Use as `get()` as before to obtain previous behavior, or specify + * container type with `get()` + */ + template class O = std::optional> + constexpr O get() const + { + return as>(); + } + + // TODO: constexpr noexcept, once array_parser constructor gets those. + /// Parse the field as an SQL array. + /** Call the parser to retrieve values (and structure) from the array. + * + * Make sure the @ref result object stays alive until parsing is finished. If + * you keep the @ref row of `field` object alive, it will keep the @ref + * result object alive as well. + */ + array_parser as_array() const & + { + return array_parser{c_str(), m_home.m_encoding}; + } + //@} + + +protected: + constexpr result const &home() const noexcept { return m_home; } + constexpr result::size_type idx() const noexcept { return m_row; } + constexpr row_size_type col() const noexcept { return m_col; } + + // TODO: Create gates. + friend class pqxx::result; + friend class pqxx::row; + field( + result const &r, result_size_type row_num, row_size_type col_num) noexcept + : + m_col{col_num}, m_home{r}, m_row{row_num} + {} + + /** + * You'd expect this to be unsigned, but due to the way reverse iterators + * are related to regular iterators, it must be allowed to underflow to -1. + */ + row_size_type m_col; + +private: + result m_home; + result::size_type m_row; +}; + + +template<> inline bool field::to(std::string &obj) const +{ + bool const null{is_null()}; + if (not null) + obj = std::string{view()}; + return not null; +} + + +template<> +inline bool field::to( + std::string &obj, std::string const &default_value) const +{ + bool const null{is_null()}; + if (null) + obj = default_value; + else + obj = std::string{view()}; + return not null; +} + + +/// Specialization: `to(char const *&)`. +/** The buffer has the same lifetime as the data in this result (i.e. of this + * result object, or the last remaining one copied from it etc.), so take care + * not to use it after the last result object referring to this query result is + * destroyed. 
+ */ +template<> inline bool field::to(char const *&obj) const +{ + bool const null{is_null()}; + if (not null) + obj = c_str(); + return not null; +} + + +template<> inline bool field::to(std::string_view &obj) const +{ + bool const null{is_null()}; + if (not null) + obj = view(); + return not null; +} + + +template<> +inline bool field::to( + std::string_view &obj, std::string_view const &default_value) const +{ + bool const null{is_null()}; + if (null) + obj = default_value; + else + obj = view(); + return not null; +} + + +template<> inline std::string_view field::as() const +{ + if (is_null()) + PQXX_UNLIKELY + internal::throw_null_conversion(type_name); + return view(); +} + + +template<> +inline std::string_view +field::as(std::string_view const &default_value) const +{ + return is_null() ? default_value : view(); +} + + +template<> inline bool field::to(zview &obj) const +{ + bool const null{is_null()}; + if (not null) + obj = zview{c_str(), size()}; + return not null; +} + + +template<> +inline bool field::to(zview &obj, zview const &default_value) const +{ + bool const null{is_null()}; + if (null) + obj = default_value; + else + obj = zview{c_str(), size()}; + return not null; +} + + +template<> inline zview field::as() const +{ + if (is_null()) + PQXX_UNLIKELY + internal::throw_null_conversion(type_name); + return zview{c_str(), size()}; +} + + +template<> inline zview field::as(zview const &default_value) const +{ + return is_null() ? default_value : zview{c_str(), size()}; +} + + +template> +class field_streambuf : public std::basic_streambuf +{ +public: + using char_type = CHAR; + using traits_type = TRAITS; + using int_type = typename traits_type::int_type; + using pos_type = typename traits_type::pos_type; + using off_type = typename traits_type::off_type; + using openmode = std::ios::openmode; + using seekdir = std::ios::seekdir; + + explicit field_streambuf(field const &f) : m_field{f} { initialize(); } + +protected: + virtual int sync() override { return traits_type::eof(); } + + virtual pos_type seekoff(off_type, seekdir, openmode) override + { + return traits_type::eof(); + } + virtual pos_type seekpos(pos_type, openmode) override + { + return traits_type::eof(); + } + virtual int_type overflow(int_type) override { return traits_type::eof(); } + virtual int_type underflow() override { return traits_type::eof(); } + +private: + field const &m_field; + + int_type initialize() + { + auto g{static_cast(const_cast(m_field.c_str()))}; + this->setg(g, g, g + std::size(m_field)); + return int_type(std::size(m_field)); + } +}; + + +/// Input stream that gets its data from a result field +/** Use this class exactly as you would any other istream to read data from a + * field. All formatting and streaming operations of `std::istream` are + * supported. What you'll typically want to use, however, is the fieldstream + * alias (which defines a @ref basic_fieldstream for `char`). This is similar + * to how e.g. `std::ifstream` relates to `std::basic_ifstream`. + * + * This class has only been tested for the char type (and its default traits). 
+ */ +template> +class basic_fieldstream : public std::basic_istream +{ + using super = std::basic_istream; + +public: + using char_type = CHAR; + using traits_type = TRAITS; + using int_type = typename traits_type::int_type; + using pos_type = typename traits_type::pos_type; + using off_type = typename traits_type::off_type; + + basic_fieldstream(field const &f) : super{nullptr}, m_buf{f} + { + super::init(&m_buf); + } + +private: + field_streambuf m_buf; +}; + +using fieldstream = basic_fieldstream; + +/// Write a result field to any type of stream +/** This can be convenient when writing a field to an output stream. More + * importantly, it lets you write a field to e.g. a `stringstream` which you + * can then use to read, format and convert the field in ways that to() does + * not support. + * + * Example: parse a field into a variable of the nonstandard + * "long long" type. + * + * ```cxx + * extern result R; + * long long L; + * stringstream S; + * + * // Write field's string into S + * S << R[0][0]; + * + * // Parse contents of S into L + * S >> L; + * ``` + */ +template +inline std::basic_ostream & +operator<<(std::basic_ostream &s, field const &value) +{ + s.write(value.c_str(), std::streamsize(std::size(value))); + return s; +} + + +/// Convert a field's value to type `T`. +/** Unlike the "regular" `from_string`, this knows how to deal with null + * values. + */ +template inline T from_string(field const &value) +{ + if (value.is_null()) + { + if constexpr (nullness::has_null) + return nullness::null(); + else + internal::throw_null_conversion(type_name); + } + else + { + return from_string(value.view()); + } +} + + +/// Convert a field's value to `nullptr_t`. +/** Yes, you read that right. This conversion does nothing useful. It always + * returns `nullptr`. + * + * Except... what if the field is not null? In that case, this throws + * @ref conversion_error. + */ +template<> +inline std::nullptr_t from_string(field const &value) +{ + if (not value.is_null()) + throw conversion_error{ + "Extracting non-null field into nullptr_t variable."}; + return nullptr; +} + + +/// Convert a field to a string. +template<> PQXX_LIBEXPORT std::string to_string(field const &value); +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/array-composite.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/array-composite.hxx new file mode 100644 index 000000000..d2b6603e5 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/array-composite.hxx @@ -0,0 +1,305 @@ +#if !defined(PQXX_ARRAY_COMPOSITE_HXX) +# define PQXX_ARRAY_COMPOSITE_HXX + +# include + +# include "pqxx/strconv.hxx" + +namespace pqxx::internal +{ +// Find the end of a double-quoted string. +/** `input[pos]` must be the opening double quote. + * + * Returns the offset of the first position after the closing quote. + */ +inline std::size_t scan_double_quoted_string( + char const input[], std::size_t size, std::size_t pos, + pqxx::internal::glyph_scanner_func *scan) +{ + // XXX: find_char<'"', '\\'>(). + auto next{scan(input, size, pos)}; + bool at_quote{false}; + for (pos = next, next = scan(input, size, pos); pos < size; + pos = next, next = scan(input, size, pos)) + { + if (at_quote) + { + if (next - pos == 1 and input[pos] == '"') + { + // We just read a pair of double quotes. Carry on. + at_quote = false; + } + else + { + // We just read one double quote, and now we're at a character that's + // not a second double quote. 
Ergo, that last character was the + // closing double quote and this is the position right after it. + return pos; + } + } + else if (next - pos == 1) + { + switch (input[pos]) + { + case '\\': + // Backslash escape. Skip ahead by one more character. + pos = next; + next = scan(input, size, pos); + break; + + case '"': + // This is either the closing double quote, or the first of a pair of + // double quotes. + at_quote = true; + break; + } + } + else + { + // Multibyte character. Carry on. + } + } + if (not at_quote) + throw argument_error{ + "Missing closing double-quote: " + std::string{input}}; + return pos; +} + + +/// Un-quote and un-escape a double-quoted SQL string. +inline std::string parse_double_quoted_string( + char const input[], std::size_t end, std::size_t pos, + pqxx::internal::glyph_scanner_func *scan) +{ + std::string output; + // Maximum output size is same as the input size, minus the opening and + // closing quotes. Or in the extreme opposite case, the real number could be + // half that. Usually it'll be a pretty close estimate. + output.reserve(std::size_t(end - pos - 2)); + + for (auto here{scan(input, end, pos)}, next{scan(input, end, here)}; + here < end - 1; here = next, next = scan(input, end, here)) + { + // A backslash here is always an escape. So is a double-quote, since we're + // inside the double-quoted string. In either case, we can just ignore the + // escape character and use the next character. This is the one redeeming + // feature of SQL's escaping system. + if ((next - here == 1) and (input[here] == '\\' or input[here] == '"')) + { + // Skip escape. + here = next; + next = scan(input, end, here); + } + output.append(input + here, input + next); + } + return output; +} + + +/// Find the end of an unquoted string in an array or composite-type value. +/** Stops when it gets to the end of the input; or when it sees any of the + * characters in STOP which has not been escaped. + * + * For array values, STOP is a comma, a semicolon, or a closing brace. For + * a value of a composite type, STOP is a comma or a closing parenthesis. + */ +template +inline std::size_t scan_unquoted_string( + char const input[], std::size_t size, std::size_t pos, + pqxx::internal::glyph_scanner_func *scan) +{ + bool at_backslash{false}; + auto next{scan(input, size, pos)}; + while ((pos < size) and + ((next - pos) > 1 or at_backslash or ((input[pos] != STOP) and ...))) + { + pos = next; + next = scan(input, size, pos); + at_backslash = + ((not at_backslash) and ((next - pos) == 1) and (input[pos] == '\\')); + } + return pos; +} + + +/// Parse an unquoted array entry or cfield of a composite-type field. +inline std::string parse_unquoted_string( + char const input[], std::size_t end, std::size_t pos, + pqxx::internal::glyph_scanner_func *scan) +{ + std::string output; + bool at_backslash{false}; + output.reserve(end - pos); + for (auto next{scan(input, end, pos)}; pos < end; + pos = next, next = scan(input, end, pos)) + { + at_backslash = + ((not at_backslash) and ((next - pos) == 1) and (input[pos] == '\\')); + if (not at_backslash) + output.append(input + pos, next - pos); + } + return output; +} + + +/// Parse a field of a composite-type value. +/** `T` is the C++ type of the field we're parsing, and `index` is its + * zero-based number. + * + * Strip off the leading parenthesis or bracket yourself before parsing. + * However, this function will parse the lcosing parenthesis or bracket. + * + * After a successful parse, `pos` will point at `std::end(text)`. 
+ * + * For the purposes of parsing, ranges and arrays count as compositve values, + * so this function supports parsing those. If you specifically need a closing + * parenthesis, check afterwards that `text` did not end in a bracket instead. + * + * @param index Index of the current field, zero-based. It will increment for + * the next field. + * @param input Full input text for the entire composite-type value. + * @param pos Starting position (in `input`) of the field that we're parsing. + * After parsing, this will point at the beginning of the next field if + * there is one, or one position past the last character otherwise. + * @param field Destination for the parsed value. + * @param scan Glyph scanning function for the relevant encoding type. + * @param last_field Number of the last field in the value (zero-based). When + * parsing the last field, this will equal `index`. + */ +template +inline void parse_composite_field( + std::size_t &index, std::string_view input, std::size_t &pos, T &field, + glyph_scanner_func *scan, std::size_t last_field) +{ + assert(index <= last_field); + auto next{scan(std::data(input), std::size(input), pos)}; + if ((next - pos) != 1) + throw conversion_error{"Non-ASCII character in composite-type syntax."}; + + // Expect a field. + switch (input[pos]) + { + case ',': + case ')': + case ']': + // The field is empty, i.e, null. + if constexpr (nullness::has_null) + field = nullness::null(); + else + throw conversion_error{ + "Can't read composite field " + to_string(index) + ": C++ type " + + type_name + " does not support nulls."}; + break; + + case '"': { + auto const stop{scan_double_quoted_string( + std::data(input), std::size(input), pos, scan)}; + auto const text{ + parse_double_quoted_string(std::data(input), stop, pos, scan)}; + field = from_string(text); + pos = stop; + } + break; + + default: { + auto const stop{scan_unquoted_string<',', ')', ']'>( + std::data(input), std::size(input), pos, scan)}; + auto const text{parse_unquoted_string(std::data(input), stop, pos, scan)}; + field = from_string(text); + pos = stop; + } + break; + } + + // Expect a comma or a closing parenthesis. + next = scan(std::data(input), std::size(input), pos); + + if ((next - pos) != 1) + throw conversion_error{ + "Unexpected non-ASCII character after composite field: " + + std::string{input}}; + + if (index < last_field) + { + if (input[pos] != ',') + throw conversion_error{ + "Found '" + std::string{input[pos]} + + "' in composite value where comma was expected: " + std::data(input)}; + } + else + { + if (input[pos] == ',') + throw conversion_error{ + "Composite value contained more fields than the expected " + + to_string(last_field) + ": " + std::data(input)}; + if (input[pos] != ')' and input[pos] != ']') + throw conversion_error{ + "Composite value has unexpected characters where closing parenthesis " + "was expected: " + + std::string{input}}; + if (next != std::size(input)) + throw conversion_error{ + "Composite value has unexpected text after closing parenthesis: " + + std::string{input}}; + } + + pos = next; + ++index; +} + + +/// Conservatively estimate buffer size needed for a composite field. +template +inline std::size_t size_composite_field_buffer(T const &field) +{ + if constexpr (is_unquoted_safe) + { + // Safe to copy, without quotes or escaping. Drop the terminating zero. + return size_buffer(field) - 1; + } + else + { + // + Opening quote. + // + Field budget. + // - Terminating zero. + // + Escaping for each byte in the field's string representation. 
+ // - Escaping for terminating zero. + // + Closing quote. + return 1 + 2 * (size_buffer(field) - 1) + 1; + } +} + + +template +inline void write_composite_field(char *&pos, char *end, T const &field) +{ + if constexpr (is_unquoted_safe) + { + // No need for quoting or escaping. Convert it straight into its final + // place in the buffer, and "backspace" the trailing zero. + pos = string_traits::into_buf(pos, end, field) - 1; + } + else + { + // The field may need escaping, which means we need an intermediate buffer. + // To avoid allocating that at run time, we use the end of the buffer that + // we have. + auto const budget{size_buffer(field)}; + *pos++ = '"'; + + // Now escape buf into its final position. + for (char const c : string_traits::to_buf(end - budget, end, field)) + { + if ((c == '"') or (c == '\\')) + *pos++ = '\\'; + + *pos++ = c; + } + + *pos++ = '"'; + } + + *pos++ = ','; +} +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/callgate.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/callgate.hxx new file mode 100644 index 000000000..42f7703e3 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/callgate.hxx @@ -0,0 +1,70 @@ +#ifndef PQXX_H_CALLGATE +#define PQXX_H_CALLGATE + +/* +Here's what a typical gate class definition looks like: + +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE @gateclass@ : callgate<@host@> +{ + friend class @client@; + + @gateclass@(reference x) : super(x) {} + + // Methods here. Use home() to access the host-class object. +}; +} // namespace pqxx::internal::gate +*/ + +namespace pqxx::internal +{ +/// Base class for call gates. +/** + * A call gate defines a limited, private interface on the host class that + * specified client classes can access. + * + * The metaphor works as follows: the gate stands in front of a "home," which + * is really a class, and only lets specific friends in. + * + * To implement a call gate that gives client C access to host H, + * * derive a gate class from callgate; + * * make the gate class a friend of H; + * * make C a friend of the gate class; and + * * implement "stuff C can do with H" as private members in the gate class. + * + * This special kind of "gated" friendship gives C private access to H, but + * only through an expressly limited interface. The gate class can access its + * host object as home(). + * + * Keep gate classes entirely stateless. They should be ultra-lightweight + * wrappers for their host classes, and be optimized away as much as possible + * by the compiler. Once you start adding state, you're on a slippery slope + * away from the pure, clean, limited interface pattern that gate classes are + * meant to implement. + * + * Ideally, all member functions of the gate class should be one-liners passing + * calls straight on to the host class. It can be useful however to break this + * rule temporarily during inter-class refactoring. + */ +template class PQXX_PRIVATE callgate +{ +protected: + /// This class, to keep constructors easy. + using super = callgate; + /// A reference to the host class. Helps keep constructors easy. + using reference = HOME &; + + callgate(reference x) : m_home(x) {} + + /// The home object. The gate class has full "private" access. 
+ reference home() const noexcept { return m_home; } + +private: + reference m_home; +}; +} // namespace pqxx::internal + +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/concat.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/concat.hxx new file mode 100644 index 000000000..cd28bde7c --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/concat.hxx @@ -0,0 +1,45 @@ +#if !defined(PQXX_CONCAT_HXX) +# define PQXX_CONCAT_HXX + +# include +# include + +# include "pqxx/strconv.hxx" + +namespace pqxx::internal +{ +/// Convert item to a string, write it into [here, end). +template +void render_item(TYPE const &item, char *&here, char *end) +{ + here = string_traits::into_buf(here, end, item) - 1; +} + + +// C++20: Support non-random_access_range ranges. +/// Efficiently combine a bunch of items into one big string. +/** Use this as an optimised version of string concatentation. It takes just + * about any type; it will represent each item as a string according to its + * @ref string_traits. + * + * This is a simpler, more specialised version of @ref separated_list for a + * statically known series of items, possibly of different types. + */ +template +[[nodiscard]] inline std::string concat(TYPE... item) +{ + std::string buf; + // Size to accommodate string representations of all inputs, minus their + // terminating zero bytes. + buf.resize(size_buffer(item...)); + + char *const data{buf.data()}; + char *here = data; + char *end = data + std::size(buf); + (render_item(item, here, end), ...); + + buf.resize(static_cast(here - data)); + return buf; +} +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/conversions.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/conversions.hxx new file mode 100644 index 000000000..1df4fdead --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/conversions.hxx @@ -0,0 +1,1188 @@ +#include +#include +#include +#include +#include +#include + +#if defined(PQXX_HAVE_SPAN) && __has_include() +# include +#endif + +#include +#include +#include + +#include "pqxx/types.hxx" +#include "pqxx/util.hxx" + + +/* Internal helpers for string conversion, and conversion implementations. + * + * Do not include this header directly. The libpqxx headers do it for you. + */ +namespace pqxx::internal +{ +/// Convert a number in [0, 9] to its ASCII digit. +inline constexpr char number_to_digit(int i) noexcept +{ + return static_cast(i + '0'); +} + + +/// Compute numeric value of given textual digit (assuming that it is a digit). +constexpr int digit_to_number(char c) noexcept +{ + return c - '0'; +} + + +/// Summarize buffer overrun. +/** Don't worry about the exact parameter types: the sizes will be reasonably + * small, and nonnegative. + */ +std::string PQXX_LIBEXPORT +state_buffer_overrun(int have_bytes, int need_bytes); + + +template +inline std::string state_buffer_overrun(HAVE have_bytes, NEED need_bytes) +{ + return state_buffer_overrun( + static_cast(have_bytes), static_cast(need_bytes)); +} + + +/// Throw exception for attempt to convert null to given type. +[[noreturn]] PQXX_LIBEXPORT void +throw_null_conversion(std::string const &type); + + +/// Deliberately nonfunctional conversion traits for `char` types. +/** There are no string conversions for `char` and its signed and unsigned + * variants. Such a conversion would be dangerously ambiguous: should we treat + * it as text, or as a small integer? It'd be an open invitation for bugs. + * + * But the error message when you get this wrong is very cryptic. 
So, we + * derive dummy @ref string_traits implementations from this dummy type, and + * ensure that the compiler disallows their use. The compiler error message + * will at least contain a hint of the root of the problem. + */ +template struct disallowed_ambiguous_char_conversion +{ + static char *into_buf(char *, char *, CHAR_TYPE) = delete; + static constexpr zview + to_buf(char *, char *, CHAR_TYPE const &) noexcept = delete; + + static constexpr std::size_t + size_buffer(CHAR_TYPE const &) noexcept = delete; + static CHAR_TYPE from_string(std::string_view) = delete; +}; + + +template PQXX_LIBEXPORT extern std::string to_string_float(T); + + +/// Generic implementation for into_buf, on top of to_buf. +template +inline char *generic_into_buf(char *begin, char *end, T const &value) +{ + zview const text{string_traits::to_buf(begin, end, value)}; + auto const space{end - begin}; + // Include the trailing zero. + auto const len = std::size(text) + 1; + if (internal::cmp_greater(len, space)) + throw conversion_overrun{ + "Not enough buffer space to insert " + type_name + ". " + + state_buffer_overrun(space, len)}; + std::memmove(begin, text.data(), len); + return begin + len; +} + + +/// String traits for builtin integral types (though not bool). +template struct integral_traits +{ + static PQXX_LIBEXPORT T from_string(std::string_view text); + static PQXX_LIBEXPORT zview to_buf(char *begin, char *end, T const &value); + static PQXX_LIBEXPORT char *into_buf(char *begin, char *end, T const &value); + + static constexpr std::size_t size_buffer(T const &) noexcept + { + /** Includes a sign if needed; the number of base-10 digits which the type + * can reliably represent; the one extra base-10 digit which the type can + * only partially represent; and the terminating zero. + */ + return std::is_signed_v + std::numeric_limits::digits10 + 1 + 1; + } +}; + + +/// String traits for builtin floating-point types. +template struct float_traits +{ + static PQXX_LIBEXPORT T from_string(std::string_view text); + static PQXX_LIBEXPORT zview to_buf(char *begin, char *end, T const &value); + static PQXX_LIBEXPORT char *into_buf(char *begin, char *end, T const &value); + + // Return a nonnegative integral value's number of decimal digits. + static constexpr std::size_t digits10(std::size_t value) noexcept + { + if (value < 10) + return 1; + else + return 1 + digits10(value / 10); + } + + static constexpr std::size_t size_buffer(T const &) noexcept + { + using lims = std::numeric_limits; + // See #328 for a detailed discussion on the maximum number of digits. + // + // In a nutshell: for the big cases, the scientific notation is always + // the shortest one, and therefore the one that to_chars will pick. + // + // So... How long can the scientific notation get? 1 (for sign) + 1 (for + // decimal point) + 1 (for 'e') + 1 (for exponent sign) + max_digits10 + + // max number of digits in the exponent + 1 (terminating zero). + // + // What's the max number of digits in the exponent? It's the max number of + // digits out of the most negative exponent and the most positive one. + // + // The longest positive exponent is easy: 1 + ceil(log10(max_exponent10)). + // (The extra 1 is because 10^n takes up 1 + n digits, not n.) + // + // The longest negative exponent is a bit harder: min_exponent10 gives us + // the smallest power of 10 which a normalised version of T can represent. + // But the smallest denormalised power of 10 that T can represent is + // another max_digits10 powers of 10 below that. 
+ // needs a minus sign. + // + // All this stuff messes with my head a bit because it's on the order of + // log10(log10(n)). It's easy to get the number of logs wrong. + auto const max_pos_exp{digits10(lims::max_exponent10)}; + // Really want std::abs(lims::min_exponent10), but MSVC 2017 apparently has + // problems with std::abs. So we use -lims::min_exponent10 instead. + auto const max_neg_exp{ + digits10(lims::max_digits10 - lims::min_exponent10)}; + return 1 + // Sign. + 1 + // Decimal point. + std::numeric_limits::max_digits10 + // Mantissa digits. + 1 + // Exponent "e". + 1 + // Exponent sign. + // Spell this weirdly to stop Windows compilers from reading this as + // a call to their "max" macro when NOMINMAX is not defined. + (std::max)(max_pos_exp, max_neg_exp) + // Exponent digits. + 1; // Terminating zero. + } +}; +} // namespace pqxx::internal + + +namespace pqxx +{ +/// The built-in arithmetic types do not have inherent null values. +template +struct nullness>> : no_null +{}; + + +template<> struct string_traits : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> +struct string_traits + : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> struct string_traits : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> struct string_traits : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> struct string_traits : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> +struct string_traits : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> +struct string_traits : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> +struct string_traits + : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> struct string_traits : internal::float_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> struct string_traits : internal::float_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> +struct string_traits : internal::float_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; + + +template<> struct string_traits +{ + static PQXX_LIBEXPORT bool from_string(std::string_view text); + + static constexpr zview to_buf(char *, char *, bool const &value) noexcept + { + return value ? "true"_zv : "false"_zv; + } + + static char *into_buf(char *begin, char *end, bool const &value) + { + return pqxx::internal::generic_into_buf(begin, end, value); + } + + static constexpr std::size_t size_buffer(bool const &) noexcept { return 6; } +}; + + +/// We don't support conversion to/from `char` types. +/** Why are these disallowed? Because they are ambiguous. It's not inherently + * clear whether we should treat values of these types as text or as small + * integers. Either choice would lead to bugs. + */ +template<> +struct string_traits + : internal::disallowed_ambiguous_char_conversion +{}; + +/// We don't support conversion to/from `char` types. +/** Why are these disallowed? Because they are ambiguous. It's not inherently + * clear whether we should treat values of these types as text or as small + * integers. Either choice would lead to bugs. 
+ */ +template<> +struct string_traits + : internal::disallowed_ambiguous_char_conversion +{}; + +/// We don't support conversion to/from `char` types. +/** Why are these disallowed? Because they are ambiguous. It's not inherently + * clear whether we should treat values of these types as text or as small + * integers. Either choice would lead to bugs. + */ +template<> +struct string_traits + : internal::disallowed_ambiguous_char_conversion +{}; + + +template<> inline constexpr bool is_unquoted_safe{true}; + + +template struct nullness> +{ + static constexpr bool has_null = true; + /// Technically, you could have an optional of an always-null type. + static constexpr bool always_null = nullness::always_null; + static constexpr bool is_null(std::optional const &v) noexcept + { + return ((not v.has_value()) or pqxx::is_null(*v)); + } + static constexpr std::optional null() { return {}; } +}; + + +template +inline constexpr format param_format(std::optional const &value) +{ + return param_format(*value); +} + + +template struct string_traits> +{ + static char *into_buf(char *begin, char *end, std::optional const &value) + { + return string_traits::into_buf(begin, end, *value); + } + + static zview to_buf(char *begin, char *end, std::optional const &value) + { + if (value.has_value()) + return string_traits::to_buf(begin, end, *value); + else + return {}; + } + + static std::optional from_string(std::string_view text) + { + return std::optional{ + std::in_place, string_traits::from_string(text)}; + } + + static std::size_t size_buffer(std::optional const &value) noexcept + { + return pqxx::size_buffer(value.value()); + } +}; + + +template +inline constexpr bool is_unquoted_safe>{is_unquoted_safe}; + + +template struct nullness> +{ + static constexpr bool has_null = (nullness::has_null or ...); + static constexpr bool always_null = (nullness::always_null and ...); + static constexpr bool is_null(std::variant const &value) noexcept + { + return std::visit( + [](auto const &i) noexcept { + return nullness>::is_null(i); + }, + value); + } + + // We don't support `null()` for `std::variant`. + /** It would be technically possible to have a `null` in the case where just + * one of the types has a null, but it gets complicated and arbitrary. + */ + static constexpr std::variant null() = delete; +}; + + +template struct string_traits> +{ + static char * + into_buf(char *begin, char *end, std::variant const &value) + { + return std::visit( + [begin, end](auto const &i) { + return string_traits>::into_buf(begin, end, i); + }, + value); + } + static zview to_buf(char *begin, char *end, std::variant const &value) + { + return std::visit( + [begin, end](auto const &i) { + return string_traits>::to_buf(begin, end, i); + }, + value); + } + static std::size_t size_buffer(std::variant const &value) noexcept + { + return std::visit( + [](auto const &i) noexcept { return pqxx::size_buffer(i); }, value); + } + + /** There's no from_string for std::variant. We could have one with a rule + * like "pick the first type which fits the value," but we'd have to look + * into how natural that API feels to users. 
+ */ + static std::variant from_string(std::string_view) = delete; +}; + + +template +inline constexpr format param_format(std::variant const &value) +{ + return std::visit([](auto &v) { return param_format(v); }, value); +} + + +template +inline constexpr bool is_unquoted_safe>{ + (is_unquoted_safe and ...)}; + + +template inline T from_string(std::stringstream const &text) +{ + return from_string(text.str()); +} + + +template<> struct string_traits +{ + static char *into_buf(char *, char *, std::nullptr_t) = delete; + + static constexpr zview + to_buf(char *, char *, std::nullptr_t const &) noexcept + { + return {}; + } + + static constexpr std::size_t size_buffer(std::nullptr_t = nullptr) noexcept + { + return 0; + } + static std::nullptr_t from_string(std::string_view) = delete; +}; + + +template<> struct string_traits +{ + static char *into_buf(char *, char *, std::nullopt_t) = delete; + + static constexpr zview + to_buf(char *, char *, std::nullopt_t const &) noexcept + { + return {}; + } + + static constexpr std::size_t size_buffer(std::nullopt_t) noexcept + { + return 0; + } + static std::nullopt_t from_string(std::string_view) = delete; +}; + + +template<> struct string_traits +{ + static char *into_buf(char *, char *, std::monostate) = delete; + + static constexpr zview + to_buf(char *, char *, std::monostate const &) noexcept + { + return {}; + } + + static constexpr std::size_t size_buffer(std::monostate) noexcept + { + return 0; + } + static std::monostate from_string(std::string_view) = delete; +}; + + +template<> inline constexpr bool is_unquoted_safe{true}; + + +template<> struct nullness +{ + static constexpr bool has_null = true; + static constexpr bool always_null = false; + static constexpr bool is_null(char const *t) noexcept + { + return t == nullptr; + } + static constexpr char const *null() noexcept { return nullptr; } +}; + + +/// String traits for C-style string ("pointer to char const"). +template<> struct string_traits +{ + static char const *from_string(std::string_view text) { return text.data(); } + + static zview to_buf(char *begin, char *end, char const *const &value) + { + return generic_to_buf(begin, end, value); + } + + static char *into_buf(char *begin, char *end, char const *const &value) + { + auto const space{end - begin}; + // Count the trailing zero, even though std::strlen() and friends don't. + auto const len{std::strlen(value) + 1}; + if (space < ptrdiff_t(len)) + throw conversion_overrun{ + "Could not copy string: buffer too small. " + + pqxx::internal::state_buffer_overrun(space, len)}; + std::memmove(begin, value, len); + return begin + len; + } + + static std::size_t size_buffer(char const *const &value) noexcept + { + return std::strlen(value) + 1; + } +}; + + +template<> struct nullness +{ + static constexpr bool has_null = true; + static constexpr bool always_null = false; + static constexpr bool is_null(char const *t) noexcept + { + return t == nullptr; + } + static constexpr char const *null() { return nullptr; } +}; + + +/// String traits for non-const C-style string ("pointer to char"). 
+template<> struct string_traits +{ + static char *into_buf(char *begin, char *end, char *const &value) + { + return string_traits::into_buf(begin, end, value); + } + static zview to_buf(char *begin, char *end, char *const &value) + { + return string_traits::to_buf(begin, end, value); + } + static std::size_t size_buffer(char *const &value) noexcept + { + return string_traits::size_buffer(value); + } + + /// Don't allow conversion to this type since it breaks const-safety. + static char *from_string(std::string_view) = delete; +}; + + +template struct nullness : no_null +{}; + + +/// String traits for C-style string constant ("array of char"). +/** @warning This assumes that every array-of-char is a C-style string literal. + * So, it must include a trailing zero. and it must have static duration. + */ +template struct string_traits +{ + static constexpr zview + to_buf(char *, char *, char const (&value)[N]) noexcept + { + return zview{value, N - 1}; + } + + static char *into_buf(char *begin, char *end, char const (&value)[N]) + { + if (internal::cmp_less(end - begin, size_buffer(value))) + throw conversion_overrun{ + "Could not convert char[] to string: too long for buffer."}; + std::memcpy(begin, value, N); + return begin + N; + } + static constexpr std::size_t size_buffer(char const (&)[N]) noexcept + { + return N; + } + + /// Don't allow conversion to this type. + static void from_string(std::string_view) = delete; +}; + + +template<> struct nullness : no_null +{}; + + +template<> struct string_traits +{ + static std::string from_string(std::string_view text) + { + return std::string{text}; + } + + static char *into_buf(char *begin, char *end, std::string const &value) + { + if (internal::cmp_greater_equal(std::size(value), end - begin)) + throw conversion_overrun{ + "Could not convert string to string: too long for buffer."}; + // Include the trailing zero. + value.copy(begin, std::size(value)); + begin[std::size(value)] = '\0'; + return begin + std::size(value) + 1; + } + + static zview to_buf(char *begin, char *end, std::string const &value) + { + return generic_to_buf(begin, end, value); + } + + static std::size_t size_buffer(std::string const &value) noexcept + { + return std::size(value) + 1; + } +}; + + +/// There's no real null for `std::string_view`. +/** I'm not sure how clear-cut this is: a `string_view` may have a null + * data pointer, which is analogous to a null `char` pointer. + */ +template<> struct nullness : no_null +{}; + + +/// String traits for `string_view`. +template<> struct string_traits +{ + static constexpr std::size_t + size_buffer(std::string_view const &value) noexcept + { + return std::size(value) + 1; + } + + static char *into_buf(char *begin, char *end, std::string_view const &value) + { + if (internal::cmp_greater_equal(std::size(value), end - begin)) + throw conversion_overrun{ + "Could not store string_view: too long for buffer."}; + value.copy(begin, std::size(value)); + begin[std::size(value)] = '\0'; + return begin + std::size(value) + 1; + } + + /// Don't convert to this type; it has nowhere to store its contents. + static std::string_view from_string(std::string_view) = delete; +}; + + +template<> struct nullness : no_null +{}; + + +/// String traits for `zview`. 
+template<> struct string_traits +{ + static constexpr std::size_t + size_buffer(std::string_view const &value) noexcept + { + return std::size(value) + 1; + } + + static char *into_buf(char *begin, char *end, zview const &value) + { + auto const size{std::size(value)}; + if (internal::cmp_less_equal(end - begin, std::size(value))) + throw conversion_overrun{"Not enough buffer space to store this zview."}; + value.copy(begin, size); + begin[size] = '\0'; + return begin + size + 1; + } + + static std::string_view to_buf(char *begin, char *end, zview const &value) + { + return {into_buf(begin, end, value), std::size(value)}; + } + + /// Don't convert to this type; it has nowhere to store its contents. + static zview from_string(std::string_view) = delete; +}; + + +template<> struct nullness : no_null +{}; + + +template<> struct string_traits +{ + static std::size_t size_buffer(std::stringstream const &) = delete; + + static std::stringstream from_string(std::string_view text) + { + std::stringstream stream; + stream.write(text.data(), std::streamsize(std::size(text))); + return stream; + } + + static char *into_buf(char *, char *, std::stringstream const &) = delete; + static std::string_view + to_buf(char *, char *, std::stringstream const &) = delete; +}; + + +template<> struct nullness +{ + static constexpr bool has_null = true; + static constexpr bool always_null = true; + static constexpr bool is_null(std::nullptr_t const &) noexcept + { + return true; + } + static constexpr std::nullptr_t null() noexcept { return nullptr; } +}; + + +template<> struct nullness +{ + static constexpr bool has_null = true; + static constexpr bool always_null = true; + static constexpr bool is_null(std::nullopt_t const &) noexcept + { + return true; + } + static constexpr std::nullopt_t null() noexcept { return std::nullopt; } +}; + + +template<> struct nullness +{ + static constexpr bool has_null = true; + static constexpr bool always_null = true; + static constexpr bool is_null(std::monostate const &) noexcept + { + return true; + } + static constexpr std::monostate null() noexcept { return {}; } +}; + + +template struct nullness> +{ + static constexpr bool has_null = true; + static constexpr bool always_null = false; + static constexpr bool is_null(std::unique_ptr const &t) noexcept + { + return not t or pqxx::is_null(*t); + } + static constexpr std::unique_ptr null() { return {}; } +}; + + +template +struct string_traits> +{ + static std::unique_ptr from_string(std::string_view text) + { + return std::make_unique(string_traits::from_string(text)); + } + + static char * + into_buf(char *begin, char *end, std::unique_ptr const &value) + { + return string_traits::into_buf(begin, end, *value); + } + + static zview + to_buf(char *begin, char *end, std::unique_ptr const &value) + { + if (value) + return string_traits::to_buf(begin, end, *value); + else + return {}; + } + + static std::size_t + size_buffer(std::unique_ptr const &value) noexcept + { + return pqxx::size_buffer(*value.get()); + } +}; + + +template +inline format param_format(std::unique_ptr const &value) +{ + return param_format(*value); +} + + +template +inline constexpr bool is_unquoted_safe>{ + is_unquoted_safe}; + + +template struct nullness> +{ + static constexpr bool has_null = true; + static constexpr bool always_null = false; + static constexpr bool is_null(std::shared_ptr const &t) noexcept + { + return not t or pqxx::is_null(*t); + } + static constexpr std::shared_ptr null() { return {}; } +}; + + +template struct string_traits> +{ + 
static std::shared_ptr from_string(std::string_view text) + { + return std::make_shared(string_traits::from_string(text)); + } + + static zview to_buf(char *begin, char *end, std::shared_ptr const &value) + { + return string_traits::to_buf(begin, end, *value); + } + static char * + into_buf(char *begin, char *end, std::shared_ptr const &value) + { + return string_traits::into_buf(begin, end, *value); + } + static std::size_t size_buffer(std::shared_ptr const &value) noexcept + { + return pqxx::size_buffer(*value); + } +}; + + +template format param_format(std::shared_ptr const &value) +{ + return param_format(*value); +} + + +template +inline constexpr bool is_unquoted_safe>{ + is_unquoted_safe}; + + +template<> +struct nullness> + : no_null> +{}; + + +#if defined(PQXX_HAVE_CONCEPTS) +template struct nullness : no_null +{}; + + +template inline constexpr format param_format(DATA const &) +{ + return format::binary; +} + + +template struct string_traits +{ + static std::size_t size_buffer(DATA const &value) noexcept + { + return internal::size_esc_bin(std::size(value)); + } + + static zview to_buf(char *begin, char *end, DATA const &value) + { + return generic_to_buf(begin, end, value); + } + + static char *into_buf(char *begin, char *end, DATA const &value) + { + auto const budget{size_buffer(value)}; + if (internal::cmp_less(end - begin, budget)) + throw conversion_overrun{ + "Not enough buffer space to escape binary data."}; + internal::esc_bin(value, begin); + return begin + budget; + } + + static DATA from_string(std::string_view text) + { + auto const size{pqxx::internal::size_unesc_bin(std::size(text))}; + std::basic_string buf; + buf.resize(size); + pqxx::internal::unesc_bin(text, reinterpret_cast(buf.data())); + return buf; + } +}; +#endif // PQXX_HAVE_CONCEPTS + + +template<> struct string_traits> +{ + static std::size_t + size_buffer(std::basic_string const &value) noexcept + { + return internal::size_esc_bin(std::size(value)); + } + + static zview + to_buf(char *begin, char *end, std::basic_string const &value) + { + return generic_to_buf(begin, end, value); + } + + static char * + into_buf(char *begin, char *end, std::basic_string const &value) + { + auto const budget{size_buffer(value)}; + if (internal::cmp_less(end - begin, budget)) + throw conversion_overrun{ + "Not enough buffer space to escape binary data."}; + internal::esc_bin(value, begin); + return begin + budget; + } + + static std::basic_string from_string(std::string_view text) + { + auto const size{pqxx::internal::size_unesc_bin(std::size(text))}; + std::basic_string buf; + buf.resize(size); + pqxx::internal::unesc_bin(text, reinterpret_cast(buf.data())); + return buf; + } +}; + + +template<> +inline constexpr format param_format(std::basic_string const &) +{ + return format::binary; +} + + +template<> +struct nullness> + : no_null> +{}; + + +template<> struct string_traits> +{ + static std::size_t + size_buffer(std::basic_string_view const &value) noexcept + { + return internal::size_esc_bin(std::size(value)); + } + + static zview to_buf( + char *begin, char *end, std::basic_string_view const &value) + { + return generic_to_buf(begin, end, value); + } + + static char *into_buf( + char *begin, char *end, std::basic_string_view const &value) + { + auto const budget{size_buffer(value)}; + if (internal::cmp_less(end - begin, budget)) + throw conversion_overrun{ + "Not enough buffer space to escape binary data."}; + internal::esc_bin(value, begin); + return begin + budget; + } + + // There's no from_string, because 
there's nobody to hold the data. +}; + +template<> +inline constexpr format param_format(std::basic_string_view const &) +{ + return format::binary; +} +} // namespace pqxx + + +namespace pqxx::internal +{ +/// String traits for SQL arrays. +template struct array_string_traits +{ +private: + using elt_type = strip_t>; + using elt_traits = string_traits; + static constexpr zview s_null{"NULL"}; + +public: + static zview to_buf(char *begin, char *end, Container const &value) + { + return generic_to_buf(begin, end, value); + } + + static char *into_buf(char *begin, char *end, Container const &value) + { + std::size_t const budget{size_buffer(value)}; + if (internal::cmp_less(end - begin, budget)) + throw conversion_overrun{ + "Not enough buffer space to convert array to string."}; + + char *here = begin; + *here++ = '{'; + + bool nonempty{false}; + for (auto const &elt : value) + { + if (is_null(elt)) + { + s_null.copy(here, std::size(s_null)); + here += std::size(s_null); + } + else if constexpr (is_sql_array) + { + // Render nested array in-place. Then erase the trailing zero. + here = elt_traits::into_buf(here, end, elt) - 1; + } + else if constexpr (is_unquoted_safe) + { + // No need to quote or escape. Just convert the value straight into + // its place in the array, and "backspace" the trailing zero. + here = elt_traits::into_buf(here, end, elt) - 1; + } + else + { + *here++ = '"'; + + // Use the tail end of the destination buffer as an intermediate + // buffer. + auto const elt_budget{pqxx::size_buffer(elt)}; + for (char const c : elt_traits::to_buf(end - elt_budget, end, elt)) + { + if (c == '\\' or c == '"') + *here++ = '\\'; + *here++ = c; + } + *here++ = '"'; + } + *here++ = array_separator; + nonempty = true; + } + + // Erase that last comma, if present. + if (nonempty) + here--; + + *here++ = '}'; + *here++ = '\0'; + + return here; + } + + static std::size_t size_buffer(Container const &value) noexcept + { + if constexpr (is_unquoted_safe) + return 3 + std::accumulate( + std::begin(value), std::end(value), std::size_t{}, + [](std::size_t acc, elt_type const &elt) { + return acc + + (pqxx::is_null(elt) ? + std::size(s_null) : + elt_traits::size_buffer(elt)) - + 1; + }); + else + return 3 + std::accumulate( + std::begin(value), std::end(value), std::size_t{}, + [](std::size_t acc, elt_type const &elt) { + // Opening and closing quotes, plus worst-case escaping, + // but don't count the trailing zeroes. + std::size_t const elt_size{ + pqxx::is_null(elt) ? std::size(s_null) : + elt_traits::size_buffer(elt) - 1}; + return acc + 2 * elt_size + 2; + }); + } + + // We don't yet support parsing of array types using from_string. Doing so + // would require a reference to the connection. +}; +} // namespace pqxx::internal + + +namespace pqxx +{ +template +struct nullness> : no_null> +{}; + + +template +struct string_traits> + : internal::array_string_traits> +{}; + + +/// We don't know how to pass array params in binary format, so pass as text. +template +inline constexpr format param_format(std::vector const &) +{ + return format::text; +} + + +/// A `std::vector` is a binary string. Other vectors are not. +template +inline constexpr format param_format(std::vector const &) +{ + return format::binary; +} + + +template inline constexpr bool is_sql_array>{true}; + + +template +struct nullness> : no_null> +{}; + + +template +struct string_traits> + : internal::array_string_traits> +{}; + + +/// We don't know how to pass array params in binary format, so pass as text. 
+template +inline constexpr format param_format(std::array const &) +{ + return format::text; +} + + +/// An array of `std::byte` is a binary string. +template +inline constexpr format param_format(std::array const &) +{ + return format::binary; +} + + +template +inline constexpr bool is_sql_array>{true}; +} // namespace pqxx + + +namespace pqxx +{ +template inline std::string to_string(T const &value) +{ + if (is_null(value)) + throw conversion_error{ + "Attempt to convert null " + type_name + " to a string."}; + + std::string buf; + // We can't just reserve() space; modifying the terminating zero leads to + // undefined behaviour. + buf.resize(size_buffer(value)); + auto const data{buf.data()}; + auto const end{ + string_traits::into_buf(data, data + std::size(buf), value)}; + buf.resize(static_cast(end - data - 1)); + return buf; +} + + +template<> inline std::string to_string(float const &value) +{ + return internal::to_string_float(value); +} +template<> inline std::string to_string(double const &value) +{ + return internal::to_string_float(value); +} +template<> inline std::string to_string(long double const &value) +{ + return internal::to_string_float(value); +} +template<> inline std::string to_string(std::stringstream const &value) +{ + return value.str(); +} + + +template inline void into_string(T const &value, std::string &out) +{ + if (is_null(value)) + throw conversion_error{ + "Attempt to convert null " + type_name + " to a string."}; + + // We can't just reserve() data; modifying the terminating zero leads to + // undefined behaviour. + out.resize(size_buffer(value) + 1); + auto const data{out.data()}; + auto const end{ + string_traits::into_buf(data, data + std::size(out), value)}; + out.resize(static_cast(end - data - 1)); +} +} // namespace pqxx diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/encoding_group.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/encoding_group.hxx new file mode 100644 index 000000000..e17736e5b --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/encoding_group.hxx @@ -0,0 +1,60 @@ +/** Enum type for supporting encodings in libpqxx + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ENCODING_GROUP +#define PQXX_H_ENCODING_GROUP + +#include + +namespace pqxx::internal +{ +// Types of encodings supported by PostgreSQL, see +// https://www.postgresql.org/docs/current/static/multibyte.html#CHARSET-TABLE +enum class encoding_group +{ + // Handles all single-byte fixed-width encodings + MONOBYTE, + + // Multibyte encodings. + // Many of these can embed ASCII-like bytes inside multibyte characters, + // notably Big5, SJIS, SHIFT_JIS_2004, GP18030, GBK, JOHAB, UHC. + BIG5, + EUC_CN, + // TODO: Merge EUC_JP and EUC_JIS_2004? + EUC_JP, + EUC_JIS_2004, + EUC_KR, + EUC_TW, + GB18030, + GBK, + JOHAB, + MULE_INTERNAL, + // TODO: Merge SJIS and SHIFT_JIS_2004? + SJIS, + SHIFT_JIS_2004, + UHC, + UTF8, +}; + + +// TODO:: Can we just use string_view now? +/// Function type: "find the end of the current glyph." +/** This type of function takes a text buffer, and a location in that buffer, + * and returns the location one byte past the end of the current glyph. + * + * The start offset marks the beginning of the current glyph. It must fall + * within the buffer. 
+ * + * There are multiple different glyph scanner implementations, for different + * kinds of encodings. + */ +using glyph_scanner_func = + std::size_t(char const buffer[], std::size_t buffer_len, std::size_t start); +} // namespace pqxx::internal + +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/encodings.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/encodings.hxx new file mode 100644 index 000000000..ba7fecc70 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/encodings.hxx @@ -0,0 +1,90 @@ +/** Internal string encodings support for libpqxx + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ENCODINGS +#define PQXX_H_ENCODINGS + +#include "pqxx/internal/encoding_group.hxx" + +#include +#include + + +namespace pqxx::internal +{ +char const *name_encoding(int encoding_id); + +/// Convert libpq encoding enum or encoding name to its libpqxx group. +encoding_group enc_group(int /* libpq encoding ID */); +encoding_group enc_group(std::string_view); + + +/// Look up the glyph scanner function for a given encoding group. +/** To identify the glyph boundaries in a buffer, call this to obtain the + * scanner function appropriate for the buffer's encoding. Then, repeatedly + * call the scanner function to find the glyphs. + */ +PQXX_LIBEXPORT glyph_scanner_func *get_glyph_scanner(encoding_group); + + +// TODO: For ASCII search, treat UTF8/EUC_*/MULE_INTERNAL as MONOBYTE. + +/// Find any of the ASCII characters `NEEDLE` in `haystack`. +/** Scans through `haystack` until it finds a single-byte character that + * matches any value in `NEEDLE`. + * + * If it finds one, returns its offset. If not, returns the end of the + * haystack. + */ +template +inline std::size_t find_char( + glyph_scanner_func *scanner, std::string_view haystack, + std::size_t here = 0u) +{ + auto const sz{std::size(haystack)}; + auto const data{std::data(haystack)}; + while (here < sz) + { + auto next{scanner(data, sz, here)}; + // (For some reason gcc had a problem with a right-fold here. But clang + // was fine.) + if ((... or (data[here] == NEEDLE))) + { + // Also check against a multibyte character starting with a bytes which + // just happens to match one of the ASCII bytes we're looking for. It'd + // be cleaner to check that first, but either works. So, let's apply the + // most selective filter first and skip this check in almost all cases. + if (next == here + 1) + return here; + } + + // Nope, no hit. Move on. + here = next; + } + return sz; +} + + +/// Iterate over the glyphs in a buffer. +/** Scans the glyphs in the buffer, and for each, passes its begin and its + * one-past-end pointers to `callback`. 
+ */ +template +inline void for_glyphs( + encoding_group enc, CALLABLE callback, char const buffer[], + std::size_t buffer_len, std::size_t start = 0) +{ + auto const scan{get_glyph_scanner(enc)}; + for (std::size_t here = start, next; here < buffer_len; here = next) + { + next = scan(buffer, buffer_len, here); + callback(buffer + here, buffer + next); + } +} +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-errorhandler.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-errorhandler.hxx new file mode 100644 index 000000000..ffc12a6cf --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-errorhandler.hxx @@ -0,0 +1,26 @@ +#include + +namespace pqxx +{ +class connection; +class errorhandler; +} // namespace pqxx + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_errorhandler : callgate +{ + friend class pqxx::errorhandler; + + connection_errorhandler(reference x) : super(x) {} + + void register_errorhandler(errorhandler *h) + { + home().register_errorhandler(h); + } + void unregister_errorhandler(errorhandler *h) + { + home().unregister_errorhandler(h); + } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-largeobject.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-largeobject.hxx new file mode 100644 index 000000000..49feaf9e6 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-largeobject.hxx @@ -0,0 +1,35 @@ +#include + +#include +#include + +namespace pqxx +{ +class blob; +class largeobject; +} // namespace pqxx + + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_largeobject : callgate +{ + friend class pqxx::blob; + friend class pqxx::largeobject; + + connection_largeobject(reference x) : super(x) {} + + pq::PGconn *raw_connection() const { return home().raw_connection(); } +}; + + +class PQXX_PRIVATE const_connection_largeobject : callgate +{ + friend class pqxx::blob; + friend class pqxx::largeobject; + + const_connection_largeobject(reference x) : super(x) {} + + std::string error_message() const { return home().err_msg(); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-notification_receiver.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-notification_receiver.hxx new file mode 100644 index 000000000..0bcb2db17 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-notification_receiver.hxx @@ -0,0 +1,29 @@ +#include + +#include "pqxx/connection.hxx" + + +namespace pqxx +{ +class notification_receiver; +} + + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_notification_receiver : callgate +{ + friend class pqxx::notification_receiver; + + connection_notification_receiver(reference x) : super(x) {} + + void add_receiver(notification_receiver *receiver) + { + home().add_receiver(receiver); + } + void remove_receiver(notification_receiver *receiver) noexcept + { + home().remove_receiver(receiver); + } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-pipeline.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-pipeline.hxx new file mode 100644 index 000000000..c6ae6e17a --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-pipeline.hxx @@ -0,0 +1,23 @@ +#include "pqxx/internal/libpq-forward.hxx" +#include + +#include "pqxx/pipeline.hxx" + +namespace 
pqxx::internal::gate +{ +class PQXX_PRIVATE connection_pipeline : callgate +{ + friend class pqxx::pipeline; + + connection_pipeline(reference x) : super(x) {} + + void start_exec(char const query[]) { home().start_exec(query); } + pqxx::internal::pq::PGresult *get_result() { return home().get_result(); } + void cancel_query() { home().cancel_query(); } + + bool consume_input() noexcept { return home().consume_input(); } + bool is_busy() const noexcept { return home().is_busy(); } + + int encoding_id() { return home().encoding_id(); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-sql_cursor.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-sql_cursor.hxx new file mode 100644 index 000000000..51a889844 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-sql_cursor.hxx @@ -0,0 +1,19 @@ +#include + +namespace pqxx::internal +{ +class sql_cursor; +} + + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_sql_cursor : callgate +{ + friend class pqxx::internal::sql_cursor; + + connection_sql_cursor(reference x) : super(x) {} + + result exec(char const query[]) { return home().exec(query); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-stream_from.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-stream_from.hxx new file mode 100644 index 000000000..8961e7146 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-stream_from.hxx @@ -0,0 +1,15 @@ +#include + +#include "pqxx/connection.hxx" + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_stream_from : callgate +{ + friend class pqxx::stream_from; + + connection_stream_from(reference x) : super{x} {} + + auto read_copy_line() { return home().read_copy_line(); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-stream_to.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-stream_to.hxx new file mode 100644 index 000000000..a6974fb21 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-stream_to.hxx @@ -0,0 +1,17 @@ +#include + +#include "pqxx/stream_to.hxx" + + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_stream_to : callgate +{ + friend class pqxx::stream_to; + + connection_stream_to(reference x) : super(x) {} + + void write_copy_line(std::string_view line) { home().write_copy_line(line); } + void end_copy_write() { home().end_copy_write(); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-transaction.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-transaction.hxx new file mode 100644 index 000000000..74d659253 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/connection-transaction.hxx @@ -0,0 +1,44 @@ +#include + +namespace pqxx +{ +class connection; +} + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_transaction : callgate +{ + friend class pqxx::transaction_base; + + connection_transaction(reference x) : super(x) {} + + template result exec(STRING query, std::string_view desc) + { + return home().exec(query, desc); + } + + void register_transaction(transaction_base *t) + { + home().register_transaction(t); + } + void unregister_transaction(transaction_base *t) noexcept + { + home().unregister_transaction(t); + } + + auto read_copy_line() { return home().read_copy_line(); } + void 
write_copy_line(std::string_view line) { home().write_copy_line(line); } + void end_copy_write() { home().end_copy_write(); } + + result exec_prepared(zview statement, internal::c_params const &args) + { + return home().exec_prepared(statement, args); + } + + result exec_params(zview query, internal::c_params const &args) + { + return home().exec_params(query, args); + } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/gates/errorhandler-connection.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/errorhandler-connection.hxx new file mode 100644 index 000000000..5560cedec --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/errorhandler-connection.hxx @@ -0,0 +1,13 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE errorhandler_connection : callgate +{ + friend class pqxx::connection; + + errorhandler_connection(reference x) : super(x) {} + + void unregister() noexcept { home().unregister(); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/gates/icursor_iterator-icursorstream.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/icursor_iterator-icursorstream.hxx new file mode 100644 index 000000000..296d22145 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/icursor_iterator-icursorstream.hxx @@ -0,0 +1,24 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE icursor_iterator_icursorstream : callgate +{ + friend class pqxx::icursorstream; + + icursor_iterator_icursorstream(reference x) : super(x) {} + + icursor_iterator::difference_type pos() const noexcept + { + return home().pos(); + } + + icursor_iterator *get_prev() { return home().m_prev; } + void set_prev(icursor_iterator *i) { home().m_prev = i; } + + icursor_iterator *get_next() { return home().m_next; } + void set_next(icursor_iterator *i) { home().m_next = i; } + + void fill(result const &r) { home().fill(r); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/gates/icursorstream-icursor_iterator.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/icursorstream-icursor_iterator.hxx new file mode 100644 index 000000000..56056d5ef --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/icursorstream-icursor_iterator.hxx @@ -0,0 +1,32 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE icursorstream_icursor_iterator : callgate +{ + friend class pqxx::icursor_iterator; + + icursorstream_icursor_iterator(reference x) : super(x) {} + + void insert_iterator(icursor_iterator *i) noexcept + { + home().insert_iterator(i); + } + + void remove_iterator(icursor_iterator *i) const noexcept + { + home().remove_iterator(i); + } + + icursorstream::size_type forward() { return home().forward(); } + icursorstream::size_type forward(icursorstream::size_type n) + { + return home().forward(n); + } + + void service_iterators(icursorstream::difference_type p) + { + home().service_iterators(p); + } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/gates/result-connection.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/result-connection.hxx new file mode 100644 index 000000000..daa0808c0 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/result-connection.hxx @@ -0,0 +1,14 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE result_connection : callgate +{ + friend class pqxx::connection; + + result_connection(reference x) : super(x) {} + + operator 
bool() const { return bool(home()); } + bool operator!() const { return not home(); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/gates/result-creation.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/result-creation.hxx new file mode 100644 index 000000000..3d9205f2c --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/result-creation.hxx @@ -0,0 +1,24 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE result_creation : callgate +{ + friend class pqxx::connection; + friend class pqxx::pipeline; + + result_creation(reference x) : super(x) {} + + static result create( + internal::pq::PGresult *rhs, std::shared_ptr const &query, + encoding_group enc) + { + return result(rhs, query, enc); + } + + void check_status(std::string_view desc = ""sv) const + { + return home().check_status(desc); + } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/gates/result-pipeline.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/result-pipeline.hxx new file mode 100644 index 000000000..3ebe436d2 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/result-pipeline.hxx @@ -0,0 +1,16 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE result_pipeline : callgate +{ + friend class pqxx::pipeline; + + result_pipeline(reference x) : super(x) {} + + std::shared_ptr query_ptr() const + { + return home().query_ptr(); + } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/gates/result-sql_cursor.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/result-sql_cursor.hxx new file mode 100644 index 000000000..78b450739 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/result-sql_cursor.hxx @@ -0,0 +1,13 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE result_sql_cursor : callgate +{ + friend class pqxx::internal::sql_cursor; + + result_sql_cursor(reference x) : super(x) {} + + char const *cmd_status() const noexcept { return home().cmd_status(); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/gates/transaction-sql_cursor.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/transaction-sql_cursor.hxx new file mode 100644 index 000000000..4ed78dc93 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/transaction-sql_cursor.hxx @@ -0,0 +1,10 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE transaction_sql_cursor : callgate +{ + friend class pqxx::internal::sql_cursor; + transaction_sql_cursor(reference x) : super(x) {} +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/gates/transaction-transaction_focus.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/transaction-transaction_focus.hxx new file mode 100644 index 000000000..ca7939a99 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/gates/transaction-transaction_focus.hxx @@ -0,0 +1,30 @@ +#include + +#include "pqxx/transaction_base.hxx" + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE transaction_transaction_focus : callgate +{ + friend class pqxx::transaction_focus; + + transaction_transaction_focus(reference x) : super(x) {} + + void register_focus(transaction_focus *focus) + { + home().register_focus(focus); + } + void unregister_focus(transaction_focus *focus) noexcept + { + home().unregister_focus(focus); + } + void register_pending_error(zview error) + { + 
home().register_pending_error(error); + } + void register_pending_error(std::string &&error) + { + home().register_pending_error(std::move(error)); + } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/header-post.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/header-post.hxx new file mode 100644 index 000000000..ff6bf8986 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/header-post.hxx @@ -0,0 +1,22 @@ +/* Compiler deficiency workarounds for compiling libpqxx headers. + * + * To be included at the end of each libpqxx header, in order to restore the + * client program's settings. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +// NO GUARDS HERE! This code should be executed every time! + +#if defined(_MSC_VER) +# pragma warning(pop) // Restore compiler's warning state +#endif + +#if !defined(PQXX_HEADER_PRE) +# error "Include pqxx/internal/header-post.hxx AFTER its 'pre' counterpart." +#endif + +#undef PQXX_HEADER_PRE diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/header-pre.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/header-pre.hxx new file mode 100644 index 000000000..abc1a398d --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/header-pre.hxx @@ -0,0 +1,169 @@ +/* Compiler settings for compiling libpqxx headers, and workarounds for all. + * + * Include this before including any other libpqxx headers from within libpqxx. + * And to balance it out, also include header-post.hxx at the end of the batch + * of headers. + * + * The public libpqxx headers (e.g. ``) include this already; + * there's no need to do this from within an application. + * + * Include this file at the highest aggregation level possible to avoid nesting + * and to keep things simple. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ + +// NO GUARD HERE! This part should be included every time this file is. +#if defined(_MSC_VER) + +// Save compiler's warning state, and set warning level 4 for maximum +// sensitivity to warnings. +# pragma warning(push, 4) + +// Visual C++ generates some entirely unreasonable warnings. Disable them. +// Copy constructor could not be generated. +# pragma warning(disable : 4511) +// Assignment operator could not be generated. +# pragma warning(disable : 4512) +// Can't expose outside classes without exporting them. Except the MSVC docs +// say please ignore the warning if it's a standard library class. +# pragma warning(disable : 4251) +// Can't derive library classes from outside classes without exporting them. +// Except the MSVC docs say please ignore the warning if the parent class is +// in the standard library. +# pragma warning(disable : 4275) +// Can't inherit from non-exported class. +# pragma warning(disable : 4275) + +#endif // _MSC_VER + + +#if defined(PQXX_HEADER_PRE) +# error "Avoid nesting #include of pqxx/internal/header-pre.hxx." +#endif + +#define PQXX_HEADER_PRE + + +// Workarounds & definitions that need to be included even in library's headers +#include "pqxx/config-public-compiler.h" + +// Enable ISO-646 alternative operaotr representations: "and" instead of "&&" +// etc. on older compilers. 
C++20 removes this header. +#if __has_include() +# include +#endif + + +#if defined(PQXX_HAVE_GCC_PURE) +/// Declare function "pure": no side effects, only reads globals and its args. +# define PQXX_PURE __attribute__((pure)) +#else +# define PQXX_PURE /* pure */ +#endif + + +#if defined(__GNUC__) +/// Tell the compiler to optimise a function for size, not speed. +# define PQXX_COLD __attribute__((cold)) +#else +# define PQXX_COLD /* cold */ +#endif + + +// Workarounds for Windows +#ifdef _WIN32 + +/* For now, export DLL symbols if _DLL is defined. This is done automatically + * by the compiler when linking to the dynamic version of the runtime library, + * according to "gzh" + */ +# if defined(PQXX_SHARED) && !defined(PQXX_LIBEXPORT) +# define PQXX_LIBEXPORT __declspec(dllimport) +# endif // PQXX_SHARED && !PQXX_LIBEXPORT + + +// Workarounds for Microsoft Visual C++ +# ifdef _MSC_VER + +// Suppress vtables on abstract classes. +# define PQXX_NOVTABLE __declspec(novtable) + +// Automatically link with the appropriate libpq (static or dynamic, debug or +// release). The default is to use the release DLL. Define PQXX_PQ_STATIC to +// link to a static version of libpq, and _DEBUG to link to a debug version. +// The two may be combined. +# if defined(PQXX_AUTOLINK) +# if defined(PQXX_PQ_STATIC) +# ifdef _DEBUG +# pragma comment(lib, "libpqd") +# else +# pragma comment(lib, "libpq") +# endif +# else +# ifdef _DEBUG +# pragma comment(lib, "libpqddll") +# else +# pragma comment(lib, "libpqdll") +# endif +# endif +# endif + +// If we're not compiling libpqxx itself, automatically link with the +// appropriate libpqxx library. To link with the libpqxx DLL, define +// PQXX_SHARED; the default is to link with the static library. A static link +// is the recommended practice. +// +// The preprocessor macro PQXX_INTERNAL is used to detect whether we +// are compiling the libpqxx library itself. When you compile the library +// yourself using your own project file, make sure to include this macro. +# if defined(PQXX_AUTOLINK) && !defined(PQXX_INTERNAL) +# ifdef PQXX_SHARED +# ifdef _DEBUG +# pragma comment(lib, "libpqxxD") +# else +# pragma comment(lib, "libpqxx") +# endif +# else // !PQXX_SHARED +# ifdef _DEBUG +# pragma comment(lib, "libpqxx_staticD") +# else +# pragma comment(lib, "libpqxx_static") +# endif +# endif +# endif + +# endif // _MSC_VER + +#elif defined(PQXX_HAVE_GCC_VISIBILITY) // !_WIN32 + +# define PQXX_LIBEXPORT __attribute__((visibility("default"))) +# define PQXX_PRIVATE __attribute__((visibility("hidden"))) + +#endif // PQXX_HAVE_GCC_VISIBILITY + + +#ifndef PQXX_LIBEXPORT +# define PQXX_LIBEXPORT /* libexport */ +#endif + +#ifndef PQXX_PRIVATE +# define PQXX_PRIVATE /* private */ +#endif + +#ifndef PQXX_NOVTABLE +# define PQXX_NOVTABLE /* novtable */ +#endif + +// C++20: Assume support. +#if defined(PQXX_HAVE_LIKELY) +# define PQXX_LIKELY [[likely]] +# define PQXX_UNLIKELY [[unlikely]] +#else +# define PQXX_LIKELY /* [[likely]] */ +# define PQXX_UNLIKELY /* [[unlikely]] */ +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/ignore-deprecated-post.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/ignore-deprecated-post.hxx new file mode 100644 index 000000000..cebcf0594 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/ignore-deprecated-post.hxx @@ -0,0 +1,15 @@ +/// End a code block started by "ignore-deprecated-pre.hxx". + +#if !defined(PQXX_IGNORING_DEPRECATED) +# error "Ended an 'ignore-deprecated' block while none was active." 
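+// A minimal usage sketch (illustrative only, not part of the upstream
+// header): the "pre"/"post" include pair is meant to wrap deprecated calls
+// as tightly as possible, for example:
+//
+//   #include "pqxx/internal/ignore-deprecated-pre.hxx"
+//   auto v = some_deprecated_call();  // placeholder, not a libpqxx API
+//   #include "pqxx/internal/ignore-deprecated-post.hxx"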
+#endif + +#if defined(__GNUC__) +# pragma GCC diagnostic pop +#endif // __GNUC__ + +#ifdef _MSC_VER +# pragma warning(pop) +#endif + +#undef PQXX_IGNORING_DEPRECATED diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/ignore-deprecated-pre.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/ignore-deprecated-pre.hxx new file mode 100644 index 000000000..8ac57afaa --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/ignore-deprecated-pre.hxx @@ -0,0 +1,28 @@ +/** Start a block of deprecated code which may call other deprecated code. + * + * Most compilers will emit warnings when deprecated code is invoked from + * non-deprecated code. But some compilers (notably gcc) will always emit the + * warning even when the calling code is also deprecated. + * + * This header starts a block where those warnings are suppressed. It can be + * included inside a code block. + * + * Always match the #include with a closing #include of + * "ignore-deprecated-post.hxx". To avoid mistakes, keep the enclosed area as + * small as possible. + */ +#if defined(PQXX_IGNORING_DEPRECATED) +# error "Started an 'ignore-deprecated' block inside another." +#endif + +#define PQXX_IGNORING_DEPRECATED + +#if defined(__GNUC__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#endif // __GNUC__ + +#ifdef _MSC_VER +# pragma warning(push) +# pragma warning(disable : 4996) +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/libpq-forward.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/libpq-forward.hxx new file mode 100644 index 000000000..9e74f79ec --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/libpq-forward.hxx @@ -0,0 +1,31 @@ +/** Minimal forward declarations of libpq types needed in libpqxx headers. + * + * DO NOT INCLUDE THIS FILE when building client programs. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +extern "C" +{ + struct pg_conn; + struct pg_result; + struct pgNotify; +} + +/// Forward declarations of libpq types as needed in libpqxx headers. +namespace pqxx::internal::pq +{ +using PGconn = pg_conn; +using PGresult = pg_result; +using PGnotify = pgNotify; +using PQnoticeProcessor = void (*)(void *, char const *); +} // namespace pqxx::internal::pq + +namespace pqxx +{ +/// PostgreSQL database row identifier. +using oid = unsigned int; +} // namespace pqxx diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/result_iter.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/result_iter.hxx new file mode 100644 index 000000000..1fa1f7d8a --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/result_iter.hxx @@ -0,0 +1,124 @@ +/** Result loops. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_RESULT_ITER +#define PQXX_H_RESULT_ITER + +#include + +#include "pqxx/strconv.hxx" + +namespace pqxx +{ +class result; +} // namespace pqxx + + +namespace pqxx::internal +{ +// C++20: Replace with generator? +/// Iterator for looped unpacking of a result. +template class result_iter +{ +public: + using value_type = std::tuple; + + /// Construct an "end" iterator. 
+ result_iter() = default; + + explicit result_iter(result const &home) : + m_home{&home}, m_size{std::size(home)} + { + if (not std::empty(home)) + read(); + } + result_iter(result_iter const &) = default; + + result_iter &operator++() + { + m_index++; + if (m_index >= m_size) + m_home = nullptr; + else + read(); + return *this; + } + + /// Comparison only works for comparing to end(). + bool operator==(result_iter const &rhs) const + { + return m_home == rhs.m_home; + } + bool operator!=(result_iter const &rhs) const { return not(*this == rhs); } + + value_type const &operator*() const { return m_value; } + +private: + void read() { (*m_home)[m_index].convert(m_value); } + + result const *m_home{nullptr}; + result::size_type m_index{0}; + result::size_type m_size; + value_type m_value; +}; + + +template class result_iteration +{ +public: + using iterator = result_iter; + explicit result_iteration(result const &home) : m_home{home} + { + constexpr auto tup_size{sizeof...(TYPE)}; + if (home.columns() != tup_size) + throw usage_error{internal::concat( + "Tried to extract ", to_string(tup_size), + " field(s) from a result with ", to_string(home.columns()), + " column(s).")}; + } + iterator begin() const + { + if (std::size(m_home) == 0) + return end(); + else + return iterator{m_home}; + } + iterator end() const { return {}; } + +private: + pqxx::result const &m_home; +}; +} // namespace pqxx::internal + + +template inline auto pqxx::result::iter() const +{ + return pqxx::internal::result_iteration{*this}; +} + + +template +inline void pqxx::result::for_each(CALLABLE &&func) const +{ + using args_tuple = internal::args_t; + constexpr auto sz{std::tuple_size_v}; + static_assert( + sz > 0, + "Callback for for_each must take parameters, one for each column in the " + "result."); + + auto const cols{this->columns()}; + if (sz != cols) + throw usage_error{internal::concat( + "Callback to for_each takes ", sz, "parameter", (sz == 1) ? "" : "s", + ", but result set has ", cols, "field", (cols == 1) ? "" : "s", ".")}; + + using pass_tuple = pqxx::internal::strip_types_t; + for (auto const r : *this) std::apply(func, r.as_tuple()); +} +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/result_iterator.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/result_iterator.hxx new file mode 100644 index 000000000..3f27a1d3f --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/result_iterator.hxx @@ -0,0 +1,389 @@ +/* Definitions for the pqxx::result class and support classes. + * + * pqxx::result represents the set of result rows from a database query. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/result instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_RESULT_ITERATOR +#define PQXX_H_RESULT_ITERATOR + +#include "pqxx/row.hxx" + + +/* Result iterator. + * + * Don't include this header from your own application; it is included for you + * by other libpqxx headers. + */ + +namespace pqxx +{ +/// Iterator for rows in a result. Use as result::const_iterator. +/** A result, once obtained, cannot be modified. Therefore there is no + * plain iterator type for result. However its const_iterator type can be + * used to inspect its rows without changing them. 
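+ *
+ * A minimal sketch (illustrative, not upstream text), assuming an already
+ * executed query result `r`; the range-based loop below runs on this
+ * const_iterator type:
+ *
+ *     for (pqxx::row const &row_ref : r)
+ *       std::cout << row_ref[0].as<std::string>() << '\n';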
+ */ +class PQXX_LIBEXPORT const_result_iterator : public row +{ +public: + using iterator_category = std::random_access_iterator_tag; + using value_type = row const; + using pointer = row const *; + using reference = row; + using size_type = result_size_type; + using difference_type = result_difference_type; + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + /// Create an iterator, but in an unusable state. + const_result_iterator() noexcept = default; + /// Copy an iterator. + const_result_iterator(const_result_iterator const &) noexcept = default; + /// Move an iterator. + const_result_iterator(const_result_iterator &&) noexcept = default; + + /// Begin iterating a @ref row. + const_result_iterator(row const &t) noexcept : row{t} {} +#include "pqxx/internal/ignore-deprecated-post.hxx" + + /** + * @name Dereferencing operators + * + * An iterator "points to" its own row, which is also itself. This makes it + * easy to address a @ref result as a two-dimensional container, without + * going through the intermediate step of dereferencing the iterator. It + * makes the interface similar to C pointer/array semantics. + * + * IIRC Alex Stepanov, the inventor of the STL, once remarked that having + * this as standard behaviour for pointers would be useful in some + * algorithms. So even if this makes me look foolish, I would seem to be in + * distinguished company. + */ + //@{ + /// Dereference the iterator. + [[nodiscard]] pointer operator->() const { return this; } + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + /// Dereference the iterator. + [[nodiscard]] reference operator*() const { return *this; } +#include "pqxx/internal/ignore-deprecated-post.hxx" + //@} + + /** + * @name Field access + */ + //@{ + using row::back; + using row::front; + using row::operator[]; + using row::at; + using row::rownumber; + //@} + + /** + * @name Manipulations + */ + //@{ + const_result_iterator &operator=(const_result_iterator const &rhs) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + row::operator=(rhs); +#include "pqxx/internal/ignore-deprecated-post.hxx" + return *this; + } + + const_result_iterator &operator=(const_result_iterator &&rhs) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + row::operator=(std::move(rhs)); +#include "pqxx/internal/ignore-deprecated-post.hxx" + return *this; + } + + const_result_iterator operator++(int); + const_result_iterator &operator++() + { + ++m_index; + return *this; + } + const_result_iterator operator--(int); + const_result_iterator &operator--() + { + --m_index; + return *this; + } + + const_result_iterator &operator+=(difference_type i) + { + m_index += i; + return *this; + } + const_result_iterator &operator-=(difference_type i) + { + m_index -= i; + return *this; + } + + /// Interchange two iterators in an exception-safe manner. 
+ void swap(const_result_iterator &other) noexcept + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + row::swap(other); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + //@} + + /** + * @name Comparisons + */ + //@{ + [[nodiscard]] bool operator==(const_result_iterator const &i) const + { + return m_index == i.m_index; + } + [[nodiscard]] bool operator!=(const_result_iterator const &i) const + { + return m_index != i.m_index; + } + [[nodiscard]] bool operator<(const_result_iterator const &i) const + { + return m_index < i.m_index; + } + [[nodiscard]] bool operator<=(const_result_iterator const &i) const + { + return m_index <= i.m_index; + } + [[nodiscard]] bool operator>(const_result_iterator const &i) const + { + return m_index > i.m_index; + } + [[nodiscard]] bool operator>=(const_result_iterator const &i) const + { + return m_index >= i.m_index; + } + //@} + + /** + * @name Arithmetic operators + */ + //@{ + [[nodiscard]] inline const_result_iterator operator+(difference_type) const; + friend const_result_iterator + operator+(difference_type, const_result_iterator const &); + [[nodiscard]] inline const_result_iterator operator-(difference_type) const; + [[nodiscard]] inline difference_type + operator-(const_result_iterator const &) const; + //@} + +private: + friend class pqxx::result; + const_result_iterator(pqxx::result const *r, result_size_type i) noexcept : + row{*r, i, r->columns()} + {} +}; + + +/// Reverse iterator for result. Use as result::const_reverse_iterator. +class PQXX_LIBEXPORT const_reverse_result_iterator + : private const_result_iterator +{ +public: + using super = const_result_iterator; + using iterator_type = const_result_iterator; + using iterator_type::difference_type; + using iterator_type::iterator_category; + using iterator_type::pointer; + using value_type = iterator_type::value_type; + using reference = iterator_type::reference; + + /// Create an iterator, but in an unusable state. + const_reverse_result_iterator() = default; + /// Copy an iterator. + const_reverse_result_iterator(const_reverse_result_iterator const &rhs) = + default; + /// Copy a reverse iterator from a regular iterator. + explicit const_reverse_result_iterator(const_result_iterator const &rhs) : + const_result_iterator{rhs} + { + super::operator--(); + } + + /// Move a regular iterator into a reverse iterator. + explicit const_reverse_result_iterator(const_result_iterator const &&rhs) : + const_result_iterator{std::move(rhs)} + { + super::operator--(); + } + + /// Return the underlying "regular" iterator (as per standard library). + [[nodiscard]] PQXX_PURE const_result_iterator base() const noexcept; + + /** + * @name Dereferencing operators + */ + //@{ + /// Dereference iterator. + using const_result_iterator::operator->; + /// Dereference iterator. 
+ using const_result_iterator::operator*; + //@} + + /** + * @name Field access + */ + //@{ + using const_result_iterator::back; + using const_result_iterator::front; + using const_result_iterator::operator[]; + using const_result_iterator::at; + using const_result_iterator::rownumber; + //@} + + /** + * @name Manipulations + */ + //@{ + const_reverse_result_iterator & + operator=(const_reverse_result_iterator const &r) + { + iterator_type::operator=(r); + return *this; + } + const_reverse_result_iterator &operator=(const_reverse_result_iterator &&r) + { + iterator_type::operator=(std::move(r)); + return *this; + } + const_reverse_result_iterator &operator++() + { + iterator_type::operator--(); + return *this; + } + const_reverse_result_iterator operator++(int); + const_reverse_result_iterator &operator--() + { + iterator_type::operator++(); + return *this; + } + const_reverse_result_iterator operator--(int); + const_reverse_result_iterator &operator+=(difference_type i) + { + iterator_type::operator-=(i); + return *this; + } + const_reverse_result_iterator &operator-=(difference_type i) + { + iterator_type::operator+=(i); + return *this; + } + + void swap(const_reverse_result_iterator &other) noexcept + { + const_result_iterator::swap(other); + } + //@} + + /** + * @name Arithmetic operators + */ + //@{ + [[nodiscard]] const_reverse_result_iterator + operator+(difference_type i) const + { + return const_reverse_result_iterator(base() - i); + } + [[nodiscard]] const_reverse_result_iterator operator-(difference_type i) + { + return const_reverse_result_iterator(base() + i); + } + [[nodiscard]] difference_type + operator-(const_reverse_result_iterator const &rhs) const + { + return rhs.const_result_iterator::operator-(*this); + } + //@} + + /** + * @name Comparisons + */ + //@{ + [[nodiscard]] bool + operator==(const_reverse_result_iterator const &rhs) const noexcept + { + return iterator_type::operator==(rhs); + } + [[nodiscard]] bool + operator!=(const_reverse_result_iterator const &rhs) const noexcept + { + return not operator==(rhs); + } + + [[nodiscard]] bool operator<(const_reverse_result_iterator const &rhs) const + { + return iterator_type::operator>(rhs); + } + [[nodiscard]] bool operator<=(const_reverse_result_iterator const &rhs) const + { + return iterator_type::operator>=(rhs); + } + [[nodiscard]] bool operator>(const_reverse_result_iterator const &rhs) const + { + return iterator_type::operator<(rhs); + } + [[nodiscard]] bool operator>=(const_reverse_result_iterator const &rhs) const + { + return iterator_type::operator<=(rhs); + } + //@} +}; + + +inline const_result_iterator +const_result_iterator::operator+(result::difference_type o) const +{ + return {&m_result, size_type(result::difference_type(m_index) + o)}; +} + +inline const_result_iterator +operator+(result::difference_type o, const_result_iterator const &i) +{ + return i + o; +} + +inline const_result_iterator +const_result_iterator::operator-(result::difference_type o) const +{ + return {&m_result, result_size_type(result::difference_type(m_index) - o)}; +} + +inline result::difference_type +const_result_iterator::operator-(const const_result_iterator &i) const +{ + return result::difference_type(num() - i.num()); +} + +inline const_result_iterator result::end() const noexcept +{ + return {this, size()}; +} + + +inline const_result_iterator result::cend() const noexcept +{ + return end(); +} + + +inline const_reverse_result_iterator +operator+(result::difference_type n, const_reverse_result_iterator const &i) +{ + 
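+  // Illustrative sketch, not upstream text: reverse traversal of a result
+  // (assuming a pqxx::result named `r`), which is what these reverse-iterator
+  // operators exist to support:
+  //
+  //   for (auto it = r.rbegin(); it != r.rend(); ++it)
+  //     std::cout << (*it)[0].c_str() << '\n';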
return const_reverse_result_iterator{i.base() - n}; +} + +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/sql_cursor.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/sql_cursor.hxx new file mode 100644 index 000000000..a26d06306 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/sql_cursor.hxx @@ -0,0 +1,118 @@ +/** Internal wrapper for SQL cursors.  Supports higher-level cursor classes. + * + * DO NOT INCLUDE THIS FILE DIRECTLY.  Other headers include it for you. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license.  If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_SQL_CURSOR +#define PQXX_H_SQL_CURSOR + +namespace pqxx::internal +{ +/// Cursor with SQL positioning semantics. +/** Thin wrapper around an SQL cursor, with SQL's ideas of positioning. + * + * SQL cursors have pre-increment/pre-decrement semantics, with on either end + * of the result set a special position that does not represent a row.  This + * class models SQL cursors for the purpose of implementing more C++-like + * semantics on top. + * + * Positions of actual rows are numbered starting at 1.  Position 0 exists but + * does not refer to a row.  There is a similar non-row position at the end of + * the result set. + * + * Don't use this at home.  You deserve better.  Use the stateless_cursor + * instead. + */ +class PQXX_LIBEXPORT sql_cursor : public cursor_base +{ +public: + sql_cursor( + transaction_base &t, std::string_view query, std::string_view cname, + cursor_base::access_policy ap, cursor_base::update_policy up, + cursor_base::ownership_policy op, bool hold); + + sql_cursor( + transaction_base &t, std::string_view cname, + cursor_base::ownership_policy op); + + ~sql_cursor() noexcept { close(); } + + result fetch(difference_type rows, difference_type &displacement); + result fetch(difference_type rows) + { + difference_type d = 0; + return fetch(rows, d); + } + difference_type move(difference_type rows, difference_type &displacement); + difference_type move(difference_type rows) + { + difference_type d = 0; + return move(rows, d); + } + + /// Current position, or -1 for unknown + /** + * The starting position, just before the first row, counts as position zero. + * + * Position may be unknown if (and only if) this cursor was adopted, and has + * never hit its starting position (position zero). + */ + difference_type pos() const noexcept { return m_pos; } + + /// End position, or -1 for unknown + /** + * Returns the final position, just after the last row in the result set. The + * starting position, just before the first row, counts as position zero. + * + * End position is unknown until it is encountered during use. + */ + difference_type endpos() const noexcept { return m_endpos; } + + /// Return zero-row result for this cursor. + result const &empty_result() const noexcept { return m_empty_result; } + + void close() noexcept; + +private: + difference_type adjust(difference_type hoped, difference_type actual); + static std::string stridestring(difference_type); + /// Initialize cached empty result.  Call only at beginning or end! + void init_empty_result(transaction_base &); + + /// Connection in which this cursor lives.
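+  // Worked example of the positioning model described above (illustrative,
+  // not upstream text): for a result set of 3 rows, positions run 0 through
+  // 4; rows occupy positions 1, 2 and 3, while 0 (before the first row) and
+  // 4 (past the last row) do not denote rows, so endpos() reports 4 once the
+  // cursor has run off the end.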
+ connection &m_home; + + /// Zero-row result from this cursor (or plain empty one if cursor is + /// adopted) + result m_empty_result; + + result m_cached_current_row; + + /// Is this cursor adopted (as opposed to created by this cursor object)? + bool m_adopted; + + /// Will this cursor object destroy its SQL cursor when it dies? + cursor_base::ownership_policy m_ownership; + + /// At starting position (-1), somewhere in the middle (0), or past end (1) + int m_at_end; + + /// Position, or -1 for unknown + difference_type m_pos; + + /// End position, or -1 for unknown + difference_type m_endpos = -1; +}; + + +PQXX_LIBEXPORT result_size_type obtain_stateless_cursor_size(sql_cursor &); +PQXX_LIBEXPORT result stateless_cursor_retrieve( + sql_cursor &, result::difference_type size, + result::difference_type begin_pos, result::difference_type end_pos); +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/statement_parameters.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/statement_parameters.hxx new file mode 100644 index 000000000..b078bf6e0 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/statement_parameters.hxx @@ -0,0 +1,131 @@ +/** Common implementation for statement parameter lists. + * + * These are used for both prepared statements and parameterized statements. + * + * DO NOT INCLUDE THIS FILE DIRECTLY. Other headers include it for you. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_STATEMENT_PARAMETER +#define PQXX_H_STATEMENT_PARAMETER + +#include +#include +#include +#include +#include + +#include "pqxx/binarystring.hxx" +#include "pqxx/strconv.hxx" +#include "pqxx/util.hxx" + + +namespace pqxx::internal +{ +template +constexpr inline auto const iterator_identity{ + [](decltype(*std::declval()) x) { return x; }}; + + +/// Marker type: pass a dynamically-determined number of statement parameters. +/** @deprecated Use @ref params instead. + * + * Normally when invoking a prepared or parameterised statement, the number + * of parameters is known at compile time. For instance, + * `t.exec_prepared("foo", 1, "x");` executes statement `foo` with two + * parameters, an `int` and a C string. + * + * But sometimes you may want to pass a number of parameters known only at run + * time. In those cases, a @ref dynamic_params encodes a dynamically + * determined number of parameters. You can mix these with regular, static + * parameter lists, and you can re-use them for multiple statement invocations. + * + * A dynamic_params object does not store copies of its parameters, so make + * sure they remain accessible until you've executed the statement. + * + * The ACCESSOR is an optional callable (such as a lambda). If you pass an + * accessor `a`, then each parameter `p` goes into your statement as `a(p)`. + */ +template)> +class dynamic_params +{ +public: + /// Wrap a sequence of pointers or iterators. + constexpr dynamic_params(IT begin, IT end) : + m_begin(begin), m_end(end), m_accessor(iterator_identity) + {} + + /// Wrap a sequence of pointers or iterators. + /** This version takes an accessor callable. If you pass an accessor `acc`, + * then any parameter `p` will go into the statement's parameter list as + * `acc(p)`. 
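+   *
+   * A minimal usage sketch (illustrative, not upstream documentation),
+   * assuming a transaction `tx` and a prepared statement "my_stmt" that both
+   * exist elsewhere:
+   *
+   *     std::vector<int> ids{1, 2, 3};
+   *     pqxx::internal::dynamic_params extra{std::begin(ids), std::end(ids)};
+   *     tx.exec_prepared("my_stmt", 42, extra);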
+ */ + constexpr dynamic_params(IT begin, IT end, ACCESSOR &acc) : + m_begin(begin), m_end(end), m_accessor(acc) + {} + + /// Wrap a container. + template + explicit constexpr dynamic_params(C &container) : + dynamic_params(std::begin(container), std::end(container)) + {} + + /// Wrap a container. + /** This version takes an accessor callable. If you pass an accessor `acc`, + * then any parameter `p` will go into the statement's parameter list as + * `acc(p)`. + */ + template + explicit constexpr dynamic_params(C &container, ACCESSOR &acc) : + dynamic_params(std::begin(container), std::end(container), acc) + {} + + constexpr IT begin() const noexcept { return m_begin; } + constexpr IT end() const noexcept { return m_end; } + + constexpr auto access(decltype(*std::declval()) value) const + -> decltype(std::declval()(value)) + { + return m_accessor(value); + } + +private: + IT const m_begin, m_end; + ACCESSOR m_accessor = iterator_identity; +}; + + +/// Internal type: encode statement parameters. +/** Compiles arguments for prepared statements and parameterised queries into + * a format that can be passed into libpq. + * + * Objects of this type are meant to be short-lived: a `c_params` lives and + * dies entirely within the call to execute. So, for example, if you pass in a + * non-null pointer as a parameter, @ref params may simply use that pointer as + * a parameter value, without arranging longer-term storage for the data to + * which it points. All values referenced by parameters must remain "live" + * until the parameterised or prepared statement has been executed. + */ +struct PQXX_LIBEXPORT c_params +{ + c_params() = default; + /// Copying these objects is pointless and expensive. Don't do it. + c_params(c_params const &) = delete; + c_params(c_params &&) = default; + + /// Pre-allocate storage for `n` parameters. + void reserve(std::size_t n) &; + + /// As used by libpq: pointers to parameter values. + std::vector values; + /// As used by libpq: lengths of non-null arguments, in bytes. + std::vector lengths; + /// As used by libpq: effectively boolean "is this a binary parameter?" + std::vector formats; +}; +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/stream_iterator.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/stream_iterator.hxx new file mode 100644 index 000000000..f240dcfa7 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/stream_iterator.hxx @@ -0,0 +1,105 @@ +/** Stream iterators. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_STREAM_ITERATOR +#define PQXX_H_STREAM_ITERATOR + +#include + +namespace pqxx +{ +class stream_from; +} + + +namespace pqxx::internal +{ +// C++20: Replace with generator? +/// Input iterator for stream_from. +/** Just barely enough to support range-based "for" loops. Don't assume that + * any of the usual behaviour works beyond that. + */ +template class stream_input_iterator +{ +public: + using value_type = std::tuple; + + /// Construct an "end" iterator. 
+ stream_input_iterator() = default; + + explicit stream_input_iterator(stream_from &home) : m_home(&home) + { + advance(); + } + stream_input_iterator(stream_input_iterator const &) = default; + + stream_input_iterator &operator++() + { + advance(); + return *this; + } + + value_type const &operator*() const { return m_value; } + + /// Comparison only works for comparing to end(). + bool operator==(stream_input_iterator const &rhs) const + { + return m_home == rhs.m_home; + } + /// Comparison only works for comparing to end(). + bool operator!=(stream_input_iterator const &rhs) const + { + return not(*this == rhs); + } + +private: + void advance() + { + if (m_home == nullptr) + throw usage_error{"Moving stream_from iterator beyond end()."}; + if (not((*m_home) >> m_value)) + m_home = nullptr; + } + + stream_from *m_home{nullptr}; + value_type m_value; +}; + + +// C++20: Replace with generator? +/// Iteration over a @ref stream_from. +template class stream_input_iteration +{ +public: + using iterator = stream_input_iterator; + explicit stream_input_iteration(stream_from &home) : m_home{home} {} + iterator begin() const { return iterator{m_home}; } + iterator end() const { return {}; } + +private: + stream_from &m_home; +}; + + +// C++20: Replace with generator? +/// Iteration over a @ref stream_from, deleting it once done. +template class owning_stream_input_iteration +{ +public: + using iterator = stream_input_iterator; + explicit owning_stream_input_iteration(std::unique_ptr &&home) : + m_home{std::move(home)} + {} + iterator begin() const { return iterator{*m_home.get()}; } + iterator end() const { return {}; } + +private: + std::unique_ptr m_home; +}; +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/internal/wait.hxx b/ext/libpqxx-7.7.3/include/pqxx/internal/wait.hxx new file mode 100644 index 000000000..7a82e6553 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/internal/wait.hxx @@ -0,0 +1,18 @@ +#if !defined(PQXX_WAIT_HXX) +# define PQXX_WAIT_HXX + +namespace pqxx::internal +{ +/// Wait. +/** This is normally `std::this_thread::sleep_for()`. But MinGW's `thread` + * header doesn't work, so we must be careful about including it. + */ +void PQXX_LIBEXPORT wait_for(unsigned int microseconds); + + +/// Wait for a socket to be ready for reading/writing, or timeout. +PQXX_LIBEXPORT void wait_fd( + int fd, bool for_read, bool for_write, unsigned seconds = 1, + unsigned microseconds = 0); +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/isolation b/ext/libpqxx-7.7.3/include/pqxx/isolation new file mode 100644 index 000000000..1b801329b --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/isolation @@ -0,0 +1,8 @@ +/** Transaction isolation levels. + * + * Policies and traits describing SQL transaction isolation levels + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/isolation.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/isolation.hxx b/ext/libpqxx-7.7.3/include/pqxx/isolation.hxx new file mode 100644 index 000000000..0698c6ab4 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/isolation.hxx @@ -0,0 +1,75 @@ +/* Definitions for transaction isolation levels, and such. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/isolation instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. 
If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ISOLATION +#define PQXX_H_ISOLATION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/util.hxx" + +namespace pqxx +{ +/// Should a transaction be read-only, or read-write? +/** No, this is not an isolation level. So it really doesn't belong here. + * But it's not really worth a separate header. + */ +enum class write_policy +{ + read_only, + read_write +}; + + +/// Transaction isolation levels. +/** These are as defined in the SQL standard. But there are a few notes + * specific to PostgreSQL. + * + * First, postgres does not support "read uncommitted." The lowest level you + * can get is "read committed," which is better. PostgreSQL is built on the + * MVCC paradigm, which guarantees "read committed" isolation without any + * additional performance overhead, so there was no point in providing the + * lower level. + * + * Second, "repeatable read" also makes more isolation guarantees than the + * standard requires. According to the standard, this level prevents "dirty + * reads" and "nonrepeatable reads," but not "phantom reads." In postgres, + * it actually prevents all three. + * + * Third, "serializable" is only properly supported starting at postgres 9.1. + * If you request "serializable" isolation on an older backend, you will get + * the same isolation as in "repeatable read." It's better than the + * "repeatable read" defined in the SQL standard, but not a complete + * implementation of the standard's "serializable" isolation level. + * + * In general, a lower isolation level will allow more surprising interactions + * between ongoing transactions, but improve performance. A higher level + * gives you more protection from subtle concurrency bugs, but sometimes it + * may not be possible to complete your transaction without avoiding paradoxes + * in the data. In that case a transaction may fail, and the application will + * have to re-do the whole thing based on the latest state of the database. + * (If you want to retry your code in that situation, have a look at the + * transactor framework.) + * + * Study the levels and design your application with the right level in mind. + */ +enum isolation_level +{ + // PostgreSQL only has the better isolation levels. + // read_uncommitted, + + read_committed, + repeatable_read, + serializable, +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/largeobject b/ext/libpqxx-7.7.3/include/pqxx/largeobject new file mode 100644 index 000000000..1f2f94790 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/largeobject @@ -0,0 +1,8 @@ +/** Large Objects interface. + * + * Supports direct access to large objects, as well as through I/O streams + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/largeobject.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/largeobject.hxx b/ext/libpqxx-7.7.3/include/pqxx/largeobject.hxx new file mode 100644 index 000000000..ebafc51d8 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/largeobject.hxx @@ -0,0 +1,735 @@ +/* Large Objects interface. Deprecated; use blob instead. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/largeobject instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. 
If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_LARGEOBJECT +#define PQXX_H_LARGEOBJECT + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include + +#include "pqxx/dbtransaction.hxx" + + +namespace pqxx +{ +/// Identity of a large object. +/** @deprecated Use the @ref blob class instead. + * + * Encapsulates the identity of a large object. + * + * A largeobject must be accessed only from within a backend transaction, but + * the object's identity remains valid as long as the object exists. + */ +class PQXX_LIBEXPORT largeobject +{ +public: + using size_type = large_object_size_type; + + /// Refer to a nonexistent large object (similar to what a null pointer + /// does). + [[deprecated("Use blob instead.")]] largeobject() noexcept = default; + + /// Create new large object. + /** @param t Backend transaction in which the object is to be created. + */ + [[deprecated("Use blob instead.")]] explicit largeobject(dbtransaction &t); + + /// Wrap object with given oid. + /** Convert combination of a transaction and object identifier into a + * large object identity. Does not affect the database. + * @param o Object identifier for the given object. + */ + [[deprecated("Use blob instead.")]] explicit largeobject(oid o) noexcept : + m_id{o} + {} + + /// Import large object from a local file. + /** Creates a large object containing the data found in the given file. + * @param t Backend transaction in which the large object is to be created. + * @param file A filename on the client program's filesystem. + */ + [[deprecated("Use blob instead.")]] largeobject( + dbtransaction &t, std::string_view file); + + /// Take identity of an opened large object. + /** Copy identity of already opened large object. Note that this may be done + * as an implicit conversion. + * @param o Already opened large object to copy identity from. + */ + [[deprecated("Use blob instead.")]] largeobject( + largeobjectaccess const &o) noexcept; + + /// Object identifier. + /** The number returned by this function identifies the large object in the + * database we're connected to (or oid_none is returned if we refer to the + * null object). + */ + [[nodiscard]] oid id() const noexcept { return m_id; } + + /** + * @name Identity comparisons + * + * These operators compare the object identifiers of large objects. This has + * nothing to do with the objects' actual contents; use them only for keeping + * track of containers of references to large objects and such. + */ + //@{ + /// Compare object identities + /** @warning Only valid between large objects in the same database. */ + [[nodiscard]] bool operator==(largeobject const &other) const + { + return m_id == other.m_id; + } + /// Compare object identities + /** @warning Only valid between large objects in the same database. */ + [[nodiscard]] bool operator!=(largeobject const &other) const + { + return m_id != other.m_id; + } + /// Compare object identities + /** @warning Only valid between large objects in the same database. */ + [[nodiscard]] bool operator<=(largeobject const &other) const + { + return m_id <= other.m_id; + } + /// Compare object identities + /** @warning Only valid between large objects in the same database. 
*/ + [[nodiscard]] bool operator>=(largeobject const &other) const + { + return m_id >= other.m_id; + } + /// Compare object identities + /** @warning Only valid between large objects in the same database. */ + [[nodiscard]] bool operator<(largeobject const &other) const + { + return m_id < other.m_id; + } + /// Compare object identities + /** @warning Only valid between large objects in the same database. */ + [[nodiscard]] bool operator>(largeobject const &other) const + { + return m_id > other.m_id; + } + //@} + + /// Export large object's contents to a local file + /** Writes the data stored in the large object to the given file. + * @param t Transaction in which the object is to be accessed + * @param file A filename on the client's filesystem + */ + void to_file(dbtransaction &t, std::string_view file) const; + + /// Delete large object from database + /** Unlike its low-level equivalent cunlink, this will throw an exception if + * deletion fails. + * @param t Transaction in which the object is to be deleted + */ + void remove(dbtransaction &t) const; + +protected: + PQXX_PURE static internal::pq::PGconn * + raw_connection(dbtransaction const &T); + + PQXX_PRIVATE std::string reason(connection const &, int err) const; + +private: + oid m_id = oid_none; +}; + + +/// Accessor for large object's contents. +/** @deprecated Use the `blob` class instead. + */ +class PQXX_LIBEXPORT largeobjectaccess : private largeobject +{ +public: + using largeobject::size_type; + using off_type = size_type; + using pos_type = size_type; + + /// Open mode: `in`, `out` (can be combined using "bitwise or"). + /** According to the C++ standard, these should be in `std::ios_base`. We + * take them from derived class `std::ios` instead, which is easier on the + * eyes. + * + * Historical note: taking it from std::ios was originally a workaround for a + * problem with gcc 2.95. + */ + using openmode = std::ios::openmode; + + /// Default open mode: in, out, binary. + static constexpr auto default_mode{ + std::ios::in | std::ios::out | std::ios::binary}; + + /// Seek direction: `beg`, `cur`, `end`. + using seekdir = std::ios::seekdir; + + /// Create new large object and open it. + /** + * @param t Backend transaction in which the object is to be created. + * @param mode Access mode, defaults to ios_base::in | ios_base::out | + * ios_base::binary. + */ + [[deprecated("Use blob instead.")]] explicit largeobjectaccess( + dbtransaction &t, openmode mode = default_mode); + + /// Open large object with given oid. + /** Convert combination of a transaction and object identifier into a + * large object identity. Does not affect the database. + * @param t Transaction in which the object is to be accessed. + * @param o Object identifier for the given object. + * @param mode Access mode, defaults to ios_base::in | ios_base::out | + * ios_base::binary. + */ + [[deprecated("Use blob instead.")]] largeobjectaccess( + dbtransaction &t, oid o, openmode mode = default_mode); + + /// Open given large object. + /** Open a large object with the given identity for reading and/or writing. + * @param t Transaction in which the object is to be accessed. + * @param o Identity for the large object to be accessed. + * @param mode Access mode, defaults to ios_base::in | ios_base::out | + * ios_base::binary. + */ + [[deprecated("Use blob instead.")]] largeobjectaccess( + dbtransaction &t, largeobject o, openmode mode = default_mode); + + /// Import large object from a local file and open it. 
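+  // Illustrative sketch, not upstream text: create a large object, write to
+  // it and read it back through this accessor class, assuming an open
+  // transaction `tx` (the class itself is deprecated in favour of
+  // pqxx::blob):
+  //
+  //   pqxx::largeobjectaccess lo{tx};
+  //   lo.write("hello");
+  //   lo.seek(0, std::ios::beg);
+  //   char buf[5];
+  //   lo.read(buf, std::size(buf));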
+ /** Creates a large object containing the data found in the given file. + * @param t Backend transaction in which the large object is to be created. + * @param file A filename on the client program's filesystem. + * @param mode Access mode, defaults to ios_base::in | ios_base::out. + */ + [[deprecated("Use blob instead.")]] largeobjectaccess( + dbtransaction &t, std::string_view file, openmode mode = default_mode); + + ~largeobjectaccess() noexcept { close(); } + + /// Object identifier. + /** The number returned by this function uniquely identifies the large object + * in the context of the database we're connected to. + */ + using largeobject::id; + + /// Export large object's contents to a local file. + /** Writes the data stored in the large object to the given file. + * @param file A filename on the client's filesystem. + */ + void to_file(std::string_view file) const + { + largeobject::to_file(m_trans, file); + } + + using largeobject::to_file; + + /** + * @name High-level access to object contents. + */ + //@{ + /// Write data to large object. + /** @warning The size of a write is currently limited to 2GB. + * + * @param buf Data to write. + * @param len Number of bytes from Buf to write. + */ + void write(char const buf[], std::size_t len); + + /// Write string to large object. + /** If not all bytes could be written, an exception is thrown. + * @param buf Data to write; no terminating zero is written. + */ + void write(std::string_view buf) { write(std::data(buf), std::size(buf)); } + + /// Read data from large object. + /** Throws an exception if an error occurs while reading. + * @param buf Location to store the read data in. + * @param len Number of bytes to try and read. + * @return Number of bytes read, which may be less than the number requested + * if the end of the large object is reached. + */ + size_type read(char buf[], std::size_t len); + + /// Seek in large object's data stream. + /** Throws an exception if an error occurs. + * @return The new position in the large object + */ + size_type seek(size_type dest, seekdir dir); + + /// Report current position in large object's data stream. + /** Throws an exception if an error occurs. + * @return The current position in the large object. + */ + [[nodiscard]] size_type tell() const; + //@} + + /** + * @name Low-level access to object contents. + * + * These functions provide a more "C-like" access interface, returning + * special values instead of throwing exceptions on error. These functions + * are generally best avoided in favour of the high-level access functions, + * which behave more like C++ functions should. + * + * Due to libpq's underlying API, some operations are limited to "int" + * sizes, typically 2 GB, even though a large object can grow much larger. + */ + //@{ + /// Seek in large object's data stream. + /** Does not throw exception in case of error; inspect return value and + * `errno` instead. + * @param dest Offset to go to. + * @param dir Origin to which dest is relative: ios_base::beg (from beginning + * of the object), ios_base::cur (from current access position), or + * ios_base;:end (from end of object). + * @return New position in large object, or -1 if an error occurred. + */ + pos_type cseek(off_type dest, seekdir dir) noexcept; + + /// Write to large object's data stream. + /** Does not throw exception in case of error; inspect return value and + * `errno` instead. + * @param buf Data to write. + * @param len Number of bytes to write. 
+ * @return Number of bytes actually written, or -1 if an error occurred. + */ + off_type cwrite(char const buf[], std::size_t len) noexcept; + + /// Read from large object's data stream. + /** Does not throw exception in case of error; inspect return value and + * `errno` instead. + * @param buf Area where incoming bytes should be stored. + * @param len Number of bytes to read. + * @return Number of bytes actually read, or -1 if an error occurred.. + */ + off_type cread(char buf[], std::size_t len) noexcept; + + /// Report current position in large object's data stream. + /** Does not throw exception in case of error; inspect return value and + * `errno` instead. + * @return Current position in large object, of -1 if an error occurred. + */ + [[nodiscard]] pos_type ctell() const noexcept; + //@} + + /** + * @name Error/warning output + */ + //@{ + /// Issue message to transaction's notice processor. + void process_notice(zview) noexcept; + //@} + + using largeobject::remove; + + using largeobject::operator==; + using largeobject::operator!=; + using largeobject::operator<; + using largeobject::operator<=; + using largeobject::operator>; + using largeobject::operator>=; + + largeobjectaccess() = delete; + largeobjectaccess(largeobjectaccess const &) = delete; + largeobjectaccess operator=(largeobjectaccess const &) = delete; + +private: + PQXX_PRIVATE std::string reason(int err) const; + internal::pq::PGconn *raw_connection() const + { + return largeobject::raw_connection(m_trans); + } + + PQXX_PRIVATE void open(openmode mode); + void close() noexcept; + + dbtransaction &m_trans; + int m_fd = -1; +}; + + +/// Streambuf to use large objects in standard I/O streams. +/** @deprecated Access large objects directly using the @ref blob class. + * + * The standard streambuf classes provide uniform access to data storage such + * as files or string buffers, so they can be accessed using standard input or + * output streams. This streambuf implementation provided similar access to + * large objects, so they could be read and written using the same stream + * classes. + * + * This functionality was considered too fragile and complex, so it has been + * replaced with a single, much simpler class. + */ +template> +class largeobject_streambuf : public std::basic_streambuf +{ + using size_type = largeobject::size_type; + +public: + using char_type = CHAR; + using traits_type = TRAITS; + using int_type = typename traits_type::int_type; + using pos_type = typename traits_type::pos_type; + using off_type = typename traits_type::off_type; + using openmode = largeobjectaccess::openmode; + using seekdir = largeobjectaccess::seekdir; + + /// Default open mode: in, out, binary. + static constexpr auto default_mode{ + std::ios::in | std::ios::out | std::ios::binary}; + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + [[deprecated("Use blob instead.")]] largeobject_streambuf( + dbtransaction &t, largeobject o, openmode mode = default_mode, + size_type buf_size = 512) : + m_bufsize{buf_size}, m_obj{t, o, mode}, m_g{nullptr}, m_p{nullptr} + { + initialize(mode); + } +#include "pqxx/internal/ignore-deprecated-post.hxx" + + [[deprecated("Use blob instead.")]] largeobject_streambuf( + dbtransaction &t, oid o, openmode mode = default_mode, + size_type buf_size = 512) : + m_bufsize{buf_size}, m_obj{t, o, mode}, m_g{nullptr}, m_p{nullptr} + { + initialize(mode); + } + + virtual ~largeobject_streambuf() noexcept + { + delete[] m_p; + delete[] m_g; + } + + /// For use by large object stream classes. 
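A minimal sketch of how the deprecated largeobjectaccess class above might be used, assuming an open pqxx::connection named cx; the high-level calls throw on failure, unlike the c-prefixed low-level ones. New code should use pqxx::blob instead, and this snippet would trigger the class's deprecation warnings.

pqxx::work tx{cx};
pqxx::largeobjectaccess lo{tx};              // create a fresh large object
lo.write("hello, large object");             // high-level write: throws on error
lo.seek(0, std::ios::beg);                   // rewind before reading back
char buf[64];
auto n = lo.read(buf, sizeof buf);           // may return fewer bytes than requested
lo.to_file("/tmp/lo-copy.bin");              // hypothetical client-side path
tx.commit();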
+ void process_notice(zview const &s) { m_obj.process_notice(s); } + +protected: + virtual int sync() override + { + // setg() sets eback, gptr, egptr. + this->setg(this->eback(), this->eback(), this->egptr()); + return overflow(eof()); + } + + virtual pos_type seekoff(off_type offset, seekdir dir, openmode) override + { + return adjust_eof(m_obj.cseek(largeobjectaccess::off_type(offset), dir)); + } + + virtual pos_type seekpos(pos_type pos, openmode) override + { + largeobjectaccess::pos_type const newpos{ + m_obj.cseek(largeobjectaccess::off_type(pos), std::ios::beg)}; + return adjust_eof(newpos); + } + + virtual int_type overflow(int_type ch) override + { + auto *const pp{this->pptr()}; + if (pp == nullptr) + return eof(); + auto *const pb{this->pbase()}; + int_type res{0}; + + if (pp > pb) + { + auto const write_sz{pp - pb}; + auto const written_sz{ + m_obj.cwrite(pb, static_cast(pp - pb))}; + if (internal::cmp_less_equal(written_sz, 0)) + throw internal_error{ + "pqxx::largeobject: write failed " + "(is transaction still valid on write or flush?), " + "libpq reports error"}; + else if (write_sz != written_sz) + throw internal_error{ + "pqxx::largeobject: write failed " + "(is transaction still valid on write or flush?), " + + std::to_string(written_sz) + "/" + std::to_string(write_sz) + + " bytes written"}; + auto const out{adjust_eof(written_sz)}; + + if constexpr (std::is_arithmetic_v) + res = check_cast(out, "largeobject position"sv); + else + res = int_type(out); + } + this->setp(m_p, m_p + m_bufsize); + + // Write that one more character, if it's there. + if (ch != eof()) + { + *this->pptr() = static_cast(ch); + this->pbump(1); + } + return res; + } + + virtual int_type overflow() { return overflow(eof()); } + + virtual int_type underflow() override + { + if (this->gptr() == nullptr) + return eof(); + auto *const eb{this->eback()}; + auto const res{adjust_eof( + m_obj.cread(this->eback(), static_cast(m_bufsize)))}; + this->setg( + eb, eb, eb + (res == eof() ? 0 : static_cast(res))); + return (res == eof() or res == 0) ? eof() : traits_type::to_int_type(*eb); + } + +private: + /// Shortcut for traits_type::eof(). + static int_type eof() { return traits_type::eof(); } + + /// Helper: change error position of -1 to EOF (probably a no-op). + template static std::streampos adjust_eof(INTYPE pos) + { + bool const at_eof{pos == -1}; + if constexpr (std::is_arithmetic_v) + { + return check_cast( + (at_eof ? eof() : pos), "large object seek"sv); + } + else + { + return std::streampos(at_eof ? eof() : pos); + } + } + + void initialize(openmode mode) + { + if ((mode & std::ios::in) != 0) + { + m_g = new char_type[unsigned(m_bufsize)]; + this->setg(m_g, m_g, m_g); + } + if ((mode & std::ios::out) != 0) + { + m_p = new char_type[unsigned(m_bufsize)]; + this->setp(m_p, m_p + m_bufsize); + } + } + + size_type const m_bufsize; + largeobjectaccess m_obj; + + /// Get & put buffers. + char_type *m_g, *m_p; +}; + + +/// Input stream that gets its data from a large object. +/** @deprecated Access large objects directly using the @ref blob class. + * + * This class worked like any other istream, but to read data from a large + * object. It supported all formatting and streaming operations of + * `std::istream`. + * + * This functionality was considered too fragile and complex, so it has been + * replaced with a single, much simpler class. 
+ */ +template> +class basic_ilostream : public std::basic_istream +{ + using super = std::basic_istream; + +public: + using char_type = CHAR; + using traits_type = TRAITS; + using int_type = typename traits_type::int_type; + using pos_type = typename traits_type::pos_type; + using off_type = typename traits_type::off_type; + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + /// Create a basic_ilostream. + /** + * @param t Transaction in which this stream is to exist. + * @param o Large object to access. + * @param buf_size Size of buffer to use internally (optional). + */ + [[deprecated("Use blob instead.")]] basic_ilostream( + dbtransaction &t, largeobject o, largeobject::size_type buf_size = 512) : + super{nullptr}, + m_buf{t, o, std::ios::in | std::ios::binary, buf_size} + { + super::init(&m_buf); + } +#include "pqxx/internal/ignore-deprecated-post.hxx" + + /// Create a basic_ilostream. + /** + * @param t Transaction in which this stream is to exist. + * @param o Identifier of a large object to access. + * @param buf_size Size of buffer to use internally (optional). + */ + [[deprecated("Use blob instead.")]] basic_ilostream( + dbtransaction &t, oid o, largeobject::size_type buf_size = 512) : + super{nullptr}, + m_buf{t, o, std::ios::in | std::ios::binary, buf_size} + { + super::init(&m_buf); + } + +private: + largeobject_streambuf m_buf; +}; + +using ilostream = basic_ilostream; + + +/// Output stream that writes data back to a large object. +/** @deprecated Access large objects directly using the @ref blob class. + * + * This worked like any other ostream, but to write data to a large object. + * It supported all formatting and streaming operations of `std::ostream`. + * + * This functionality was considered too fragile and complex, so it has been + * replaced with a single, much simpler class. + */ +template> +class basic_olostream : public std::basic_ostream +{ + using super = std::basic_ostream; + +public: + using char_type = CHAR; + using traits_type = TRAITS; + using int_type = typename traits_type::int_type; + using pos_type = typename traits_type::pos_type; + using off_type = typename traits_type::off_type; + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + /// Create a basic_olostream. + /** + * @param t transaction in which this stream is to exist. + * @param o a large object to access. + * @param buf_size size of buffer to use internally (optional). + */ + [[deprecated("Use blob instead.")]] basic_olostream( + dbtransaction &t, largeobject o, largeobject::size_type buf_size = 512) : + super{nullptr}, + m_buf{t, o, std::ios::out | std::ios::binary, buf_size} + { + super::init(&m_buf); + } +#include "pqxx/internal/ignore-deprecated-post.hxx" + + /// Create a basic_olostream. + /** + * @param t transaction in which this stream is to exist. + * @param o a large object to access. + * @param buf_size size of buffer to use internally (optional). + */ + [[deprecated("Use blob instead.")]] basic_olostream( + dbtransaction &t, oid o, largeobject::size_type buf_size = 512) : + super{nullptr}, + m_buf{t, o, std::ios::out | std::ios::binary, buf_size} + { + super::init(&m_buf); + } + + ~basic_olostream() + { + try + { + m_buf.pubsync(); + m_buf.pubsync(); + } + catch (std::exception const &e) + { + m_buf.process_notice(e.what()); + } + } + +private: + largeobject_streambuf m_buf; +}; + +using olostream = basic_olostream; + + +/// Stream that reads and writes a large object. +/** @deprecated Access large objects directly using the @ref blob class. 
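A brief sketch, under the assumption of a live dbtransaction tx and the oid of an existing large object, of how the deprecated stream wrappers were used; handle() is a hypothetical consumer.

pqxx::ilostream in{tx, some_oid};        // some_oid: id of an existing large object (assumed)
std::string word;
while (in >> word)                       // formatted input, as with any std::istream
  handle(word);                          // hypothetical handler
// pqxx::olostream and pqxx::lostream work the same way for writing and read/write access.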
+ * + * This worked like a std::iostream, but to read data from, or write data to, a + * large object. It supported all formatting and streaming operations of + * `std::iostream`. + * + * This functionality was considered too fragile and complex, so it has been + * replaced with a single, much simpler class. + */ +template> +class basic_lostream : public std::basic_iostream +{ + using super = std::basic_iostream; + +public: + using char_type = CHAR; + using traits_type = TRAITS; + using int_type = typename traits_type::int_type; + using pos_type = typename traits_type::pos_type; + using off_type = typename traits_type::off_type; + + /// Create a basic_lostream. + /** + * @param t Transaction in which this stream is to exist. + * @param o Large object to access. + * @param buf_size Size of buffer to use internally (optional). + */ + [[deprecated("Use blob instead.")]] basic_lostream( + dbtransaction &t, largeobject o, largeobject::size_type buf_size = 512) : + super{nullptr}, + m_buf{ + t, o, std::ios::in | std::ios::out | std::ios::binary, buf_size} + { + super::init(&m_buf); + } + + /// Create a basic_lostream. + /** + * @param t Transaction in which this stream is to exist. + * @param o Large object to access. + * @param buf_size Size of buffer to use internally (optional). + */ + [[deprecated("Use blob instead.")]] basic_lostream( + dbtransaction &t, oid o, largeobject::size_type buf_size = 512) : + super{nullptr}, + m_buf{ + t, o, std::ios::in | std::ios::out | std::ios::binary, buf_size} + { + super::init(&m_buf); + } + + ~basic_lostream() + { + try + { + m_buf.pubsync(); + m_buf.pubsync(); + } + catch (std::exception const &e) + { + m_buf.process_notice(e.what()); + } + } + +private: + largeobject_streambuf m_buf; +}; + +using lostream = basic_lostream; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/nontransaction b/ext/libpqxx-7.7.3/include/pqxx/nontransaction new file mode 100644 index 000000000..bb5b79724 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/nontransaction @@ -0,0 +1,8 @@ +/** pqxx::nontransaction class. + * + * pqxx::nontransaction provides nontransactional database access. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/nontransaction.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/nontransaction.hxx b/ext/libpqxx-7.7.3/include/pqxx/nontransaction.hxx new file mode 100644 index 000000000..c50715594 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/nontransaction.hxx @@ -0,0 +1,76 @@ +/* Definition of the pqxx::nontransaction class. + * + * pqxx::nontransaction provides nontransactional database access + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/nontransaction instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_NONTRANSACTION +#define PQXX_H_NONTRANSACTION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/connection.hxx" +#include "pqxx/result.hxx" +#include "pqxx/transaction.hxx" + +namespace pqxx +{ +using namespace std::literals; + +/// Simple "transaction" class offering no transactional integrity. 
+/** + * @ingroup transactions + * + * nontransaction, like transaction or any other transaction_base-derived + * class, provides access to a database through a connection. Unlike its + * siblings, however, nontransaction does not maintain any kind of + * transactional integrity. This may be useful eg. for read-only access to the + * database that does not require a consistent, atomic view on its data; or for + * operations that are not allowed within a backend transaction, such as + * creating tables. + * + * For queries that update the database, however, a real transaction is likely + * to be faster unless the transaction consists of only a single record update. + * + * Also, you can keep a nontransaction open for as long as you like. Actual + * back-end transactions are limited in lifespan, and will sometimes fail just + * because they took too long to execute or were left idle for too long. This + * will not happen with a nontransaction (although the connection may still + * time out, e.g. when the network is unavailable for a very long time). + * + * Any query executed in a nontransaction is committed immediately, and neither + * commit() nor abort() has any effect. + * + * Database features that require a backend transaction, such as cursors or + * large objects, will not work in a nontransaction. + */ +class PQXX_LIBEXPORT nontransaction final : public transaction_base +{ +public: + /// Constructor. + /** Create a "dummy" transaction. + * @param c Connection in which this "transaction" will operate. + * @param tname Optional tname for the transaction, beginning with a letter + * and containing only letters and digits. + */ + nontransaction(connection &c, std::string_view tname = ""sv) : + transaction_base{c, tname, std::shared_ptr{}} + { + register_transaction(); + } + + virtual ~nontransaction() override { close(); } + +private: + virtual void do_commit() override {} +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/notification b/ext/libpqxx-7.7.3/include/pqxx/notification new file mode 100644 index 000000000..a0bd1c73e --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/notification @@ -0,0 +1,8 @@ +/** pqxx::notification_receiver functor interface. + * + * pqxx::notification_receiver handles incoming notifications. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/notification.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/notification.hxx b/ext/libpqxx-7.7.3/include/pqxx/notification.hxx new file mode 100644 index 000000000..b59b8567a --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/notification.hxx @@ -0,0 +1,94 @@ +/* Definition of the pqxx::notification_receiver functor interface. + * + * pqxx::notification_receiver handles incoming notifications. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/notification instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_NOTIFICATION +#define PQXX_H_NOTIFICATION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include + +#include "pqxx/types.hxx" + + +namespace pqxx +{ +/// "Observer" base class for notifications. 
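A small usage sketch for the nontransaction class defined above, assuming a reachable database (connection settings taken from the environment) and an existing table named things; every statement is committed the moment it executes.

pqxx::connection cx;
pqxx::nontransaction ntx{cx};
pqxx::result r{ntx.exec("SELECT count(*) FROM things")};
// ntx.commit() would be a no-op: each statement above is already committed.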
+/** @addtogroup notification Notifications and Receivers + * + * To listen on a notification issued using the NOTIFY command, derive your own + * class from notification_receiver and define its function-call operator to + * perform whatever action you wish to take when the given notification + * arrives. Then create an object of that class and pass it to your connection. + * DO NOT use raw SQL to listen for notifications, or your attempts to listen + * won't be resumed when a connection fails--and you'll have no way to notice. + * + * Notifications never arrive inside a transaction, not even in a + * nontransaction. Therefore, you are free to open a transaction of your own + * inside your receiver's function invocation operator. + * + * Notifications you are listening for may arrive anywhere within libpqxx code, + * but be aware that **PostgreSQL defers notifications occurring inside + * transactions.** (This was done for excellent reasons; just think about what + * happens if the transaction where you happen to handle an incoming + * notification is later rolled back for other reasons). So if you're keeping + * a transaction open, don't expect any of your receivers on the same + * connection to be notified. + * + * (For very similar reasons, outgoing notifications are also not sent until + * the transaction that sends them commits.) + * + * Multiple receivers on the same connection may listen on a notification of + * the same name. An incoming notification is processed by invoking all + * receivers (zero or more) of the same name. + */ +class PQXX_LIBEXPORT PQXX_NOVTABLE notification_receiver +{ +public: + /// Register the receiver with a connection. + /** + * @param c Connnection to operate on. + * @param channel Name of the notification to listen for. + */ + notification_receiver(connection &c, std::string_view channel); + /// Register the receiver with a connection. + notification_receiver(notification_receiver const &) = delete; + /// Register the receiver with a connection. + notification_receiver &operator=(notification_receiver const &) = delete; + /// Deregister the receiver. + virtual ~notification_receiver(); + + /// The channel that this receiver listens on. + [[nodiscard]] std::string const &channel() const & { return m_channel; } + + // TODO: Change API to take payload as zview instead of string ref. + /// Overridable: action to invoke when notification arrives. + /** + * @param payload An optional string that may have been passed to the NOTIFY + * command. + * @param backend_pid Process ID of the database backend process that served + * our connection when the notification arrived. The actual process ID + * behind the connection may have changed by the time this method is called. + */ + virtual void operator()(std::string const &payload, int backend_pid) = 0; + +protected: + connection &conn() const noexcept { return m_conn; } + +private: + connection &m_conn; + std::string m_channel; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/params b/ext/libpqxx-7.7.3/include/pqxx/params new file mode 100644 index 000000000..4098782aa --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/params @@ -0,0 +1,8 @@ +/** Helper classes for passing statement parameters. + * + * Use these for prepared statements and parameterised statements. + */ +// Actual definitions in .hxx file so editors and such recognize file type. 
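A sketch of a concrete receiver for the notification_receiver interface described above; the channel name "jobs", the handler body, and the use of std::cout (requires <iostream>) are illustrative assumptions.

class job_listener final : public pqxx::notification_receiver
{
public:
  explicit job_listener(pqxx::connection &cx) :
    pqxx::notification_receiver{cx, "jobs"}
  {}

  void operator()(std::string const &payload, int backend_pid) override
  {
    // React to the incoming notification; the payload may be empty.
    std::cout << "notification from backend " << backend_pid << ": " << payload << '\n';
  }
};
// Typical use: keep a job_listener alive and, outside any open transaction,
// call the connection's await_notification() (or get_notifs()) so handlers run.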
+#include "pqxx/internal/header-pre.hxx" +#include "pqxx/params.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/params.hxx b/ext/libpqxx-7.7.3/include/pqxx/params.hxx new file mode 100644 index 000000000..2d29cdfed --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/params.hxx @@ -0,0 +1,383 @@ +/* Helpers for prepared statements and parameterised statements. + * + * See the connection class for more about such statements. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_PARAMS +#define PQXX_H_PARAMS + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include + +#include "pqxx/internal/concat.hxx" +#include "pqxx/internal/statement_parameters.hxx" +#include "pqxx/types.hxx" + + +/// @deprecated The new @ref params class replaces all of this. +namespace pqxx::prepare +{ +/// Pass a number of statement parameters only known at runtime. +/** @deprecated Use @ref params instead. + * + * When you call any of the `exec_params` functions, the number of arguments + * is normally known at compile time. This helper function supports the case + * where it is not. + * + * Use this function to pass a variable number of parameters, based on a + * sequence ranging from `begin` to `end` exclusively. + * + * The technique combines with the regular static parameters. You can use it + * to insert dynamic parameter lists in any place, or places, among the call's + * parameters. You can even insert multiple dynamic sequences. + * + * @param begin A pointer or iterator for iterating parameters. + * @param end A pointer or iterator for iterating parameters. + * @return An object representing the parameters. + */ +template +[[deprecated("Use the params class instead.")]] constexpr inline auto +make_dynamic_params(IT begin, IT end) +{ + return pqxx::internal::dynamic_params(begin, end); +} + + +/// Pass a number of statement parameters only known at runtime. +/** @deprecated Use @ref params instead. + * + * When you call any of the `exec_params` functions, the number of arguments + * is normally known at compile time. This helper function supports the case + * where it is not. + * + * Use this function to pass a variable number of parameters, based on a + * container of parameter values. + * + * The technique combines with the regular static parameters. You can use it + * to insert dynamic parameter lists in any place, or places, among the call's + * parameters. You can even insert multiple dynamic containers. + * + * @param container A container of parameter values. + * @return An object representing the parameters. + */ +template +[[deprecated("Use the params class instead.")]] constexpr inline auto +make_dynamic_params(C const &container) +{ + using IT = typename C::const_iterator; +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return pqxx::internal::dynamic_params{container}; +#include "pqxx/internal/ignore-deprecated-post.hxx" +} + + +/// Pass a number of statement parameters only known at runtime. +/** @deprecated Use @ref params instead. + * + * When you call any of the `exec_params` functions, the number of arguments + * is normally known at compile time. This helper function supports the case + * where it is not. 
+ * + * Use this function to pass a variable number of parameters, based on a + * container of parameter values. + * + * The technique combines with the regular static parameters. You can use it + * to insert dynamic parameter lists in any place, or places, among the call's + * parameters. You can even insert multiple dynamic containers. + * + * @param container A container of parameter values. + * @param accessor For each parameter `p`, pass `accessor(p)`. + * @return An object representing the parameters. + */ +template +[[deprecated("Use the params class instead.")]] constexpr inline auto +make_dynamic_params(C &container, ACCESSOR accessor) +{ + using IT = decltype(std::begin(container)); +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return pqxx::internal::dynamic_params{container, accessor}; +#include "pqxx/internal/ignore-deprecated-post.hxx" +} +} // namespace pqxx::prepare + + +namespace pqxx +{ +/// Generate parameter placeholders for use in an SQL statement. +/** When you want to pass parameters to a prepared statement or a parameterised + * statement, you insert placeholders into the SQL. During invocation, the + * database replaces those with the respective parameter values you passed. + * + * The placeholders look like `$1` (for the first parameter value), `$2` (for + * the second), and so on. You can just write those directly in your + * statement. But for those rare cases where it becomes difficult to track + * which number a placeholder should have, you can use a `placeholders` object + * to count and generate them in order. + */ +template class placeholders +{ +public: + /// Maximum number of parameters we support. + static inline constexpr unsigned int max_params{ + (std::numeric_limits::max)()}; + + placeholders() + { + static constexpr auto initial{"$1\0"sv}; + initial.copy(std::data(m_buf), std::size(initial)); + } + + /// Read an ephemeral version of the current placeholder text. + /** @warning Changing the current placeholder number will overwrite this. + * Use the view immediately, or lose it. + */ + constexpr zview view() const &noexcept + { + return zview{std::data(m_buf), m_len}; + } + + /// Read the current placeholder text, as a `std::string`. + /** This will be slightly slower than converting to a `zview`. With most + * C++ implementations however, until you get into ridiculous numbers of + * parameters, the string will benefit from the Short String Optimization, or + * SSO. + */ + std::string get() const { return std::string(std::data(m_buf), m_len); } + + /// Move on to the next parameter. + void next() & + { + if (m_current >= max_params) + throw range_error{pqxx::internal::concat( + "Too many parameters in one statement: limit is ", max_params, ".")}; + ++m_current; + if (m_current % 10 == 0) + { + // Carry the 1. Don't get too clever for this relatively rare + // case, just rewrite the entire number. Leave the $ in place + // though. + char *const data{std::data(m_buf)}; + char *const end{string_traits::into_buf( + data + 1, data + std::size(m_buf), m_current)}; + // (Subtract because we don't include the trailing zero.) + m_len = check_cast(end - data, "placeholders counter") - 1; + } + else + { + PQXX_LIKELY + // Shortcut for the common case: just increment that last digit. + ++m_buf[m_len - 1]; + } + } + + /// Return the current placeholder number. The initial placeholder is 1. + COUNTER count() const noexcept { return m_current; } + +private: + /// Current placeholder number. Starts at 1. 
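A sketch of the placeholders counter in use while composing SQL at run time; the table and column names are assumptions.

pqxx::placeholders<unsigned int> ph;
std::string sql{"SELECT * FROM events WHERE type = " + ph.get()};  // appends "$1"
ph.next();
sql += " AND created > " + ph.get();                               // appends "$2"
// sql can now be executed with two parameter values, e.g. via exec_params().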
+ COUNTER m_current = 1; + + /// Length of the current placeholder string, not including trailing zero. + COUNTER m_len = 2; + + /// Text buffer where we render the placeholders, with a trailing zero. + /** We keep reusing this for every subsequent placeholder, just because we + * don't like string allocations. + * + * Maximum length is the maximum base-10 digits that COUNTER can fully + * represent, plus 1 more for the extra digit that it can only partially + * fill up, plus room for the dollar sign and the trailing zero. + */ + std::array::digits10 + 3> m_buf; +}; + + +/// Build a parameter list for a parameterised or prepared statement. +/** When calling a parameterised statement or a prepared statement, you can + * pass parameters into the statement directly in the invocation, as + * additional arguments to `exec_prepared` or `exec_params`. But in + * complex cases, sometimes that's just not convenient. + * + * In those situations, you can create a `params` and append your parameters + * into that, one by one. Then you pass the `params` to `exec_prepared` or + * `exec_params`. + * + * Combinations also work: if you have a `params` containing a string + * parameter, and you call `exec_params` with an `int` argument followed by + * your `params`, you'll be passing the `int` as the first parameter and + * the string as the second. You can even insert a `params` in a `params`, + * or pass two `params` objects to a statement. + */ +class PQXX_LIBEXPORT params +{ +public: + params() = default; + + /// Pre-populate a `params` with `args`. Feel free to add more later. + template constexpr params(Args &&...args) + { + reserve(sizeof...(args)); + append_pack(std::forward(args)...); + } + + /// Pre-allocate room for at least `n` parameters. + /** This is not needed, but it may improve efficiency. + * + * Reserve space if you're going to add parameters individually, and you've + * got some idea of how many there are going to be. It may save some + * memory re-allocations. + */ + void reserve(std::size_t n) &; + + // C++20: constexpr. + /// Get the number of parameters currently in this `params`. + [[nodiscard]] auto size() const noexcept { return m_params.size(); } + + // C++20: Use the vector's ssize() directly and go noexcept+constexpr. + /// Get the number of parameters (signed). + /** Unlike `size()`, this is not yet `noexcept`. That's because C++17's + * `std::vector` does not have a `ssize()` member function. These member + * functions are `noexcept`, but `std::size()` and `std::ssize()` are + * not. + */ + [[nodiscard]] auto ssize() const { return pqxx::internal::ssize(m_params); } + + /// Append a null value. + void append() &; + + /// Append a non-null zview parameter. + /** The underlying data must stay valid for as long as the `params` + * remains active. + */ + void append(zview) &; + + /// Append a non-null string parameter. + /** Copies the underlying data into internal storage. For best efficiency, + * use the @ref zview variant if you can, or `std::move()` + */ + void append(std::string const &) &; + + /// Append a non-null string parameter. + void append(std::string &&) &; + + /// Append a non-null binary parameter. + /** The underlying data must stay valid for as long as the `params` + * remains active. + */ + void append(std::basic_string_view) &; + + /// Append a non-null binary parameter. + /** Copies the underlying data into internal storage. For best efficiency, + * use the `std::basic_string_view` variant if you can, or + * `std::move()`. 
+ */ + void append(std::basic_string const &) &; + +#if defined(PQXX_HAVE_CONCEPTS) + /// Append a non-null binary parameter. + /** The `data` object must stay in place and unchanged, for as long as the + * `params` remains active. + */ + template void append(DATA const &data) & + { + append( + std::basic_string_view{std::data(data), std::size(data)}); + } +#endif // PQXX_HAVE_CONCEPTS + + /// Append a non-null binary parameter. + void append(std::basic_string &&) &; + + /// @deprecated Append binarystring parameter. + /** The binarystring must stay valid for as long as the `params` remains + * active. + */ + void append(binarystring const &value) &; + + /// Append all parameters from value. + template + void append(pqxx::internal::dynamic_params const &value) & + { + for (auto ¶m : value) append(value.access(param)); + } + + void append(params const &value) &; + + void append(params &&value) &; + + /// Append a non-null parameter, converting it to its string + /// representation. + template void append(TYPE const &value) & + { + // TODO: Pool storage for multiple string conversions in one buffer? + if constexpr (nullness>::always_null) + { + ignore_unused(value); + m_params.emplace_back(); + } + else if (is_null(value)) + { + m_params.emplace_back(); + } + else + { + m_params.emplace_back(entry{to_string(value)}); + } + } + + /// Append all elements of `range` as parameters. + template void append_multi(RANGE const &range) & + { +#if defined(PQXX_HAVE_CONCEPTS) + if constexpr (std::ranges::sized_range) + reserve(std::size(*this) + std::size(range)); +#endif + for (auto &value : range) append(value); + } + + /// For internal use: Generate a `params` object for use in calls. + /** The params object encapsulates the pointers which we will need to pass + * to libpq when calling a parameterised or prepared statement. + * + * The pointers in the params will refer to storage owned by either the + * params object, or the caller. This is not a problem because a + * `c_params` object is guaranteed to live only while the call is going on. + * As soon as we climb back out of that call tree, we're done with that + * data. + */ + pqxx::internal::c_params make_c_params() const; + +private: + /// Recursively append a pack of params. + template + void append_pack(Arg &&arg, More &&...args) + { + this->append(std::forward(arg)); + // Recurse for remaining args. + append_pack(std::forward(args)...); + } + + /// Terminating case: append an empty parameter pack. It's not hard BTW. + constexpr void append_pack() noexcept {} + + // The way we store a parameter depends on whether it's binary or text + // (most types are text), and whether we're responsible for storing the + // contents. + using entry = std::variant< + std::nullptr_t, zview, std::string, std::basic_string_view, + std::basic_string>; + std::vector m_params; + + static constexpr std::string_view s_overflow{ + "Statement parameter length overflow."sv}; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/pipeline b/ext/libpqxx-7.7.3/include/pqxx/pipeline new file mode 100644 index 000000000..bf828843a --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/pipeline @@ -0,0 +1,8 @@ +/** pqxx::pipeline class. + * + * Throughput-optimized query interface. + */ +// Actual definitions in .hxx file so editors and such recognize file type. 
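A sketch of building a params object incrementally and handing it to exec_params, as the class documentation above describes; the transaction tx, the orders table, and the query text are assumptions.

pqxx::params p;
p.reserve(3);
p.append(42);                       // converted through to_string()
p.append(std::string{"pending"});   // copied into the params object
p.append();                         // a null parameter
auto r = tx.exec_params(
  "SELECT * FROM orders WHERE id = $1 AND status = $2 AND note IS NOT DISTINCT FROM $3",
  p);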
+#include "pqxx/internal/header-pre.hxx" +#include "pqxx/pipeline.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/pipeline.hxx b/ext/libpqxx-7.7.3/include/pqxx/pipeline.hxx new file mode 100644 index 000000000..049dcdd58 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/pipeline.hxx @@ -0,0 +1,237 @@ +/* Definition of the pqxx::pipeline class. + * + * Throughput-optimized mechanism for executing queries. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/pipeline instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_PIPELINE +#define PQXX_H_PIPELINE + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include + +#include "pqxx/transaction_base.hxx" + + +namespace pqxx +{ +// TODO: libpq 14 introduced a similar "pipeline mode." Can we use that? + +/// Processes several queries in FIFO manner, optimized for high throughput. +/** Use a pipeline if you want to keep doing useful work while your queries are + * executing. Result retrieval is decoupled from execution request; queries + * "go in at the front" and results "come out the back." + * + * Actually, you can retrieve the results in any order if you want, but it may + * lead to surprising "time travel" effects if any of the queries fails. In + * particular, syntax errors in the queries can confuse things and show up too + * early in the stream of results. + * + * Generally, if any of the queries fails, it will throw an exception at the + * point where you request its result. But it may happen earlier, especially + * if you request results out of chronological order. + * + * @warning While a pipeline is active, you cannot execute queries, open + * streams, etc. on the same transaction. A transaction can have at most one + * object of a type derived from @ref pqxx::transaction_focus active on it at a + * time. + */ +class PQXX_LIBEXPORT pipeline : public transaction_focus +{ +public: + /// Identifying numbers for queries. + using query_id = long; + + pipeline(pipeline const &) = delete; + pipeline &operator=(pipeline const &) = delete; + + /// Start a pipeline. + explicit pipeline(transaction_base &t) : transaction_focus{t, s_classname} + { + init(); + } + /// Start a pipeline. Assign it a name, for more helpful error messages. + pipeline(transaction_base &t, std::string_view tname) : + transaction_focus{t, s_classname, tname} + { + init(); + } + + /// Close the pipeline. + ~pipeline() noexcept; + + /// Add query to the pipeline. + /** Queries accumulate in the pipeline, which sends them to the backend in a + * batch separated by semicolons. The queries you insert must not use this + * trick themselves, or the pipeline will get hopelessly confused! + * + * @return Identifier for this query, unique only within this pipeline. + */ + query_id insert(std::string_view) &; + + /// Wait for all ongoing or pending operations to complete, and detach. + /** Detaches from the transaction when done. + * + * This does not produce the queries' results, so it may not report any + * errors which may have occurred in their execution. To be sure that your + * statements succeeded, call @ref retrieve until the pipeline is empty. + */ + void complete(); + + /// Forget all ongoing or pending operations and retrieved results. 
+ /** Queries already sent to the backend may still be completed, depending + * on implementation and timing. + * + * Any error state (unless caused by an internal error) will also be cleared. + * This is mostly useful in a nontransaction, since a backend transaction is + * aborted automatically when an error occurs. + * + * Detaches from the transaction when done. + */ + void flush(); + + /// Cancel ongoing query, if any. + /** May cancel any or all of the queries that have been inserted at this + * point whose results have not yet been retrieved. If the pipeline lives in + * a backend transaction, that transaction may be left in a nonfunctional + * state in which it can only be aborted. + * + * Therefore, either use this function in a nontransaction, or abort the + * transaction after calling it. + */ + void cancel(); + + /// Is result for given query available? + [[nodiscard]] bool is_finished(query_id) const; + + /// Retrieve result for given query. + /** If the query failed for whatever reason, this will throw an exception. + * The function will block if the query has not finished yet. + * @warning If results are retrieved out-of-order, i.e. in a different order + * than the one in which their queries were inserted, errors may "propagate" + * to subsequent queries. + */ + result retrieve(query_id qid) + { + return retrieve(m_queries.find(qid)).second; + } + + /// Retrieve oldest unretrieved result (possibly wait for one). + /** @return The query's identifier and its result set. */ + std::pair retrieve(); + + [[nodiscard]] bool empty() const noexcept { return std::empty(m_queries); } + + /// Set maximum number of queries to retain before issuing them to the + /// backend. + /** The pipeline will perform better if multiple queries are issued at once, + * but retaining queries until the results are needed (as opposed to issuing + * them to the backend immediately) may negate any performance benefits the + * pipeline can offer. + * + * Recommended practice is to set this value no higher than the number of + * queries you intend to insert at a time. + * @param retain_max A nonnegative "retention capacity;" passing zero will + * cause queries to be issued immediately + * @return Old retention capacity + */ + int retain(int retain_max = 2) &; + + + /// Resume retained query emission. Harmless when not needed. + void resume() &; + +private: + struct PQXX_PRIVATE Query + { + explicit Query(std::string_view q) : + query{std::make_shared(q)} + {} + + std::shared_ptr query; + result res; + }; + + using QueryMap = std::map; + + void init(); + void attach(); + void detach(); + + /// Upper bound to query id's. + static constexpr query_id qid_limit() noexcept + { + // Parenthesise this to work around an eternal Visual C++ problem: + // Without the extra parentheses, unless NOMINMAX is defined, the + // preprocessor will mistake this "max" for its annoying built-in macro + // of the same name. + return (std::numeric_limits::max)(); + } + + /// Create new query_id. + PQXX_PRIVATE query_id generate_id(); + + bool have_pending() const noexcept + { + return m_issuedrange.second != m_issuedrange.first; + } + + PQXX_PRIVATE void issue(); + + /// The given query failed; never issue anything beyond that. + void set_error_at(query_id qid) noexcept + { + PQXX_UNLIKELY + if (qid < m_error) + m_error = qid; + } + + /// Throw pqxx::internal_error. 
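A sketch of typical pipeline use, assuming a transaction tx; the queries are placeholders. Results come back in the order the queries went in, and a failure surfaces when the failing query's result is retrieved.

pqxx::pipeline pipe{tx};
auto const q1 = pipe.insert("SELECT 1");
auto const q2 = pipe.insert("SELECT 2");
pipe.complete();                        // wait for all inserted queries to finish
pqxx::result r1{pipe.retrieve(q1)};     // throws here if that query failed
pqxx::result r2{pipe.retrieve(q2)};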
+ [[noreturn]] PQXX_PRIVATE void internal_error(std::string const &err); + + PQXX_PRIVATE bool obtain_result(bool expect_none = false); + + PQXX_PRIVATE void obtain_dummy(); + PQXX_PRIVATE void get_further_available_results(); + PQXX_PRIVATE void check_end_results(); + + /// Receive any results that happen to be available; it's not urgent. + PQXX_PRIVATE void receive_if_available(); + + /// Receive results, up to stop if possible. + PQXX_PRIVATE void receive(pipeline::QueryMap::const_iterator stop); + std::pair retrieve(pipeline::QueryMap::iterator); + + QueryMap m_queries; + std::pair m_issuedrange; + int m_retain = 0; + int m_num_waiting = 0; + query_id m_q_id = 0; + + /// Is there a "dummy query" pending? + bool m_dummy_pending = false; + + /// Point at which an error occurred; no results beyond it will be available + query_id m_error = qid_limit(); + + /// Encoding. + /** We store this in the object to avoid the risk of exceptions at awkward + * moments. + */ + internal::encoding_group m_encoding; + + static constexpr std::string_view s_classname{"pipeline"}; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/pqxx b/ext/libpqxx-7.7.3/include/pqxx/pqxx new file mode 100644 index 000000000..17a8eaa9c --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/pqxx @@ -0,0 +1,28 @@ +/// Convenience header: include all libpqxx definitions. +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/array.hxx" +#include "pqxx/binarystring.hxx" +#include "pqxx/blob.hxx" +#include "pqxx/connection.hxx" +#include "pqxx/cursor.hxx" +#include "pqxx/errorhandler.hxx" +#include "pqxx/except.hxx" +#include "pqxx/largeobject.hxx" +#include "pqxx/nontransaction.hxx" +#include "pqxx/notification.hxx" +#include "pqxx/params.hxx" +#include "pqxx/pipeline.hxx" +#include "pqxx/prepared_statement.hxx" +#include "pqxx/result.hxx" +#include "pqxx/internal/result_iterator.hxx" +#include "pqxx/internal/result_iter.hxx" +#include "pqxx/robusttransaction.hxx" +#include "pqxx/row.hxx" +#include "pqxx/stream_from.hxx" +#include "pqxx/stream_to.hxx" +#include "pqxx/subtransaction.hxx" +#include "pqxx/transaction.hxx" +#include "pqxx/transactor.hxx" + +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/prepared_statement b/ext/libpqxx-7.7.3/include/pqxx/prepared_statement new file mode 100644 index 000000000..674be7090 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/prepared_statement @@ -0,0 +1,3 @@ +/// @deprecated Include @c instead. + +#include "params.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/prepared_statement.hxx b/ext/libpqxx-7.7.3/include/pqxx/prepared_statement.hxx new file mode 100644 index 000000000..674be7090 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/prepared_statement.hxx @@ -0,0 +1,3 @@ +/// @deprecated Include @c instead. + +#include "params.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/range b/ext/libpqxx-7.7.3/include/pqxx/range new file mode 100644 index 000000000..11985eca4 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/range @@ -0,0 +1,6 @@ +/** Client-side support for SQL range types. + */ +// Actual definitions in .hxx file so editors and such recognize file type. 
+#include "pqxx/internal/header-pre.hxx" +#include "pqxx/range.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/range.hxx b/ext/libpqxx-7.7.3/include/pqxx/range.hxx new file mode 100644 index 000000000..dc480e4b7 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/range.hxx @@ -0,0 +1,515 @@ +#ifndef PQXX_H_RANGE +#define PQXX_H_RANGE + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include + +#include "pqxx/internal/array-composite.hxx" +#include "pqxx/internal/concat.hxx" + +namespace pqxx +{ +/// An _unlimited_ boundary value to a @ref pqxx::range. +/** Use this as a lower or upper bound for a range if the range should extend + * to infinity on that side. + * + * An unlimited boundary is always inclusive of "infinity" values, if the + * range's value type supports them. + */ +struct no_bound +{ + template constexpr bool extends_down_to(TYPE const &) const + { + return true; + } + template constexpr bool extends_up_to(TYPE const &) const + { + return true; + } +}; + + +/// An _inclusive_ boundary value to a @ref pqxx::range. +/** Use this as a lower or upper bound for a range if the range should include + * the value. + */ +template class inclusive_bound +{ +public: + inclusive_bound() = delete; + explicit inclusive_bound(TYPE const &value) : m_value{value} + { + if (is_null(value)) + throw argument_error{"Got null value as an inclusive range bound."}; + } + + [[nodiscard]] constexpr TYPE const &get() const &noexcept { return m_value; } + + // TODO: constexpr and/or noexcept if underlying operator supports it. + /// Would this bound, as a lower bound, include value? + [[nodiscard]] bool extends_down_to(TYPE const &value) const + { + return not(value < m_value); + } + + // TODO: constexpr and/or noexcept if underlying operator supports it. + /// Would this bound, as an upper bound, include value? + [[nodiscard]] bool extends_up_to(TYPE const &value) const + { + return not(m_value < value); + } + +private: + TYPE m_value; +}; + + +/// An _exclusive_ boundary value to a @ref pqxx::range. +/** Use this as a lower or upper bound for a range if the range should _not_ + * include the value. + */ +template class exclusive_bound +{ +public: + exclusive_bound() = delete; + explicit exclusive_bound(TYPE const &value) : m_value{value} + { + if (is_null(value)) + throw argument_error{"Got null value as an exclusive range bound."}; + } + + [[nodiscard]] constexpr TYPE const &get() const &noexcept { return m_value; } + + // TODO: constexpr and/or noexcept if underlying operator supports it. + /// Would this bound, as a lower bound, include value? + [[nodiscard]] bool extends_down_to(TYPE const &value) const + { + return m_value < value; + } + + // TODO: constexpr and/or noexcept if underlying operator supports it. + /// Would this bound, as an upper bound, include value? + [[nodiscard]] bool extends_up_to(TYPE const &value) const + { + return value < m_value; + } + +private: + TYPE m_value; +}; + + +/// A range boundary value. +/** A range bound is either no bound at all; or an inclusive bound; or an + * exclusive bound. Pass one of the three to the constructor. + */ +template class range_bound +{ +public: + range_bound() = delete; + // TODO: constexpr and/or noexcept if underlying constructor supports it. + range_bound(no_bound) : m_bound{} {} + // TODO: constexpr and/or noexcept if underlying constructor supports it. 
+ range_bound(inclusive_bound const &bound) : m_bound{bound} {} + // TODO: constexpr and/or noexcept if underlying constructor supports it. + range_bound(exclusive_bound const &bound) : m_bound{bound} {} + // TODO: constexpr and/or noexcept if underlying constructor supports it. + range_bound(range_bound const &) = default; + // TODO: constexpr and/or noexcept if underlying constructor supports it. + range_bound(range_bound &&) = default; + + // TODO: constexpr and/or noexcept if underlying operators support it. + bool operator==(range_bound const &rhs) const + { + if (this->is_limited()) + return ( + rhs.is_limited() and (this->is_inclusive() == rhs.is_inclusive()) and + (*this->value() == *rhs.value())); + else + return not rhs.is_limited(); + } + + // TODO: constexpr and/or noexcept if underlying operator supports it. + bool operator!=(range_bound const &rhs) const { return not(*this == rhs); } + range_bound &operator=(range_bound const &) = default; + range_bound &operator=(range_bound &&) = default; + + /// Is this a finite bound? + constexpr bool is_limited() const noexcept + { + return not std::holds_alternative(m_bound); + } + + /// Is this boundary an inclusive one? + constexpr bool is_inclusive() const noexcept + { + return std::holds_alternative>(m_bound); + } + + /// Is this boundary an exclusive one? + constexpr bool is_exclusive() const noexcept + { + return std::holds_alternative>(m_bound); + } + + // TODO: constexpr/noexcept if underlying function supports it. + /// Would this bound, as a lower bound, include `value`? + bool extends_down_to(TYPE const &value) const + { + return std::visit( + [&value](auto const &bound) { return bound.extends_down_to(value); }, + m_bound); + } + + // TODO: constexpr/noexcept if underlying function supports it. + /// Would this bound, as an upper bound, include `value`? + bool extends_up_to(TYPE const &value) const + { + return std::visit( + [&value](auto const &bound) { return bound.extends_up_to(value); }, + m_bound); + } + + /// Return bound value, or `nullptr` if it's not limited. + [[nodiscard]] constexpr TYPE const *value() const &noexcept + { + return std::visit( + [](auto const &bound) noexcept { + using bound_t = std::decay_t; + if constexpr (std::is_same_v) + return static_cast(nullptr); + else + return &bound.get(); + }, + m_bound); + } + +private: + std::variant, exclusive_bound> m_bound; +}; + + +// C++20: Concepts for comparisons, construction, etc. +/// A C++ equivalent to PostgreSQL's range types. +/** You can use this as a client-side representation of a "range" in SQL. + * + * PostgreSQL defines several range types, differing in the data type over + * which they range. You can also define your own range types. + * + * Usually you'll want the server to deal with ranges. But on occasions where + * you need to work with them client-side, you may want to use @ref + * pqxx::range. (In cases where all you do is pass them along to the server + * though, it's not worth the complexity. In that case you might as well treat + * ranges as just strings.) + * + * For documentation on PostgreSQL's range types, see: + * https://www.postgresql.org/docs/current/rangetypes.html + * + * The value type must be copyable and default-constructible, and support the + * less-than (`<`) and equals (`==`) comparisons. Value initialisation must + * produce a consistent value. + */ +template class range +{ +public: + /// Create a range. + /** For each of the two bounds, pass a @ref no_bound, @ref inclusive_bound, + * or + * @ref exclusive_bound. 
+ */ + range(range_bound lower, range_bound upper) : + m_lower{lower}, m_upper{upper} + { + if ( + lower.is_limited() and upper.is_limited() and + (*upper.value() < *lower.value())) + throw range_error{internal::concat( + "Range's lower bound (", *lower.value(), + ") is greater than its upper bound (", *upper.value(), ").")}; + } + + // TODO: constexpr and/or noexcept if underlying constructor supports it. + /// Create an empty range. + /** SQL has a separate literal to denote an empty range, but any range which + * encompasses no values is an empty range. + */ + range() : + m_lower{exclusive_bound{TYPE{}}}, + m_upper{exclusive_bound{TYPE{}}} + {} + + // TODO: constexpr and/or noexcept if underlying operators support it. + bool operator==(range const &rhs) const + { + return (this->lower_bound() == rhs.lower_bound() and + this->upper_bound() == rhs.upper_bound()) or + (this->empty() and rhs.empty()); + } + + // TODO: constexpr and/or noexcept if underlying operator supports it. + bool operator!=(range const &rhs) const { return !(*this == rhs); } + + range(range const &) = default; + range(range &&) = default; + range &operator=(range const &) = default; + range &operator=(range &&) = default; + + // TODO: constexpr and/or noexcept if underlying operator supports it. + /// Is this range clearly empty? + /** An empty range encompasses no values. + * + * It is possible to "fool" this. For example, if your range is of an + * integer type and has exclusive bounds of 0 and 1, it encompasses no values + * but its `empty()` will return false. The PostgreSQL implementation, by + * contrast, will notice that it is empty. Similar things can happen for + * floating-point types, but with more subtleties and edge cases. + */ + bool empty() const + { + return (m_lower.is_exclusive() or m_upper.is_exclusive()) and + m_lower.is_limited() and m_upper.is_limited() and + not(*m_lower.value() < *m_upper.value()); + } + + // TODO: constexpr and/or noexcept if underlying functions support it. + /// Does this range encompass `value`? + bool contains(TYPE value) const + { + return m_lower.extends_down_to(value) and m_upper.extends_up_to(value); + } + + // TODO: constexpr and/or noexcept if underlying operators support it. + /// Does this range encompass all of `other`? + /** This function is not particularly smart. It does not know, for example, + * that integer ranges `[0,9]` and `[0,10)` contain the same values. + */ + bool contains(range const &other) const + { + return (*this & other) == other; + } + + [[nodiscard]] constexpr range_bound const & + lower_bound() const &noexcept + { + return m_lower; + } + [[nodiscard]] constexpr range_bound const & + upper_bound() const &noexcept + { + return m_upper; + } + + // TODO: constexpr and/or noexcept if underlying operators support it. + /// Intersection of two ranges. + /** Returns a range describing those values which are in both ranges. 
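A concrete illustration of the empty() caveat noted above: an integer range with exclusive bounds 0 and 1 holds no values, yet empty() reports false.

pqxx::range<int> tricky{pqxx::exclusive_bound<int>{0}, pqxx::exclusive_bound<int>{1}};
bool e = tricky.empty();       // false, although no integer lies between the bounds
bool c = tricky.contains(0);   // false: the lower bound is exclusive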
+ */ + range operator&(range const &other) const + { + range_bound lower{no_bound{}}; + if (not this->lower_bound().is_limited()) + lower = other.lower_bound(); + else if (not other.lower_bound().is_limited()) + lower = this->lower_bound(); + else if (*this->lower_bound().value() < *other.lower_bound().value()) + lower = other.lower_bound(); + else if (*other.lower_bound().value() < *this->lower_bound().value()) + lower = this->lower_bound(); + else if (this->lower_bound().is_exclusive()) + lower = this->lower_bound(); + else + lower = other.lower_bound(); + + range_bound upper{no_bound{}}; + if (not this->upper_bound().is_limited()) + upper = other.upper_bound(); + else if (not other.upper_bound().is_limited()) + upper = this->upper_bound(); + else if (*other.upper_bound().value() < *this->upper_bound().value()) + upper = other.upper_bound(); + else if (*this->upper_bound().value() < *other.upper_bound().value()) + upper = this->upper_bound(); + else if (this->upper_bound().is_exclusive()) + upper = this->upper_bound(); + else + upper = other.upper_bound(); + + if ( + lower.is_limited() and upper.is_limited() and + (*upper.value() < *lower.value())) + return {}; + else + return {lower, upper}; + } + + /// Convert to another base type. + template operator range() const + { + range_bound lower{no_bound{}}, upper{no_bound{}}; + if (lower_bound().is_inclusive()) + lower = inclusive_bound{*lower_bound().value()}; + else if (lower_bound().is_exclusive()) + lower = exclusive_bound{*lower_bound().value()}; + + if (upper_bound().is_inclusive()) + upper = inclusive_bound{*upper_bound().value()}; + else if (upper_bound().is_exclusive()) + upper = exclusive_bound{*upper_bound().value()}; + + return {lower, upper}; + } + +private: + range_bound m_lower, m_upper; +}; + + +/// String conversions for a @ref range type. +/** Conversion assumes that either your client encoding is UTF-8, or the values + * are pure ASCII. + */ +template struct string_traits> +{ + [[nodiscard]] static inline zview + to_buf(char *begin, char *end, range const &value) + { + return generic_to_buf(begin, end, value); + } + + static inline char * + into_buf(char *begin, char *end, range const &value) + { + if (value.empty()) + { + if ((end - begin) <= internal::ssize(s_empty)) + throw conversion_overrun{s_overrun.c_str()}; + char *here = begin + s_empty.copy(begin, std::size(s_empty)); + *here++ = '\0'; + return here; + } + else + { + if (end - begin < 4) + throw conversion_overrun{s_overrun.c_str()}; + char *here = begin; + *here++ = + (static_cast(value.lower_bound().is_inclusive() ? '[' : '(')); + TYPE const *lower{value.lower_bound().value()}; + // Convert bound (but go back to overwrite that trailing zero). + if (lower != nullptr) + here = string_traits::into_buf(here, end, *lower) - 1; + *here++ = ','; + TYPE const *upper{value.upper_bound().value()}; + // Convert bound (but go back to overwrite that trailing zero). + if (upper != nullptr) + here = string_traits::into_buf(here, end, *upper) - 1; + if ((end - here) < 2) + throw conversion_overrun{s_overrun.c_str()}; + *here++ = + static_cast(value.upper_bound().is_inclusive() ? 
']' : ')'); + *here++ = '\0'; + return here; + } + } + + [[nodiscard]] static inline range from_string(std::string_view text) + { + if (std::size(text) < 3) + throw pqxx::conversion_error{err_bad_input(text)}; + bool left_inc{false}; + switch (text[0]) + { + case '[': left_inc = true; break; + + case '(': break; + + case 'e': + case 'E': + if ( + (std::size(text) != std::size(s_empty)) or + (text[1] != 'm' and text[1] != 'M') or + (text[2] != 'p' and text[2] != 'P') or + (text[3] != 't' and text[3] != 'T') or + (text[4] != 'y' and text[4] != 'Y')) + throw pqxx::conversion_error{err_bad_input(text)}; + return {}; + break; + + default: throw pqxx::conversion_error{err_bad_input(text)}; + } + + auto scan{internal::get_glyph_scanner(internal::encoding_group::UTF8)}; + // The field parser uses this to track which field it's parsing, and + // when not to expect a field separator. + std::size_t index{0}; + // The last field we expect to see. + static constexpr std::size_t last{1}; + // Current parsing position. We skip the opening parenthesis or bracket. + std::size_t pos{1}; + // The string may leave out either bound to indicate that it's unlimited. + std::optional lower, upper; + // We reuse the same field parser we use for composite values and arrays. + internal::parse_composite_field(index, text, pos, lower, scan, last); + internal::parse_composite_field(index, text, pos, upper, scan, last); + + // We need one more character: the closing parenthesis or bracket. + if (pos != std::size(text)) + throw pqxx::conversion_error{err_bad_input(text)}; + char const closing{text[pos - 1]}; + if (closing != ')' and closing != ']') + throw pqxx::conversion_error{err_bad_input(text)}; + bool const right_inc{closing == ']'}; + + range_bound lower_bound{no_bound{}}, upper_bound{no_bound{}}; + if (lower) + { + if (left_inc) + lower_bound = inclusive_bound{*lower}; + else + lower_bound = exclusive_bound{*lower}; + } + if (upper) + { + if (right_inc) + upper_bound = inclusive_bound{*upper}; + else + upper_bound = exclusive_bound{*upper}; + } + + return {lower_bound, upper_bound}; + } + + [[nodiscard]] static inline constexpr std::size_t + size_buffer(range const &value) noexcept + { + TYPE const *lower{value.lower_bound().value()}, + *upper{value.upper_bound().value()}; + std::size_t const lsz{ + lower == nullptr ? 0 : string_traits::size_buffer(*lower) - 1}, + usz{upper == nullptr ? 0 : string_traits::size_buffer(*upper) - 1}; + + if (value.empty()) + return std::size(s_empty) + 1; + else + return 1 + lsz + 1 + usz + 2; + } + +private: + static constexpr zview s_empty{"empty"_zv}; + static constexpr auto s_overrun{"Not enough space in buffer for range."_zv}; + + /// Compose error message for invalid range input. + static std::string err_bad_input(std::string_view text) + { + return internal::concat("Invalid range input: '", text, "'"); + } +}; + + +/// A range type does not have an innate null value. +template struct nullness> : no_null> +{}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/result b/ext/libpqxx-7.7.3/include/pqxx/result new file mode 100644 index 000000000..523394b72 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/result @@ -0,0 +1,16 @@ +/** pqxx::result class and support classes. + * + * pqxx::result represents the set of result rows from a database query. + */ +// Actual definitions in .hxx file so editors and such recognize file type. 
+#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/result.hxx" + +// Now include some types which depend on result, but which the user will +// expect to see defined after including this header. +#include "pqxx/internal/result_iterator.hxx" +#include "pqxx/field.hxx" +#include "pqxx/internal/result_iter.hxx" + +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/result.hxx b/ext/libpqxx-7.7.3/include/pqxx/result.hxx new file mode 100644 index 000000000..6c41cc096 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/result.hxx @@ -0,0 +1,335 @@ +/* Definitions for the pqxx::result class and support classes. + * + * pqxx::result represents the set of result rows from a database query. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/result instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_RESULT +#define PQXX_H_RESULT + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include +#include + +#include "pqxx/except.hxx" +#include "pqxx/types.hxx" +#include "pqxx/util.hxx" +#include "pqxx/zview.hxx" + +#include "pqxx/internal/encodings.hxx" + + +namespace pqxx::internal +{ +// TODO: Make noexcept (but breaks ABI). +PQXX_LIBEXPORT void clear_result(pq::PGresult const *); +} // namespace pqxx::internal + + +namespace pqxx::internal::gate +{ +class result_connection; +class result_creation; +class result_pipeline; +class result_row; +class result_sql_cursor; +} // namespace pqxx::internal::gate + + +namespace pqxx +{ +/// Result set containing data returned by a query or command. +/** This behaves as a container (as defined by the C++ standard library) and + * provides random access const iterators to iterate over its rows. You can + * also access a row by indexing a `result R` by the row's zero-based + * number: + * + * + * for (result::size_type i=0; i < std::size(R); ++i) Process(R[i]); + * + * + * Result sets in libpqxx are lightweight, reference-counted wrapper objects + * which are relatively small and cheap to copy. Think of a result object as + * a "smart pointer" to an underlying result set. + * + * @warning The result set that a result object points to is not thread-safe. + * If you copy a result object, it still refers to the same underlying result + * set. So never copy, destroy, query, or otherwise access a result while + * another thread may be copying, destroying, querying, or otherwise accessing + * the same result set--even if it is doing so through a different result + * object! + */ +class PQXX_LIBEXPORT result +{ +public: + using size_type = result_size_type; + using difference_type = result_difference_type; + using reference = row; + using const_iterator = const_result_iterator; + using pointer = const_iterator; + using iterator = const_iterator; + using const_reverse_iterator = const_reverse_result_iterator; + using reverse_iterator = const_reverse_iterator; + + result() noexcept : + m_data{make_data_pointer()}, + m_query{}, + m_encoding{internal::encoding_group::MONOBYTE} + {} + + result(result const &rhs) noexcept = default; + result(result &&rhs) noexcept = default; + + /// Assign one result to another. + /** Copying results is cheap: it copies only smart pointers, but the actual + * data stays in the same place. 
+ */ + result &operator=(result const &rhs) noexcept = default; + + /// Assign one result to another, invaliding the old one. + result &operator=(result &&rhs) noexcept = default; + + /** + * @name Comparisons + * + * You can compare results for equality. Beware: this is a very strict, + * dumb comparison. The smallest difference between two results (such as a + * string "Foo" versus a string "foo") will make them unequal. + */ + //@{ + /// Compare two results for equality. + [[nodiscard]] bool operator==(result const &) const noexcept; + /// Compare two results for inequality. + [[nodiscard]] bool operator!=(result const &rhs) const noexcept + { + return not operator==(rhs); + } + //@} + + /// Iterate rows, reading them directly into a tuple of "TYPE...". + /** Converts the fields to values of the given respective types. + * + * Use this only with a ranged "for" loop. The iteration produces + * std::tuple which you can "unpack" to a series of `auto` + * variables. + */ + template auto iter() const; + + [[nodiscard]] const_reverse_iterator rbegin() const; + [[nodiscard]] const_reverse_iterator crbegin() const; + [[nodiscard]] const_reverse_iterator rend() const; + [[nodiscard]] const_reverse_iterator crend() const; + + [[nodiscard]] const_iterator begin() const noexcept; + [[nodiscard]] const_iterator cbegin() const noexcept; + [[nodiscard]] inline const_iterator end() const noexcept; + [[nodiscard]] inline const_iterator cend() const noexcept; + + [[nodiscard]] reference front() const noexcept; + [[nodiscard]] reference back() const noexcept; + + [[nodiscard]] PQXX_PURE size_type size() const noexcept; + [[nodiscard]] PQXX_PURE bool empty() const noexcept; + [[nodiscard]] size_type capacity() const noexcept { return size(); } + + /// Exchange two `result` values in an exception-safe manner. + /** If the swap fails, the two values will be exactly as they were before. + * + * The swap is not necessarily thread-safe. + */ + void swap(result &) noexcept; + + /// Index a row by number. + /** This returns a @ref row object. Generally you should not keep the row + * around as a variable, but if you do, make sure that your variable is a + * `row`, not a `row&`. + */ + [[nodiscard]] row operator[](size_type i) const noexcept; + +#if defined(PQXX_HAVE_MULTIDIMENSIONAL_SUBSCRIPT) + // TODO: If C++23 will let us, also accept string for the column. + [[nodiscard]] field + operator[](size_type row_num, row_size_type col_num) const noexcept; +#endif + + /// Index a row by number, but check that the row number is valid. + row at(size_type) const; + + /// Index a field by row number and column number. + field at(size_type, row_size_type) const; + + /// Let go of the result's data. + /** Use this if you need to deallocate the result data earlier than you can + * destroy the `result` object itself. + * + * Multiple `result` objects can refer to the same set of underlying data. + * The underlying data will be deallocated once all `result` objects that + * refer to it are cleared or destroyed. + */ + void clear() noexcept + { + m_data.reset(); + m_query = nullptr; + } + + /** + * @name Column information + */ + //@{ + /// Number of columns in result. + [[nodiscard]] PQXX_PURE row_size_type columns() const noexcept; + + /// Number of given column (throws exception if it doesn't exist). 
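+ /// A small usage sketch (the column name is hypothetical):
+ /// `r.column_type(r.column_number("price"))` looks up a column's type OID by name.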
+ [[nodiscard]] row_size_type column_number(zview name) const; + + /// Name of column with this number (throws exception if it doesn't exist) + [[nodiscard]] char const *column_name(row_size_type number) const &; + + /// Return column's type, as an OID from the system catalogue. + [[nodiscard]] oid column_type(row_size_type col_num) const; + + /// Return column's type, as an OID from the system catalogue. + [[nodiscard]] oid column_type(zview col_name) const + { + return column_type(column_number(col_name)); + } + + /// What table did this column come from? + [[nodiscard]] oid column_table(row_size_type col_num) const; + + /// What table did this column come from? + [[nodiscard]] oid column_table(zview col_name) const + { + return column_table(column_number(col_name)); + } + + /// What column in its table did this column come from? + [[nodiscard]] row_size_type table_column(row_size_type col_num) const; + + /// What column in its table did this column come from? + [[nodiscard]] row_size_type table_column(zview col_name) const + { + return table_column(column_number(col_name)); + } + //@} + + /// Query that produced this result, if available (empty string otherwise) + [[nodiscard]] PQXX_PURE std::string const &query() const &noexcept; + + /// If command was an `INSERT` of 1 row, return oid of the inserted row. + /** @return Identifier of inserted row if exactly one row was inserted, or + * @ref oid_none otherwise. + */ + [[nodiscard]] PQXX_PURE oid inserted_oid() const; + + /// If command was `INSERT`, `UPDATE`, or `DELETE`: number of affected rows. + /** @return Number of affected rows if last command was `INSERT`, `UPDATE`, + * or `DELETE`; zero for all other commands. + */ + [[nodiscard]] PQXX_PURE size_type affected_rows() const; + + // C++20: Concept like std::invocable, but without specifying param types. + /// Run `func` on each row, passing the row's fields as parameters. + /** Goes through the rows from first to last. You provide a callable `func`. + * + * For each row in the `result`, `for_each` will call `func`. It converts + * the row's fields to the types of `func`'s parameters, and pass them to + * `func`. + * + * (Therefore `func` must have a _single_ signature. It can't be a generic + * lambda, or an object of a class with multiple overloaded function call + * operators. Otherwise, `for_each` will have no way to detect a parameter + * list without ambiguity.) + * + * If any of your parameter types is `std::string_view`, it refers to the + * underlying storage of this `result`. + * + * If any of your parameter types is a reference type, its argument will + * refer to a temporary value which only lives for the duration of that + * single invocation to `func`. If the reference is an lvalue reference, it + * must be `const`. + * + * For example, this queries employee names and salaries from the database + * and prints how much each would like to earn instead: + * ```cxx + * tx.exec("SELECT name, salary FROM employee").for_each( + * [](std::string_view name, float salary){ + * std::cout << name << " would like " << salary * 2 << ".\n"; + * }) + * ``` + * + * If `func` throws an exception, processing stops at that point and + * propagates the exception. + * + * @throws usage_error if `func`'s number of parameters does not match the + * number of columns in this result. + */ + template inline void for_each(CALLABLE &&func) const; + +private: + using data_pointer = std::shared_ptr; + + /// Underlying libpq result set. + data_pointer m_data; + + /// Factory for data_pointer. 
+ static data_pointer + make_data_pointer(internal::pq::PGresult const *res = nullptr) noexcept + { + return {res, internal::clear_result}; + } + + friend class pqxx::internal::gate::result_pipeline; + PQXX_PURE std::shared_ptr query_ptr() const noexcept + { + return m_query; + } + + /// Query string. + std::shared_ptr m_query; + + internal::encoding_group m_encoding; + + static std::string const s_empty_string; + + friend class pqxx::field; + // TODO: noexcept. Breaks ABI. + PQXX_PURE char const *get_value(size_type row, row_size_type col) const; + // TODO: noexcept. Breaks ABI. + PQXX_PURE bool get_is_null(size_type row, row_size_type col) const; + PQXX_PURE + field_size_type get_length(size_type, row_size_type) const noexcept; + + friend class pqxx::internal::gate::result_creation; + result( + internal::pq::PGresult *rhs, std::shared_ptr query, + internal::encoding_group enc); + + PQXX_PRIVATE void check_status(std::string_view desc = ""sv) const; + + friend class pqxx::internal::gate::result_connection; + friend class pqxx::internal::gate::result_row; + bool operator!() const noexcept { return m_data.get() == nullptr; } + operator bool() const noexcept { return m_data.get() != nullptr; } + + [[noreturn]] PQXX_PRIVATE void + throw_sql_error(std::string const &Err, std::string const &Query) const; + PQXX_PRIVATE PQXX_PURE int errorposition() const; + PQXX_PRIVATE std::string status_error() const; + + friend class pqxx::internal::gate::result_sql_cursor; + PQXX_PURE char const *cmd_status() const noexcept; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/robusttransaction b/ext/libpqxx-7.7.3/include/pqxx/robusttransaction new file mode 100644 index 000000000..04b71d7cc --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/robusttransaction @@ -0,0 +1,8 @@ +/** pqxx::robusttransaction class. + * + * pqxx::robusttransaction is a slower but safer transaction class. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/robusttransaction.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/robusttransaction.hxx b/ext/libpqxx-7.7.3/include/pqxx/robusttransaction.hxx new file mode 100644 index 000000000..faf6dbf5e --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/robusttransaction.hxx @@ -0,0 +1,120 @@ +/* Definition of the pqxx::robusttransaction class. + * + * pqxx::robusttransaction is a slower but safer transaction class. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/robusttransaction instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ROBUSTTRANSACTION +#define PQXX_H_ROBUSTTRANSACTION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/dbtransaction.hxx" + +namespace pqxx::internal +{ +/// Helper base class for the @ref robusttransaction class template. 
+class PQXX_LIBEXPORT PQXX_NOVTABLE basic_robusttransaction + : public dbtransaction +{ +public: + virtual ~basic_robusttransaction() override = 0; + +protected: + basic_robusttransaction( + connection &c, zview begin_command, std::string_view tname); + basic_robusttransaction(connection &c, zview begin_command); + +private: + using IDType = unsigned long; + + std::string m_conn_string; + std::string m_xid; + int m_backendpid = -1; + + void init(zview begin_command); + + // @warning This function will become `final`. + virtual void do_commit() override; +}; +} // namespace pqxx::internal + + +namespace pqxx +{ +/** + * @ingroup transactions + * + * @{ + */ + +/// Slightly slower, better-fortified version of transaction. +/** Requires PostgreSQL 10 or better. + * + * robusttransaction is similar to transaction, but spends more time and effort + * to deal with the hopefully rare case that the connection to the backend is + * lost just while it's trying to commit. In such cases, the client does not + * know whether the backend (on the other side of the broken connection) + * managed to commit the transaction. + * + * When this happens, robusttransaction tries to reconnect to the database and + * figure out what happened. + * + * This service level was made optional since you may not want to pay the + * overhead where it is not necessary. Certainly the use of this class makes + * no sense for local connections, or for transactions that read the database + * but never modify it, or for noncritical database manipulations. + * + * Besides being slower, it's also more complex. Which means that in practice + * a robusttransaction could actually fail more instead of less often than a + * normal transaction. What robusttransaction tries to achieve is to give you + * certainty, not just be more successful per se. + */ +template +class robusttransaction final : public internal::basic_robusttransaction +{ +public: + /** Create robusttransaction of given name. + * @param c Connection inside which this robusttransaction should live. + * @param tname optional human-readable name for this transaction. + */ + robusttransaction(connection &c, std::string_view tname) : + internal::basic_robusttransaction{ + c, pqxx::internal::begin_cmd, + tname} + {} + + /** Create robusttransaction of given name. + * @param c Connection inside which this robusttransaction should live. + * @param tname optional human-readable name for this transaction. + */ + robusttransaction(connection &c, std::string &&tname) : + internal::basic_robusttransaction{ + c, pqxx::internal::begin_cmd, + std::move(tname)} + {} + + /** Create robusttransaction of given name. + * @param c Connection inside which this robusttransaction should live. + */ + explicit robusttransaction(connection &c) : + internal::basic_robusttransaction{ + c, pqxx::internal::begin_cmd} + {} + + virtual ~robusttransaction() noexcept override { close(); } +}; + +/** + * @} + */ +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/row b/ext/libpqxx-7.7.3/include/pqxx/row new file mode 100644 index 000000000..62a950ac8 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/row @@ -0,0 +1,11 @@ +/** pqxx::row class. + * + * pqxx::row refers to a row in a result. + */ +// Actual definitions in .hxx file so editors and such recognize file type. 
+#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/result.hxx" +#include "pqxx/row.hxx" + +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/row.hxx b/ext/libpqxx-7.7.3/include/pqxx/row.hxx new file mode 100644 index 000000000..5be5132e3 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/row.hxx @@ -0,0 +1,561 @@ +/* Definitions for the pqxx::result class and support classes. + * + * pqxx::result represents the set of result rows from a database query. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/result instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ROW +#define PQXX_H_ROW + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/except.hxx" +#include "pqxx/field.hxx" +#include "pqxx/result.hxx" + +#include "pqxx/internal/concat.hxx" + +namespace pqxx::internal +{ +template class result_iter; +} // namespace pqxx::internal + + +namespace pqxx +{ +/// Reference to one row in a result. +/** A row represents one row (also called a row) in a query result set. + * It also acts as a container mapping column numbers or names to field + * values (see below): + * + * ```cxx + * cout << row["date"].c_str() << ": " << row["name"].c_str() << endl; + * ``` + * + * The row itself acts like a (non-modifyable) container, complete with its + * own const_iterator and const_reverse_iterator. + */ +class PQXX_LIBEXPORT row +{ +public: + using size_type = row_size_type; + using difference_type = row_difference_type; + using const_iterator = const_row_iterator; + using iterator = const_iterator; + using reference = field; + using pointer = const_row_iterator; + using const_reverse_iterator = const_reverse_row_iterator; + using reverse_iterator = const_reverse_iterator; + + row() noexcept = default; + row(row &&) noexcept = default; + row(row const &) noexcept = default; + row &operator=(row const &) noexcept = default; + row &operator=(row &&) noexcept = default; + + /** + * @name Comparison + */ + //@{ + [[nodiscard]] PQXX_PURE bool operator==(row const &) const noexcept; + [[nodiscard]] bool operator!=(row const &rhs) const noexcept + { + return not operator==(rhs); + } + //@} + + [[nodiscard]] const_iterator begin() const noexcept; + [[nodiscard]] const_iterator cbegin() const noexcept; + [[nodiscard]] const_iterator end() const noexcept; + [[nodiscard]] const_iterator cend() const noexcept; + + /** + * @name Field access + */ + //@{ + [[nodiscard]] reference front() const noexcept; + [[nodiscard]] reference back() const noexcept; + + // TODO: noexcept. Breaks ABI. + [[nodiscard]] const_reverse_row_iterator rbegin() const; + // TODO: noexcept. Breaks ABI. + [[nodiscard]] const_reverse_row_iterator crbegin() const; + // TODO: noexcept. Breaks ABI. + [[nodiscard]] const_reverse_row_iterator rend() const; + // TODO: noexcept. Breaks ABI. + [[nodiscard]] const_reverse_row_iterator crend() const; + + [[nodiscard]] reference operator[](size_type) const noexcept; + /** Address field by name. + * @warning This is much slower than indexing by number, or iterating. + */ + [[nodiscard]] reference operator[](zview col_name) const; + + reference at(size_type) const; + /** Address field by name. + * @warning This is much slower than indexing by number, or iterating. 
+ */ + reference at(zview col_name) const; + + [[nodiscard]] constexpr size_type size() const noexcept + { + return m_end - m_begin; + } + + [[deprecated("Swap iterators, not rows.")]] void swap(row &) noexcept; + + /// Row number, assuming this is a real row and not end()/rend(). + [[nodiscard]] constexpr result::size_type rownumber() const noexcept + { + return m_index; + } + + /** + * @name Column information + */ + //@{ + /// Number of given column (throws exception if it doesn't exist). + [[nodiscard]] size_type column_number(zview col_name) const; + + /// Return a column's type. + [[nodiscard]] oid column_type(size_type) const; + + /// Return a column's type. + [[nodiscard]] oid column_type(zview col_name) const + { + return column_type(column_number(col_name)); + } + + /// What table did this column come from? + [[nodiscard]] oid column_table(size_type col_num) const; + + /// What table did this column come from? + [[nodiscard]] oid column_table(zview col_name) const + { + return column_table(column_number(col_name)); + } + + /// What column number in its table did this result column come from? + /** A meaningful answer can be given only if the column in question comes + * directly from a column in a table. If the column is computed in any + * other way, a logic_error will be thrown. + * + * @param col_num a zero-based column number in this result set + * @return a zero-based column number in originating table + */ + [[nodiscard]] size_type table_column(size_type) const; + + /// What column number in its table did this result column come from? + [[nodiscard]] size_type table_column(zview col_name) const + { + return table_column(column_number(col_name)); + } + //@} + + [[nodiscard]] constexpr result::size_type num() const noexcept + { + return rownumber(); + } + + /** Produce a slice of this row, containing the given range of columns. + * + * @deprecated I haven't heard of anyone caring about row slicing at all in + * at least the last 15 years. Yet it adds complexity, so unless anyone + * files a bug explaining why they really need this feature, I'm going to + * remove it. Even if they do, the feature may need an update. + * + * The slice runs from the range's starting column to the range's end + * column, exclusive. It looks just like a normal result row, except + * slices can be empty. + */ + [[deprecated("Row slicing is going away. File a bug if you need it.")]] row + slice(size_type sbegin, size_type send) const; + + /// Is this a row without fields? Can only happen to a slice. + [[nodiscard, deprecated("Row slicing is going away.")]] PQXX_PURE bool + empty() const noexcept; + + /// Extract entire row's values into a tuple. + /** Converts to the types of the tuple's respective fields. + */ + template void to(Tuple &t) const + { + check_size(std::tuple_size_v); + convert(t); + } + + template std::tuple as() const + { + check_size(sizeof...(TYPE)); + using seq = std::make_index_sequence; + return get_tuple>(seq{}); + } + +protected: + friend class const_row_iterator; + friend class result; + row(result const &r, result_size_type index, size_type cols) noexcept; + + /// Throw @ref usage_error if row size is not `expected`. + void check_size(size_type expected) const + { + if (size() != expected) + throw usage_error{internal::concat( + "Tried to extract ", expected, " field(s) from a row of ", size(), + ".")}; + } + + /// Convert to a given tuple of values, don't check sizes. + /** We need this for cases where we have a full tuple of field types, but + * not a parameter pack. 
+ */ + template TUPLE as_tuple() const + { + using seq = std::make_index_sequence>; + return get_tuple(seq{}); + } + + template friend class pqxx::internal::result_iter; + /// Convert entire row to tuple fields, without checking row size. + template void convert(Tuple &t) const + { + extract_fields(t, std::make_index_sequence>{}); + } + + friend class field; + + /// Result set of which this is one row. + result m_result; + + /// Row number. + /** + * You'd expect this to be unsigned, but due to the way reverse iterators + * are related to regular iterators, it must be allowed to underflow to -1. + */ + result::size_type m_index = 0; + + // TODO: Remove m_begin and (if possible) m_end when we remove slice(). + /// First column in slice. This row ignores lower-numbered columns. + size_type m_begin = 0; + /// End column in slice. This row only sees lower-numbered columns. + size_type m_end = 0; + +private: + template + void extract_fields(Tuple &t, std::index_sequence) const + { + (extract_value(t), ...); + } + + template + void extract_value(Tuple &t) const; + + /// Convert row's values as a new tuple. + template + auto get_tuple(std::index_sequence) const + { + return std::make_tuple(get_field()...); + } + + /// Extract and convert a field. + template auto get_field() const + { + return (*this)[index].as>(); + } +}; + + +/// Iterator for fields in a row. Use as row::const_iterator. +class PQXX_LIBEXPORT const_row_iterator : public field +{ +public: + using iterator_category = std::random_access_iterator_tag; + using value_type = field const; + using pointer = field const *; + using size_type = row_size_type; + using difference_type = row_difference_type; + using reference = field; + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + const_row_iterator() = default; +#include "pqxx/internal/ignore-deprecated-post.hxx" + const_row_iterator(row const &t, row_size_type c) noexcept : + field{t.m_result, t.m_index, c} + {} + const_row_iterator(field const &F) noexcept : field{F} {} + const_row_iterator(const_row_iterator const &) noexcept = default; + const_row_iterator(const_row_iterator &&) noexcept = default; + + /** + * @name Dereferencing operators + */ + //@{ + [[nodiscard]] constexpr pointer operator->() const noexcept { return this; } + [[nodiscard]] reference operator*() const noexcept { return {*this}; } + //@} + + /** + * @name Manipulations + */ + //@{ + const_row_iterator &operator=(const_row_iterator const &) noexcept = default; + const_row_iterator &operator=(const_row_iterator &&) noexcept = default; + + // TODO: noexcept. Breaks ABI. + const_row_iterator operator++(int); + const_row_iterator &operator++() noexcept + { + ++m_col; + return *this; + } + // TODO: noexcept. Breaks ABI. 
+ const_row_iterator operator--(int); + const_row_iterator &operator--() noexcept + { + --m_col; + return *this; + } + + const_row_iterator &operator+=(difference_type i) noexcept + { + m_col = size_type(difference_type(m_col) + i); + return *this; + } + const_row_iterator &operator-=(difference_type i) noexcept + { + m_col = size_type(difference_type(m_col) - i); + return *this; + } + //@} + + /** + * @name Comparisons + */ + //@{ + [[nodiscard]] constexpr bool + operator==(const_row_iterator const &i) const noexcept + { + return col() == i.col(); + } + [[nodiscard]] constexpr bool + operator!=(const_row_iterator const &i) const noexcept + { + return col() != i.col(); + } + [[nodiscard]] constexpr bool + operator<(const_row_iterator const &i) const noexcept + { + return col() < i.col(); + } + [[nodiscard]] constexpr bool + operator<=(const_row_iterator const &i) const noexcept + { + return col() <= i.col(); + } + [[nodiscard]] constexpr bool + operator>(const_row_iterator const &i) const noexcept + { + return col() > i.col(); + } + [[nodiscard]] constexpr bool + operator>=(const_row_iterator const &i) const noexcept + { + return col() >= i.col(); + } + //@} + + /** + * @name Arithmetic operators + */ + //@{ + [[nodiscard]] inline const_row_iterator + operator+(difference_type) const noexcept; + + friend const_row_iterator + operator+(difference_type, const_row_iterator const &) noexcept; + + [[nodiscard]] inline const_row_iterator + operator-(difference_type) const noexcept; + [[nodiscard]] inline difference_type + operator-(const_row_iterator const &) const noexcept; + //@} +}; + + +/// Reverse iterator for a row. Use as row::const_reverse_iterator. +class PQXX_LIBEXPORT const_reverse_row_iterator : private const_row_iterator +{ +public: + using super = const_row_iterator; + using iterator_type = const_row_iterator; + using iterator_type::difference_type; + using iterator_type::iterator_category; + using iterator_type::pointer; + using value_type = iterator_type::value_type; + using reference = iterator_type::reference; + + const_reverse_row_iterator() noexcept = default; + const_reverse_row_iterator(const_reverse_row_iterator const &) noexcept = + default; + const_reverse_row_iterator(const_reverse_row_iterator &&) noexcept = default; + + explicit const_reverse_row_iterator(super const &rhs) noexcept : + const_row_iterator{rhs} + { + super::operator--(); + } + + [[nodiscard]] PQXX_PURE iterator_type base() const noexcept; + + /** + * @name Dereferencing operators + */ + //@{ + using iterator_type::operator->; + using iterator_type::operator*; + //@} + + /** + * @name Manipulations + */ + //@{ + const_reverse_row_iterator & + operator=(const_reverse_row_iterator const &r) noexcept + { + iterator_type::operator=(r); + return *this; + } + const_reverse_row_iterator operator++() noexcept + { + iterator_type::operator--(); + return *this; + } + // TODO: noexcept. Breaks ABI. + const_reverse_row_iterator operator++(int); + const_reverse_row_iterator &operator--() noexcept + { + iterator_type::operator++(); + return *this; + } + const_reverse_row_iterator operator--(int); + // TODO: noexcept. Breaks ABI. 
+ const_reverse_row_iterator &operator+=(difference_type i) noexcept + { + iterator_type::operator-=(i); + return *this; + } + const_reverse_row_iterator &operator-=(difference_type i) noexcept + { + iterator_type::operator+=(i); + return *this; + } + //@} + + /** + * @name Arithmetic operators + */ + //@{ + [[nodiscard]] const_reverse_row_iterator + operator+(difference_type i) const noexcept + { + return const_reverse_row_iterator{base() - i}; + } + [[nodiscard]] const_reverse_row_iterator + operator-(difference_type i) noexcept + { + return const_reverse_row_iterator{base() + i}; + } + [[nodiscard]] difference_type + operator-(const_reverse_row_iterator const &rhs) const noexcept + { + return rhs.const_row_iterator::operator-(*this); + } + //@} + + /** + * @name Comparisons + */ + //@{ + [[nodiscard]] bool + operator==(const_reverse_row_iterator const &rhs) const noexcept + { + return iterator_type::operator==(rhs); + } + [[nodiscard]] bool + operator!=(const_reverse_row_iterator const &rhs) const noexcept + { + return !operator==(rhs); + } + + [[nodiscard]] constexpr bool + operator<(const_reverse_row_iterator const &rhs) const noexcept + { + return iterator_type::operator>(rhs); + } + [[nodiscard]] constexpr bool + operator<=(const_reverse_row_iterator const &rhs) const noexcept + { + return iterator_type::operator>=(rhs); + } + [[nodiscard]] constexpr bool + operator>(const_reverse_row_iterator const &rhs) const noexcept + { + return iterator_type::operator<(rhs); + } + [[nodiscard]] constexpr bool + operator>=(const_reverse_row_iterator const &rhs) const noexcept + { + return iterator_type::operator<=(rhs); + } + //@} +}; + + +const_row_iterator +const_row_iterator::operator+(difference_type o) const noexcept +{ + // TODO:: More direct route to home().columns()? + return { + row{home(), idx(), home().columns()}, + size_type(difference_type(col()) + o)}; +} + +inline const_row_iterator operator+( + const_row_iterator::difference_type o, const_row_iterator const &i) noexcept +{ + return i + o; +} + +inline const_row_iterator +const_row_iterator::operator-(difference_type o) const noexcept +{ + // TODO:: More direct route to home().columns()? + return { + row{home(), idx(), home().columns()}, + size_type(difference_type(col()) - o)}; +} + +inline const_row_iterator::difference_type +const_row_iterator::operator-(const_row_iterator const &i) const noexcept +{ + return difference_type(num() - i.num()); +} + + +template +inline void row::extract_value(Tuple &t) const +{ + using field_type = strip_t(t))>; + field const f{m_result, m_index, index}; + std::get(t) = from_string(f); +} +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/separated_list b/ext/libpqxx-7.7.3/include/pqxx/separated_list new file mode 100644 index 000000000..1bdf51c6a --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/separated_list @@ -0,0 +1,6 @@ +/** Helper similar to Python's @c str.join(). + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/separated_list.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/separated_list.hxx b/ext/libpqxx-7.7.3/include/pqxx/separated_list.hxx new file mode 100644 index 000000000..d4230ea08 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/separated_list.hxx @@ -0,0 +1,142 @@ +/* Helper similar to Python's `str.join()`. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/separated_list instead. 
+ * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_SEPARATED_LIST +#define PQXX_H_SEPARATED_LIST + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include + +#include "pqxx/strconv.hxx" + +// C++20: Simplify using std::ranges::range. +// C++20: Optimise buffer allocation using random_access_range/iterator. +namespace pqxx +{ +/** + * @defgroup utility Utility functions + */ +//@{ + +/// Represent sequence of values as a string, joined by a given separator. +/** + * Use this to turn e.g. the numbers 1, 2, and 3 into a string "1, 2, 3". + * + * @param sep separator string (to be placed between items) + * @param begin beginning of items sequence + * @param end end of items sequence + * @param access functor defining how to dereference sequence elements + */ +template +[[nodiscard]] inline std::string +separated_list(std::string_view sep, ITER begin, ITER end, ACCESS access) +{ + if (end == begin) + return {}; + auto next{begin}; + ++next; + if (next == end) + return to_string(access(begin)); + + // From here on, we've got at least 2 elements -- meaning that we need sep. + using elt_type = strip_t; + using traits = string_traits; + + std::size_t budget{0}; + for (ITER cnt{begin}; cnt != end; ++cnt) + budget += traits::size_buffer(access(cnt)); + budget += + static_cast(std::distance(begin, end)) * std::size(sep); + + std::string result; + result.resize(budget); + + char *const data{result.data()}; + char *here{data}; + char *stop{data + budget}; + here = traits::into_buf(here, stop, access(begin)) - 1; + for (++begin; begin != end; ++begin) + { + here += sep.copy(here, std::size(sep)); + here = traits::into_buf(here, stop, access(begin)) - 1; + } + result.resize(static_cast(here - data)); + return result; +} + + +/// Render sequence as a string, using given separator between items. +template +[[nodiscard]] inline std::string +separated_list(std::string_view sep, ITER begin, ITER end) +{ + return separated_list(sep, begin, end, [](ITER i) { return *i; }); +} + + +/// Render items in a container as a string, using given separator. +template +[[nodiscard]] inline auto +separated_list(std::string_view sep, CONTAINER const &c) + /* + Always std::string; necessary because SFINAE doesn't work with the + contents of function bodies, so the check for iterability has to be in + the signature. + */ + -> typename std::enable_if< + (not std::is_void::value and + not std::is_void::value), + std::string>::type +{ + return separated_list(sep, std::begin(c), std::end(c)); +} + + +/// Render items in a tuple as a string, using given separator. 
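+/// (A usage sketch for the container overload defined above:
+/// `pqxx::separated_list(" + ", std::vector<int>{1, 2, 3})` yields `"1 + 2 + 3"`.)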
+template< + typename TUPLE, std::size_t INDEX = 0, typename ACCESS, + typename std::enable_if< + (INDEX == std::tuple_size::value - 1), int>::type = 0> +[[nodiscard]] inline std::string separated_list( + std::string_view /* sep */, TUPLE const &t, ACCESS const &access) +{ + return to_string(access(&std::get(t))); +} + +template< + typename TUPLE, std::size_t INDEX = 0, typename ACCESS, + typename std::enable_if< + (INDEX < std::tuple_size::value - 1), int>::type = 0> +[[nodiscard]] inline std::string +separated_list(std::string_view sep, TUPLE const &t, ACCESS const &access) +{ + std::string out{to_string(access(&std::get(t)))}; + out.append(sep); + out.append(separated_list(sep, t, access)); + return out; +} + +template< + typename TUPLE, std::size_t INDEX = 0, + typename std::enable_if< + (INDEX <= std::tuple_size::value), int>::type = 0> +[[nodiscard]] inline std::string +separated_list(std::string_view sep, TUPLE const &t) +{ + // TODO: Optimise allocation. + return separated_list(sep, t, [](TUPLE const &tup) { return *tup; }); +} +//@} +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/strconv b/ext/libpqxx-7.7.3/include/pqxx/strconv new file mode 100644 index 000000000..aa2c40ed5 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/strconv @@ -0,0 +1,6 @@ +/** String conversion definitions. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/strconv.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/strconv.hxx b/ext/libpqxx-7.7.3/include/pqxx/strconv.hxx new file mode 100644 index 000000000..863711228 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/strconv.hxx @@ -0,0 +1,468 @@ +/* String conversion definitions. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/stringconv instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_STRCONV +#define PQXX_H_STRCONV + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include +#include +#include +#include + +#if __has_include() +# include +#endif + +#if defined(PQXX_HAVE_RANGES) && __has_include() +# include +#endif + +#include "pqxx/except.hxx" +#include "pqxx/util.hxx" +#include "pqxx/zview.hxx" + + +namespace pqxx::internal +{ +/// Attempt to demangle @c std::type_info::name() to something human-readable. +PQXX_LIBEXPORT std::string demangle_type_name(char const[]); +} // namespace pqxx::internal + + +namespace pqxx +{ +/** + * @defgroup stringconversion String conversion + * + * The PostgreSQL server accepts and represents data in string form. It has + * its own formats for various data types. The string conversions define how + * various C++ types translate to and from their respective PostgreSQL text + * representations. + * + * Each conversion is defined by a specialisations of @c string_traits. It + * gets complicated if you want top performance, but until you do, all you + * really need to care about when converting values between C++ in-memory + * representations such as @c int and the postgres string representations is + * the @c pqxx::to_string and @c pqxx::from_string functions. 
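+ *
+ * A minimal sketch:
+ * ```cxx
+ * int n{pqxx::from_string<int>("42")};
+ * std::string s{pqxx::to_string(n + 1)};  // "43"
+ * ```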
+ * + * If you need to convert a type which is not supported out of the box, you'll + * need to define your own specialisations for these templates, similar to the + * ones defined here and in `pqxx/conversions.hxx`. Any conversion code which + * "sees" your specialisation will now support your conversion. In particular, + * you'll be able to read result fields into a variable of the new type. + * + * There is a macro to help you define conversions for individual enumeration + * types. The conversion will represent enumeration values as numeric strings. + */ +//@{ + +/// A human-readable name for a type, used in error messages and such. +/** Actually this may not always be very user-friendly. It uses + * @c std::type_info::name(). On gcc-like compilers we try to demangle its + * output. Visual Studio produces human-friendly names out of the box. + * + * This variable is not inline. Inlining it gives rise to "memory leak" + * warnings from asan, the address sanitizer, possibly from use of + * @c std::type_info::name. + */ +template +std::string const type_name{internal::demangle_type_name(typeid(TYPE).name())}; + + +/// Traits describing a type's "null value," if any. +/** Some C++ types have a special value or state which correspond directly to + * SQL's NULL. + * + * The @c nullness traits describe whether it exists, and whether a particular + * value is null. + */ +template struct nullness +{ + /// Does this type have a null value? + static bool has_null; + + /// Is this type always null? + static bool always_null; + + /// Is @c value a null? + static bool is_null(TYPE const &value); + + /// Return a null value. + /** Don't use this in generic code to compare a value and see whether it is + * null. Some types may have multiple null values which do not compare as + * equal, or may define a null value which is not equal to anything including + * itself, like in SQL. + */ + [[nodiscard]] static TYPE null(); +}; + + +/// Nullness traits describing a type which does not have a null value. +template struct no_null +{ + /// Does @c TYPE have a "built-in null value"? + /** For example, a pointer can equal @c nullptr, which makes a very natural + * representation of an SQL null value. For such types, the code sometimes + * needs to make special allowances. + * + * for most types, such as @c int or @c std::string, there is no built-in + * null. If you want to represent an SQL null value for such a type, you + * would have to wrap it in something that does have a null value. For + * example, you could use @c std::optional for "either an @c int or a + * null value." + */ + static constexpr bool has_null = false; + + /// Are all values of this type null? + /** There are a few special C++ types which are always null - mainly + * @c std::nullptr_t. + */ + static constexpr bool always_null = false; + + /// Does a given value correspond to an SQL null value? + /** Most C++ types, such as @c int or @c std::string, have no inherent null + * value. But some types such as C-style string pointers do have a natural + * equivalent to an SQL null. + */ + [[nodiscard]] static constexpr bool is_null(TYPE const &) noexcept + { + return false; + } +}; + + +/// Traits class for use in string conversions. +/** Specialize this template for a type for which you wish to add to_string + * and from_string support. + * + * String conversions are not meant to work for nulls. Check for null before + * converting a value of @c TYPE to a string, or vice versa. 
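+ *
+ * As a rough sketch of what a user-defined specialisation can look like,
+ * here is a hypothetical `point` type rendered as "x,y".  The type, the
+ * format, and the trick of delegating to the `double` conversions are all
+ * assumptions for illustration only, not part of libpqxx:
+ * ```cxx
+ * struct point { double x, y; };
+ *
+ * namespace pqxx
+ * {
+ * template<> struct nullness<point> : no_null<point> {};
+ *
+ * template<> struct string_traits<point>
+ * {
+ *   static zview to_buf(char *begin, char *end, point const &value)
+ *   { return generic_to_buf(begin, end, value); }
+ *
+ *   static char *into_buf(char *begin, char *end, point const &value)
+ *   {
+ *     // Write x, overwrite its terminating zero with a comma, then write y.
+ *     char *here{string_traits<double>::into_buf(begin, end, value.x)};
+ *     here[-1] = ',';
+ *     return string_traits<double>::into_buf(here, end, value.y);
+ *   }
+ *
+ *   static point from_string(std::string_view text)
+ *   {
+ *     auto const comma{text.find(',')};
+ *     if (comma == std::string_view::npos)
+ *       throw conversion_error{"Expected 'x,y' for point."};
+ *     return {
+ *       pqxx::from_string<double>(text.substr(0, comma)),
+ *       pqxx::from_string<double>(text.substr(comma + 1))};
+ *   }
+ *
+ *   static std::size_t size_buffer(point const &value) noexcept
+ *   {
+ *     // Slightly pessimistic: each part's estimate already includes a zero.
+ *     return string_traits<double>::size_buffer(value.x) +
+ *            string_traits<double>::size_buffer(value.y) + 1;
+ *   }
+ * };
+ * } // namespace pqxx
+ * ```
+ * With that in place, `pqxx::to_string(point{1.5, 2.5})` produces "1.5,2.5",
+ * and `pqxx::from_string<point>("1.5,2.5")` parses it back.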
+ */ +template struct string_traits +{ + /// Return a @c string_view representing value, plus terminating zero. + /** Produces a @c string_view containing the PostgreSQL string representation + * for @c value. + * + * Uses the space from @c begin to @c end as a buffer, if needed. The + * returned string may lie somewhere in that buffer, or it may be a + * compile-time constant, or it may be null if value was a null value. Even + * if the string is stored in the buffer, its @c begin() may or may not be + * the same as @c begin. + * + * The @c string_view is guaranteed to be valid as long as the buffer from + * @c begin to @c end remains accessible and unmodified. + * + * @throws pqxx::conversion_overrun if the provided buffer space may not be + * enough. For maximum performance, this is a conservative estimate. It may + * complain about a buffer which is actually large enough for your value, if + * an exact check gets too expensive. + */ + [[nodiscard]] static inline zview + to_buf(char *begin, char *end, TYPE const &value); + + /// Write value's string representation into buffer at @c begin. + /** Assumes that value is non-null. + * + * Writes value's string representation into the buffer, starting exactly at + * @c begin, and ensuring a trailing zero. Returns the address just beyond + * the trailing zero, so the caller could use it as the @c begin for another + * call to @c into_buf writing a next value. + */ + static inline char *into_buf(char *begin, char *end, TYPE const &value); + + /// Parse a string representation of a @c TYPE value. + /** Throws @c conversion_error if @c value does not meet the expected format + * for a value of this type. + */ + [[nodiscard]] static inline TYPE from_string(std::string_view text); + + // C++20: Can we make these all constexpr? + /// Estimate how much buffer space is needed to represent value. + /** The estimate may be a little pessimistic, if it saves time. + * + * The estimate includes the terminating zero. + */ + [[nodiscard]] static inline std::size_t + size_buffer(TYPE const &value) noexcept; +}; + + +/// Nullness: Enums do not have an inherent null value. +template +struct nullness>> : no_null +{}; +} // namespace pqxx + + +namespace pqxx::internal +{ +/// Helper class for defining enum conversions. +/** The conversion will convert enum values to numeric strings, and vice versa. + * + * To define a string conversion for an enum type, derive a @c string_traits + * specialisation for the enum from this struct. + * + * There's usually an easier way though: the @c PQXX_DECLARE_ENUM_CONVERSION + * macro. Use @c enum_traits manually only if you need to customise your + * traits type in more detail. + */ +template struct enum_traits +{ + using impl_type = std::underlying_type_t; + using impl_traits = string_traits; + + [[nodiscard]] static constexpr zview + to_buf(char *begin, char *end, ENUM const &value) + { + return impl_traits::to_buf(begin, end, to_underlying(value)); + } + + static constexpr char *into_buf(char *begin, char *end, ENUM const &value) + { + return impl_traits::into_buf(begin, end, to_underlying(value)); + } + + [[nodiscard]] static ENUM from_string(std::string_view text) + { + return static_cast(impl_traits::from_string(text)); + } + + [[nodiscard]] static std::size_t size_buffer(ENUM const &value) noexcept + { + return impl_traits::size_buffer(to_underlying(value)); + } + +private: + // C++23: Replace with std::to_underlying. 
+ static constexpr impl_type to_underlying(ENUM const &value) noexcept + { + return static_cast(value); + } +}; +} // namespace pqxx::internal + + +/// Macro: Define a string conversion for an enum type. +/** This specialises the @c pqxx::string_traits template, so use it in the + * @c ::pqxx namespace. + * + * For example: + * + * #include + * #include + * enum X { xa, xb }; + * namespace pqxx { PQXX_DECLARE_ENUM_CONVERSION(x); } + * int main() { std::cout << pqxx::to_string(xa) << std::endl; } + */ +#define PQXX_DECLARE_ENUM_CONVERSION(ENUM) \ + template<> struct string_traits : pqxx::internal::enum_traits \ + {}; \ + template<> inline std::string const type_name { #ENUM } + + +namespace pqxx +{ +/// Parse a value in postgres' text format as a TYPE. +/** If the form of the value found in the string does not match the expected + * type, e.g. if a decimal point is found when converting to an integer type, + * the conversion fails. Overflows (e.g. converting "9999999999" to a 16-bit + * C++ type) are also treated as errors. If in some cases this behaviour + * should be inappropriate, convert to something bigger such as @c long @c int + * first and then truncate the resulting value. + * + * Only the simplest possible conversions are supported. Fancy features like + * hexadecimal or octal, spurious signs, or exponent notation won't work. + * Whitespace is not stripped away. Only the kinds of strings that come out of + * PostgreSQL and out of to_string() can be converted. + */ +template +[[nodiscard]] inline TYPE from_string(std::string_view text) +{ + return string_traits::from_string(text); +} + + +/// "Convert" a std::string_view to a std::string_view. +/** Just returns its input. + * + * @warning Of course the result is only valid for as long as the original + * string remains valid! Never access the string referenced by the return + * value after the original has been destroyed. + */ +template<> +[[nodiscard]] inline std::string_view from_string(std::string_view text) +{ + return text; +} + + +/// Attempt to convert postgres-generated string to given built-in object. +/** This is like the single-argument form of the function, except instead of + * returning the value, it sets @c value. + * + * You may find this more convenient in that it infers the type you want from + * the argument you pass. But there are disadvantages: it requires an + * assignment operator, and it may be less efficient. + */ +template inline void from_string(std::string_view text, T &value) +{ + value = from_string(text); +} + + +/// Convert a value to a readable string that PostgreSQL will understand. +/** The conversion does no special formatting, and ignores any locale settings. + * The resulting string will be human-readable and in a format suitable for use + * in SQL queries. It won't have niceties such as "thousands separators" + * though. + */ +template inline std::string to_string(TYPE const &value); + + +/// Convert multiple values to strings inside a single buffer. +/** There must be enough room for all values, or this will throw + * @c conversion_overrun. You can obtain a conservative estimate of the buffer + * space required by calling @c size_buffer() on the values. + * + * The @c std::string_view results may point into the buffer, so don't assume + * that they will remain valid after you destruct or move the buffer. + */ +template +[[nodiscard]] inline std::vector +to_buf(char *here, char const *end, TYPE... 
value) +{ + return {[&here, end](auto v) { + auto begin = here; + here = string_traits::into_buf(begin, end, v); + // Exclude the trailing zero out of the string_view. + auto len{static_cast(here - begin) - 1}; + return std::string_view{begin, len}; + }(value)...}; +} + +/// Convert a value to a readable string that PostgreSQL will understand. +/** This variant of to_string can sometimes save a bit of time in loops, by + * re-using a std::string for multiple conversions. + */ +template +inline void into_string(TYPE const &value, std::string &out); + + +/// Is @c value null? +template +[[nodiscard]] inline constexpr bool is_null(TYPE const &value) noexcept +{ + return nullness>::is_null(value); +} + + +/// Estimate how much buffer space is needed to represent values as a string. +/** The estimate may be a little pessimistic, if it saves time. It also + * includes room for a terminating zero after each value. + */ +template +[[nodiscard]] inline std::size_t size_buffer(TYPE const &...value) noexcept +{ + return (string_traits>::size_buffer(value) + ...); +} + + +/// Does this type translate to an SQL array? +/** Specialisations may override this to be true for container types. + * + * This may not always be a black-and-white choice. For instance, a + * @c std::string is a container, but normally it translates to an SQL string, + * not an SQL array. + */ +template inline constexpr bool is_sql_array{false}; + + +/// Can we use this type in arrays and composite types without quoting them? +/** Define this as @c true only if values of @c TYPE can never contain any + * special characters that might need escaping or confuse the parsing of array + * or composite * types, such as commas, quotes, parentheses, braces, newlines, + * and so on. + * + * When converting a value of such a type to a string in an array or a field in + * a composite type, we do not need to add quotes, nor escape any special + * characters. + * + * This is just an optimisation, so it defaults to @c false to err on the side + * of slow correctness. + */ +template inline constexpr bool is_unquoted_safe{false}; + + +/// Element separator between SQL array elements of this type. +template inline constexpr char array_separator{','}; + + +/// What's the preferred format for passing non-null parameters of this type? +/** This affects how we pass parameters of @c TYPE when calling parameterised + * statements or prepared statements. + * + * Generally we pass parameters in text format, but binary strings are the + * exception. We also pass nulls in binary format, so this function need not + * handle null values. + */ +template inline constexpr format param_format(TYPE const &) +{ + return format::text; +} + + +/// Implement @c string_traits::to_buf by calling @c into_buf. +/** When you specialise @c string_traits for a new type, most of the time its + * @c to_buf implementation has no special optimisation tricks and just writes + * its text into the buffer it receives from the caller, starting at the + * beginning. + * + * In that common situation, you can implement @c to_buf as just a call to + * @c generic_to_buf. It will call @c into_buf and return the right result for + * @c to_buf. + */ +template +inline zview generic_to_buf(char *begin, char *end, TYPE const &value) +{ + using traits = string_traits; + // The trailing zero does not count towards the zview's size, so subtract 1 + // from the result we get from into_buf(). 
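+ // (E.g. for a value rendered as "42\0", into_buf() returns begin + 3 and the
+ // resulting zview covers only the two characters "42".)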
+ if (is_null(value)) + return {}; + else + return {begin, traits::into_buf(begin, end, value) - begin - 1}; +} + + +#if defined(PQXX_HAVE_CONCEPTS) +/// Concept: Binary string, akin to @c std::string for binary data. +/** Any type that satisfies this concept can represent an SQL BYTEA value. + * + * A @c binary has a @c begin(), @c end(), @c size(), and @data(). Each byte + * is a @c std::byte, and they must all be laid out contiguously in memory so + * we can reference them by a pointer. + */ +template +concept binary = std::ranges::contiguous_range and + std::is_same_v>, std::byte>; +#endif +//@} +} // namespace pqxx + + +#include "pqxx/internal/conversions.hxx" +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/stream_from b/ext/libpqxx-7.7.3/include/pqxx/stream_from new file mode 100644 index 000000000..972762443 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/stream_from @@ -0,0 +1,8 @@ +/** pqxx::stream_from class. + * + * pqxx::stream_from enables optimized batch reads from a database table. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/stream_from.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/stream_from.hxx b/ext/libpqxx-7.7.3/include/pqxx/stream_from.hxx new file mode 100644 index 000000000..ff4a93d2e --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/stream_from.hxx @@ -0,0 +1,361 @@ +/* Definition of the pqxx::stream_from class. + * + * pqxx::stream_from enables optimized batch reads from a database table. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/stream_from instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_STREAM_FROM +#define PQXX_H_STREAM_FROM + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include + +#include "pqxx/connection.hxx" +#include "pqxx/except.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/internal/encoding_group.hxx" +#include "pqxx/internal/stream_iterator.hxx" +#include "pqxx/separated_list.hxx" +#include "pqxx/transaction_focus.hxx" + + +namespace pqxx +{ +class transaction_base; + + +/// Pass this to a `stream_from` constructor to stream table contents. +/** @deprecated Use @ref stream_from::table() instead. + */ +constexpr from_table_t from_table; +/// Pass this to a `stream_from` constructor to stream query results. +/** @deprecated Use stream_from::query() instead. + */ +constexpr from_query_t from_query; + + +/// Stream data from the database. +/** For larger data sets, retrieving data this way is likely to be faster than + * executing a query and then iterating and converting the rows fields. You + * will also be able to start processing before all of the data has come in. + * + * There are also downsides. Not all kinds of query will work in a stream. + * But straightforward `SELECT` and `UPDATE ... RETURNING` queries should work. + * This function makes use of @ref pqxx::stream_from, which in turn uses + * PostgreSQL's `COPY` command, so see the documentation for those to get the + * full details. + * + * There are other downsides. If there stream encounters an error, it may + * leave the entire connection in an unusable state, so you'll have to give the + * whole thing up. 
Finally, opening a stream puts the connection in a special + * state, so you won't be able to do many other things with the connection or + * the transaction while the stream is open. + * + * There are two ways of starting a stream: you stream either all rows in a + * table (using one of the factories, `table()` or `raw_table()`), or the + * results of a query (using the `query()` factory). + * + * Usually you'll want the `stream` convenience wrapper in + * @ref transaction_base, * so you don't need to deal with this class directly. + * + * @warning While a stream is active, you cannot execute queries, open a + * pipeline, etc. on the same transaction. A transaction can have at most one + * object of a type derived from @ref pqxx::transaction_focus active on it at a + * time. + */ +class PQXX_LIBEXPORT stream_from : transaction_focus +{ +public: + using raw_line = + std::pair>, std::size_t>; + + /// Factory: Execute query, and stream the results. + /** The query can be a SELECT query or a VALUES query; or it can be an + * UPDATE, INSERT, or DELETE with a RETURNING clause. + * + * The query is executed as part of a COPY statement, so there are additional + * restrictions on what kind of query you can use here. See the PostgreSQL + * documentation for the COPY command: + * + * https://www.postgresql.org/docs/current/sql-copy.html + */ + static stream_from query(transaction_base &tx, std::string_view q) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return {tx, from_query, q}; +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /** + * @name Streaming data from tables + * + * You can use `stream_from` to read a table's contents. This is a quick + * and easy way to read a table, but it comes with limitations. It cannot + * stream from a view, only from a table. It does not support conditions. + * And there are no guarantees about ordering. If you need any of those + * things, consider streaming from a query instead. + */ + //@{ + + /// Factory: Stream data from a pre-quoted table and columns. + /** Use this factory if you need to create multiple streams using the same + * table path and/or columns list, and you want to save a bit of work on + * composing the internal SQL statement for starting the stream. It lets you + * compose the string representations for the table path and the columns + * list, so you can compute these once and then re-use them later. + * + * @param tx The transaction within which the stream will operate. + * @param path Name or path for the table upon which the stream will + * operate. If any part of the table path may contain special + * characters or be case-sensitive, quote the path using + * pqxx::connection::quote_table(). + * @param columns Columns which the stream will read. They should be + * comma-separated and, if needed, quoted. You can produce the string + * using pqxx::connection::quote_columns(). If you omit this argument, + * the stream will read all columns in the table, in schema order. + */ + static stream_from raw_table( + transaction_base &tx, std::string_view path, + std::string_view columns = ""sv); + + /// Factory: Stream data from a given table. + /** This is the convenient way to stream from a table. + */ + static stream_from table( + transaction_base &tx, table_path path, + std::initializer_list columns = {}); + //@} + + /// Execute query, and stream over the results. + /** @deprecated Use factory function @ref query instead. 
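+   *
+   * As a rough sketch, the recommended replacement looks like this (the
+   * connection `conn` and the `item` table are assumptions made purely for
+   * illustration):
+   *
+   * ```cxx
+   * pqxx::work tx{conn};
+   * auto stream{pqxx::stream_from::query(tx, "SELECT id, name FROM item")};
+   * std::tuple<int, std::string> row;
+   * while (stream >> row)
+   *   std::cout << std::get<0>(row) << '\t' << std::get<1>(row) << '\n';
+   * stream.complete();
+   * ```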
+ */ + [[deprecated("Use query() factory instead.")]] stream_from( + transaction_base &, from_query_t, std::string_view query); + + /// Stream all rows in table, all columns. + /** @deprecated Use factories @ref table or @ref raw_table instead. + */ + [[deprecated("Use table() or raw_table() factory instead.")]] stream_from( + transaction_base &, from_table_t, std::string_view table); + + /// Stream given columns from all rows in table. + /** @deprecated Use factories @ref table or @ref raw_table instead. + */ + template + [[deprecated("Use table() or raw_table() factory instead.")]] stream_from( + transaction_base &, from_table_t, std::string_view table, + Iter columns_begin, Iter columns_end); + + /// Stream given columns from all rows in table. + /** @deprecated Use factory function @ref query instead. + */ + template + [[deprecated("Use table() or raw_table() factory instead.")]] stream_from( + transaction_base &tx, from_table_t, std::string_view table, + Columns const &columns); + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + /// @deprecated Use factories @ref table or @ref raw_table instead. + [[deprecated("Use the from_table_t overload instead.")]] stream_from( + transaction_base &tx, std::string_view table) : + stream_from{tx, from_table, table} + {} +#include "pqxx/internal/ignore-deprecated-post.hxx" + + /// @deprecated Use factories @ref table or @ref raw_table instead. + template + [[deprecated("Use the from_table_t overload instead.")]] stream_from( + transaction_base &tx, std::string_view table, Columns const &columns) : + stream_from{tx, from_table, table, columns} + {} + + /// @deprecated Use factories @ref table or @ref raw_table instead. + template + [[deprecated("Use the from_table_t overload instead.")]] stream_from( + transaction_base &, std::string_view table, Iter columns_begin, + Iter columns_end); + + ~stream_from() noexcept; + + /// May this stream still produce more data? + [[nodiscard]] constexpr operator bool() const noexcept + { + return not m_finished; + } + /// Has this stream produced all the data it is going to produce? + [[nodiscard]] constexpr bool operator!() const noexcept + { + return m_finished; + } + + /// Finish this stream. Call this before continuing to use the connection. + /** Consumes all remaining lines, and closes the stream. + * + * This may take a while if you're abandoning the stream before it's done, so + * skip it in error scenarios where you're not planning to use the connection + * again afterwards. + */ + void complete(); + + /// Read one row into a tuple. + /** Converts the row's fields into the fields making up the tuple. + * + * For a column which can contain nulls, be sure to give the corresponding + * tuple field a type which can be null. For example, to read a field as + * `int` when it may contain nulls, read it as `std::optional`. + * Using `std::shared_ptr` or `std::unique_ptr` will also work. + */ + template stream_from &operator>>(Tuple &); + + /// Doing this with a `std::variant` is going to be horrifically borked. + template + stream_from &operator>>(std::variant &) = delete; + + /// Iterate over this stream. Supports range-based "for" loops. + /** Produces an input iterator over the stream. + * + * Do not call this yourself. Use it like "for (auto data : stream.iter())". + */ + template [[nodiscard]] auto iter() & + { + return pqxx::internal::stream_input_iteration{*this}; + } + + /// Read a row. Return fields as views, valid until you read the next row. 
+ /** Returns `nullptr` when there are no more rows to read. Do not attempt + * to read any further rows after that. + * + * Do not access the vector, or the storage referenced by the views, after + * closing or completing the stream, or after attempting to read a next row. + * + * A @ref pqxx::zview is like a `std::string_view`, but with the added + * guarantee that if its data pointer is non-null, the string is followed by + * a terminating zero (which falls just outside the view itself). + * + * If any of the views' data pointer is null, that means that the + * corresponding SQL field is null. + * + * @warning The return type may change in the future, to support C++20 + * coroutine-based usage. + */ + std::vector const *read_row() &; + + /// Read a raw line of text from the COPY command. + /** @warning Do not use this unless you really know what you're doing. */ + raw_line get_raw_line(); + +private: + // TODO: Clean up this signature once we cull the deprecated constructors. + /// @deprecated + stream_from( + transaction_base &tx, std::string_view table, std::string_view columns, + from_table_t); + + // TODO: Clean up this signature once we cull the deprecated constructors. + /// @deprecated + stream_from( + transaction_base &, std::string_view unquoted_table, + std::string_view columns, from_table_t, int); + + template + void extract_fields(Tuple &t, std::index_sequence) const + { + (extract_value(t), ...); + } + + pqxx::internal::glyph_scanner_func *m_glyph_scanner; + + /// Current row's fields' text, combined into one reusable string. + std::string m_row; + + /// The current row's fields. + std::vector m_fields; + + bool m_finished = false; + + void close(); + + template + void extract_value(Tuple &) const; + + /// Read a line of COPY data, write `m_row` and `m_fields`. + void parse_line(); +}; + + +template +inline stream_from::stream_from( + transaction_base &tx, from_table_t, std::string_view table_name, + Columns const &columns) : + stream_from{ + tx, from_table, table_name, std::begin(columns), std::end(columns)} +{} + + +template +inline stream_from::stream_from( + transaction_base &tx, from_table_t, std::string_view table, + Iter columns_begin, Iter columns_end) : + stream_from{ + tx, table, separated_list(",", columns_begin, columns_end), + from_table, 1} +{} + + +template inline stream_from &stream_from::operator>>(Tuple &t) +{ + if (m_finished) + return *this; + static constexpr auto tup_size{std::tuple_size_v}; + m_fields.reserve(tup_size); + parse_line(); + if (m_finished) + return *this; + + if (std::size(m_fields) != tup_size) + throw usage_error{internal::concat( + "Tried to extract ", tup_size, " field(s) from a stream of ", + std::size(m_fields), ".")}; + + extract_fields(t, std::make_index_sequence{}); + return *this; +} + + +template +inline void stream_from::extract_value(Tuple &t) const +{ + using field_type = strip_t(t))>; + using nullity = nullness; + assert(index < std::size(m_fields)); + if constexpr (nullity::always_null) + { + if (std::data(m_fields[index]) != nullptr) + throw conversion_error{"Streaming non-null value into null field."}; + } + else if (std::data(m_fields[index]) == nullptr) + { + if constexpr (nullity::has_null) + std::get(t) = nullity::null(); + else + internal::throw_null_conversion(type_name); + } + else + { + // Don't ever try to convert a non-null value to nullptr_t! 
+    std::get<index>(t) = from_string<field_type>(m_fields[index]);
+  }
+}
+} // namespace pqxx
+#endif
diff --git a/ext/libpqxx-7.7.3/include/pqxx/stream_to b/ext/libpqxx-7.7.3/include/pqxx/stream_to
new file mode 100644
index 000000000..8760cf1f4
--- /dev/null
+++ b/ext/libpqxx-7.7.3/include/pqxx/stream_to
@@ -0,0 +1,8 @@
+/** pqxx::stream_to class.
+ *
+ * pqxx::stream_to enables optimized batch updates to a database table.
+ */
+// Actual definitions in .hxx file so editors and such recognize file type.
+#include "pqxx/internal/header-pre.hxx"
+#include "pqxx/stream_to.hxx"
+#include "pqxx/internal/header-post.hxx"
diff --git a/ext/libpqxx-7.7.3/include/pqxx/stream_to.hxx b/ext/libpqxx-7.7.3/include/pqxx/stream_to.hxx
new file mode 100644
index 000000000..2a49d8f85
--- /dev/null
+++ b/ext/libpqxx-7.7.3/include/pqxx/stream_to.hxx
@@ -0,0 +1,455 @@
+/* Definition of the pqxx::stream_to class.
+ *
+ * pqxx::stream_to enables optimized batch updates to a database table.
+ *
+ * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/stream_to instead.
+ *
+ * Copyright (c) 2000-2022, Jeroen T. Vermeulen.
+ *
+ * See COPYING for copyright license. If you did not receive a file called
+ * COPYING with this source code, please notify the distributor of this
+ * mistake, or contact the author.
+ */
+#ifndef PQXX_H_STREAM_TO
+#define PQXX_H_STREAM_TO
+
+#if !defined(PQXX_HEADER_PRE)
+# error "Include libpqxx headers as <pqxx/header>, not <pqxx/header.hxx>."
+#endif
+
+#include "pqxx/separated_list.hxx"
+#include "pqxx/transaction_base.hxx"
+
+
+namespace pqxx
+{
+/// Efficiently write data directly to a database table.
+/** If you wish to insert rows of data into a table, you can compose INSERT
+ * statements and execute them. But it's slow and tedious, and you need to
+ * worry about quoting and escaping the data.
+ *
+ * If you're just inserting a single row, it probably won't matter much. You
+ * can use prepared or parameterised statements to take care of the escaping
+ * for you. But if you're inserting large numbers of rows you will want
+ * something better.
+ *
+ * Inserting rows one by one using INSERT statements involves a lot of
+ * pointless overhead, especially when you are working with a remote database
+ * server over the network. You may end up sending each row over the network
+ * as a separate query, and waiting for a reply. Do it "in bulk" using
+ * `stream_to`, and you may find that it goes many times faster. Sometimes
+ * you gain orders of magnitude in speed.
+ *
+ * Here's how it works: you create a `stream_to` stream to start writing to
+ * your table. You will probably want to specify the columns. Then, you
+ * feed your data into the stream one row at a time. And finally, you call the
+ * stream's @ref complete function to tell it to finalise the operation, wait
+ * for completion, and check for errors.
+ *
+ * (You _must_ complete the stream before committing or aborting the
+ * transaction. The connection is in a special state while the stream is
+ * active, where it can't process commands, and can't commit or abort a
+ * transaction.)
+ *
+ * So how do you feed a row of data into the stream? There are several ways,
+ * but the preferred one is to call its @ref write_values. Pass the field
+ * values as arguments. It doesn't matter what type they are, as long as
+ * libpqxx knows how to convert them to PostgreSQL's text format: `int`,
+ * `std::string` or `std::string_view`, `float` and `double`, `bool`...
+ * lots of basic types are supported.
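+ *
+ * For example, a brief sketch (the connection `conn` and the `item` table
+ * with columns `id` and `name` are assumptions, purely for illustration):
+ *
+ * ```cxx
+ * pqxx::work tx{conn};
+ * auto stream{pqxx::stream_to::table(tx, {"item"}, {"id", "name"})};
+ * stream.write_values(1, "one");
+ * stream.write_values(2, "two");
+ * stream.complete();
+ * tx.commit();
+ * ```
+ *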
If some of the values are null, feel free to use + * `std::optional`, `std::shared_ptr`, or `std::unique_ptr`. + * + * The arguments' types don't even have to match the fields' SQL types. If you + * want to insert an `int` into a `DECIMAL` column, that's your choice -- it + * will produce a `DECIMAL` value which happens to be integral. Insert a + * `float` into a `VARCHAR` column? That's fine, you'll get a string whose + * contents happen to read like a number. And so on. You can even insert + * different types of value in the same column on different rows. If you have + * a code path where a particular field is always null, just insert `nullptr`. + * + * There is another way to insert rows: the `<<` ("shift-left") operator. + * It's not as fast and it doesn't support variable arguments: each row must be + * either a `std::tuple` or something iterable, such as a `std::vector`, or + * anything else with a `begin()` and `end()`. + * + * @warning While a stream is active, you cannot execute queries, open a + * pipeline, etc. on the same transaction. A transaction can have at most one + * object of a type derived from @ref pqxx::transaction_focus active on it at a + * time. + */ +class PQXX_LIBEXPORT stream_to : transaction_focus +{ +public: + /// Stream data to a pre-quoted table and columns. + /** This factory can be useful when it's not convenient to provide the + * columns list in the form of a `std::initializer_list`, or when the list + * of columns is simply not known at compile time. + * + * Also use this if you need to create multiple streams using the same table + * path and/or columns list, and you want to save a bit of work on composing + * the internal SQL statement for starting the stream. It lets you compose + * the string representations for the table path and the columns list, so you + * can compute these once and then re-use them later. + * + * @param tx The transaction within which the stream will operate. + * @param path Name or path for the table upon which the stream will + * operate. If any part of the table path may contain special + * characters or be case-sensitive, quote the path using + * pqxx::connection::quote_table(). + * @param columns Columns to which the stream will write. They should be + * comma-separated and, if needed, quoted. You can produce the string + * using pqxx::connection::quote_columns(). If you omit this argument, + * the stream will write all columns in the table, in schema order. + */ + static stream_to raw_table( + transaction_base &tx, std::string_view path, std::string_view columns = "") + { + return {tx, path, columns}; + } + + /// Create a `stream_to` writing to a named table and columns. + /** Use this to stream data to a table, where the list of columns is known at + * compile time. + * + * @param tx The transaction within which the stream will operate. + * @param path A @ref table_path designating the target table. + * @param columns Optionally, the columns to which the stream should write. + * If you do not pass this, the stream will write to all columns in the + * table, in schema order. + */ + static stream_to table( + transaction_base &tx, table_path path, + std::initializer_list columns = {}) + { + auto const &conn{tx.conn()}; + return raw_table(tx, conn.quote_table(path), conn.quote_columns(columns)); + } + +#if defined(PQXX_HAVE_CONCEPTS) + /// Create a `stream_to` writing to a named table and columns. + /** Use this version to stream data to a table, when the list of columns is + * not known at compile time. 
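+   *
+   * As a sketch (the container type and the `item` table below are only
+   * assumptions for illustration; see the example after this one too):
+   *
+   * ```cxx
+   * std::vector<std::string_view> const cols{"id", "name"};
+   * auto stream{pqxx::stream_to::table(tx, {"item"}, cols)};
+   * ```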
+ * + * @param tx The transaction within which the stream will operate. + * @param path A @ref table_path designating the target table. + * @param columns The columns to which the stream should write. + */ + template + static stream_to + table(transaction_base &tx, table_path path, COLUMNS const &columns) + { + auto const &conn{tx.conn()}; + return stream_to::raw_table( + tx, conn.quote_table(path), tx.conn().quote_columns(columns)); + } + + /// Create a `stream_to` writing to a named table and columns. + /** Use this version to stream data to a table, when the list of columns is + * not known at compile time. + * + * @param tx The transaction within which the stream will operate. + * @param path A @ref table_path designating the target table. + * @param columns The columns to which the stream should write. + */ + template + static stream_to + table(transaction_base &tx, std::string_view path, COLUMNS const &columns) + { + return stream_to::raw_table(tx, path, tx.conn().quote_columns(columns)); + } +#endif // PQXX_HAVE_CONCEPTS + + /// Create a stream, without specifying columns. + /** @deprecated Use @ref table or @ref raw_table as a factory. + * + * Fields will be inserted in whatever order the columns have in the + * database. + * + * You'll probably want to specify the columns, so that the mapping between + * your data fields and the table is explicit in your code, and not hidden + * in an "implicit contract" between your code and your schema. + */ + [[deprecated("Use table() or raw_table() factory.")]] stream_to( + transaction_base &tx, std::string_view table_name) : + stream_to{tx, table_name, ""sv} + {} + + /// Create a stream, specifying column names as a container of strings. + /** @deprecated Use @ref table or @ref raw_table as a factory. + */ + template + [[deprecated("Use table() or raw_table() factory.")]] stream_to( + transaction_base &, std::string_view table_name, Columns const &columns); + + /// Create a stream, specifying column names as a sequence of strings. + /** @deprecated Use @ref table or @ref raw_table as a factory. + */ + template + [[deprecated("Use table() or raw_table() factory.")]] stream_to( + transaction_base &, std::string_view table_name, Iter columns_begin, + Iter columns_end); + + ~stream_to() noexcept; + + /// Does this stream still need to @ref complete()? + [[nodiscard]] constexpr operator bool() const noexcept + { + return not m_finished; + } + /// Has this stream been through its concluding @c complete()? + [[nodiscard]] constexpr bool operator!() const noexcept + { + return m_finished; + } + + /// Complete the operation, and check for errors. + /** Always call this to close the stream in an orderly fashion, even after + * an error. (In the case of an error, abort the transaction afterwards.) + * + * The only circumstance where it's safe to skip this is after an error, if + * you're discarding the entire connection. + */ + void complete(); + + /// Insert a row of data. + /** Returns a reference to the stream, so you can chain the calls. + * + * The @c row can be a tuple, or any type that can be iterated. Each + * item becomes a field in the row, in the same order as the columns you + * specified when creating the stream. + * + * If you don't already happen to have your fields in the form of a tuple or + * container, prefer @c write_values. It's faster and more convenient. + */ + template stream_to &operator<<(Row const &row) + { + write_row(row); + return *this; + } + + /// Stream a `stream_from` straight into a `stream_to`. 
+ /** This can be useful when copying between different databases. If the + * source and the destination are on the same database, you'll get better + * performance doing it all in a regular query. + */ + stream_to &operator<<(stream_from &); + + /// Insert a row of data, given in the form of a @c std::tuple or container. + /** The @c row can be a tuple, or any type that can be iterated. Each + * item becomes a field in the row, in the same order as the columns you + * specified when creating the stream. + * + * The preferred way to insert a row is @c write_values. + */ + template void write_row(Row const &row) + { + fill_buffer(row); + write_buffer(); + } + + /// Insert values as a row. + /** This is the recommended way of inserting data. Pass your field values, + * of any convertible type. + */ + template void write_values(Ts const &...fields) + { + fill_buffer(fields...); + write_buffer(); + } + +private: + /// Stream a pre-quoted table name and columns list. + stream_to( + transaction_base &tx, std::string_view path, std::string_view columns); + + bool m_finished = false; + + /// Reusable buffer for a row. Saves doing an allocation for each row. + std::string m_buffer; + + /// Reusable buffer for converting/escaping a field. + std::string m_field_buf; + + /// Glyph scanner, for parsing the client encoding. + internal::glyph_scanner_func *m_scanner; + + /// Write a row of raw text-format data into the destination table. + void write_raw_line(std::string_view); + + /// Write a row of data from @c m_buffer into the destination table. + /** Resets the buffer for the next row. + */ + void write_buffer(); + + /// COPY encoding for a null field, plus subsequent separator. + static constexpr std::string_view null_field{"\\N\t"}; + + /// Estimate buffer space needed for a field which is always null. + template + static std::enable_if_t::always_null, std::size_t> + estimate_buffer(T const &) + { + return std::size(null_field); + } + + /// Estimate buffer space needed for field f. + /** The estimate is not very precise. We don't actually know how much space + * we'll need once the escaping comes in. + */ + template + static std::enable_if_t::always_null, std::size_t> + estimate_buffer(T const &field) + { + return is_null(field) ? std::size(null_field) : size_buffer(field); + } + + /// Append escaped version of @c data to @c m_buffer, plus a tab. + void escape_field_to_buffer(std::string_view data); + + /// Append string representation for @c f to @c m_buffer. + /** This is for the general case, where the field may contain a value. + * + * Also appends a tab. The tab is meant to be a separator, not a terminator, + * so if you write any fields at all, you'll end up with one tab too many + * at the end of the buffer. + */ + template + std::enable_if_t::always_null> + append_to_buffer(Field const &f) + { + // We append each field, terminated by a tab. That will leave us with + // one tab too many, assuming we write any fields at all; we remove that + // at the end. + if (is_null(f)) + { + // Easy. Append null and tab in one go. + m_buffer.append(null_field); + } + else + { + // Convert f into m_buffer. + + using traits = string_traits; + auto const budget{estimate_buffer(f)}; + auto const offset{std::size(m_buffer)}; + + if constexpr (std::is_arithmetic_v) + { + // Specially optimised for "safe" types, which never need any + // escaping. Convert straight into m_buffer. + + // The budget we get from size_buffer() includes room for the trailing + // zero, which we must remove. 
But we're also inserting tabs between + // fields, so we re-purpose the extra byte for that. + auto const total{offset + budget}; + m_buffer.resize(total); + auto const data{m_buffer.data()}; + char *const end{traits::into_buf(data + offset, data + total, f)}; + *(end - 1) = '\t'; + // Shrink to fit. Keep the tab though. + m_buffer.resize(static_cast(end - data)); + } + else if constexpr ( + std::is_same_v or + std::is_same_v or + std::is_same_v) + { + // This string may need escaping. + m_field_buf.resize(budget); + escape_field_to_buffer(f); + } + else + { + // This field needs to be converted to a string, and after that, + // escaped as well. + m_field_buf.resize(budget); + auto const data{m_field_buf.data()}; + escape_field_to_buffer( + traits::to_buf(data, data + std::size(m_field_buf), f)); + } + } + } + + /// Append string representation for a null field to @c m_buffer. + /** This special case is for types which are always null. + * + * Also appends a tab. The tab is meant to be a separator, not a terminator, + * so if you write any fields at all, you'll end up with one tab too many + * at the end of the buffer. + */ + template + std::enable_if_t::always_null> + append_to_buffer(Field const &) + { + m_buffer.append(null_field); + } + + /// Write raw COPY line into @c m_buffer, based on a container of fields. + template + std::enable_if_t> + fill_buffer(Container const &c) + { + // To avoid unnecessary allocations and deallocations, we run through c + // twice: once to determine how much buffer space we may need, and once to + // actually write it into the buffer. + std::size_t budget{0}; + for (auto const &f : c) budget += estimate_buffer(f); + m_buffer.reserve(budget); + for (auto const &f : c) append_to_buffer(f); + } + + /// Estimate how many buffer bytes we need to write tuple. + template + static std::size_t + budget_tuple(Tuple const &t, std::index_sequence) + { + return (estimate_buffer(std::get(t)) + ...); + } + + /// Write tuple of fields to @c m_buffer. + template + void append_tuple(Tuple const &t, std::index_sequence) + { + (append_to_buffer(std::get(t)), ...); + } + + /// Write raw COPY line into @c m_buffer, based on a tuple of fields. + template void fill_buffer(std::tuple const &t) + { + using indexes = std::make_index_sequence; + + m_buffer.reserve(budget_tuple(t, indexes{})); + append_tuple(t, indexes{}); + } + + /// Write raw COPY line into @c m_buffer, based on varargs fields. + template void fill_buffer(const Ts &...fields) + { + (..., append_to_buffer(fields)); + } + + constexpr static std::string_view s_classname{"stream_to"}; +}; + + +template +inline stream_to::stream_to( + transaction_base &tx, std::string_view table_name, Columns const &columns) : + stream_to{tx, table_name, std::begin(columns), std::end(columns)} +{} + + +template +inline stream_to::stream_to( + transaction_base &tx, std::string_view table_name, Iter columns_begin, + Iter columns_end) : + stream_to{ + tx, + tx.quote_name( + table_name, + separated_list(",", columns_begin, columns_end, [&tx](auto col) { + return tx.quote_name(*col); + }))} +{} +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/subtransaction b/ext/libpqxx-7.7.3/include/pqxx/subtransaction new file mode 100644 index 000000000..e0d154903 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/subtransaction @@ -0,0 +1,8 @@ +/** pqxx::subtransaction class. + * + * pqxx::subtransaction is a nested transaction, i.e. one inside a transaction. 
+ */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/subtransaction.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/subtransaction.hxx b/ext/libpqxx-7.7.3/include/pqxx/subtransaction.hxx new file mode 100644 index 000000000..e66b7a7a8 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/subtransaction.hxx @@ -0,0 +1,96 @@ +/* Definition of the pqxx::subtransaction class. + * + * pqxx::subtransaction is a nested transaction, i.e. one within a transaction. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/subtransaction instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_SUBTRANSACTION +#define PQXX_H_SUBTRANSACTION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/dbtransaction.hxx" + +namespace pqxx +{ +/** + * @ingroup transactions + */ +/// "Transaction" nested within another transaction +/** A subtransaction can be executed inside a backend transaction, or inside + * another subtransaction. This can be useful when, for example, statements in + * a transaction may harmlessly fail and you don't want them to abort the + * entire transaction. Here's an example of how a temporary table may be + * dropped before re-creating it, without failing if the table did not exist: + * + * ```cxx + * void do_job(connection &C) + * { + * string const temptable = "fleetingtable"; + * + * work W(C, "do_job"); + * do_firstpart(W); + * + * // Attempt to delete our temporary table if it already existed. + * try + * { + * subtransaction S(W, "droptemp"); + * S.exec0("DROP TABLE " + temptable); + * S.commit(); + * } + * catch (undefined_table const &) + * { + * // Table did not exist. Which is what we were hoping to achieve anyway. + * // Carry on without regrets. + * } + * + * // S may have gone into a failed state and been destroyed, but the + * // upper-level transaction W is still fine. We can continue to use it. + * W.exec0("CREATE TEMP TABLE " + temptable + "(bar integer, splat + * varchar)"); + * + * do_lastpart(W); + * } + * ``` + * + * (This is just an example. If you really wanted to do drop a table without + * an error if it doesn't exist, you'd use DROP TABLE IF EXISTS.) + * + * There are no isolation levels inside a transaction. They are not needed + * because all actions within the same backend transaction are always performed + * sequentially anyway. + * + * @warning While the subtransaction is "live," you cannot execute queries or + * open streams etc. on its parent transaction. A transaction can have at most + * one object of a type derived from @ref pqxx::transaction_focus active on it + * at a time. + */ +class PQXX_LIBEXPORT subtransaction : public transaction_focus, + public dbtransaction +{ +public: + /// Nest a subtransaction nested in another transaction. + explicit subtransaction(dbtransaction &t, std::string_view tname = ""sv); + + /// Nest a subtransaction in another subtransaction. 
+ explicit subtransaction(subtransaction &t, std::string_view name = ""sv); + + virtual ~subtransaction() noexcept override; + +private: + std::string quoted_name() const + { + return quote_name(transaction_focus::name()); + } + virtual void do_commit() override; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/time b/ext/libpqxx-7.7.3/include/pqxx/time new file mode 100644 index 000000000..85df05744 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/time @@ -0,0 +1,6 @@ +/** Date/time string conversions. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/time.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/time.hxx b/ext/libpqxx-7.7.3/include/pqxx/time.hxx new file mode 100644 index 000000000..effed05e0 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/time.hxx @@ -0,0 +1,88 @@ +/** Support for date/time values. + * + * At the moment this supports dates, but not times. + */ +#ifndef PQXX_H_TIME +#define PQXX_H_TIME + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include + +#include "pqxx/internal/concat.hxx" +#include "pqxx/strconv.hxx" + + +#if defined(PQXX_HAVE_YEAR_MONTH_DAY) + +namespace pqxx +{ +using namespace std::literals; + +template<> +struct nullness + : no_null +{}; + + +/// String representation for a Gregorian date in ISO-8601 format. +/** @warning Experimental. There may still be design problems, particularly + * when it comes to BC years. + * + * PostgreSQL supports a choice of date formats, but libpqxx does not. The + * other formats in turn support a choice of "month before day" versus "day + * before month," meaning that it's not necessarily known which format a given + * date is supposed to be. So I repeat: ISO-8601-style format only! + * + * Invalid dates will not convert. This includes February 29 on non-leap + * years, which is why it matters that `year_month_day` represents a + * _Gregorian_ date. + * + * The range of years is limited. At the time of writing, PostgreSQL 14 + * supports years from 4713 BC to 294276 AD inclusive, and C++20 supports + * a range of 32767 BC to 32767 AD inclusive. So in practice, years must fall + * between 4713 BC and 32767 AD, inclusive. + * + * @warning Support for BC (or BCE) years is still experimental. I still need + * confirmation on this issue: it looks as if C++ years are astronomical years, + * which means they have a Year Zero. Regular BC/AD years do not have a year + * zero, so the year 1 AD follows directly after 1 BC. + * + * So, what to our calendars (and to PostgreSQL) is the year "0001 BC" seems to + * count as year "0" in a `std::chrono::year_month_day`. The year 0001 AD is + * still equal to 1 as you'd expect, and all AD years work normally, but all + * years before then are shifted by one. For instance, the year 543 BC would + * be -542 in C++. 
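+ *
+ * A small usage sketch (this assumes your compiler supports C++20's
+ * `std::chrono::year_month_day`, i.e. PQXX_HAVE_YEAR_MONTH_DAY is defined):
+ *
+ * ```cxx
+ * auto const date{
+ *   pqxx::from_string<std::chrono::year_month_day>("2021-03-29")};
+ * std::string const text{pqxx::to_string(date)}; // ISO-8601 text again.
+ * ```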
+ */ +template<> struct PQXX_LIBEXPORT string_traits +{ + [[nodiscard]] static zview + to_buf(char *begin, char *end, std::chrono::year_month_day const &value) + { + return generic_to_buf(begin, end, value); + } + + static char * + into_buf(char *begin, char *end, std::chrono::year_month_day const &value); + + [[nodiscard]] static std::chrono::year_month_day + from_string(std::string_view text); + + [[nodiscard]] static std::size_t + size_buffer(std::chrono::year_month_day const &) noexcept + { + static_assert(int{(std::chrono::year::min)()} >= -99999); + static_assert(int{(std::chrono::year::max)()} <= 99999); + return 5 + 1 + 2 + 1 + 2 + std::size(s_bc) + 1; + } + +private: + /// The "BC" suffix for years before 1 AD. + static constexpr std::string_view s_bc{" BC"sv}; +}; +} // namespace pqxx +#endif // PQXX_HAVE_YEAR_MONTH_DAY +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/transaction b/ext/libpqxx-7.7.3/include/pqxx/transaction new file mode 100644 index 000000000..a7ae39d43 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/transaction @@ -0,0 +1,8 @@ +/** pqxx::transaction class. + * + * pqxx::transaction represents a standard database transaction. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/transaction.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/transaction.hxx b/ext/libpqxx-7.7.3/include/pqxx/transaction.hxx new file mode 100644 index 000000000..e90917e38 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/transaction.hxx @@ -0,0 +1,108 @@ +/* Definition of the pqxx::transaction class. + * pqxx::transaction represents a standard database transaction. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/transaction instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_TRANSACTION +#define PQXX_H_TRANSACTION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/dbtransaction.hxx" + +namespace pqxx::internal +{ +/// Helper base class for the @ref transaction class template. +class PQXX_LIBEXPORT basic_transaction : public dbtransaction +{ +protected: + basic_transaction( + connection &c, zview begin_command, std::string_view tname); + basic_transaction(connection &c, zview begin_command, std::string &&tname); + basic_transaction(connection &c, zview begin_command); + + virtual ~basic_transaction() noexcept override = 0; + +private: + virtual void do_commit() override; +}; +} // namespace pqxx::internal + + +namespace pqxx +{ +/** + * @ingroup transactions + */ +//@{ + +/// Standard back-end transaction, templatised on isolation level. +/** This is the type you'll normally want to use to represent a transaction on + * the database. + * + * Usage example: double all wages. + * + * ```cxx + * extern connection C; + * work T(C); + * try + * { + * T.exec0("UPDATE employees SET wage=wage*2"); + * T.commit(); // NOTE: do this inside try block + * } + * catch (exception const &e) + * { + * cerr << e.what() << endl; + * T.abort(); // Usually not needed; same happens when T's life ends. 
+ * } + * ``` + */ +template< + isolation_level ISOLATION = isolation_level::read_committed, + write_policy READWRITE = write_policy::read_write> +class transaction final : public internal::basic_transaction +{ +public: + /// Begin a transaction. + /** + * @param c Connection for this transaction to operate on. + * @param tname Optional name for transaction. Must begin with a letter and + * may contain letters and digits only. + */ + transaction(connection &c, std::string_view tname) : + internal::basic_transaction{ + c, internal::begin_cmd, tname} + {} + + /// Begin a transaction. + /** + * @param c Connection for this transaction to operate on. + * may contain letters and digits only. + */ + explicit transaction(connection &c) : + internal::basic_transaction{ + c, internal::begin_cmd} + {} + + virtual ~transaction() noexcept override { close(); } +}; + + +/// The default transaction type. +using work = transaction<>; + +/// Read-only transaction. +using read_transaction = + transaction; + +//@} +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/transaction_base b/ext/libpqxx-7.7.3/include/pqxx/transaction_base new file mode 100644 index 000000000..c39219aac --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/transaction_base @@ -0,0 +1,9 @@ +/** Base for the transaction classes. + * + * pqxx::transaction_base defines the interface for any abstract class that + * represents a database transaction. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/transaction_base.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/transaction_base.hxx b/ext/libpqxx-7.7.3/include/pqxx/transaction_base.hxx new file mode 100644 index 000000000..4363cc56a --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/transaction_base.hxx @@ -0,0 +1,810 @@ +/* Common code and definitions for the transaction classes. + * + * pqxx::transaction_base defines the interface for any abstract class that + * represents a database transaction. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/transaction_base instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_TRANSACTION_BASE +#define PQXX_H_TRANSACTION_BASE + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include + +/* End-user programs need not include this file, unless they define their own + * transaction classes. This is not something the typical program should want + * to do. + * + * However, reading this file is worthwhile because it defines the public + * interface for the available transaction classes such as transaction and + * nontransaction. 
+ */ + +#include "pqxx/connection.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/internal/encoding_group.hxx" +#include "pqxx/isolation.hxx" +#include "pqxx/result.hxx" +#include "pqxx/row.hxx" +#include "pqxx/stream_from.hxx" +#include "pqxx/util.hxx" + +namespace pqxx::internal::gate +{ +class transaction_subtransaction; +class transaction_sql_cursor; +class transaction_stream_to; +class transaction_transaction_focus; +} // namespace pqxx::internal::gate + + +namespace pqxx +{ +using namespace std::literals; + + +class transaction_focus; + + +/** + * @defgroup transactions Transaction classes + * + * All database access goes through instances of these classes. + * However, not all implementations of this interface need to provide full + * transactional integrity. + * + * Several implementations of this interface are shipped with libpqxx, + * including the plain transaction class, the entirely unprotected + * nontransaction, and the more cautious robusttransaction. + */ + +/// Interface definition (and common code) for "transaction" classes. +/** + * @ingroup transactions + * + * Abstract base class for all transaction types. + */ +class PQXX_LIBEXPORT PQXX_NOVTABLE transaction_base +{ +public: + transaction_base() = delete; + transaction_base(transaction_base const &) = delete; + transaction_base(transaction_base &&) = delete; + transaction_base &operator=(transaction_base const &) = delete; + transaction_base &operator=(transaction_base &&) = delete; + + virtual ~transaction_base() = 0; + + /// Commit the transaction. + /** Make the effects of this transaction definite. If you destroy a + * transaction without invoking its @ref commit() first, that will implicitly + * abort it. (For the @ref nontransaction class though, "commit" and "abort" + * really don't do anything, hence its name.) + * + * There is, however, a minute risk that you might lose your connection to + * the database at just the wrong moment here. In that case, libpqxx may be + * unable to determine whether the database was able to complete the + * transaction, or had to roll it back. In that scenario, @ref commit() will + * throw an in_doubt_error. There is a different transaction class called + * @ref robusttransaction which takes some special precautions to reduce this + * risk. + */ + void commit(); + + /// Abort the transaction. + /** No special effort is required to call this function; it will be called + * implicitly when the transaction is destructed. + */ + void abort(); + + /** + * @ingroup escaping-functions + * + * Use these when writing SQL queries that incorporate C++ values as SQL + * constants. + * + * The functions you see here are just convenience shortcuts to the same + * functions on the connection object. + */ + //@{ + /// Escape string for use as SQL string literal in this transaction. + template [[nodiscard]] auto esc(ARGS &&...args) const + { + return conn().esc(std::forward(args)...); + } + + /// Escape binary data for use as SQL string literal in this transaction. + /** Raw, binary data is treated differently from regular strings. Binary + * strings are never interpreted as text, so they may safely include byte + * values or byte sequences that don't happen to represent valid characters + * in the character encoding being used. + * + * The binary string does not stop at the first zero byte, as is the case + * with textual strings. Instead, it may contain zero bytes anywhere. 
If + * it happens to contain bytes that look like quote characters, or other + * things that can disrupt their use in SQL queries, they will be replaced + * with special escape sequences. + */ + template [[nodiscard]] auto esc_raw(ARGS &&...args) const + { + return conn().esc_raw(std::forward(args)...); + } + + /// Unescape binary data, e.g. from a table field or notification payload. + /** Takes a binary string as escaped by PostgreSQL, and returns a restored + * copy of the original binary data. + */ + [[nodiscard, deprecated("Use unesc_bin() instead.")]] std::string + unesc_raw(zview text) const + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return conn().unesc_raw(text); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Unescape binary data, e.g. from a table field or notification payload. + /** Takes a binary string as escaped by PostgreSQL, and returns a restored + * copy of the original binary data. + */ + [[nodiscard]] std::basic_string unesc_bin(zview text) + { + return conn().unesc_bin(text); + } + + /// Unescape binary data, e.g. from a table field or notification payload. + /** Takes a binary string as escaped by PostgreSQL, and returns a restored + * copy of the original binary data. + */ + [[nodiscard, deprecated("Use unesc_bin() instead.")]] std::string + unesc_raw(char const *text) const + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return conn().unesc_raw(text); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Unescape binary data, e.g. from a table field or notification payload. + /** Takes a binary string as escaped by PostgreSQL, and returns a restored + * copy of the original binary data. + */ + [[nodiscard]] std::basic_string unesc_bin(char const text[]) + { + return conn().unesc_bin(text); + } + + /// Represent object as SQL string, including quoting & escaping. + /** Nulls are recognized and represented as SQL nulls. */ + template [[nodiscard]] std::string quote(T const &t) const + { + return conn().quote(t); + } + + [[deprecated( + "Use std::basic_string instead of binarystring.")]] std::string + quote(binarystring const &t) const + { + return conn().quote(t.bytes_view()); + } + + /// Binary-escape and quote a binary string for use as an SQL constant. + [[deprecated("Use quote(std::basic_string_view).")]] std::string + quote_raw(unsigned char const bin[], std::size_t len) const + { + return quote(binary_cast(bin, len)); + } + + /// Binary-escape and quote a binary string for use as an SQL constant. + [[deprecated("Use quote(std::basic_string_view).")]] std::string + quote_raw(zview bin) const; + +#if defined(PQXX_HAVE_CONCEPTS) + /// Binary-escape and quote a binary string for use as an SQL constant. + /** For binary data you can also just use @ref quote(data). */ + template + [[nodiscard]] std::string quote_raw(DATA const &data) const + { + return conn().quote_raw(data); + } +#endif + + /// Escape an SQL identifier for use in a query. + [[nodiscard]] std::string quote_name(std::string_view identifier) const + { + return conn().quote_name(identifier); + } + + /// Escape string for literal LIKE match. + [[nodiscard]] std::string + esc_like(std::string_view bin, char escape_char = '\\') const + { + return conn().esc_like(bin, escape_char); + } + //@} + + /** + * @name Command execution + * + * There are many functions for executing (or "performing") a command (or + * "query"). This is the most fundamental thing you can do with the library, + * and you always do it from a transaction class. 
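+   *
+   * A minimal sketch of executing a query and reading its result (the
+   * connection `conn` and the `item` table are merely illustrative
+   * assumptions):
+   *
+   * ```cxx
+   * pqxx::work tx{conn};
+   * pqxx::result const r{tx.exec("SELECT id, name FROM item")};
+   * for (auto const &row : r)
+   *   std::cout << row[0].as<int>() << '\t' << row[1].c_str() << '\n';
+   * tx.commit();
+   * ```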
+ * + * Command execution can throw many types of exception, including sql_error, + * broken_connection, and many sql_error subtypes such as + * feature_not_supported or insufficient_privilege. But any exception thrown + * by the C++ standard library may also occur here. All exceptions you will + * see libpqxx throw are derived from std::exception. + * + * One unusual feature in libpqxx is that you can give your query a name or + * description. This does not mean anything to the database, but sometimes + * it can help libpqxx produce more helpful error messages, making problems + * in your code easier to debug. + * + * Many of the execution functions used to accept a `desc` argument, a + * human-readable description of the statement for use in error messages. + * This could make failures easier to debug. Future versions will use + * C++20's `std::source_location` to identify the failing statement. + */ + //@{ + + /// Execute a command. + /** + * @param query Query or command to execute. + * @param desc Optional identifier for query, to help pinpoint SQL errors. + * @return A result set describing the query's or command's result. + */ + [[deprecated("The desc parameter is going away.")]] result + exec(std::string_view query, std::string_view desc); + + /// Execute a command. + /** + * @param query Query or command to execute. + * @return A result set describing the query's or command's result. + */ + result exec(std::string_view query) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return exec(query, std::string_view{}); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Execute a command. + /** + * @param query Query or command to execute. + * @param desc Optional identifier for query, to help pinpoint SQL errors. + * @return A result set describing the query's or command's result. + */ + [[deprecated( + "Pass your query as a std::string_view, not stringstream.")]] result + exec(std::stringstream const &query, std::string_view desc) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return exec(query.str(), desc); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Execute command, which should return zero rows of data. + /** Works like @ref exec, but fails if the result contains data. It still + * returns a result, however, which may contain useful metadata. + * + * @throw unexpected_rows If the query returned the wrong number of rows. + */ + [[deprecated("The desc parameter is going away.")]] result + exec0(zview query, std::string_view desc) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return exec_n(0, query, desc); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Execute command, which should return zero rows of data. + /** Works like @ref exec, but fails if the result contains data. It still + * returns a result, however, which may contain useful metadata. + * + * @throw unexpected_rows If the query returned the wrong number of rows. + */ + result exec0(zview query) { return exec_n(0, query); } + + /// Execute command returning a single row of data. + /** Works like @ref exec, but requires the result to contain exactly one row. + * The row can be addressed directly, without the need to find the first row + * in a result set. + * + * @throw unexpected_rows If the query returned the wrong number of rows. 
+ */ + [[deprecated("The desc parameter is going away.")]] row + exec1(zview query, std::string_view desc) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return exec_n(1, query, desc).front(); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Execute command returning a single row of data. + /** Works like @ref exec, but requires the result to contain exactly one row. + * The row can be addressed directly, without the need to find the first row + * in a result set. + * + * @throw unexpected_rows If the query returned the wrong number of rows. + */ + row exec1(zview query) { return exec_n(1, query).front(); } + + /// Execute command, expect given number of rows. + /** Works like @ref exec, but checks that the result has exactly the expected + * number of rows. + * + * @throw unexpected_rows If the query returned the wrong number of rows. + */ + [[deprecated("The desc parameter is going away.")]] result + exec_n(result::size_type rows, zview query, std::string_view desc); + + /// Execute command, expect given number of rows. + /** Works like @ref exec, but checks that the result has exactly the expected + * number of rows. + * + * @throw unexpected_rows If the query returned the wrong number of rows. + */ + result exec_n(result::size_type rows, zview query) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return exec_n(rows, query, std::string_view{}); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Perform query, expecting exactly 1 row with 1 field, and convert it. + /** This is convenience shorthand for querying exactly one value from the + * database. It returns that value, converted to the type you specify. + */ + template + [[deprecated("The desc parameter is going away.")]] TYPE + query_value(zview query, std::string_view desc) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + row const r{exec1(query, desc)}; +#include "pqxx/internal/ignore-deprecated-post.hxx" + if (std::size(r) != 1) + throw usage_error{internal::concat( + "Queried single value from result with ", std::size(r), " columns.")}; + return r[0].as(); + } + + /// Perform query, expecting exactly 1 row with 1 field, and convert it. + /** This is convenience shorthand for querying exactly one value from the + * database. It returns that value, converted to the type you specify. + */ + template TYPE query_value(zview query) + { + row const r{exec1(query)}; + if (std::size(r) != 1) + throw usage_error{internal::concat( + "Queried single value from result with ", std::size(r), " columns.")}; + return r[0].as(); + } + + /// Execute a query, and loop over the results row by row. + /** Converts the rows to `std::tuple`, of the column types you specify. + * + * Use this with a range-based "for" loop. It executes the query, and + * directly maps the resulting rows onto a `std::tuple` of the types you + * specify. It starts before all the data from the server is in, so if your + * network connection to the server breaks while you're iterating, you'll get + * an exception partway through. + * + * The stream lives entirely within the lifetime of the transaction. Make + * sure you destroy the stream before you destroy the transaction. Either + * iterate the stream all the way to the end, or destroy first the stream + * and then the transaction without touching either in any other way. Until + * the stream has finished, the transaction is in a special state where it + * cannot execute queries. 
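+   *
+   * For instance, a sketch only (the table and column names are assumed for
+   * illustration):
+   *
+   * ```cxx
+   * for (auto [id, name] :
+   *      tx.stream<int, std::string_view>("SELECT id, name FROM item"))
+   *   std::cout << id << '\t' << name << '\n';
+   * ```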
+ * + * As a special case, tuple may contain `std::string_view` fields, but the + * strings to which they point will only remain valid until you extract the + * next row. After that, the memory holding the string may be overwritten or + * deallocated. + * + * If any of the columns can be null, and the C++ type to which it translates + * does not have a null value, wrap the type in `std::optional` (or if + * you prefer, `std::shared_ptr` or `std::unique_ptr)`. These templates + * do recognise null values, and libpqxx will know how to convert to them. + * + * The connection is in a special state until the iteration finishes. So if + * it does not finish due to a `break` or a `return` or an exception, then + * the entire connection becomes effectively unusable. + * + * Querying in this way is faster than the `exec()` methods for larger + * results (but slower for small ones). You can start processing rows before + * the full result is in. Also, `stream()` scales better in terms of memory + * usage. Where @ref exec() reads the entire result into memory at once, + * `stream()` will read and process one row at at a time. + * + * Your query executes as part of a COPY command, not as a stand-alone query, + * so there are limitations to what you can do in the query. It can be + * either a SELECT or VALUES query; or an INSERT, UPDATE, or DELETE with a + * RETURNING clause. See the documentation for PostgreSQL's COPY command for + * the details: + * + * https://www.postgresql.org/docs/current/sql-copy.html + * + * Iterating in this way does require each of the field types you pass to be + * default-constructible, copy-constructible, and assignable. These + * requirements may be loosened once libpqxx moves on to C++20. + */ + template + [[nodiscard]] auto stream(std::string_view query) & + { + // Tricky: std::make_unique() supports constructors but not RVO functions. + return pqxx::internal::owning_stream_input_iteration{ + std::unique_ptr{ + new stream_from{stream_from::query(*this, query)}}}; + } + + // C++20: Concept like std::invocable, but without specifying param types. + /// Perform a streaming query, and for each result row, call `func`. + /** Here, `func` can be a function, a `std::function`, a lambda, or an + * object that supports the function call operator. Of course `func` must + * have an unambiguous signature; it can't be overloaded or generic. + * + * The `for_each` function executes `query` in a stream using + * @ref pqxx::stream_from. Every time a row of data comes in from the + * server, it converts the row's fields to the types of `func`'s respective + * parameters, and calls `func` with those values. + * + * This will not work for all queries, but straightforward `SELECT` and + * `UPDATE ... RETURNING` queries should work. Consult the documentation for + * @ref pqxx::stream_from and PostgreSQL's underlying `COPY` command for the + * full details. + * + * Streaming a query like this is likely to be slower than the @ref exec() + * functions for small result sets, but faster for large result sets. So if + * performance matters, you'll want to use `for_each` if you query large + * amounts of data, but not if you do lots of queries with small outputs. 
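+   *
+   * A brief sketch (again, the `item` table is just an assumed example):
+   *
+   * ```cxx
+   * tx.for_each(
+   *   "SELECT id, name FROM item",
+   *   [](int id, std::string_view name)
+   *   { std::cout << id << '\t' << name << '\n'; });
+   * ```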
+ */ + template + inline auto for_each(std::string_view query, CALLABLE &&func) + { + using param_types = + pqxx::internal::strip_types_t>; + param_types const *const sample{nullptr}; + auto data_stream{stream_like(query, sample)}; + for (auto const &fields : data_stream) std::apply(func, fields); + } + + /** + * @name Parameterized statements + * + * You'll often need parameters in the queries you execute: "select the + * car with this licence plate." If the parameter is a string, you need to + * quote it and escape any special characters inside it, or it may become a + * target for an SQL injection attack. If it's an integer (for example), + * you need to convert it to a string, but in the database's format, without + * locale-specific niceties like "," separators between the thousands. + * + * Parameterised statements are an easier and safer way to do this. They're + * like prepared statements, but for a single use. You don't need to name + * them, and you don't need to prepare them first. + * + * Your query will include placeholders like `$1` and `$2` etc. in the places + * where you want the arguments to go. Then, you pass the argument values + * and the actual query is constructed for you. + * + * Pass the exact right number of parameters, and in the right order. The + * parameters in the query don't have to be neatly ordered from `$1` to + * `$2` to `$3` - but you must pass the argument for `$1` first, the one + * for `$2` second, etc. + * + * @warning Beware of "nul" bytes. Any string you pass as a parameter will + * end at the first char with value zero. If you pass a string that contains + * a zero byte, the last byte in the value will be the one just before the + * zero. + */ + //@{ + /// Execute an SQL statement with parameters. + template result exec_params(zview query, Args &&...args) + { + params pp(args...); + return internal_exec_params(query, pp.make_c_params()); + } + + // Execute parameterised statement, expect a single-row result. + /** @throw unexpected_rows if the result does not consist of exactly one row. + */ + template row exec_params1(zview query, Args &&...args) + { + return exec_params_n(1, query, std::forward(args)...).front(); + } + + // Execute parameterised statement, expect a result with zero rows. + /** @throw unexpected_rows if the result contains rows. + */ + template result exec_params0(zview query, Args &&...args) + { + return exec_params_n(0, query, std::forward(args)...); + } + + // Execute parameterised statement, expect exactly a given number of rows. + /** @throw unexpected_rows if the result contains the wrong number of rows. + */ + template + result exec_params_n(std::size_t rows, zview query, Args &&...args) + { + auto const r{exec_params(query, std::forward(args)...)}; + check_rowcount_params(rows, std::size(r)); + return r; + } + //@} + + /** + * @name Prepared statements + * + * These are very similar to parameterised statements. The difference is + * that you prepare them in advance, giving them identifying names. You can + * then call them by these names, passing in the argument values appropriate + * for that call. + * + * You prepare a statement on the connection, using + * @ref pqxx::connection::prepare(). But you then call the statement in a + * transaction, using the functions you see here. + * + * Never try to prepare, execute, or unprepare a prepared statement manually + * using direct SQL queries when you also use the libpqxx equivalents. 
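+ *
+ * For example, the supported two-step pattern, next to the one-off
+ * parameterised call described above, looks roughly like this (the statement
+ * name, SQL, and variables are assumptions for illustration only; `cx` is the
+ * connection and `tx` the transaction):
+ *
+ * ```cxx
+ * // One-off, parameterised:
+ * pqxx::result cars{tx.exec_params(
+ *   "SELECT name FROM car WHERE licence_plate = $1", plate)};
+ *
+ * // Prepared once on the connection, then executed as often as needed:
+ * cx.prepare("find_car", "SELECT name FROM car WHERE licence_plate = $1");
+ * pqxx::row found{tx.exec_prepared1("find_car", plate)};
+ * ```
+ *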
For + * any given statement, either prepare, manage, and execute it through the + * dedicated libpqxx functions; or do it all directly in SQL. Don't mix the + * two, or the code may get confused. + * + * See \ref prepared for a full discussion. + * + * @warning Beware of "nul" bytes. Any string you pass as a parameter will + * end at the first char with value zero. If you pass a string that contains + * a zero byte, the last byte in the value will be the one just before the + * zero. If you need a zero byte, you're dealing with binary strings, not + * regular strings. Represent binary strings on the SQL side as `BYTEA` + * (or as large objects). On the C++ side, use types like + * `std::basic_string` or `std::basic_string_view` + * or (in C++20) `std::vector`. Also, consider large objects on + * the SQL side and @ref blob on the C++ side. + */ + //@{ + + /// Execute a prepared statement, with optional arguments. + template + result exec_prepared(zview statement, Args &&...args) + { + params pp(args...); + return internal_exec_prepared(statement, pp.make_c_params()); + } + + /// Execute a prepared statement, and expect a single-row result. + /** @throw pqxx::unexpected_rows if the result was not exactly 1 row. + */ + template + row exec_prepared1(zview statement, Args &&...args) + { + return exec_prepared_n(1, statement, std::forward(args)...).front(); + } + + /// Execute a prepared statement, and expect a result with zero rows. + /** @throw pqxx::unexpected_rows if the result contained rows. + */ + template + result exec_prepared0(zview statement, Args &&...args) + { + return exec_prepared_n(0, statement, std::forward(args)...); + } + + /// Execute a prepared statement, expect a result with given number of rows. + /** @throw pqxx::unexpected_rows if the result did not contain exactly the + * given number of rows. + */ + template + result + exec_prepared_n(result::size_type rows, zview statement, Args &&...args) + { + auto const r{exec_prepared(statement, std::forward(args)...)}; + check_rowcount_prepared(statement, rows, std::size(r)); + return r; + } + + //@} + + /** + * @name Error/warning output + */ + //@{ + /// Have connection process a warning message. + void process_notice(char const msg[]) const { m_conn.process_notice(msg); } + /// Have connection process a warning message. + void process_notice(zview msg) const { m_conn.process_notice(msg); } + //@} + + /// The connection in which this transaction lives. + [[nodiscard]] constexpr connection &conn() const noexcept { return m_conn; } + + /// Set session variable using SQL "SET" command. + /** @deprecated To set a transaction-local variable, execute an SQL `SET` + * command. To set a session variable, use the connection's + * @ref set_session_var function. + * + * @warning When setting a string value, you must make sure that the string + * is "safe." If you call @ref quote() on the string, it will return a + * safely escaped and quoted version for use as an SQL literal. + * + * @warning This function executes SQL. Do not try to set or get variables + * while a pipeline or table stream is active. + * + * @param var The variable to set. + * @param value The new value to store in the variable. This can be any SQL + * expression. + */ + [[deprecated( + "Set transaction-local variables using SQL SET statements.")]] void + set_variable(std::string_view var, std::string_view value); + + /// Read session variable using SQL "SHOW" command. + /** @warning This executes SQL. 
Do not try to set or get variables while a + * pipeline or table stream is active. + */ + [[deprecated("Read variables using SQL SHOW statements.")]] std::string + get_variable(std::string_view); + + // C++20: constexpr. + /// Transaction name, if you passed one to the constructor; or empty string. + [[nodiscard]] std::string_view name() const &noexcept { return m_name; } + +protected: + /// Create a transaction (to be called by implementation classes only). + /** The name, if nonempty, must begin with a letter and may contain letters + * and digits only. + */ + transaction_base( + connection &c, std::string_view tname, + std::shared_ptr rollback_cmd) : + m_conn{c}, m_name{tname}, m_rollback_cmd{rollback_cmd} + {} + + /// Create a transaction (to be called by implementation classes only). + /** Its rollback command will be "ROLLBACK". + * + * The name, if nonempty, must begin with a letter and may contain letters + * and digits only. + */ + transaction_base(connection &c, std::string_view tname); + + /// Create a transaction (to be called by implementation classes only). + explicit transaction_base(connection &c); + + /// Register this transaction with the connection. + void register_transaction(); + + /// End transaction. To be called by implementing class' destructor. + void close() noexcept; + + /// To be implemented by derived implementation class: commit transaction. + virtual void do_commit() = 0; + + /// Transaction type-specific way of aborting a transaction. + /** @warning This will become "final", since this function can be called + * from the implementing class destructor. + */ + virtual void do_abort(); + + /// Set the rollback command. + void set_rollback_cmd(std::shared_ptr cmd) + { + m_rollback_cmd = cmd; + } + + /// Execute query on connection directly. + result direct_exec(std::string_view, std::string_view desc = ""sv); + result + direct_exec(std::shared_ptr, std::string_view desc = ""sv); + +private: + enum class status + { + active, + aborted, + committed, + in_doubt + }; + + PQXX_PRIVATE void check_pending_error(); + + result + internal_exec_prepared(zview statement, internal::c_params const &args); + + result internal_exec_params(zview query, internal::c_params const &args); + + /// Throw unexpected_rows if prepared statement returned wrong no. of rows. + void check_rowcount_prepared( + zview statement, result::size_type expected_rows, + result::size_type actual_rows); + + /// Throw unexpected_rows if wrong row count from parameterised statement. + void + check_rowcount_params(std::size_t expected_rows, std::size_t actual_rows); + + /// Describe this transaction to humans, e.g. "transaction 'foo'". + [[nodiscard]] std::string description() const; + + friend class pqxx::internal::gate::transaction_transaction_focus; + PQXX_PRIVATE void register_focus(transaction_focus *); + PQXX_PRIVATE void unregister_focus(transaction_focus *) noexcept; + PQXX_PRIVATE void register_pending_error(zview) noexcept; + PQXX_PRIVATE void register_pending_error(std::string &&) noexcept; + + /// Like @ref stream(), but takes a tuple rather than a parameter pack. + template + auto stream_like(std::string_view query, std::tuple const *) + { + return stream(query); + } + + connection &m_conn; + + /// Current "focus": a pipeline, a nested transaction, a stream... + /** This pointer is used for only one purpose: sanity checks against mistakes + * such as opening one while another is still active. 
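+ *
+ * For instance, opening a pqxx::pipeline while a pqxx::stream_from is still
+ * active on the same transaction is caught through this pointer and raises an
+ * error.  A hypothetical sketch (the query is illustrative only):
+ *
+ * ```cxx
+ * auto s{pqxx::stream_from::query(tx, "SELECT generate_series(1, 5)")};
+ * pqxx::pipeline p{tx};  // Error: the transaction already has a focus.
+ * ```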
+ */ + transaction_focus const *m_focus = nullptr; + + status m_status = status::active; + bool m_registered = false; + std::string m_name; + std::string m_pending_error; + + /// SQL command for aborting this type of transaction. + std::shared_ptr m_rollback_cmd; + + static constexpr std::string_view s_type_name{"transaction"sv}; +}; + + +// C++20: Can borrowed_range help? +/// Forbidden specialisation: underlying buffer immediately goes out of scope. +template<> +std::string_view transaction_base::query_value( + zview query, std::string_view desc) = delete; +/// Forbidden specialisation: underlying buffer immediately goes out of scope. +template<> +zview transaction_base::query_value( + zview query, std::string_view desc) = delete; + +} // namespace pqxx + + +namespace pqxx::internal +{ +/// The SQL command for starting a given type of transaction. +template +extern const zview begin_cmd; + +// These are not static members, so "constexpr" does not imply "inline". +template<> +inline constexpr zview begin_cmd{ + "BEGIN"_zv}; +template<> +inline constexpr zview begin_cmd{ + "BEGIN READ ONLY"_zv}; +template<> +inline constexpr zview begin_cmd{ + "BEGIN ISOLATION LEVEL REPEATABLE READ"_zv}; +template<> +inline constexpr zview begin_cmd{ + "BEGIN ISOLATION LEVEL REPEATABLE READ READ ONLY"_zv}; +template<> +inline constexpr zview begin_cmd{ + "BEGIN ISOLATION LEVEL SERIALIZABLE"_zv}; +template<> +inline constexpr zview begin_cmd{ + "BEGIN ISOLATION LEVEL SERIALIZABLE READ ONLY"_zv}; +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/transaction_focus b/ext/libpqxx-7.7.3/include/pqxx/transaction_focus new file mode 100644 index 000000000..fe78a9bcc --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/transaction_focus @@ -0,0 +1,7 @@ +/** + * Transaction focus: types which monopolise a transaction's attention. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/types.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/transaction_focus.hxx b/ext/libpqxx-7.7.3/include/pqxx/transaction_focus.hxx new file mode 100644 index 000000000..0707e3cc4 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/transaction_focus.hxx @@ -0,0 +1,89 @@ +/** Transaction focus: types which monopolise a transaction's attention. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_TRANSACTION_FOCUS +#define PQXX_H_TRANSACTION_FOCUS + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/util.hxx" + +namespace pqxx +{ +/// Base class for things that monopolise a transaction's attention. +/** You probably won't need to use this class. But it can be useful to _know_ + * that a given libpqxx class is derived from it. + * + * Pipelines, SQL statements, and data streams are examples of classes derived + * from `transaction_focus`. For any given transaction, only one object of + * such a class can be active at any given time. 
+ */ +class PQXX_LIBEXPORT transaction_focus +{ +public: + transaction_focus( + transaction_base &t, std::string_view cname, std::string_view oname) : + m_trans{t}, m_classname{cname}, m_name{oname} + {} + + transaction_focus( + transaction_base &t, std::string_view cname, std::string &&oname) : + m_trans{t}, m_classname{cname}, m_name{std::move(oname)} + {} + + transaction_focus(transaction_base &t, std::string_view cname) : + m_trans{t}, m_classname{cname} + {} + + transaction_focus() = delete; + transaction_focus(transaction_focus const &) = delete; + transaction_focus &operator=(transaction_focus const &) = delete; + + /// Class name, for human consumption. + [[nodiscard]] constexpr std::string_view classname() const noexcept + { + return m_classname; + } + + /// Name for this object, if the caller passed one; empty string otherwise. + [[nodiscard]] std::string_view name() const &noexcept { return m_name; } + + [[nodiscard]] std::string description() const + { + return pqxx::internal::describe_object(m_classname, m_name); + } + + /// Can't move a transaction_focus. + /** Moving the transaction_focus would break the transaction's reference back + * to the object. + */ + transaction_focus(transaction_focus &&) = delete; + + /// Can't move a transaction_focus. + /** Moving the transaction_focus would break the transaction's reference back + * to the object. + */ + transaction_focus &operator=(transaction_focus &&) = delete; + +protected: + void register_me(); + void unregister_me() noexcept; + void reg_pending_error(std::string const &) noexcept; + bool registered() const noexcept { return m_registered; } + + transaction_base &m_trans; + +private: + bool m_registered = false; + std::string_view m_classname; + std::string m_name; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/transactor b/ext/libpqxx-7.7.3/include/pqxx/transactor new file mode 100644 index 000000000..29d1b9640 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/transactor @@ -0,0 +1,8 @@ +/** pqxx::transactor class. + * + * pqxx::transactor is a framework-style wrapper for safe transactions. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/transactor.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/transactor.hxx b/ext/libpqxx-7.7.3/include/pqxx/transactor.hxx new file mode 100644 index 000000000..eefd04ba1 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/transactor.hxx @@ -0,0 +1,147 @@ +/* Transactor framework, a wrapper for safely retryable transactions. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/transactor instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_TRANSACTOR +#define PQXX_H_TRANSACTOR + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include + +#include "pqxx/connection.hxx" +#include "pqxx/transaction.hxx" + +namespace pqxx +{ +/** + * @defgroup transactor Transactor framework + * + * Sometimes a transaction can fail for completely transient reasons, such as a + * conflict with another transaction in SERIALIZABLE isolation. The right way + * to handle those failures is often just to re-run the transaction from + * scratch. 
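+ *
+ * In code, @ref pqxx::perform wraps that retry loop for you.  A minimal
+ * sketch (the table name and variable names are illustrative assumptions):
+ *
+ * ```cxx
+ * pqxx::connection cx;
+ * int const events{pqxx::perform([&cx]
+ *   {
+ *     pqxx::work tx{cx};
+ *     int const n{tx.query_value<int>("SELECT count(*) FROM event")};
+ *     tx.commit();
+ *     return n;
+ *   })};
+ * ```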
+ * + * For example, your REST API might be handling each HTTP request in its own + * database transaction, and if this kind of transient failure happens, you + * simply want to "replay" the whole request, in a fresh transaction. + * + * You won't necessarily want to execute the exact same SQL commands with the + * exact same data. Some of your SQL statements may depend on state that can + * vary between retries. Data in the database may already have changed, for + * instance. So instead of dumbly replaying the SQL, you re-run the same + * application code that produced those SQL commands, from the start. + * + * The transactor framework makes it a little easier for you to do this safely, + * and avoid typical pitfalls. You encapsulate the work that you want to do + * into a callable that you pass to the @ref perform function. + * + * Here's how it works. You write your transaction code as a lambda or + * function, which creates its own transaction object, does its work, and + * commits at the end. You pass that callback to @ref pqxx::perform, which + * runs it for you. + * + * If there's a failure inside your callback, there will be an exception. Your + * transaction object goes out of scope and gets destroyed, so that it aborts + * implicitly. Seeing this, @ref perform tries running your callback again. It + * stops doing that when the callback succeeds, or when it has failed too many + * times, or when there's an error that leaves the database in an unknown + * state, such as a lost connection just while we're waiting for the database + * to confirm a commit. It all depends on the type of exception. + * + * The callback takes no arguments. If you're using lambdas, the easy way to + * pass arguments is for the lambda to "capture" them from your variables. Or, + * if you're using functions, you may want to use `std::bind`. + * + * Once your callback succeeds, it can return a result, and @ref perform will + * return that result back to you. + */ +//@{ + +/// Simple way to execute a transaction with automatic retry. +/** + * Executes your transaction code as a callback. Repeats it until it completes + * normally, or it throws an error other than the few libpqxx-generated + * exceptions that the framework understands, or after a given number of failed + * attempts, or if the transaction ends in an "in-doubt" state. + * + * (An in-doubt state is one where libpqxx cannot determine whether the server + * finally committed a transaction or not. This can happen if the network + * connection to the server is lost just while we're waiting for its reply to + * a "commit" statement. The server may have completed the commit, or not, but + * it can't tell you because there's no longer a connection. + * + * Using this still takes a bit of care. If your callback makes use of data + * from the database, you'll probably have to query that data within your + * callback. If the attempt to perform your callback fails, and the framework + * tries again, you'll be in a new transaction and the data in the database may + * have changed under your feet. + * + * Also be careful about changing variables or data structures from within + * your callback. The run may still fail, and perhaps get run again. The + * ideal way to do it (in most cases) is to return your result from your + * callback, and change your program's data state only after @ref perform + * completes successfully. + * + * @param callback Transaction code that can be called with no arguments. 
+ * @param attempts Maximum number of times to attempt performing callback. + * Must be greater than zero. + * @return Whatever your callback returns. + */ +template +inline auto perform(TRANSACTION_CALLBACK &&callback, int attempts = 3) + -> std::invoke_result_t +{ + if (attempts <= 0) + throw std::invalid_argument{ + "Zero or negative number of attempts passed to pqxx::perform()."}; + + for (; attempts > 0; --attempts) + { + try + { + return std::invoke(callback); + } + catch (in_doubt_error const &) + { + // Not sure whether transaction went through or not. The last thing in + // the world that we should do now is try again! + throw; + } + catch (statement_completion_unknown const &) + { + // Not sure whether our last statement succeeded. Don't risk running it + // again. + throw; + } + catch (broken_connection const &) + { + // Connection failed. May be worth retrying, if the transactor opens its + // own connection. + if (attempts <= 1) + throw; + continue; + } + catch (transaction_rollback const &) + { + // Some error that may well be transient, such as serialization failure + // or deadlock. Worth retrying. + if (attempts <= 1) + throw; + continue; + } + } + throw pqxx::internal_error{"No outcome reached on perform()."}; +} +} // namespace pqxx +//@} +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/types b/ext/libpqxx-7.7.3/include/pqxx/types new file mode 100644 index 000000000..23a5caae1 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/types @@ -0,0 +1,7 @@ +/** + * Basic typedefs and forward declarations. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/types.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/types.hxx b/ext/libpqxx-7.7.3/include/pqxx/types.hxx new file mode 100644 index 000000000..f95b598f8 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/types.hxx @@ -0,0 +1,173 @@ +/* Basic type aliases and forward declarations. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_TYPES +#define PQXX_H_TYPES + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include + +#if defined(PQXX_HAVE_CONCEPTS) && __has_include() +# include +#endif + + +namespace pqxx +{ +/// Number of rows in a result set. +using result_size_type = int; + +/// Difference between result sizes. +using result_difference_type = int; + +/// Number of fields in a row of database data. +using row_size_type = int; + +/// Difference between row sizes. +using row_difference_type = int; + +/// Number of bytes in a field of database data. +using field_size_type = std::size_t; + +/// Number of bytes in a large object. +using large_object_size_type = int64_t; + + +// Forward declarations, to help break compilation dependencies. +// These won't necessarily include all classes in libpqxx. +class binarystring; +class connection; +class const_result_iterator; +class const_reverse_result_iterator; +class const_reverse_row_iterator; +class const_row_iterator; +class dbtransaction; +class field; +class largeobjectaccess; +class notification_receiver; +struct range_error; +class result; +class row; +class stream_from; +class transaction_base; + +/// Marker for @ref stream_from constructors: "stream from table." 
+/** @deprecated Use @ref stream_from::table() instead. + */ +struct from_table_t +{}; + +/// Marker for @ref stream_from constructors: "stream from query." +/** @deprecated Use @ref stream_from::query() instead. + */ +struct from_query_t +{}; + + +/// Format code: is data text or binary? +/** Binary-compatible with libpq's format codes. + */ +enum class format : int +{ + text = 0, + binary = 1, +}; + + +/// Remove any constness, volatile, and reference-ness from a type. +/** @deprecated In C++20 we'll replace this with std::remove_cvref. + */ +template +using strip_t = std::remove_cv_t>; + + +#if defined(PQXX_HAVE_CONCEPTS) +/// The type of a container's elements. +/** At the time of writing there's a similar thing in `std::experimental`, + * which we may or may not end up using for this. + */ +template +using value_type = strip_t()))>; +#else // PQXX_HAVE_CONCEPTS +/// The type of a container's elements. +/** At the time of writing there's a similar thing in `std::experimental`, + * which we may or may not end up using for this. + */ +template +using value_type = strip_t()))>; +#endif // PQXX_HAVE_CONCEPTS + + +#if defined(PQXX_HAVE_CONCEPTS) +/// Concept: Any type that we can read as a string of `char`. +template +concept char_string = std::ranges::contiguous_range and + std::same_as < strip_t>, +char > ; + +/// Concept: Anything we can iterate to get things we can read as strings. +template +concept char_strings = + std::ranges::range and char_string>>; + +/// Concept: Anything we might want to treat as binary data. +template +concept potential_binary = std::ranges::contiguous_range and + (sizeof(value_type) == 1); +#endif // PQXX_HAVE_CONCEPTS + + +// C++20: Retire these compatibility definitions. +#if defined(PQXX_HAVE_CONCEPTS) + +/// Template argument type for a range. +/** This is a concept, so only available in C++20 or better. In pre-C++20 + * environments it's just an alias for @ref typename. + */ +# define PQXX_RANGE_ARG std::ranges::range + +/// Template argument type for @ref char_string. +/** This is a concept, so only available in C++20 or better. In pre-C++20 + * environments it's just an alias for @ref typename. + */ +# define PQXX_CHAR_STRING_ARG pqxx::char_string + +/// Template argument type for @ref char_strings +/** This is a concept, so only available in C++20 or better. In pre-C++20 + * environments it's just an alias for @ref typename. + */ +# define PQXX_CHAR_STRINGS_ARG pqxx::char_strings + +#else // PQXX_HAVE_CONCEPTS + +/// Template argument type for a range. +/** This is a concept, so only available in C++20 or better. In pre-C++20 + * environments it's just an alias for @ref typename. + */ +# define PQXX_RANGE_ARG typename + +/// Template argument type for @ref char_string. +/** This is a concept, so only available in C++20 or better. In pre-C++20 + * environments it's just an alias for @ref typename. + */ +# define PQXX_CHAR_STRING_ARG typename + +/// Template argument type for @ref char_strings +/** This is a concept, so only available in C++20 or better. In pre-C++20 + * environments it's just an alias for @ref typename. + */ +# define PQXX_CHAR_STRINGS_ARG typename + +#endif // PQXX_HAVE_CONCEPTS +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/util b/ext/libpqxx-7.7.3/include/pqxx/util new file mode 100644 index 000000000..6d85ab611 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/util @@ -0,0 +1,6 @@ +/** Various utility definitions for libpqxx. 
+ */ +// Actual definitions in .hxx file so editors and such recognize file type +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/util.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/util.hxx b/ext/libpqxx-7.7.3/include/pqxx/util.hxx new file mode 100644 index 000000000..4aa5ecf57 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/util.hxx @@ -0,0 +1,521 @@ +/* Various utility definitions for libpqxx. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/util instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_UTIL +#define PQXX_H_UTIL + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if __has_include() +# include +#endif + +#include "pqxx/except.hxx" +#include "pqxx/internal/encodings.hxx" +#include "pqxx/types.hxx" +#include "pqxx/version.hxx" + + +/// The home of all libpqxx classes, functions, templates, etc. +namespace pqxx +{} + +#include + + +/// Internal items for libpqxx' own use. Do not use these yourself. +namespace pqxx::internal +{ + +// C++20: Retire wrapper. +/// Same as `std::cmp_less`, or a workaround where that's not available. +template +inline constexpr bool cmp_less(LEFT lhs, RIGHT rhs) noexcept +{ +#if defined(PQXX_HAVE_CMP) + return std::cmp_less(lhs, rhs); +#else + // We need a variable just because lgtm.com gives off a false positive + // warning when we compare the values directly. It considers that a + // "self-comparison." + constexpr bool left_signed{std::is_signed_v}; + if constexpr (left_signed == std::is_signed_v) + return lhs < rhs; + else if constexpr (std::is_signed_v) + return (lhs <= 0) ? true : (std::make_unsigned_t(lhs) < rhs); + else + return (rhs <= 0) ? false : (lhs < std::make_unsigned_t(rhs)); +#endif +} + + +// C++20: Retire wrapper. +/// C++20 std::cmp_greater, or workaround if not available. +template +inline constexpr bool cmp_greater(LEFT lhs, RIGHT rhs) noexcept +{ +#if defined(PQXX_HAVE_CMP) + return std::cmp_greater(lhs, rhs); +#else + return cmp_less(rhs, lhs); +#endif +} + + +// C++20: Retire wrapper. +/// C++20 std::cmp_less_equal, or workaround if not available. +template +inline constexpr bool cmp_less_equal(LEFT lhs, RIGHT rhs) noexcept +{ +#if defined(PQXX_HAVE_CMP) + return std::cmp_less_equal(lhs, rhs); +#else + return not cmp_less(rhs, lhs); +#endif +} + + +// C++20: Retire wrapper. +/// C++20 std::cmp_greater_equal, or workaround if not available. +template +inline constexpr bool cmp_greater_equal(LEFT lhs, RIGHT rhs) noexcept +{ +#if defined(PQXX_HAVE_CMP) + return std::cmp_greater_equal(lhs, rhs); +#else + return not cmp_less(lhs, rhs); +#endif +} + + +/// Efficiently concatenate two strings. +/** This is a special case of concatenate(), needed because dependency + * management does not let us use that function here. 
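+ *
+ * For example, `cat2("foo"sv, "bar"sv)` yields the single string `"foobar"`,
+ * sized once up front and filled with two copies rather than repeated appends.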
+ */ +[[nodiscard]] inline std::string cat2(std::string_view x, std::string_view y) +{ + std::string buf; + auto const xs{std::size(x)}, ys{std::size(y)}; + buf.resize(xs + ys); + x.copy(std::data(buf), xs); + y.copy(std::data(buf) + xs, ys); + return buf; +} +} // namespace pqxx::internal + + +namespace pqxx +{ +using namespace std::literals; + +/// Suppress compiler warning about an unused item. +template inline constexpr void ignore_unused(T &&...) noexcept +{} + + +/// Cast a numeric value to another type, or throw if it underflows/overflows. +/** Both types must be arithmetic types, and they must either be both integral + * or both floating-point types. + */ +template +inline TO check_cast(FROM value, std::string_view description) +{ + static_assert(std::is_arithmetic_v); + static_assert(std::is_arithmetic_v); + static_assert(std::is_integral_v == std::is_integral_v); + + // The rest of this code won't quite work for bool, but bool is trivially + // convertible to other arithmetic types as far as I can see. + if constexpr (std::is_same_v) + return static_cast(value); + + // Depending on our "if constexpr" conditions, this parameter may not be + // needed. Some compilers will warn. + ignore_unused(description); + + using from_limits = std::numeric_limits; + using to_limits = std::numeric_limits; + if constexpr (std::is_signed_v) + { + if constexpr (std::is_signed_v) + { + if (value < to_limits::lowest()) + throw range_error{internal::cat2("Cast underflow: "sv, description)}; + } + else + { + // FROM is signed, but TO is not. Treat this as a special case, because + // there may not be a good broader type in which the compiler can even + // perform our check. + if (value < 0) + throw range_error{internal::cat2( + "Casting negative value to unsigned type: "sv, description)}; + } + } + else + { + // No need to check: the value is unsigned so can't fall below the range + // of the TO type. + } + + if constexpr (std::is_integral_v) + { + using unsigned_from = std::make_unsigned_t; + using unsigned_to = std::make_unsigned_t; + constexpr auto from_max{static_cast((from_limits::max)())}; + constexpr auto to_max{static_cast((to_limits::max)())}; + if constexpr (from_max > to_max) + { + if (internal::cmp_greater(value, to_max)) + throw range_error{internal::cat2("Cast overflow: "sv, description)}; + } + } + else if constexpr ((from_limits::max)() > (to_limits::max)()) + { + if (value > (to_limits::max)()) + throw range_error{internal::cat2("Cast overflow: ", description)}; + } + + return static_cast(value); +} + + +/** Check library version at link time. + * + * Ensures a failure when linking an application against a radically + * different libpqxx version than the one against which it was compiled. + * + * Sometimes application builds fail in unclear ways because they compile + * using headers from libpqxx version X, but then link against libpqxx + * binary version Y. A typical scenario would be one where you're building + * against a libpqxx which you have built yourself, but a different version + * is installed on the system. + * + * The check_library_version template is declared for any library version, + * but only actually defined for the version of the libpqxx binary against + * which the code is linked. + * + * If the library binary is a different version than the one declared in + * these headers, then this call will fail to link: there will be no + * definition for the function with these exact template parameter values. 
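+ *
+ * As an aside, @ref check_cast above behaves like this (the values and
+ * descriptions are illustrative only):
+ *
+ * ```cxx
+ * auto ok{pqxx::check_cast<int>(std::size_t{42}, "row count"sv)};  // 42.
+ * auto bad{pqxx::check_cast<signed char>(300, "example"sv)};
+ *   // Throws pqxx::range_error: 300 does not fit in a signed char.
+ * ```
+ *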
+ * There will be a definition, but the version in the parameter values will + * be different. + */ +inline PQXX_PRIVATE void check_version() noexcept +{ + // There is no particular reason to do this here in @ref connection, except + // to ensure that every meaningful libpqxx client will execute it. The call + // must be in the execution path somewhere or the compiler won't try to link + // it. We can't use it to initialise a global or class-static variable, + // because a smart compiler might resolve it at compile time. + // + // On the other hand, we don't want to make a useless function call too + // often for performance reasons. A local static variable is initialised + // only on the definition's first execution. Compilers will be well + // optimised for this behaviour, so there's a minimal one-time cost. + static auto const version_ok{internal::PQXX_VERSION_CHECK()}; + ignore_unused(version_ok); +} + + +/// Descriptor of library's thread-safety model. +/** This describes what the library knows about various risks to thread-safety. + */ +struct PQXX_LIBEXPORT thread_safety_model +{ + /// Is the underlying libpq build thread-safe? + bool safe_libpq = false; + + /// Is Kerberos thread-safe? + /** @warning Is currently always `false`. + * + * If your application uses Kerberos, all accesses to libpqxx or Kerberos + * must be serialized. Confine their use to a single thread, or protect it + * with a global lock. + */ + bool safe_kerberos = false; + + /// A human-readable description of any thread-safety issues. + std::string description; +}; + + +/// Describe thread safety available in this build. +[[nodiscard]] PQXX_LIBEXPORT thread_safety_model describe_thread_safety(); + + +#if defined(PQXX_HAVE_CONCEPTS) +# define PQXX_POTENTIAL_BINARY_ARG pqxx::potential_binary +#else +# define PQXX_POTENTIAL_BINARY_ARG typename +#endif + + +/// Cast binary data to a type that libpqxx will recognise as binary. +/** There are many different formats for storing binary data in memory. You + * may have yours as a `std::string`, or a `std::vector`, or one of + * many other types. + * + * But for libpqxx to recognise your data as binary, it needs to be a + * `std::basic_string`, or a `std::basic_string_view`; + * or in C++20 or better, any contiguous block of `std::byte`. + * + * Use `binary_cast` as a convenience helper to cast your data as a + * `std::basic_string_view`. + * + * @warning There are two things you should be aware of! First, the data must + * be contiguous in memory. In C++20 the compiler will enforce this, but in + * C++17 it's your own problem. Second, you must keep the object where you + * store the actual data alive for as long as you might use this function's + * return value. + */ +template +std::basic_string_view binary_cast(TYPE const &data) +{ + static_assert(sizeof(value_type) == 1); + return { + reinterpret_cast( + const_cast const *>( + std::data(data))), + std::size(data)}; +} + + +#if defined(PQXX_HAVE_CONCEPTS) +template +concept char_sized = (sizeof(CHAR) == 1); +# define PQXX_CHAR_SIZED_ARG char_sized +#else +# define PQXX_CHAR_SIZED_ARG typename +#endif + +/// Construct a type that libpqxx will recognise as binary. +/** Takes a data pointer and a size, without being too strict about their + * types, and constructs a `std::basic_string_view` pointing to + * the same data. + * + * This makes it a little easier to turn binary data, in whatever form you + * happen to have it, into binary data as libpqxx understands it. 
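+ *
+ * For example (the buffer here is an illustrative assumption):
+ *
+ * ```cxx
+ * unsigned char raw[]{0xde, 0xad, 0xbe, 0xef};
+ * auto bytes{pqxx::binary_cast(raw, std::size(raw))};
+ * // bytes is a std::basic_string_view<std::byte> of length 4.
+ * ```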
+ */ +template +std::basic_string_view binary_cast(CHAR const *data, SIZE size) +{ + static_assert(sizeof(CHAR) == 1); + return { + reinterpret_cast(data), + check_cast(size, "binary data size")}; +} + + +/// The "null" oid. +constexpr oid oid_none{0}; +} // namespace pqxx + + +/// Private namespace for libpqxx's internal use; do not access. +/** This namespace hides definitions internal to libpqxx. These are not + * supposed to be used by client programs, and they may change at any time + * without notice. + * + * Conversely, if you find something in this namespace tremendously useful, by + * all means do lodge a request for its publication. + * + * @warning Here be dragons! + */ +namespace pqxx::internal +{ +using namespace std::literals; + + +/// A safer and more generic replacement for `std::isdigit`. +/** Turns out `std::isdigit` isn't as easy to use as it sounds. It takes an + * `int`, but requires it to be nonnegative. Which means it's an outright + * liability on systems where `char` is signed. + */ +template inline constexpr bool is_digit(CHAR c) noexcept +{ + return (c >= '0') and (c <= '9'); +} + + +/// Describe an object for humans, based on class name and optional name. +/** Interprets an empty name as "no name given." + */ +[[nodiscard]] std::string +describe_object(std::string_view class_name, std::string_view name); + + +/// Check validity of registering a new "guest" in a "host." +/** The host might be e.g. a connection, and the guest a transaction. The + * host can only have one guest at a time, so it is an error to register a new + * guest while the host already has a guest. + * + * If the new registration is an error, this function throws a descriptive + * exception. + * + * Pass the old guest (if any) and the new guest (if any), for both, a type + * name (at least if the guest is not null), and optionally an object name + * (but which may be omitted if the caller did not assign one). + */ +void check_unique_register( + void const *old_guest, std::string_view old_class, std::string_view old_name, + void const *new_guest, std::string_view new_class, + std::string_view new_name); + + +/// Like @ref check_unique_register, but for un-registering a guest. +/** Pass the guest which was registered, as well as the guest which is being + * unregistered, so that the function can check that they are the same one. + */ +void check_unique_unregister( + void const *old_guest, std::string_view old_class, std::string_view old_name, + void const *new_guest, std::string_view new_class, + std::string_view new_name); + + +/// Compute buffer size needed to escape binary data for use as a BYTEA. +/** This uses the hex-escaping format. The return value includes room for the + * "\x" prefix. + */ +inline constexpr std::size_t size_esc_bin(std::size_t binary_bytes) noexcept +{ + return 2 + (2 * binary_bytes) + 1; +} + + +/// Compute binary size from the size of its escaped version. +/** Do not include a terminating zero in `escaped_bytes`. + */ +inline constexpr std::size_t size_unesc_bin(std::size_t escaped_bytes) noexcept +{ + return (escaped_bytes - 2) / 2; +} + + +// TODO: Use actual binary type for "data". +/// Hex-escape binary data into a buffer. +/** The buffer must be able to accommodate + * `size_esc_bin(std::size(binary_data))` bytes, and the function will write + * exactly that number of bytes into the buffer. This includes a trailing + * zero. 
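+ *
+ * For example, escaping 3 bytes of binary data requires a buffer of
+ * `size_esc_bin(3) == 2 + 2 * 3 + 1 == 9` bytes: the `\x` prefix, two hex
+ * digits per byte, and the trailing zero.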
+ */ +void PQXX_LIBEXPORT +esc_bin(std::basic_string_view binary_data, char buffer[]) noexcept; + + +/// Hex-escape binary data into a std::string. +std::string PQXX_LIBEXPORT +esc_bin(std::basic_string_view binary_data); + + +/// Reconstitute binary data from its escaped version. +void PQXX_LIBEXPORT +unesc_bin(std::string_view escaped_data, std::byte buffer[]); + + +/// Reconstitute binary data from its escaped version. +std::basic_string + PQXX_LIBEXPORT unesc_bin(std::string_view escaped_data); + + +/// Transitional: std::ssize(), or custom implementation if not available. +template auto ssize(T const &c) +{ +#if defined(__cpp_lib_ssize) && __cplusplus >= __cpp_lib_ssize + return std::ssize(c); +#else + using signed_t = std::make_signed_t; + return static_cast(std::size(c)); +#endif // __cpp_lib_ssize +} + + +/// Helper for determining a function's parameter types. +/** This function has no definition. It's not meant to be actually called. + * It's just there for pattern-matching in the compiler, so we can use its + * hypothetical return value. + */ +template +std::tuple args_f(RETURN (&func)(ARGS...)); + + +/// Helper for determining a `std::function`'s parameter types. +/** This function has no definition. It's not meant to be actually called. + * It's just there for pattern-matching in the compiler, so we can use its + * hypothetical return value. + */ +template +std::tuple args_f(std::function const &); + + +/// Helper for determining a member function's parameter types. +/** This function has no definition. It's not meant to be actually called. + * It's just there for pattern-matching in the compiler, so we can use its + * hypothetical return value. + */ +template +std::tuple member_args_f(RETURN (CLASS::*)(ARGS...)); + + +/// Helper for determining a const member function's parameter types. +/** This function has no definition. It's not meant to be actually called. + * It's just there for pattern-matching in the compiler, so we can use its + * hypothetical return value. + */ +template +std::tuple member_args_f(RETURN (CLASS::*)(ARGS...) const); + + +/// Helper for determining a callable type's parameter types. +/** This specialisation should work for lambdas. + * + * This function has no definition. It's not meant to be actually called. + * It's just there for pattern-matching in the compiler, so we can use its + * hypothetical return value. + */ +template +auto args_f(CALLABLE const &f) + -> decltype(member_args_f(&CALLABLE::operator())); + + +/// A callable's parameter types, as a tuple. +template +using args_t = decltype(args_f(std::declval())); + + +/// Helper: Apply `strip_t` to each of a tuple type's component types. +/** This function has no definition. It is not meant to be called, only to be + * used to deduce the right types. + */ +template +std::tuple...> strip_types(std::tuple const &); + + +/// Take a tuple type and apply @ref strip_t to its component types. +template +using strip_types_t = decltype(strip_types(std::declval())); +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/version b/ext/libpqxx-7.7.3/include/pqxx/version new file mode 100644 index 000000000..8dd5e48d4 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/version @@ -0,0 +1,7 @@ +/** libpqxx version info. + */ +// Actual definitions in .hxx file so editors and such recognize file type. 
+#include "pqxx/internal/header-pre.hxx" +#include "pqxx/version.hxx" +#include "pqxx/internal/header-post.hxx" + diff --git a/ext/libpqxx-7.7.3/include/pqxx/version.hxx b/ext/libpqxx-7.7.3/include/pqxx/version.hxx new file mode 100644 index 000000000..a159f1bed --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/version.hxx @@ -0,0 +1,55 @@ +/* Version info for libpqxx. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/version instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_VERSION + +# if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +# endif + +/// Full libpqxx version string. +# define PQXX_VERSION "7.7.3" +/// Library ABI version. +# define PQXX_ABI "7.7" + +/// Major version number. +# define PQXX_VERSION_MAJOR 7 +/// Minor version number. +# define PQXX_VERSION_MINOR 7 + +# define PQXX_VERSION_CHECK check_pqxx_version_7_7 + +namespace pqxx::internal +{ +/// Library version check stub. +/** Helps detect version mismatches between libpqxx headers and the libpqxx + * library binary. + * + * Sometimes users run into trouble linking their code against libpqxx because + * they build their own libpqxx, but the system also has a different version + * installed. The declarations in the headers against which they compile their + * code will differ from the ones used to build the libpqxx version they're + * using, leading to confusing link errors. The solution is to generate a link + * error when the libpqxx binary is not the same version as the libpqxx headers + * used to compile the code. + * + * This function's definition is in the libpqxx binary, so it's based on the + * version as found in the binary. The headers contain a call to the function, + * whose name contains the libpqxx version as found in the headers. (The + * library build process will use its own local headers even if another version + * of the headers is installed on the system.) + * + * If the libpqxx binary was compiled for a different version than the user's + * code, linking will fail with an error: `check_pqxx_version_*_*` will not + * exist for the given version number. + */ +PQXX_LIBEXPORT int PQXX_VERSION_CHECK() noexcept; +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/version.hxx.template b/ext/libpqxx-7.7.3/include/pqxx/version.hxx.template new file mode 100644 index 000000000..40837f16a --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/version.hxx.template @@ -0,0 +1,55 @@ +/* Version info for libpqxx. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/version instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_VERSION + +#if !defined(PQXX_HEADER_PRE) +#error "Include libpqxx headers as , not ." +#endif + +/// Full libpqxx version string. +# define PQXX_VERSION "@PQXXVERSION@" +/// Library ABI version. +# define PQXX_ABI "@PQXX_ABI@" + +/// Major version number. +# define PQXX_VERSION_MAJOR @PQXX_MAJOR@ +/// Minor version number. 
+# define PQXX_VERSION_MINOR @PQXX_MINOR@ + +# define PQXX_VERSION_CHECK check_pqxx_version_@PQXX_MAJOR@_@PQXX_MINOR@ + +namespace pqxx::internal +{ +/// Library version check stub. +/** Helps detect version mismatches between libpqxx headers and the libpqxx + * library binary. + * + * Sometimes users run into trouble linking their code against libpqxx because + * they build their own libpqxx, but the system also has a different version + * installed. The declarations in the headers against which they compile their + * code will differ from the ones used to build the libpqxx version they're + * using, leading to confusing link errors. The solution is to generate a link + * error when the libpqxx binary is not the same version as the libpqxx headers + * used to compile the code. + * + * This function's definition is in the libpqxx binary, so it's based on the + * version as found in the binary. The headers contain a call to the function, + * whose name contains the libpqxx version as found in the headers. (The + * library build process will use its own local headers even if another version + * of the headers is installed on the system.) + * + * If the libpqxx binary was compiled for a different version than the user's + * code, linking will fail with an error: `check_pqxx_version_*_*` will not + * exist for the given version number. + */ +PQXX_LIBEXPORT int PQXX_VERSION_CHECK() noexcept; +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/include/pqxx/zview b/ext/libpqxx-7.7.3/include/pqxx/zview new file mode 100644 index 000000000..66ea2a625 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/zview @@ -0,0 +1,6 @@ +/** Zero-terminated string view class. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/zview.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/include/pqxx/zview.hxx b/ext/libpqxx-7.7.3/include/pqxx/zview.hxx new file mode 100644 index 000000000..36a779f51 --- /dev/null +++ b/ext/libpqxx-7.7.3/include/pqxx/zview.hxx @@ -0,0 +1,163 @@ +/* Zero-terminated string view. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/zview instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ZVIEW +#define PQXX_H_ZVIEW + +#include +#include +#include + +#include "pqxx/types.hxx" + + +namespace pqxx +{ +/// Marker-type wrapper: zero-terminated `std::string_view`. +/** @warning Use this only if the underlying string is zero-terminated. + * + * When you construct a zview, you are promising that if the data pointer is + * non-null, the underlying string is zero-terminated. It otherwise behaves + * exactly like a std::string_view. + * + * The terminating zero is not "in" the string, so it does not count as part of + * the view's length. + * + * The added guarantee lets the view be used as a C-style string, which often + * matters since libpqxx builds on top of a C library. For this reason, zview + * also adds a @ref c_str method. + */ +class zview : public std::string_view +{ +public: + constexpr zview() noexcept = default; + + /// Convenience overload: construct using pointer and signed length. 
+ constexpr zview(char const text[], std::ptrdiff_t len) : + std::string_view{text, static_cast(len)} + {} + + /// Convenience overload: construct using pointer and signed length. + constexpr zview(char text[], std::ptrdiff_t len) : + std::string_view{text, static_cast(len)} + {} + + /// Explicitly promote a `string_view` to a `zview`. + explicit constexpr zview(std::string_view other) noexcept : + std::string_view{other} + {} + + /// Construct from any initialiser you might use for `std::string_view`. + /** @warning Only do this if you are sure that the string is zero-terminated. + */ + template + explicit constexpr zview(Args &&...args) : + std::string_view(std::forward(args)...) + {} + + // C++20: constexpr. + /// @warning There's an implicit conversion from `std::string`. + zview(std::string const &str) noexcept : + std::string_view{str.c_str(), str.size()} + {} + + /// Construct a `zview` from a C-style string. + /** @warning This scans the string to discover its length. So if you need to + * do it many times, it's probably better to create the `zview` once and + * re-use it. + */ + constexpr zview(char const str[]) : std::string_view{str} {} + + /// Construct a `zview` from a string literal. + /** A C++ string literal ("foo") normally looks a lot like a pointer to + * char const, but that's not really true. It's actually an array of char, + * which _devolves_ to a pointer when you pass it. + * + * For the purpose of creating a `zview` there is one big difference: if we + * know the array's size, we don't need to scan through the string in order + * to find out its length. + */ + template + constexpr zview(char const (&literal)[size]) : zview(literal, size - 1) + {} + + /// Either a null pointer, or a zero-terminated text buffer. + [[nodiscard]] constexpr char const *c_str() const &noexcept + { + return data(); + } +}; + + +/// Support @ref zview literals. +/** You can "import" this selectively into your namespace, without pulling in + * all of the @ref pqxx namespace: + * + * ```cxx + * using pqxx::operator"" _zv; + * ``` + */ +constexpr zview operator"" _zv(char const str[], std::size_t len) noexcept +{ + return zview{str, len}; +} +} // namespace pqxx + + +#if defined(PQXX_HAVE_CONCEPTS) +/// A zview is a view. +template<> inline constexpr bool std::ranges::enable_view{true}; + + +/// A zview is a borrowed range. +template<> +inline constexpr bool std::ranges::enable_borrowed_range{true}; + +namespace pqxx::internal +{ +/// Concept: T is a known zero-terminated string type. +/** There's no unified API for these string types. It's just a check for some + * known types. Any code that makes use of the concept will still have to + * support each of these individually. + */ +template +concept ZString = std::is_convertible_v < strip_t, +char const * > or std::is_convertible_v, zview> or + std::is_convertible_v; +} // namespace pqxx::internal +#endif // PQXX_HAVE_CONCEPTS + + +namespace pqxx::internal +{ +/// Get a raw C string pointer. +inline constexpr char const *as_c_string(char const str[]) noexcept +{ + return str; +} +/// Get a raw C string pointer. +template +inline constexpr char const *as_c_string(char (&str)[N]) noexcept +{ + return str; +} +/// Get a raw C string pointer. +inline constexpr char const *as_c_string(pqxx::zview str) noexcept +{ + return str.c_str(); +} +// C++20: Make this constexpr. +/// Get a raw C string pointer. 
+inline char const *as_c_string(std::string const &str) noexcept +{ + return str.c_str(); +} +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/array b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/array new file mode 100644 index 000000000..689f5b27b --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/array @@ -0,0 +1,6 @@ +/** Handling of SQL arrays. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/array.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/array.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/array.hxx new file mode 100644 index 000000000..8440a244f --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/array.hxx @@ -0,0 +1,103 @@ +/* Handling of SQL arrays. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/field instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ARRAY +#define PQXX_H_ARRAY + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include + +#include "pqxx/internal/encoding_group.hxx" +#include "pqxx/internal/encodings.hxx" + + +namespace pqxx +{ +/// Low-level array parser. +/** Use this to read an array field retrieved from the database. + * + * This parser will only work reliably if your client encoding is UTF-8, ASCII, + * or a single-byte encoding which is a superset of ASCII (such as Latin-1). + * + * Also, the parser only supports array element types which use either a comma + * or a semicolon ("," or ";") as the separator between array elements. All + * built-in types use comma, except for one which uses semicolon, but some + * custom types may not work. + * + * The input is a C-style string containing the textual representation of an + * array, as returned by the database. The parser reads this representation + * on the fly. The string must remain in memory until parsing is done. + * + * Parse the array by making calls to @ref get_next until it returns a + * @ref juncture of "done". The @ref juncture tells you what the parser found + * in that step: did the array "nest" to a deeper level, or "un-nest" back up? + */ +class PQXX_LIBEXPORT array_parser +{ +public: + /// What's the latest thing found in the array? + enum class juncture + { + /// Starting a new row. + row_start, + /// Ending the current row. + row_end, + /// Found a NULL value. + null_value, + /// Found a string value. + string_value, + /// Parsing has completed. + done, + }; + + // TODO: constexpr noexcept. Breaks ABI. + /// Constructor. You don't need this; use @ref field::as_array instead. + /** The parser only remains valid while the data underlying the @ref result + * remains valid. Once all `result` objects referring to that data have been + * destroyed, the parser will no longer refer to valid memory. + */ + explicit array_parser( + std::string_view input, + internal::encoding_group = internal::encoding_group::MONOBYTE); + + /// Parse the next step in the array. + /** Returns what it found. If the juncture is @ref juncture::string_value, + * the string will contain the value. Otherwise, it will be empty. 
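+ *
+ * A typical loop looks roughly like this (`fld` stands for a pqxx::field
+ * holding an array value, and `handle()` for your own code):
+ *
+ * ```cxx
+ * auto parser{fld.as_array()};
+ * while (true)
+ * {
+ *   auto const [junc, value]{parser.get_next()};
+ *   if (junc == pqxx::array_parser::juncture::done) break;
+ *   if (junc == pqxx::array_parser::juncture::string_value) handle(value);
+ * }
+ * ```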
+ * + * Call this until the @ref array_parser::juncture it returns is + * @ref juncture::done. + */ + std::pair get_next(); + +private: + std::string_view m_input; + internal::glyph_scanner_func *const m_scan; + + /// Current parsing position in the input. + std::string::size_type m_pos = 0u; + + std::string::size_type scan_single_quoted_string() const; + std::string parse_single_quoted_string(std::string::size_type end) const; + std::string::size_type scan_double_quoted_string() const; + std::string parse_double_quoted_string(std::string::size_type end) const; + std::string::size_type scan_unquoted_string() const; + std::string parse_unquoted_string(std::string::size_type end) const; + + std::string::size_type scan_glyph(std::string::size_type pos) const; + std::string::size_type + scan_glyph(std::string::size_type pos, std::string::size_type end) const; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/binarystring b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/binarystring new file mode 100644 index 000000000..77551d9f7 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/binarystring @@ -0,0 +1,6 @@ +/** BYTEA (binary string) conversions. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/binarystring.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/binarystring.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/binarystring.hxx new file mode 100644 index 000000000..47c82a035 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/binarystring.hxx @@ -0,0 +1,236 @@ +/* Deprecated representation for raw, binary data. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/binarystring instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_BINARYSTRING +#define PQXX_H_BINARYSTRING + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include + +#include "pqxx/result.hxx" +#include "pqxx/strconv.hxx" + +namespace pqxx +{ +class binarystring; +template<> struct string_traits; + + +/// Binary data corresponding to PostgreSQL's "BYTEA" binary-string type. +/** @ingroup escaping-functions + * @deprecated Use @c std::basic_string and + * @c std::basic_string_view for binary data. In C++20 or better, + * any @c contiguous_range of @c std::byte will do. + * + * This class represents a binary string as stored in a field of type @c bytea. + * + * Internally a binarystring is zero-terminated, but it may also contain null + * bytes, they're just like any other byte value. So don't assume that it's + * safe to treat the contents as a C-style string. + * + * The binarystring retains its value even if the result it was obtained from + * is destroyed, but it cannot be copied or assigned. + * + * \relatesalso transaction_base::quote_raw + * + * To include a @c binarystring value in an SQL query, escape and quote it + * using the transaction's @c quote_raw function. + * + * @warning This class is implemented as a reference-counting smart pointer. 
+ * Copying, swapping, and destroying binarystring objects that refer to the + * same underlying data block is not thread-safe. If you wish to pass + * binarystrings around between threads, make sure that each of these + * operations is protected against concurrency with similar operations on the + * same object, or other objects pointing to the same data block. + */ +class PQXX_LIBEXPORT binarystring +{ +public: + using char_type = unsigned char; + using value_type = std::char_traits::char_type; + using size_type = std::size_t; + using difference_type = long; + using const_reference = value_type const &; + using const_pointer = value_type const *; + using const_iterator = const_pointer; + using const_reverse_iterator = std::reverse_iterator; + + [[deprecated("Use std::byte for binary data.")]] binarystring( + binarystring const &) = default; + + /// Read and unescape bytea field. + /** The field will be zero-terminated, even if the original bytea field + * isn't. + * @param F the field to read; must be a bytea field + */ + [[deprecated("Use std::byte for binary data.")]] explicit binarystring( + field const &); + + /// Copy binary data from std::string_view on binary data. + /** This is inefficient in that it copies the data to a buffer allocated on + * the heap. + */ + [[deprecated("Use std::byte for binary data.")]] explicit binarystring( + std::string_view); + + /// Copy binary data of given length straight out of memory. + [[deprecated("Use std::byte for binary data.")]] binarystring( + void const *, std::size_t); + + /// Efficiently wrap a buffer of binary data in a @c binarystring. + [[deprecated("Use std::byte for binary data.")]] binarystring( + std::shared_ptr ptr, size_type size) : + m_buf{std::move(ptr)}, m_size{size} + {} + + /// Size of converted string in bytes. + [[nodiscard]] size_type size() const noexcept { return m_size; } + /// Size of converted string in bytes. + [[nodiscard]] size_type length() const noexcept { return size(); } + [[nodiscard]] bool empty() const noexcept { return size() == 0; } + + [[nodiscard]] const_iterator begin() const noexcept { return data(); } + [[nodiscard]] const_iterator cbegin() const noexcept { return begin(); } + [[nodiscard]] const_iterator end() const noexcept { return data() + m_size; } + [[nodiscard]] const_iterator cend() const noexcept { return end(); } + + [[nodiscard]] const_reference front() const noexcept { return *begin(); } + [[nodiscard]] const_reference back() const noexcept + { + return *(data() + m_size - 1); + } + + [[nodiscard]] const_reverse_iterator rbegin() const + { + return const_reverse_iterator{end()}; + } + [[nodiscard]] const_reverse_iterator crbegin() const { return rbegin(); } + [[nodiscard]] const_reverse_iterator rend() const + { + return const_reverse_iterator{begin()}; + } + [[nodiscard]] const_reverse_iterator crend() const { return rend(); } + + /// Unescaped field contents. + [[nodiscard]] value_type const *data() const noexcept { return m_buf.get(); } + + [[nodiscard]] const_reference operator[](size_type i) const noexcept + { + return data()[i]; + } + + [[nodiscard]] PQXX_PURE bool operator==(binarystring const &) const noexcept; + [[nodiscard]] bool operator!=(binarystring const &rhs) const noexcept + { + return not operator==(rhs); + } + + binarystring &operator=(binarystring const &); + + /// Index contained string, checking for valid index. + const_reference at(size_type) const; + + /// Swap contents with other binarystring. 
+ void swap(binarystring &); + + /// Raw character buffer (no terminating zero is added). + /** @warning No terminating zero is added! If the binary data did not end in + * a null character, you will not find one here. + */ + [[nodiscard]] char const *get() const noexcept + { + return reinterpret_cast(m_buf.get()); + } + + /// Read contents as a std::string_view. + [[nodiscard]] std::string_view view() const noexcept + { + return std::string_view(get(), size()); + } + + /// Read as regular C++ string (may include null characters). + /** This creates and returns a new string object. Don't call this + * repeatedly; retrieve your string once and keep it in a local variable. + * Also, do not expect to be able to compare the string's address to that of + * an earlier invocation. + */ + [[nodiscard]] std::string str() const; + + /// Access data as a pointer to @c std::byte. + [[nodiscard]] std::byte const *bytes() const + { + return reinterpret_cast(get()); + } + + /// Read data as a @c std::basic_string_view. + [[nodiscard]] std::basic_string_view bytes_view() const + { + return std::basic_string_view{bytes(), size()}; + } + +private: + std::shared_ptr m_buf; + size_type m_size{0}; +}; + + +template<> struct nullness : no_null +{}; + + +/// String conversion traits for @c binarystring. +/** Defines the conversions between a @c binarystring and its PostgreSQL + * textual format, for communication with the database. + * + * These conversions rely on the "hex" format which was introduced in + * PostgreSQL 9.0. Both your libpq and the server must be recent enough to + * speak this format. + */ +template<> struct string_traits +{ + static std::size_t size_buffer(binarystring const &value) noexcept + { + return internal::size_esc_bin(std::size(value)); + } + + static zview to_buf(char *begin, char *end, binarystring const &value) + { + return generic_to_buf(begin, end, value); + } + + static char *into_buf(char *begin, char *end, binarystring const &value) + { + auto const budget{size_buffer(value)}; + if (internal::cmp_less(end - begin, budget)) + throw conversion_overrun{ + "Not enough buffer space to escape binary data."}; + std::string_view text{value.view()}; + internal::esc_bin(binary_cast(text), begin); + return begin + budget; + } + + static binarystring from_string(std::string_view text) + { + auto const size{pqxx::internal::size_unesc_bin(std::size(text))}; + std::shared_ptr buf{ + new unsigned char[size], [](unsigned char const *x) { delete[] x; }}; + pqxx::internal::unesc_bin(text, reinterpret_cast(buf.get())); +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return binarystring{std::move(buf), size}; +#include "pqxx/internal/ignore-deprecated-post.hxx" + } +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/blob b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/blob new file mode 100644 index 000000000..3fd0afac9 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/blob @@ -0,0 +1,6 @@ +/** Binary Large Objects interface. + */ +// Actual definitions in .hxx file so editors and such recognize file type. 
+#include "pqxx/internal/header-pre.hxx" +#include "pqxx/blob.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/blob.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/blob.hxx new file mode 100644 index 000000000..6d77be724 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/blob.hxx @@ -0,0 +1,351 @@ +/* Binary Large Objects interface. + * + * Read or write large objects, stored in their own storage on the server. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/largeobject instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_BLOB +#define PQXX_H_BLOB + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include + +#if defined(PQXX_HAVE_PATH) +# include +#endif + +#if defined(PQXX_HAVE_RANGES) && __has_include() +# include +#endif + +#if defined(PQXX_HAVE_SPAN) && __has_include() +# include +#endif + +#include "pqxx/dbtransaction.hxx" + + +namespace pqxx +{ +/** Binary large object. + * + * This is how you store data that may be too large for the `BYTEA` type. + * Access operations are similar to those for a file: you can read, write, + * query or set the current reading/writing position, and so on. + * + * These large objects live in their own storage on the server, indexed by an + * integer object identifier ("oid"). + * + * Two `blob` objects may refer to the same actual large object in the + * database at the same time. Each will have its own reading/writing position, + * but writes to the one will of course affect what the other sees. + */ +class PQXX_LIBEXPORT blob +{ +public: + /// Create a new, empty large object. + /** You may optionally specify an oid for the new blob. If you do, then + * the new object will have that oid -- or creation will fail if there + * already is an object with that oid. + */ + [[nodiscard]] static oid create(dbtransaction &, oid = 0); + + /// Delete a large object, or fail if it does not exist. + static void remove(dbtransaction &, oid); + + /// Open blob for reading. Any attempt to write to it will fail. + [[nodiscard]] static blob open_r(dbtransaction &, oid); + // Open blob for writing. Any attempt to read from it will fail. + [[nodiscard]] static blob open_w(dbtransaction &, oid); + // Open blob for reading and/or writing. + [[nodiscard]] static blob open_rw(dbtransaction &, oid); + + /// You can default-construct a blob, but it won't do anything useful. + /** Most operations on a default-constructed blob will throw @ref + * usage_error. + */ + blob() = default; + + /// You can move a blob, but not copy it. The original becomes unusable. + blob(blob &&); + /// You can move a blob, but not copy it. The original becomes unusable. + blob &operator=(blob &&); + + blob(blob const &) = delete; + blob &operator=(blob const &) = delete; + ~blob(); + + /// Maximum number of bytes that can be read or written at a time. + /** The underlying protocol only supports reads and writes up to 2 GB + * exclusive. + * + * If you need to read or write more data to or from a binary large object, + * you'll have to break it up into chunks. + */ + static constexpr std::size_t chunk_limit = 0x7fffffff; + + /// Read up to `size` bytes of the object into `buf`. 
+ /** Uses a buffer that you provide, resizing it as needed. If it suits you, + * this lets you allocate the buffer once and then re-use it multiple times. + * + * Resizes `buf` as needed. + * + * @warning The underlying protocol only supports reads up to 2GB at a time. + * If you need to read more, try making repeated calls to @ref append_to_buf. + */ + std::size_t read(std::basic_string &buf, std::size_t size); + +#if defined(PQXX_HAVE_SPAN) + /// Read up to `std::size(buf)` bytes from the object. + /** Retrieves bytes from the blob, at the current position, until `buf` is + * full or there are no more bytes to read, whichever comes first. + * + * Returns the filled portion of `buf`. This may be empty. + */ + template + std::span read(std::span buf) + { + return buf.subspan(0, raw_read(std::data(buf), std::size(buf))); + } +#endif // PQXX_HAVE_SPAN + +#if defined(PQXX_HAVE_CONCEPTS) && defined(PQXX_HAVE_SPAN) + /// Read up to `std::size(buf)` bytes from the object. + /** Retrieves bytes from the blob, at the current position, until `buf` is + * full or there are no more bytes to read, whichever comes first. + * + * Returns the filled portion of `buf`. This may be empty. + */ + template std::span read(DATA &buf) + { + return {std::data(buf), raw_read(std::data(buf), std::size(buf))}; + } +#else // PQXX_HAVE_CONCEPTS && PQXX_HAVE_SPAN + /// Read up to `std::size(buf)` bytes from the object. + /** @deprecated As libpqxx moves to C++20 as its baseline language version, + * this will take and return `std::span`. + * + * Retrieves bytes from the blob, at the current position, until `buf` is + * full (i.e. its current size is reached), or there are no more bytes to + * read, whichever comes first. + * + * This function will not change either the size or the capacity of `buf`, + * only its contents. + * + * Returns the filled portion of `buf`. This may be empty. + */ + template + std::basic_string_view read(std::vector &buf) + { + return {std::data(buf), raw_read(std::data(buf), std::size(buf))}; + } +#endif // PQXX_HAVE_CONCEPTS && PQXX_HAVE_SPAN + +#if defined(PQXX_HAVE_CONCEPTS) + /// Write `data` to large object, at the current position. + /** If the writing position is at the end of the object, this will append + * `data` to the object's contents and move the writing position so that + * it's still at the end. + * + * If the writing position was not at the end, writing will overwrite the + * prior data, but it will not remove data that follows the part where you + * wrote your new data. + * + * @warning This is a big difference from writing to a file. You can + * overwrite some data in a large object, but this does not truncate the + * data that was already there. For example, if the object contained binary + * data "abc", and you write "12" at the starting position, the object will + * contain "12c". + * + * @warning The underlying protocol only supports writes up to 2 GB at a + * time. If you need to write more, try making repeated calls to + * @ref append_from_buf. + */ + template void write(DATA const &data) + { + raw_write(std::data(data), std::size(data)); + } +#else + /// Write `data` large object, at the current position. + /** If the writing position is at the end of the object, this will append + * `data` to the object's contents and move the writing position so that + * it's still at the end. + * + * If the writing position was not at the end, writing will overwrite the + * prior data, but it will not remove data that follows the part where you + * wrote your new data. 
+ * + * @warning This is a big difference from writing to a file. You can + * overwrite some data in a large object, but this does not truncate the + * data that was already there. For example, if the object contained binary + * data "abc", and you write "12" at the starting position, the object will + * contain "12c". + * + * @warning The underlying protocol only supports writes up to 2 GB at a + * time. If you need to write more, try making repeated calls to + * @ref append_from_buf. + */ + template void write(DATA const &data) + { + raw_write(std::data(data), std::size(data)); + } +#endif + + /// Resize large object to `size` bytes. + /** If the blob is more than `size` bytes long, this removes the end so as + * to make the blob the desired length. + * + * If the blob is less than `size` bytes long, it adds enough zero bytes to + * make it the desired length. + */ + void resize(std::int64_t size); + + /// Return the current reading/writing position in the large object. + [[nodiscard]] std::int64_t tell() const; + + /// Set the current reading/writing position to an absolute offset. + /** Returns the new file offset. */ + std::int64_t seek_abs(std::int64_t offset = 0); + /// Move the current reading/writing position forwards by an offset. + /** To move backwards, pass a negative offset. + * + * Returns the new file offset. + */ + std::int64_t seek_rel(std::int64_t offset = 0); + /// Set the current position to an offset relative to the end of the blob. + /** You'll probably want an offset of zero or less. + * + * Returns the new file offset. + */ + std::int64_t seek_end(std::int64_t offset = 0); + + /// Create a binary large object containing given `data`. + /** You may optionally specify an oid for the new object. If you do, and an + * object with that oid already exists, creation will fail. + */ + static oid from_buf( + dbtransaction &tx, std::basic_string_view data, oid id = 0); + + /// Append `data` to binary large object. + /** The underlying protocol only supports appending blocks up to 2 GB. + */ + static void append_from_buf( + dbtransaction &tx, std::basic_string_view data, oid id); + + /// Read client-side file and store it server-side as a binary large object. + [[nodiscard]] static oid from_file(dbtransaction &, char const path[]); + +#if defined(PQXX_HAVE_PATH) && !defined(_WIN32) + /// Read client-side file and store it server-side as a binary large object. + /** This overload is not available on Windows, where `std::filesystem::path` + * converts to a `wchar_t` string rather than a `char` string. + */ + [[nodiscard]] static oid + from_file(dbtransaction &tx, std::filesystem::path const &path) + { + return from_file(tx, path.c_str()); + } +#endif + + /// Read client-side file and store it server-side as a binary large object. + /** In this version, you specify the binary large object's oid. If that oid + * is already in use, the operation will fail. + */ + static oid from_file(dbtransaction &, char const path[], oid); + +#if defined(PQXX_HAVE_PATH) && !defined(_WIN32) + /// Read client-side file and store it server-side as a binary large object. + /** In this version, you specify the binary large object's oid. If that oid + * is already in use, the operation will fail. + * + * This overload is not available on Windows, where `std::filesystem::path` + * converts to a `wchar_t` string rather than a `char` string. 
+ */ + static oid + from_file(dbtransaction &tx, std::filesystem::path const &path, oid id) + { + return from_file(tx, path.c_str(), id); + } +#endif + + /// Convenience function: Read up to `max_size` bytes from blob with `id`. + /** You could easily do this yourself using the @ref open_r and @ref read + * functions, but it can save you a bit of code to do it this way. + */ + static void to_buf( + dbtransaction &, oid, std::basic_string &, + std::size_t max_size); + + /// Read part of the binary large object with `id`, and append it to `buf`. + /** Use this to break up a large read from one binary large object into one + * massive buffer. Just keep calling this function until it returns zero. + * + * The `offset` is how far into the large object your desired chunk is, and + * `append_max` says how much to try and read in one go. + */ + static std::size_t append_to_buf( + dbtransaction &tx, oid id, std::int64_t offset, + std::basic_string &buf, std::size_t append_max); + + /// Write a binary large object's contents to a client-side file. + static void to_file(dbtransaction &, oid, char const path[]); + +#if defined(PQXX_HAVE_PATH) && !defined(_WIN32) + /// Write a binary large object's contents to a client-side file. + /** This overload is not available on Windows, where `std::filesystem::path` + * converts to a `wchar_t` string rather than a `char` string. + */ + static void + to_file(dbtransaction &tx, oid id, std::filesystem::path const &path) + { + to_file(tx, id, path.c_str()); + } +#endif + + /// Close this blob. + /** This does not delete the blob from the database; it only terminates your + * local object for accessing the blob. + * + * Resets the blob to a useless state similar to one that was + * default-constructed. + * + * The destructor will do this for you automatically. Still, there is a + * reason to `close()` objects explicitly where possible: if an error should + * occur while closing, `close()` can throw an exception. A destructor + * cannot. + */ + void close(); + +private: + PQXX_PRIVATE blob(connection &conn, int fd) noexcept : + m_conn{&conn}, m_fd{fd} + {} + static PQXX_PRIVATE blob open_internal(dbtransaction &, oid, int); + static PQXX_PRIVATE pqxx::internal::pq::PGconn * + raw_conn(pqxx::connection *) noexcept; + static PQXX_PRIVATE pqxx::internal::pq::PGconn * + raw_conn(pqxx::dbtransaction const &) noexcept; + static PQXX_PRIVATE std::string errmsg(connection const *); + static PQXX_PRIVATE std::string errmsg(dbtransaction const &tx) + { + return errmsg(&tx.conn()); + } + PQXX_PRIVATE std::string errmsg() const { return errmsg(m_conn); } + PQXX_PRIVATE std::int64_t seek(std::int64_t offset, int whence); + std::size_t raw_read(std::byte buf[], std::size_t size); + void raw_write(std::byte const buf[], std::size_t size); + + connection *m_conn = nullptr; + int m_fd = -1; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/composite b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/composite new file mode 100644 index 000000000..2bfa7ade9 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/composite @@ -0,0 +1,6 @@ +/** Handling of SQL "composite types." + */ +// Actual definitions in .hxx file so editors and such recognize file type. 
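+// (Editorial illustration, not part of upstream libpqxx: a rough sketch of
+// typical use of the blob class declared above.  The default-constructed
+// connection and the literal payload are illustrative assumptions only.)
+//
+//     pqxx::connection cx;
+//     pqxx::work tx{cx};
+//     std::basic_string<std::byte> doc(3, std::byte{0x2a});
+//     pqxx::oid id = pqxx::blob::from_buf(tx, doc);   // New large object.
+//     std::basic_string<std::byte> back;
+//     pqxx::blob::to_buf(tx, id, back, 1'000'000);    // Read it back, capped
+//                                                     // at roughly 1 MB.
+//     tx.commit();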
+#include "pqxx/internal/header-pre.hxx" +#include "pqxx/composite.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/composite.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/composite.hxx new file mode 100644 index 000000000..439b133a8 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/composite.hxx @@ -0,0 +1,149 @@ +#ifndef PQXX_H_COMPOSITE +#define PQXX_H_COMPOSITE + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/internal/array-composite.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/util.hxx" + +namespace pqxx +{ +/// Parse a string representation of a value of a composite type. +/** @warning This code is still experimental. Use with care. + * + * You may use this as a helper while implementing your own @ref string_traits + * for a composite type. + * + * This function interprets `text` as the string representation of a value of + * some composite type, and sets each of `fields` to the respective values of + * its fields. The field types must be copy-assignable. + * + * The number of fields must match the number of fields in the composite type, + * and there must not be any other text in the input. The function is meant to + * handle any value string that the backend can produce, but not necessarily + * every valid alternative spelling. + * + * Fields in composite types can be null. When this happens, the C++ type of + * the corresponding field reference must be of a type that can handle nulls. + * If you are working with a type that does not have an inherent null value, + * such as e.g. `int`, consider using `std::optional`. + */ +template +inline void parse_composite( + pqxx::internal::encoding_group enc, std::string_view text, T &...fields) +{ + static_assert(sizeof...(fields) > 0); + + auto const scan{pqxx::internal::get_glyph_scanner(enc)}; + auto const data{std::data(text)}; + auto const size{std::size(text)}; + if (size == 0) + throw conversion_error{"Cannot parse composite value from empty string."}; + + std::size_t here{0}, next{scan(data, size, here)}; + if (next != 1 or data[here] != '(') + throw conversion_error{ + internal::concat("Invalid composite value string: ", text)}; + + here = next; + + constexpr auto num_fields{sizeof...(fields)}; + std::size_t index{0}; + (pqxx::internal::parse_composite_field( + index, text, here, fields, scan, num_fields - 1), + ...); + if (here != std::size(text)) + throw conversion_error{internal::concat( + "Composite value did not end at the closing parenthesis: '", text, + "'.")}; + if (text[here - 1] != ')') + throw conversion_error{internal::concat( + "Composive value did not end in parenthesis: '", text, "'")}; +} + + +/// Parse a string representation of a value of a composite type. +/** @warning This version only works for UTF-8 and single-byte encodings. + * + * For proper encoding support, use the composite-type support in the + * `field` class. + */ +template +inline void parse_composite(std::string_view text, T &...fields) +{ + parse_composite(pqxx::internal::encoding_group::MONOBYTE, text, fields...); +} +} // namespace pqxx + + +namespace pqxx::internal +{ +constexpr char empty_composite_str[]{"()"}; +} // namespace pqxx::internal + + +namespace pqxx +{ +/// Estimate the buffer size needed to represent a value of a composite type. +/** Returns a conservative estimate. 
+ */ +template +[[nodiscard]] inline std::size_t +composite_size_buffer(T const &...fields) noexcept +{ + constexpr auto num{sizeof...(fields)}; + + // Size for a multi-field composite includes room for... + // + opening parenthesis + // + field budgets + // + separating comma per field + // - comma after final field + // + closing parenthesis + // + terminating zero + + if constexpr (num == 0) + return std::size(pqxx::internal::empty_composite_str); + else + return 1 + (pqxx::internal::size_composite_field_buffer(fields) + ...) + + num + 1; +} + + +/// Render a series of values as a single composite SQL value. +/** @warning This code is still experimental. Use with care. + * + * You may use this as a helper while implementing your own `string_traits` + * for a composite type. + */ +template +inline char *composite_into_buf(char *begin, char *end, T const &...fields) +{ + if (std::size_t(end - begin) < composite_size_buffer(fields...)) + throw conversion_error{ + "Buffer space may not be enough to represent composite value."}; + + constexpr auto num_fields{sizeof...(fields)}; + if constexpr (num_fields == 0) + { + constexpr char empty[]{"()"}; + std::memcpy(begin, empty, std::size(empty)); + return begin + std::size(empty); + } + + char *pos{begin}; + *pos++ = '('; + + (pqxx::internal::write_composite_field(pos, end, fields), ...); + + // If we've got multiple fields, "backspace" that last comma. + if constexpr (num_fields > 1) + --pos; + *pos++ = ')'; + *pos++ = '\0'; + return pos; +} +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/config-public-compiler.h b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/config-public-compiler.h new file mode 100644 index 000000000..3668a10f8 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/config-public-compiler.h @@ -0,0 +1,81 @@ +/* include/pqxx/config.h.in. Generated from configure.ac by autoheader. */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DLFCN_H */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_INTTYPES_H */ +/* Define to 1 if you have the `pq' library (-lpq). */ +/* #undef HAVE_LIBPQ */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_MEMORY_H */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_STDINT_H */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_STDLIB_H */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_STRINGS_H */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_STRING_H */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_STAT_H */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_TYPES_H */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_UNISTD_H */ +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +/* #undef LT_OBJDIR */ +/* Name of package */ +/* #undef PACKAGE */ +/* Define to the address where bug reports for this package should be sent. */ +/* #undef PACKAGE_BUGREPORT */ +/* Define to the full name of this package. */ +/* #undef PACKAGE_NAME */ +/* Define to the full name and version of this package. */ +/* #undef PACKAGE_STRING */ +/* Define to the one symbol short name of this package. */ +/* #undef PACKAGE_TARNAME */ +/* Define to the home page for this package. */ +/* #undef PACKAGE_URL */ +/* Define to the version of this package. */ +/* #undef PACKAGE_VERSION */ +/* Define if supports floating-point conversion. 
*/ +#define PQXX_HAVE_CHARCONV_FLOAT +/* Define if supports integer conversion. */ +#define PQXX_HAVE_CHARCONV_INT +/* Define if compiler has C++20 std::cmp_greater etc. */ +/* #undef PQXX_HAVE_CMP */ +/* Define if compiler supports Concepts and header. */ +/* #undef PQXX_HAVE_CONCEPTS */ +/* Define if compiler supports __cxa_demangle */ +#define PQXX_HAVE_CXA_DEMANGLE +/* Define if g++ supports pure attribute */ +#define PQXX_HAVE_GCC_PURE +/* Define if g++ supports visibility attribute. */ +#define PQXX_HAVE_GCC_VISIBILITY +/* Define if likely & unlikely work. */ +/* #undef PQXX_HAVE_LIKELY */ +/* Define if operator[] can take multiple arguments. */ +/* #undef PQXX_HAVE_MULTIDIMENSIONAL_SUBSCRIPT */ +/* Define if compiler has usable std::filesystem::path. */ +#define PQXX_HAVE_PATH +/* Define if poll() is available. */ +#define PQXX_HAVE_POLL +/* Define if libpq has PQencryptPasswordConn (since pg 10). */ +#define PQXX_HAVE_PQENCRYPTPASSWORDCONN +/* Define if libpq has pipeline mode (since pg 14). */ +#define PQXX_HAVE_PQ_PIPELINE +/* Define if std::this_thread::sleep_for works. */ +#define PQXX_HAVE_SLEEP_FOR +/* Define if compiler has std::span. */ +/* #undef PQXX_HAVE_SPAN */ +/* Define if strerror_r() is available. */ +#define PQXX_HAVE_STRERROR_R +/* Define if strerror_s() is available. */ +/* #undef PQXX_HAVE_STRERROR_S */ +/* Define if thread_local is fully supported. */ +#define PQXX_HAVE_THREAD_LOCAL +/* Define if std::chrono has year_month_day etc. */ +/* #undef PQXX_HAVE_YEAR_MONTH_DAY */ +/* Define to 1 if you have the ANSI C header files. */ +/* #undef STDC_HEADERS */ +/* Version number of package */ +/* #undef VERSION */ diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/connection b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/connection new file mode 100644 index 000000000..82ff43aa5 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/connection @@ -0,0 +1,8 @@ +/** pqxx::connection class. + * + * pqxx::connection encapsulates a connection to a database. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/connection.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/connection.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/connection.hxx new file mode 100644 index 000000000..92454bb47 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/connection.hxx @@ -0,0 +1,1261 @@ +/* Definition of the connection class. + * + * pqxx::connection encapsulates a connection to a database. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/connection instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_CONNECTION +#define PQXX_H_CONNECTION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Double-check in order to suppress an overzealous Visual C++ warning (#418). 
+#if defined(PQXX_HAVE_CONCEPTS) && __has_include() +# include +#endif + +#include "pqxx/errorhandler.hxx" +#include "pqxx/except.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/params.hxx" +#include "pqxx/separated_list.hxx" +#include "pqxx/strconv.hxx" +#include "pqxx/types.hxx" +#include "pqxx/util.hxx" +#include "pqxx/zview.hxx" + + +/** + * @addtogroup connections + * + * Use of the libpqxx library starts here. + * + * Everything that can be done with a database through libpqxx must go through + * a @ref pqxx::connection object. It connects to a database when you create + * it, and it terminates that communication during destruction. + * + * Many things come together in this class. Handling of error and warning + * messages, for example, is defined by @ref pqxx::errorhandler objects in the + * context of a connection. Prepared statements are also defined here. + * + * When you connect to a database, you pass a connection string containing any + * parameters and options, such as the server address and the database name. + * + * These are identical to the ones in libpq, the C language binding upon which + * libpqxx itself is built: + * + * https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING + * + * There are also environment variables you can set to provide defaults, again + * as defined by libpq: + * + * https://www.postgresql.org/docs/current/libpq-envars.html + * + * You can also create a database connection _asynchronously_ using an + * intermediate @ref pqxx::connecting object. + */ + +namespace pqxx::internal +{ +class sql_cursor; + +#if defined(PQXX_HAVE_CONCEPTS) +/// Concept: T is a range of pairs of zero-terminated strings. +template +concept ZKey_ZValues = std::ranges::input_range and requires(T t) +{ + {std::cbegin(t)}; + { + std::get<0>(*std::cbegin(t)) + } -> ZString; + { + std::get<1>(*std::cbegin(t)) + } -> ZString; +} and std::tuple_size_v::value_type> +== 2; +#endif // PQXX_HAVE_CONCEPTS +} // namespace pqxx::internal + + +namespace pqxx::internal::gate +{ +class connection_dbtransaction; +class connection_errorhandler; +class connection_largeobject; +class connection_notification_receiver; +class connection_pipeline; +class connection_sql_cursor; +class connection_stream_from; +class connection_stream_to; +class connection_transaction; +class const_connection_largeobject; +} // namespace pqxx::internal::gate + + +namespace pqxx +{ +/// Representation of a PostgreSQL table path. +/** A "table path" consists of a table name, optionally prefixed by a schema + * name, which in turn is optionally prefixed by a database name. + * + * A minimal example of a table path would be `{mytable}`. But a table path + * may also take the forms `{myschema,mytable}` or + * `{mydb,myschema,mytable}`. + */ +using table_path = std::initializer_list; + + +/// Encrypt a password. @deprecated Use connection::encrypt_password instead. +[[nodiscard, + deprecated("Use connection::encrypt_password instead.")]] std::string + PQXX_LIBEXPORT + encrypt_password(char const user[], char const password[]); + +/// Encrypt password. @deprecated Use connection::encrypt_password instead. +[[nodiscard, + deprecated("Use connection::encrypt_password instead.")]] inline std::string +encrypt_password(zview user, zview password) +{ +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return encrypt_password(user.c_str(), password.c_str()); +#include "pqxx/internal/ignore-deprecated-post.hxx" +} + + +/// Error verbosity levels. 
+enum class error_verbosity : int +{ + // These values must match those in libpq's PGVerbosity enum. + terse = 0, + normal = 1, + verbose = 2 +}; + + +/// Connection to a database. +/** This is the first class to look at when you wish to work with a database + * through libpqxx. The connection opens during construction, and closes upon + * destruction. + * + * When creating a connection, you can pass a connection URI or a postgres + * connection string, to specify the database server's address, a login + * username, and so on. If you don't, the connection will try to obtain them + * from certain environment variables. If those are not set either, the + * default is to try and connect to the local system's port 5432. + * + * Find more about connection strings here: + * + * https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING + * + * The variables are documented here: + * + * https://www.postgresql.org/docs/current/libpq-envars.html + * + * To query or manipulate the database once connected, use one of the + * transaction classes (see pqxx/transaction_base.hxx) and perhaps also the + * transactor framework (see pqxx/transactor.hxx). + * + * When a connection breaks, you will typically get a @ref broken_connection + * exception. This can happen at almost any point. + * + * @warning On Unix-like systems, including GNU and BSD systems, your program + * may receive the SIGPIPE signal when the connection to the backend breaks. By + * default this signal will abort your program. Use "signal(SIGPIPE, SIG_IGN)" + * if you want your program to continue running after a connection fails. + */ +class PQXX_LIBEXPORT connection +{ +public: + connection() : connection{""} {} + + /// Connect to a database, using `options` string. + explicit connection(char const options[]) + { + check_version(); + init(options); + } + + /// Connect to a database, using `options` string. + explicit connection(zview options) : connection{options.c_str()} + { + // (Delegates to other constructor which calls check_version for us.) + } + + /// Move constructor. + /** Moving a connection is not allowed if it has an open transaction, or has + * error handlers or notification receivers registered on it. In those + * situations, other objects may hold references to the old object which + * would become invalid and might produce hard-to-diagnose bugs. + */ + connection(connection &&rhs); + +#if defined(PQXX_HAVE_CONCEPTS) + /// Connect to a database, passing options as a range of key/value pairs. + /** @warning Experimental. Requires C++20 "concepts" support. Define + * `PQXX_HAVE_CONCEPTS` to enable it. + * + * There's no need to escape the parameter values. + * + * See the PostgreSQL libpq documentation for the full list of possible + * options: + * + * https://postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS + * + * The options can be anything that can be iterated as a series of pairs of + * zero-terminated strings: `std::pair`, or + * `std::tuple`, or + * `std::map`, and so on. + */ + template + inline connection(MAPPING const ¶ms); +#endif // PQXX_HAVE_CONCEPTS + + ~connection() + { + try + { + close(); + } + catch (std::exception const &) + {} + } + + /// Move assignment. + /** Neither connection can have an open transaction, registered error + * handlers, or registered notification receivers. + */ + connection &operator=(connection &&rhs); + + connection(connection const &) = delete; + connection &operator=(connection const &) = delete; + + /// Is this connection open at the moment? 
+ /** @warning This function is **not** needed in most code. Resist the + * temptation to check it after opening a connection. The `connection` + * constructor will throw a @ref broken_connection exception if can't connect + * to the database. + */ + [[nodiscard]] bool PQXX_PURE is_open() const noexcept; + + /// Invoke notice processor function. The message should end in newline. + void process_notice(char const[]) noexcept; + /// Invoke notice processor function. Newline at end is recommended. + /** The zview variant, with a message ending in newline, is the most + * efficient way to call process_notice. + */ + void process_notice(zview) noexcept; + + /// Enable tracing to a given output stream, or nullptr to disable. + void trace(std::FILE *) noexcept; + + /** + * @name Connection properties + * + * These are probably not of great interest, since most are derived from + * information supplied by the client program itself, but they are included + * for completeness. + * + * The connection needs to be currently active for these to work. + */ + //@{ + /// Name of database we're connected to, if any. + [[nodiscard]] char const *dbname() const; + + /// Database user ID we're connected under, if any. + [[nodiscard]] char const *username() const; + + /// Address of server, or nullptr if none specified (i.e. default or local) + [[nodiscard]] char const *hostname() const; + + /// Server port number we're connected to. + [[nodiscard]] char const *port() const; + + /// Process ID for backend process, or 0 if inactive. + [[nodiscard]] int PQXX_PURE backendpid() const &noexcept; + + /// Socket currently used for connection, or -1 for none. Use with care! + /** Query the current socket number. This is intended for event loops based + * on functions such as select() or poll(), where you're waiting for any of + * multiple file descriptors to become ready for communication. + * + * Please try to stay away from this function. It is really only meant for + * event loops that need to wait on more than one file descriptor. If all + * you need is to block until a notification arrives, for instance, use + * await_notification(). If you want to issue queries and retrieve results + * in nonblocking fashion, check out the pipeline class. + */ + [[nodiscard]] int PQXX_PURE sock() const &noexcept; + + /// What version of the PostgreSQL protocol is this connection using? + /** The answer can be 0 (when there is no connection); 3 for protocol 3.0; or + * possibly higher values as newer protocol versions come into use. + */ + [[nodiscard]] int PQXX_PURE protocol_version() const noexcept; + + /// What version of the PostgreSQL server are we connected to? + /** The result is a bit complicated: each of the major, medium, and minor + * release numbers is written as a two-digit decimal number, and the three + * are then concatenated. Thus server version 9.4.2 will be returned as the + * decimal number 90402. If there is no connection to the server, this + * returns zero. + * + * @warning When writing version numbers in your code, don't add zero at the + * beginning! Numbers beginning with zero are interpreted as octal (base-8) + * in C++. Thus, 070402 is not the same as 70402, and 080000 is not a number + * at all because there is no digit "8" in octal notation. Use strictly + * decimal notation when it comes to these version numbers. 
+ */ + [[nodiscard]] int PQXX_PURE server_version() const noexcept; + //@} + + /// @name Text encoding + /** + * Each connection is governed by a "client encoding," which dictates how + * strings and other text is represented in bytes. The database server will + * send text data to you in this encoding, and you should use it for the + * queries and data which you send to the server. + * + * Search the PostgreSQL documentation for "character set encodings" to find + * out more about the available encodings, how to extend them, and how to use + * them. Not all server-side encodings are compatible with all client-side + * encodings or vice versa. + * + * Encoding names are case-insensitive, so e.g. "UTF8" is equivalent to + * "utf8". + * + * You can change the client encoding, but this may not work when the + * connection is in a special state, such as when streaming a table. It's + * not clear what happens if you change the encoding during a transaction, + * and then abort the transaction. + */ + //@{ + /// Get client-side character encoding, by name. + [[nodiscard]] std::string get_client_encoding() const; + + /// Set client-side character encoding, by name. + /** + * @param encoding Name of the character set encoding to use. + */ + void set_client_encoding(zview encoding) & + { + set_client_encoding(encoding.c_str()); + } + + /// Set client-side character encoding, by name. + /** + * @param encoding Name of the character set encoding to use. + */ + void set_client_encoding(char const encoding[]) &; + + /// Get the connection's encoding, as a PostgreSQL-defined code. + [[nodiscard]] int PQXX_PRIVATE encoding_id() const; + + //@} + + /// Set session variable, using SQL's `SET` command. + /** @deprecated To set a session variable, use @ref set_session_var. To set + * a transaction-local variable, execute an SQL `SET` command. + * + * @warning When setting a string value, you must escape and quote it first. + * Use the @ref quote() function to do that. + * + * @warning This executes an SQL query, so do not get or set variables while + * a table stream or pipeline is active on the same connection. + * + * @param var Variable to set. + * @param value New value for Var. This can be any SQL expression. If it's + * a string, be sure that it's properly escaped and quoted. + */ + [[deprecated("To set session variables, use set_session_var.")]] void + set_variable(std::string_view var, std::string_view value) &; + + /// Set one of the session variables to a new value. + /** This executes SQL, so do not do it while a pipeline or stream is active + * on the connection. + * + * The value you set here will last for the rest of the connection's + * duration, or until you set a new value. + * + * If you set the value while in a @ref dbtransaction (i.e. any transaction + * that is not a @ref nontransaction), then rolling back the transaction will + * undo the change. + * + * All applies to setting _session_ variables. You can also set the same + * variables as _local_ variables, in which case they will always revert to + * their previous value when the transaction ends (or when you overwrite them + * of course). To set a local variable, simply execute an SQL statement + * along the lines of "`SET LOCAL var = 'value'`" inside your transaction. + * + * @param var The variable to set. + * @param value The new value for the variable. + * @throw @ref variable_set_to_null if the value is null; this is not + * allowed. 
+ */ + template + void set_session_var(std::string_view var, TYPE const &value) & + { + if constexpr (nullness::has_null) + { + if (nullness::is_null(value)) + throw variable_set_to_null{ + internal::concat("Attempted to set variable ", var, " to null.")}; + } + exec(internal::concat("SET ", quote_name(var), "=", quote(value))); + } + + /// Read session variable, using SQL's `SHOW` command. + /** @warning This executes an SQL query, so do not get or set variables while + * a table stream or pipeline is active on the same connection. + */ + [[deprecated("Use get_var instead.")]] std::string + get_variable(std::string_view); + + /// Read currently applicable value of a variable. + /** This function executes an SQL statement, so it won't work while a + * @ref pipeline or query stream is active on the connection. + * + * @return a blank `std::optional` if the variable's value is null, or its + * string value otherwise. + */ + std::string get_var(std::string_view var); + + /// Read currently applicable value of a variable. + /** This function executes an SQL statement, so it won't work while a + * @ref pipeline or query stream is active on the connection. + * + * If there is any possibility that the variable is null, ensure that `TYPE` + * can represent null values. + */ + template TYPE get_var_as(std::string_view var) + { + return from_string(get_var(var)); + } + + /** + * @name Notifications and Receivers + */ + //@{ + /// Check for pending notifications and take appropriate action. + /** This does not block. To wait for incoming notifications, either call + * await_notification() (it calls this function); or wait for incoming data + * on the connection's socket (i.e. wait to read), and then call this + * function repeatedly until it returns zero. After that, there are no more + * pending notifications so you may want to wait again. + * + * If any notifications are pending when you call this function, it + * processes them by finding any receivers that match the notification string + * and invoking those. If no receivers match, there is nothing to invoke but + * we do consider the notification processed. + * + * If any of the client-registered receivers throws an exception, the + * function will report it using the connection's errorhandlers. It does not + * re-throw the exceptions. + * + * @return Number of notifications processed. + */ + int get_notifs(); + + /// Wait for a notification to come in. + /** There are other events that will also terminate the wait, such as the + * backend failing. It will also wake up periodically. + * + * If a notification comes in, the call will process it, along with any other + * notifications that may have been pending. + * + * To wait for notifications into your own event loop instead, wait until + * there is incoming data on the connection's socket to be read, then call + * @ref get_notifs() repeatedly until it returns zero. + * + * @return Number of notifications processed. + */ + int await_notification(); + + /// Wait for a notification to come in, or for given timeout to pass. + /** There are other events that will also terminate the wait, such as the + * backend failing, or timeout expiring. + * + * If a notification comes in, the call will process it, along with any other + * notifications that may have been pending. + * + * To wait for notifications into your own event loop instead, wait until + * there is incoming data on the connection's socket to be read, then call + * @ref get_notifs repeatedly until it returns zero. 
+ * + * @return Number of notifications processed + */ + int await_notification(std::time_t seconds, long microseconds); + //@} + + /** + * @name Password encryption + * + * Use this when setting a new password for the user if password encryption + * is enabled. Inputs are the SQL name for the user for whom you with to + * encrypt a password; the plaintext password; and the hash algorithm. + * + * The algorithm must be one of "md5", "scram-sha-256" (introduced in + * PostgreSQL 10), or `nullptr`. If the pointer is null, this will query + * the `password_encryption setting` from the server, and use the default + * algorithm as defined there. + * + * @return encrypted version of the password, suitable for encrypted + * PostgreSQL authentication. + * + * Thus you can change a user's password with: + * ```cxx + * void setpw(transaction_base &t, string const &user, string const &pw) + * { + * t.exec0("ALTER USER " + user + " " + * "PASSWORD '" + t.conn().encrypt_password(user,pw) + "'"); + * } + * ``` + * + * When building this against a libpq older than version 10, this will use + * an older function which only supports md5. In that case, requesting a + * different algorithm than md5 will result in a @ref feature_not_supported + * exception. + */ + //@{ + /// Encrypt a password for a given user. + [[nodiscard]] std::string + encrypt_password(zview user, zview password, zview algorithm) + { + return encrypt_password(user.c_str(), password.c_str(), algorithm.c_str()); + } + /// Encrypt a password for a given user. + [[nodiscard]] std::string encrypt_password( + char const user[], char const password[], char const *algorithm = nullptr); + //@} + + /** + * @name Prepared statements + * + * PostgreSQL supports prepared SQL statements, i.e. statements that you can + * register under a name you choose, optimized once by the backend, and + * executed any number of times under the given name. + * + * Prepared statement definitions are not sensitive to transaction + * boundaries. A statement defined inside a transaction will remain defined + * outside that transaction, even if the transaction itself is subsequently + * aborted. Once a statement has been prepared, it will only go away if you + * close the connection or explicitly "unprepare" the statement. + * + * Use the `pqxx::transaction_base::exec_prepared` functions to execute a + * prepared statement. See @ref prepared for a full discussion. + * + * @warning Using prepared statements can save time, but if your statement + * takes parameters, it may also make your application significantly slower! + * The reason is that the server works out a plan for executing the query + * when you prepare it. At that time, of course it does not know the values + * for the parameters that you will pass. If you execute a query without + * preparing it, then the server works out the plan on the spot, with full + * knowledge of the parameter values. + * + * A statement's definition can refer to its parameters as `$1`, `$2`, etc. + * The first parameter you pass to the call provides a value for `$1`, and + * so on. + * + * Here's an example of how to use prepared statements. + * + * ```cxx + * using namespace pqxx; + * void foo(connection &c) + * { + * c.prepare("findtable", "select * from pg_tables where name=$1"); + * work tx{c}; + * result r = tx.exec_prepared("findtable", "mytable"); + * if (std::empty(r)) throw runtime_error{"mytable not found!"}; + * } + * ``` + */ + //@{ + + /// Define a prepared statement. 
+ /** + * @param name unique name for the new prepared statement. + * @param definition SQL statement to prepare. + */ + void prepare(zview name, zview definition) & + { + prepare(name.c_str(), definition.c_str()); + } + + /** + * @param name unique name for the new prepared statement. + * @param definition SQL statement to prepare. + */ + void prepare(char const name[], char const definition[]) &; + + /// Define a nameless prepared statement. + /** + * This can be useful if you merely want to pass large binary parameters to a + * statement without otherwise wishing to prepare it. If you use this + * feature, always keep the definition and the use close together to avoid + * the nameless statement being redefined unexpectedly by code somewhere + * else. + */ + void prepare(char const definition[]) &; + void prepare(zview definition) & { return prepare(definition.c_str()); } + + /// Drop prepared statement. + void unprepare(std::string_view name); + + //@} + + // C++20: constexpr. Breaks ABI. + /// Suffix unique number to name to make it unique within session context. + /** Used internally to generate identifiers for SQL objects (such as cursors + * and nested transactions) based on a given human-readable base name. + */ + [[nodiscard]] std::string adorn_name(std::string_view); + + /** + * @defgroup escaping-functions String-escaping functions + */ + //@{ + + /// Escape string for use as SQL string literal on this connection. + /** @warning This accepts a length, and it does not require a terminating + * zero byte. But if there is a zero byte, escaping stops there even if + * it's not at the end of the string! + */ + [[deprecated("Use std::string_view or pqxx:zview.")]] std::string + esc(char const text[], std::size_t maxlen) const + { + return esc(std::string_view{text, maxlen}); + } + + /// Escape string for use as SQL string literal on this connection. + [[nodiscard]] std::string esc(char const text[]) const + { + return esc(std::string_view{text}); + } + +#if defined(PQXX_HAVE_SPAN) + /// Escape string for use as SQL string literal, into `buffer`. + /** Use this variant when you want to re-use the same buffer across multiple + * calls. If that's not the case, or convenience and simplicity are more + * important, use the single-argument variant. + * + * For every byte in `text`, there must be at least 2 bytes of space in + * `buffer`; plus there must be one byte of space for a trailing zero. + * Throws @ref range_error if this space is not available. + * + * Returns a reference to the escaped string, which is actually stored in + * `buffer`. + */ + [[nodiscard]] std::string_view + esc(std::string_view text, std::span buffer) + { + auto const size{std::size(text)}, space{std::size(buffer)}; + auto const needed{2 * size + 1}; + if (space < needed) + throw range_error{internal::concat( + "Not enough room to escape string of ", size, " byte(s): need ", + needed, " bytes of buffer space, but buffer size is ", space, ".")}; + auto const data{buffer.data()}; + return {data, esc_to_buf(text, data)}; + } +#endif + + /// Escape string for use as SQL string literal on this connection. + /** @warning This is meant for text strings only. It cannot contain bytes + * whose value is zero ("nul bytes"). + */ + [[nodiscard]] std::string esc(std::string_view text) const; + +#if defined(PQXX_HAVE_CONCEPTS) + /// Escape binary string for use as SQL string literal on this connection. + /** This is identical to `esc_raw(data)`. 
*/ + template [[nodiscard]] std::string esc(DATA const &data) const + { + return esc_raw(data); + } +#endif + +#if defined(PQXX_HAVE_CONCEPTS) && defined(PQXX_HAVE_SPAN) + /// Escape binary string for use as SQL string literal, into `buffer`. + /** Use this variant when you want to re-use the same buffer across multiple + * calls. If that's not the case, or convenience and simplicity are more + * important, use the single-argument variant. + * + * For every byte in `data`, there must be at least two bytes of space in + * `buffer`; plus there must be two bytes of space for a header and one for + * a trailing zero. Throws @ref range_error if this space is not available. + * + * Returns a reference to the escaped string, which is actually stored in + * `buffer`. + */ + template + [[nodiscard]] zview esc(DATA const &data, std::span buffer) const + { + auto const size{std::size(data)}, space{std::size(buffer)}; + auto const needed{internal::size_esc_bin(std::size(data))}; + if (space < needed) + throw range_error{internal::concat( + "Not enough room to escape binary string of ", size, " byte(s): need ", + needed, " bytes of buffer space, but buffer size is ", space, ".")}; + + std::basic_string_view view{std::data(data), std::size(data)}; + auto const out{std::data(buffer)}; + // Actually, in the modern format, we know beforehand exactly how many + // bytes we're going to fill. Just leave out the trailing zero. + internal::esc_bin(view, out); + return zview{out, needed - 1}; + } +#endif + + /// Escape binary string for use as SQL string literal on this connection. + [[deprecated("Use std::byte for binary data.")]] std::string + esc_raw(unsigned char const bin[], std::size_t len) const; + + /// Escape binary string for use as SQL string literal on this connection. + /** You can also just use @ref esc with a binary string. */ + [[nodiscard]] std::string esc_raw(std::basic_string_view) const; + +#if defined(PQXX_HAVE_SPAN) + /// Escape binary string for use as SQL string literal, into `buffer`. + /** You can also just use @ref esc with a binary string. */ + [[nodiscard]] std::string + esc_raw(std::basic_string_view, std::span buffer) const; +#endif + +#if defined(PQXX_HAVE_CONCEPTS) + /// Escape binary string for use as SQL string literal on this connection. + /** You can also just use @ref esc with a binary string. */ + template + [[nodiscard]] std::string esc_raw(DATA const &data) const + { + return esc_raw( + std::basic_string_view{std::data(data), std::size(data)}); + } +#endif + +#if defined(PQXX_HAVE_CONCEPTS) && defined(PQXX_HAVE_SPAN) + /// Escape binary string for use as SQL string literal, into `buffer`. + template + [[nodiscard]] zview esc_raw(DATA const &data, std::span buffer) const + { + return this->esc(binary_cast(data), buffer); + } +#endif + + /// Unescape binary data, e.g. from a table field or notification payload. + /** Takes a binary string as escaped by PostgreSQL, and returns a restored + * copy of the original binary data. + */ + [[nodiscard, deprecated("Use unesc_bin() instead.")]] std::string + unesc_raw(zview text) const + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return unesc_raw(text.c_str()); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Unescape binary data, e.g. from a table field or notification payload. + /** Takes a binary string as escaped by PostgreSQL, and returns a restored + * copy of the original binary data. 
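+   *
+   * (This overload is deprecated.  New code would normally call
+   * @ref unesc_bin instead, e.g. `auto bytes = c.unesc_bin(fld.view());` for
+   * some connection `c` and bytea-holding field `fld`; those names are only
+   * illustrative.)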
+ */ + [[nodiscard, deprecated("Use unesc_bin() instead.")]] std::string + unesc_raw(char const text[]) const; + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Unescape binary data, e.g. from a table field or notification payload. + /** Takes a binary string as escaped by PostgreSQL, and returns a restored + * copy of the original binary data. + * + * (The data must be encoded in PostgreSQL's "hex" format. The legacy + * "bytea" escape format, used prior to PostgreSQL 9.0, is no longer + * supported.) + */ + [[nodiscard]] std::basic_string + unesc_bin(std::string_view text) const + { + std::basic_string buf; + buf.resize(pqxx::internal::size_unesc_bin(std::size(text))); + pqxx::internal::unesc_bin(text, buf.data()); + return buf; + } + + /// Escape and quote a string of binary data. + [[deprecated("Use quote(std::basic_string_view).")]] std::string + quote_raw(unsigned char const bin[], std::size_t len) const; + + /// Escape and quote a string of binary data. + std::string quote_raw(std::basic_string_view) const; + +#if defined(PQXX_HAVE_CONCEPTS) + /// Escape and quote a string of binary data. + /** You can also just use @ref quote with binary data. */ + template + [[nodiscard]] std::string quote_raw(DATA const &data) const + { + return quote_raw( + std::basic_string_view{std::data(data), std::size(data)}); + } +#endif + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Escape and quote an SQL identifier for use in a query. + [[nodiscard]] std::string quote_name(std::string_view identifier) const; + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Escape and quote a table name. + /** When passing just a table name, this is just another name for + * @ref quote_name. + */ + [[nodiscard]] std::string quote_table(std::string_view name) const; + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Escape and quote a table path. + /** A table path consists of a table name, optionally prefixed by a schema + * name; and if both are given, they are in turn optionally prefixed by a + * database name. + * + * Each portion of the path (database name, schema name, table name) will be + * quoted separately, and they will be joined together by dots. So for + * example, `myschema.mytable` will become `"myschema"."mytable"`. + */ + [[nodiscard]] std::string quote_table(table_path) const; + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Quote and comma-separate a series of column names. + /** Use this to save a bit of work in cases where you repeatedly need to pass + * the same list of column names, e.g. with @ref stream_to and @ref + * stream_from. Some functions that need to quote the columns list + * internally, will have a "raw" alternative which let you do the quoting + * yourself. It's a bit of extra work, but it can in rare cases let you + * eliminate some duplicate work in quoting them repeatedly. + */ + template + inline std::string quote_columns(STRINGS const &columns) const; + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Represent object as SQL string, including quoting & escaping. + /** + * Recognises nulls and represents them as SQL nulls. They get no quotes. + */ + template + [[nodiscard]] inline std::string quote(T const &t) const; + + [[deprecated("Use std::byte for binary data.")]] std::string + quote(binarystring const &) const; + + // TODO: Make "into buffer" variant to eliminate a string allocation. 
+ /// Escape and quote binary data for use as a BYTEA value in SQL statement. + [[nodiscard]] std::string + quote(std::basic_string_view bytes) const; + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Escape string for literal LIKE match. + /** Use this when part of an SQL "LIKE" pattern should match only as a + * literal string, not as a pattern, even if it contains "%" or "_" + * characters that would normally act as wildcards. + * + * The string does not get string-escaped or quoted. You do that later. + * + * For instance, let's say you have a string `name` entered by the user, + * and you're searching a `file` column for items that match `name` + * followed by a dot and three letters. Even if `name` contains wildcard + * characters "%" or "_", you only want those to match literally, so "_" + * only matches "_" and "%" only matches a single "%". + * + * You do that by "like-escaping" `name`, appending the wildcard pattern + * `".___"`, and finally, escaping and quoting the result for inclusion in + * your query: + * + * ```cxx + * tx.exec( + * "SELECT file FROM item WHERE file LIKE " + + * tx.quote(tx.esc_like(name) + ".___")); + * ``` + * + * The SQL "LIKE" operator also lets you choose your own escape character. + * This is supported, but must be a single-byte character. + */ + [[nodiscard]] std::string + esc_like(std::string_view text, char escape_char = '\\') const; + //@} + + /// Attempt to cancel the ongoing query, if any. + /** You can use this from another thread, and/or while a query is executing + * in a pipeline, but it's up to you to ensure that you're not canceling the + * wrong query. This may involve locking. + */ + void cancel_query(); + +#if defined(_WIN32) || __has_include() + /// Set socket to blocking (true) or nonblocking (false). + /** @warning Do not use this unless you _really_ know what you're doing. + * @warning This function is available on most systems, but not necessarily + * all. + */ + void set_blocking(bool block) &; +#endif // defined(_WIN32) || __has_include() + + /// Set session verbosity. + /** Set the verbosity of error messages to "terse", "normal" (the default), + * or "verbose." + * + * If "terse", returned messages include severity, primary text, and + * position only; this will normally fit on a single line. "normal" produces + * messages that include the above plus any detail, hint, or context fields + * (these might span multiple lines). "verbose" includes all available + * fields. + */ + void set_verbosity(error_verbosity verbosity) &noexcept; + + /// Return pointers to the active errorhandlers. + /** The entries are ordered from oldest to newest handler. + * + * You may use this to find errorhandlers that your application wants to + * delete when destroying the connection. Be aware, however, that libpqxx + * may also add errorhandlers of its own, and those will be included in the + * list. If this is a problem for you, derive your errorhandlers from a + * custom base class derived from pqxx::errorhandler. Then use dynamic_cast + * to find which of the error handlers are yours. + * + * The pointers point to the real errorhandlers. The container it returns + * however is a copy of the one internal to the connection, not a reference. + */ + [[nodiscard]] std::vector get_errorhandlers() const; + + /// Return a connection string encapsulating this connection's options. + /** The connection must be currently open for this to work. + * + * Returns a reconstruction of this connection's connection string. 
It may + * not exactly match the connection string you passed in when creating this + * connection. + */ + [[nodiscard]] std::string connection_string() const; + + /// Explicitly close the connection. + /** The destructor will do this for you automatically. Still, there is a + * reason to `close()` objects explicitly where possible: if an error should + * occur while closing, `close()` can throw an exception. A destructor + * cannot. + * + * Closing a connection is idempotent. Closing a connection that's already + * closed does nothing. + */ + void close(); + + /// Seize control of a raw libpq connection. + /** @warning Do not do this. Please. It's for very rare, very specific + * use-cases. The mechanism may change (or break) in unexpected ways in + * future versions. + * + * @param raw_conn a raw libpq `PQconn` pointer. + */ + static connection seize_raw_connection(internal::pq::PGconn *raw_conn) + { + return connection{raw_conn}; + } + + /// Release the raw connection without closing it. + /** @warning Do not do this. It's for very rare, very specific use-cases. + * The mechanism may change (or break) in unexpected ways in future versions. + * + * The `connection` object becomes unusable after this. + */ + internal::pq::PGconn *release_raw_connection() && + { + return std::exchange(m_conn, nullptr); + } + +private: + friend class connecting; + enum connect_mode + { + connect_nonblocking + }; + connection(connect_mode, zview connection_string); + + /// For use by @ref seize_raw_connection. + explicit connection(internal::pq::PGconn *raw_conn) : m_conn{raw_conn} {} + + /// Poll for ongoing connection, try to progress towards completion. + /** Returns a pair of "now please wait to read data from socket" and "now + * please wait to write data to socket." Both will be false when done. + * + * Throws an exception if polling indicates that the connection has failed. + */ + std::pair poll_connect(); + + // Initialise based on connection string. + void init(char const options[]); + // Initialise based on parameter names and values. + void init(char const *params[], char const *values[]); + void complete_init(); + + result make_result( + internal::pq::PGresult *pgr, std::shared_ptr const &query, + std::string_view desc = ""sv); + + void PQXX_PRIVATE set_up_state(); + + int PQXX_PRIVATE PQXX_PURE status() const noexcept; + + /// Escape a string, into a buffer allocated by the caller. + /** The buffer must have room for at least `2*std::size(text) + 1` bytes. + * + * Returns the number of bytes written, including the trailing zero. + */ + std::size_t esc_to_buf(std::string_view text, char *buf) const; + + friend class internal::gate::const_connection_largeobject; + char const *PQXX_PURE err_msg() const noexcept; + + void PQXX_PRIVATE process_notice_raw(char const msg[]) noexcept; + + result exec_prepared(std::string_view statement, internal::c_params const &); + + /// Throw @ref usage_error if this connection is not in a movable state. + void check_movable() const; + /// Throw @ref usage_error if not in a state where it can be move-assigned. 
+ void check_overwritable() const; + + friend class internal::gate::connection_errorhandler; + void PQXX_PRIVATE register_errorhandler(errorhandler *); + void PQXX_PRIVATE unregister_errorhandler(errorhandler *) noexcept; + + friend class internal::gate::connection_transaction; + result exec(std::string_view, std::string_view = ""sv); + result + PQXX_PRIVATE exec(std::shared_ptr, std::string_view = ""sv); + void PQXX_PRIVATE register_transaction(transaction_base *); + void PQXX_PRIVATE unregister_transaction(transaction_base *) noexcept; + + friend class internal::gate::connection_stream_from; + std::pair>, std::size_t> + PQXX_PRIVATE read_copy_line(); + + friend class internal::gate::connection_stream_to; + void PQXX_PRIVATE write_copy_line(std::string_view); + void PQXX_PRIVATE end_copy_write(); + + friend class internal::gate::connection_largeobject; + internal::pq::PGconn *raw_connection() const { return m_conn; } + + friend class internal::gate::connection_notification_receiver; + void add_receiver(notification_receiver *); + void remove_receiver(notification_receiver *) noexcept; + + friend class internal::gate::connection_pipeline; + void PQXX_PRIVATE start_exec(char const query[]); + bool PQXX_PRIVATE consume_input() noexcept; + bool PQXX_PRIVATE is_busy() const noexcept; + internal::pq::PGresult *get_result(); + + friend class internal::gate::connection_dbtransaction; + friend class internal::gate::connection_sql_cursor; + + result exec_params(std::string_view query, internal::c_params const &args); + + /// Connection handle. + internal::pq::PGconn *m_conn = nullptr; + + /// Active transaction on connection, if any. + /** We don't use this for anything, except to check for open transactions + * when we close the connection or start a new transaction. + * + * We also don't allow move construction or move assignment while there's a + * transaction, since moving the connection in that case would leave one or + * more pointers back from the transaction to the connection dangling. + */ + transaction_base const *m_trans = nullptr; + + std::list m_errorhandlers; + + using receiver_list = + std::multimap; + /// Notification receivers. + receiver_list m_receivers; + + /// Unique number to use as suffix for identifiers (see adorn_name()). + int m_unique_id = 0; +}; + + +/// @deprecated Old base class for connection. They are now the same class. +using connection_base = connection; + + +/// An ongoing, non-blocking stepping stone to a connection. +/** Use this when you want to create a connection to the database, but without + * blocking your whole thread. It is only available on systems that have + * the `` header, and Windows. + * + * Connecting in this way is probably not "faster" (it's more complicated and + * has some extra overhead), but in some situations you can use it to make your + * application as a whole faster. It all depends on having other useful work + * to do in the same thread, and being able to wait on a socket. If you have + * other I/O going on at the same time, your event loop can wait for both the + * libpqxx socket and your own sockets, and wake up whenever any of them is + * ready to do work. + * + * Connecting in this way is not properly "asynchronous;" it's merely + * "nonblocking." This means it's not a super-high-performance mechanism like + * you might get with e.g. `io_uring`. In particular, if we need to look up + * the database hostname in DNS, that will happen synchronously. + * + * To use this, create the `connecting` object, passing a connection string. 
+ * Then loop: If @ref wait_to_read returns true, wait for the socket to have + * incoming data on it. If @ref wait_to_write returns true, wait for the + * socket to be ready for writing. Then call @ref process to process any + * incoming or outgoing data. Do all of this until @ref done returns true (or + * there is an exception). Finally, call @ref produce to get the completed + * connection. + * + * For example: + * + * ```cxx + * pqxx::connecting cg{}; + * + * // Loop until we're done connecting. + * while (!cg.done()) + * { + * wait_for_fd(cg.sock(), cg.wait_to_read(), cg.wait_to_write()); + * cg.process(); + * } + * + * pqxx::connection conn = std::move(cg).produce(); + * + * // At this point, conn is a working connection. You can no longer use + * // cg at all. + * ``` + */ +class PQXX_LIBEXPORT connecting +{ +public: + /// Start connecting. + connecting(zview connection_string = ""_zv); + + connecting(connecting const &) = delete; + connecting(connecting &&) = default; + connecting &operator=(connecting const &) = delete; + connecting &operator=(connecting &&) = default; + + /// Get the socket. The socket may change during the connection process. + [[nodiscard]] int sock() const &noexcept { return m_conn.sock(); } + + /// Should we currently wait to be able to _read_ from the socket? + [[nodiscard]] constexpr bool wait_to_read() const &noexcept + { + return m_reading; + } + + /// Should we currently wait to be able to _write_ to the socket? + [[nodiscard]] constexpr bool wait_to_write() const &noexcept + { + return m_writing; + } + + /// Progress towards completion (but don't block). + void process() &; + + /// Is our connection finished? + [[nodiscard]] constexpr bool done() const &noexcept + { + return not m_reading and not m_writing; + } + + /// Produce the completed connection object. + /** Use this only once, after @ref done returned `true`. Once you have + * called this, the `connecting` instance has no more use or meaning. You + * can't call any of its member functions afterwards. + * + * This member function is rvalue-qualified, meaning that you can only call + * it on an rvalue instance of the class. If what you have is not an rvalue, + * turn it into one by wrapping it in `std::move()`. + */ + [[nodiscard]] connection produce() &&; + +private: + connection m_conn; + bool m_reading{false}; + bool m_writing{true}; +}; + + +template inline std::string connection::quote(T const &t) const +{ + if constexpr (nullness::always_null) + { + return "NULL"; + } + else + { + if (is_null(t)) + return "NULL"; + auto const text{to_string(t)}; + + // Okay, there's an easy way to do this and there's a hard way. The easy + // way was "quote, esc(to_string(t)), quote". I'm going with the hard way + // because it's going to save some string manipulation that will probably + // incur some unnecessary memory allocations and deallocations. 
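+    // The plan: build the result in one buffer sized for the worst case
+    // (opening quote, up to two output bytes per input byte, closing quote,
+    // plus room for esc_to_buf's terminating zero), then shrink it to the
+    // size actually written.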
+ std::string buf{'\''}; + buf.resize(2 + 2 * std::size(text) + 1); + auto const content_bytes{esc_to_buf(text, buf.data() + 1)}; + auto const closing_quote{1 + content_bytes}; + buf[closing_quote] = '\''; + auto const end{closing_quote + 1}; + buf.resize(end); + return buf; + } +} + + +template +inline std::string connection::quote_columns(STRINGS const &columns) const +{ + return separated_list( + ","sv, std::cbegin(columns), std::cend(columns), + [this](auto col) { return this->quote_name(*col); }); +} + + +#if defined(PQXX_HAVE_CONCEPTS) +template +inline connection::connection(MAPPING const ¶ms) +{ + check_version(); + + std::vector keys, values; + if constexpr (std::ranges::sized_range) + { + auto const size{std::ranges::size(params) + 1}; + keys.reserve(size); + values.reserve(size); + } + for (auto const &[key, value] : params) + { + keys.push_back(internal::as_c_string(key)); + values.push_back(internal::as_c_string(value)); + } + keys.push_back(nullptr); + values.push_back(nullptr); + init(std::data(keys), std::data(values)); +} +#endif // PQXX_HAVE_CONCEPTS +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/cursor b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/cursor new file mode 100644 index 000000000..e20b3a4fa --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/cursor @@ -0,0 +1,8 @@ +/** Definition of the iterator/container-style cursor classes. + * + * C++-style wrappers for SQL cursors + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/cursor.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/cursor.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/cursor.hxx new file mode 100644 index 000000000..b392e2407 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/cursor.hxx @@ -0,0 +1,483 @@ +/* Definition of the iterator/container-style cursor classes. + * + * C++-style wrappers for SQL cursors. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/cursor instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_CURSOR +#define PQXX_H_CURSOR + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include + +#include "pqxx/result.hxx" +#include "pqxx/transaction_base.hxx" + + +namespace pqxx +{ +/// Common definitions for cursor types +/** In C++ terms, fetches are always done in pre-increment or pre-decrement + * fashion--i.e. the result does not include the row the cursor is on at the + * beginning of the fetch, and the cursor ends up being positioned on the last + * row in the result. + * + * There are singular positions akin to `end()` at both the beginning and the + * end of the cursor's range of movement, although these fit in so naturally + * with the semantics that one rarely notices them. The cursor begins at the + * first of these, but any fetch in the forward direction will move the cursor + * off this position and onto the first row before returning anything. 
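+ *
+ * Concretely: the first single-row fetch in the forward direction returns the
+ * result set's first row and leaves the cursor positioned on that row; the
+ * next such fetch returns the second row, and so on.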
+ */ +class PQXX_LIBEXPORT cursor_base +{ +public: + using size_type = result_size_type; + using difference_type = result_difference_type; + + /// Cursor access-pattern policy + /** Allowing a cursor to move forward only can result in better performance, + * so use this access policy whenever possible. + */ + enum access_policy + { + /// Cursor can move forward only + forward_only, + /// Cursor can move back and forth + random_access + }; + + /// Cursor update policy + /** + * @warning Not all PostgreSQL versions support updatable cursors. + */ + enum update_policy + { + /// Cursor can be used to read data but not to write + read_only, + /// Cursor can be used to update data as well as read it + update + }; + + /// Cursor destruction policy + /** The normal thing to do is to make a cursor object the owner of the SQL + * cursor it represents. There may be cases, however, where a cursor needs + * to persist beyond the end of the current transaction (and thus also beyond + * the lifetime of the cursor object that created it!), where it can be + * "adopted" into a new cursor object. See the basic_cursor documentation + * for an explanation of cursor adoption. + * + * If a cursor is created with "loose" ownership policy, the object + * representing the underlying SQL cursor will not take the latter with it + * when its own lifetime ends, nor will its originating transaction. + * + * @warning Use this feature with care and moderation. Only one cursor + * object should be responsible for any one underlying SQL cursor at any + * given time. + */ + enum ownership_policy + { + /// Destroy SQL cursor when cursor object is closed at end of transaction + owned, + /// Leave SQL cursor in existence after close of object and transaction + loose + }; + + cursor_base() = delete; + cursor_base(cursor_base const &) = delete; + cursor_base &operator=(cursor_base const &) = delete; + + /** + * @name Special movement distances. + */ + //@{ + + // TODO: Make constexpr inline (but breaks ABI). + /// Special value: read until end. + /** @return Maximum value for result::difference_type, so the cursor will + * attempt to read the largest possible result set. + */ + [[nodiscard]] static difference_type all() noexcept; + + /// Special value: read one row only. + /** @return Unsurprisingly, 1. + */ + [[nodiscard]] static constexpr difference_type next() noexcept { return 1; } + + /// Special value: read backwards, one row only. + /** @return Unsurprisingly, -1. + */ + [[nodiscard]] static constexpr difference_type prior() noexcept + { + return -1; + } + + // TODO: Make constexpr inline (but breaks ABI). + /// Special value: read backwards from current position back to origin. + /** @return Minimum value for result::difference_type. + */ + [[nodiscard]] static difference_type backward_all() noexcept; + + //@} + + /// Name of underlying SQL cursor + /** + * @returns Name of SQL cursor, which may differ from original given name. + * @warning Don't use this to access the SQL cursor directly without going + * through the provided wrapper classes! + */ + [[nodiscard]] constexpr std::string const &name() const noexcept + { + return m_name; + } + +protected: + cursor_base(connection &, std::string_view Name, bool embellish_name = true); + + std::string const m_name; +}; +} // namespace pqxx + + +#include + + +namespace pqxx +{ +/// "Stateless cursor" class: easy API for retrieving parts of result sets +/** This is a front-end for SQL cursors, but with a more C++-like API. 
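+ *
+ * For example, a sketch of fetching one "page" out of a larger result set,
+ * given a transaction `tx`; the query and cursor name are illustrative:
+ *
+ * ```cxx
+ * pqxx::stateless_cursor<pqxx::cursor_base::read_only, pqxx::cursor_base::owned>
+ *   cur{tx, "SELECT * FROM mytable ORDER BY id", "page_cur", false};
+ * pqxx::result page{cur.retrieve(100, 200)};  // Rows 100 through 199.
+ * ```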
+ * + * Actually, stateless_cursor feels entirely different from SQL cursors. You + * don't keep track of positions, fetches, and moves; you just say which rows + * you want. See the retrieve() member function. + */ +template +class stateless_cursor +{ +public: + using size_type = result_size_type; + using difference_type = result_difference_type; + + /// Create cursor. + /** + * @param tx The transaction within which you want to create the cursor. + * @param query The SQL query whose results the cursor should traverse. + * @param cname A hint for the cursor's name. The actual SQL cursor's name + * will be based on this (though not necessarily identical). + * @param hold Create a `WITH HOLD` cursor? Such cursors stay alive after + * the transaction has ended, so you can continue to use it. + */ + stateless_cursor( + transaction_base &tx, std::string_view query, std::string_view cname, + bool hold) : + m_cur{tx, query, cname, cursor_base::random_access, up, op, hold} + {} + + /// Adopt an existing scrolling SQL cursor. + /** This lets you define a cursor yourself, and then wrap it in a + * libpqxx-managed `stateless_cursor` object. + * + * @param tx The transaction within which you want to manage the cursor. + * @param adopted_cursor Your cursor's SQL name. + */ + stateless_cursor(transaction_base &tx, std::string_view adopted_cursor) : + m_cur{tx, adopted_cursor, op} + { + // Put cursor in known position + m_cur.move(cursor_base::backward_all()); + } + + /// Close this cursor. + /** The destructor will do this for you automatically. + * + * Closing a cursor is idempotent. Closing a cursor that's already closed + * does nothing. + */ + void close() noexcept { m_cur.close(); } + + /// Number of rows in cursor's result set + /** @note This function is not const; it may need to scroll to find the size + * of the result set. + */ + [[nodiscard]] size_type size() + { + return internal::obtain_stateless_cursor_size(m_cur); + } + + /// Retrieve rows from begin_pos (inclusive) to end_pos (exclusive) + /** Rows are numbered starting from 0 to size()-1. + * + * @param begin_pos First row to retrieve. May be one row beyond the end of + * the result set, to avoid errors for empty result sets. Otherwise, must be + * a valid row number in the result set. + * @param end_pos Row up to which to fetch. Rows are returned ordered from + * begin_pos to end_pos, i.e. in ascending order if begin_pos < end_pos but + * in descending order if begin_pos > end_pos. The end_pos may be + * arbitrarily inside or outside the result set; only existing rows are + * included in the result. + */ + result retrieve(difference_type begin_pos, difference_type end_pos) + { + return internal::stateless_cursor_retrieve( + m_cur, result::difference_type(size()), begin_pos, end_pos); + } + + /// Return this cursor's name. + [[nodiscard]] constexpr std::string const &name() const noexcept + { + return m_cur.name(); + } + +private: + internal::sql_cursor m_cur; +}; + + +class icursor_iterator; +} // namespace pqxx + + +namespace pqxx::internal::gate +{ +class icursor_iterator_icursorstream; +class icursorstream_icursor_iterator; +} // namespace pqxx::internal::gate + + +namespace pqxx +{ +/// Simple read-only cursor represented as a stream of results +/** SQL cursors can be tricky, especially in C++ since the two languages seem + * to have been designed on different planets. An SQL cursor has two singular + * positions akin to `end()` on either side of the underlying result set. 
+ * + * These cultural differences are hidden from view somewhat by libpqxx, which + * tries to make SQL cursors behave more like familiar C++ entities such as + * iterators, sequences, streams, and containers. + * + * Data is fetched from the cursor as a sequence of result objects. Each of + * these will contain the number of rows defined as the stream's stride, except + * of course the last block of data which may contain fewer rows. + * + * This class can create or adopt cursors that live outside any backend + * transaction, which your backend version may not support. + */ +class PQXX_LIBEXPORT icursorstream +{ +public: + using size_type = cursor_base::size_type; + using difference_type = cursor_base::difference_type; + + /// Set up a read-only, forward-only cursor. + /** Roughly equivalent to a C++ Standard Library istream, this cursor type + * supports only two operations: reading a block of rows while moving + * forward, and moving forward without reading any data. + * + * @param context Transaction context in which this cursor will be active. + * @param query SQL query whose results this cursor shall iterate. + * @param basename Suggested name for the SQL cursor; the library will append + * a unique code to ensure its uniqueness. + * @param sstride Number of rows to fetch per read operation; must be a + * positive number. + */ + icursorstream( + transaction_base &context, std::string_view query, + std::string_view basename, difference_type sstride = 1); + + /// Adopt existing SQL cursor. Use with care. + /** Forms a cursor stream around an existing SQL cursor, as returned by e.g. + * a server-side function. The SQL cursor will be cleaned up by the stream's + * destructor as if it had been created by the stream; cleaning it up by hand + * or adopting the same cursor twice is an error. + * + * Passing the name of the cursor as a string is not allowed, both to avoid + * confusion with the other constructor and to discourage unnecessary use of + * adopted cursors. + * + * @warning It is technically possible to adopt a "WITH HOLD" cursor, i.e. a + * cursor that stays alive outside its creating transaction. However, any + * cursor stream (including the underlying SQL cursor, naturally) must be + * destroyed before its transaction context object is destroyed. Therefore + * the only way to use SQL's WITH HOLD feature is to adopt the cursor, but + * defer doing so until after entering the transaction context that will + * eventually destroy it. + * + * @param context Transaction context in which this cursor will be active. + * @param cname Result field containing the name of the SQL cursor to adopt. + * @param sstride Number of rows to fetch per read operation; must be a + * positive number. + * @param op Ownership policy. Determines whether the cursor underlying this + * stream will be destroyed when the stream is closed. + */ + icursorstream( + transaction_base &context, field const &cname, difference_type sstride = 1, + cursor_base::ownership_policy op = cursor_base::owned); + + /// Return `true` if this stream may still return more data. + constexpr operator bool() const &noexcept { return not m_done; } + + /// Read new value into given result object; same as operator `>>`. + /** The result set may continue any number of rows from zero to the chosen + * stride, inclusive. An empty result will only be returned if there are no + * more rows to retrieve. + * + * @param res Write the retrieved data into this result object. 
+ * @return Reference to this very stream, to facilitate "chained" invocations + * ("C.get(r1).get(r2);") + */ + icursorstream &get(result &res) + { + res = fetchblock(); + return *this; + } + /// Read new value into given result object; same as `get(result&)`. + /** The result set may continue any number of rows from zero to the chosen + * stride, inclusive. An empty result will only be returned if there are no + * more rows to retrieve. + * + * @param res Write the retrieved data into this result object. + * @return Reference to this very stream, to facilitate "chained" invocations + * ("C >> r1 >> r2;") + */ + icursorstream &operator>>(result &res) { return get(res); } + + /// Move given number of rows forward without reading data. + /** Ignores any stride that you may have set. It moves by a given number of + * rows, not a number of strides. + * + * @return Reference to this stream itself, to facilitate "chained" + * invocations. + */ + icursorstream &ignore(std::streamsize n = 1) &; + + /// Change stride, i.e. the number of rows to fetch per read operation. + /** + * @param stride Must be a positive number. + */ + void set_stride(difference_type stride) &; + [[nodiscard]] constexpr difference_type stride() const noexcept + { + return m_stride; + } + +private: + result fetchblock(); + + friend class internal::gate::icursorstream_icursor_iterator; + size_type forward(size_type n = 1); + void insert_iterator(icursor_iterator *) noexcept; + void remove_iterator(icursor_iterator *) const noexcept; + + void service_iterators(difference_type); + + internal::sql_cursor m_cur; + + difference_type m_stride; + difference_type m_realpos, m_reqpos; + + mutable icursor_iterator *m_iterators; + + bool m_done; +}; + + +/// Approximate istream_iterator for icursorstream. +/** Intended as an implementation of an input_iterator (as defined by the C++ + * Standard Library), this class supports only two basic operations: reading + * the current element, and moving forward. In addition to the minimal + * guarantees for istream_iterators, this class supports multiple successive + * reads of the same position (the current result set is cached in the + * iterator) even after copying and even after new data have been read from the + * stream. This appears to be a requirement for input_iterators. Comparisons + * are also supported in the general case. + * + * The iterator does not care about its own position, however. Moving an + * iterator forward moves the underlying stream forward and reads the data from + * the new stream position, regardless of the iterator's old position in the + * stream. + * + * The stream's stride defines the granularity for all iterator movement or + * access operations, i.e. "ici += 1" advances the stream by one stride's worth + * of rows, and "*ici++" reads one stride's worth of rows from the stream. + * + * @warning Do not read from the underlying stream or its cursor, move its read + * position, or change its stride, between the time the first icursor_iterator + * on it is created and the time its last icursor_iterator is destroyed. + * + * @warning Manipulating these iterators within the context of a single cursor + * stream is not thread-safe. Creating a new iterator, copying one, + * or destroying one affects the stream as a whole. 
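+ *
+ * A usage sketch, reading a query's results in blocks of 100 rows; the query,
+ * cursor name, and `process()` are illustrative:
+ *
+ * ```cxx
+ * pqxx::icursorstream stream{tx, "SELECT * FROM mytable", "bulk_read", 100};
+ * for (pqxx::icursor_iterator it{stream}, end; it != end; ++it)
+ *   process(*it);  // Each *it is a pqxx::result of up to 100 rows.
+ * ```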
+ */ +class PQXX_LIBEXPORT icursor_iterator +{ +public: + using iterator_category = std::input_iterator_tag; + using value_type = result; + using pointer = result const *; + using reference = result const &; + using istream_type = icursorstream; + using size_type = istream_type::size_type; + using difference_type = istream_type::difference_type; + + icursor_iterator() noexcept; + explicit icursor_iterator(istream_type &) noexcept; + icursor_iterator(icursor_iterator const &) noexcept; + ~icursor_iterator() noexcept; + + result const &operator*() const + { + refresh(); + return m_here; + } + result const *operator->() const + { + refresh(); + return &m_here; + } + icursor_iterator &operator++(); + icursor_iterator operator++(int); + icursor_iterator &operator+=(difference_type); + icursor_iterator &operator=(icursor_iterator const &) noexcept; + + [[nodiscard]] bool operator==(icursor_iterator const &rhs) const; + [[nodiscard]] bool operator!=(icursor_iterator const &rhs) const noexcept + { + return not operator==(rhs); + } + [[nodiscard]] bool operator<(icursor_iterator const &rhs) const; + [[nodiscard]] bool operator>(icursor_iterator const &rhs) const + { + return rhs < *this; + } + [[nodiscard]] bool operator<=(icursor_iterator const &rhs) const + { + return not(*this > rhs); + } + [[nodiscard]] bool operator>=(icursor_iterator const &rhs) const + { + return not(*this < rhs); + } + +private: + void refresh() const; + + friend class internal::gate::icursor_iterator_icursorstream; + difference_type pos() const noexcept { return m_pos; } + void fill(result const &); + + icursorstream *m_stream{nullptr}; + result m_here; + difference_type m_pos; + icursor_iterator *m_prev{nullptr}, *m_next{nullptr}; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/dbtransaction b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/dbtransaction new file mode 100644 index 000000000..fa8d26476 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/dbtransaction @@ -0,0 +1,8 @@ +/** pqxx::dbtransaction abstract base class. + * + * pqxx::dbransaction defines a real transaction on the database. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/dbtransaction.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/dbtransaction.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/dbtransaction.hxx new file mode 100644 index 000000000..d85cb170f --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/dbtransaction.hxx @@ -0,0 +1,70 @@ +/* Definition of the pqxx::dbtransaction abstract base class. + * + * pqxx::dbransaction defines a real transaction on the database. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/dbtransaction instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_DBTRANSACTION +#define PQXX_H_DBTRANSACTION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/transaction_base.hxx" + +namespace pqxx +{ +/// Abstract transaction base class: bracket transactions on the database. 
+/** + * @ingroup transactions + * + * Use a dbtransaction-derived object such as "work" (transaction<>) to enclose + * operations on a database in a single "unit of work." This ensures that the + * whole series of operations either succeeds as a whole or fails completely. + * In no case will it leave half-finished work behind in the database. + * + * Once processing on a transaction has succeeded and any changes should be + * allowed to become permanent in the database, call commit(). If something + * has gone wrong and the changes should be forgotten, call abort() instead. + * If you do neither, an implicit abort() is executed at destruction time. + * + * It is an error to abort a transaction that has already been committed, or to + * commit a transaction that has already been aborted. Aborting an already + * aborted transaction or committing an already committed one is allowed, to + * make error handling easier. Repeated aborts or commits have no effect after + * the first one. + * + * Database transactions are not suitable for guarding long-running processes. + * If your transaction code becomes too long or too complex, consider ways to + * break it up into smaller ones. Unfortunately there is no universal recipe + * for this. + * + * The actual operations for committing/aborting the backend transaction are + * implemented by a derived class. The implementing concrete class must also + * call @ref close from its destructor. + */ +class PQXX_LIBEXPORT PQXX_NOVTABLE dbtransaction : public transaction_base +{ +protected: + /// Begin transaction. + explicit dbtransaction(connection &c) : transaction_base{c} {} + /// Begin transaction. + dbtransaction(connection &c, std::string_view tname) : + transaction_base{c, tname} + {} + /// Begin transaction. + dbtransaction( + connection &c, std::string_view tname, + std::shared_ptr rollback_cmd) : + transaction_base{c, tname, rollback_cmd} + {} +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/errorhandler b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/errorhandler new file mode 100644 index 000000000..ea572ee79 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/errorhandler @@ -0,0 +1,8 @@ +/** pqxx::errorhandler class. + * + * Callbacks for handling errors and warnings. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/errorhandler.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/errorhandler.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/errorhandler.hxx new file mode 100644 index 000000000..2ffb5703c --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/errorhandler.hxx @@ -0,0 +1,92 @@ +/* Definition of the pqxx::errorhandler class. + * + * pqxx::errorhandler handlers errors and warnings in a database session. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/errorhandler instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ERRORHANDLER +#define PQXX_H_ERRORHANDLER + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." 
+#endif + +#include "pqxx/types.hxx" + + +namespace pqxx::internal::gate +{ +class errorhandler_connection; +} + + +namespace pqxx +{ +/** + * @addtogroup errorhandler + */ +//@{ + +/// Base class for error-handler callbacks. +/** To receive errors and warnings from a connection, subclass this with your + * own error-handler functor, and instantiate it for the connection. Destroying + * the handler un-registers it. + * + * A connection can have multiple error handlers at the same time. When the + * database connection emits an error or warning message, it passes the message + * to each error handler, starting with the most recently registered one and + * progressing towards the oldest one. However an error handler may also + * instruct the connection not to pass the message to further handlers by + * returning "false." + * + * @warning Strange things happen when a result object outlives its parent + * connection. If you register an error handler on a connection, then you must + * not access the result after destroying the connection. This applies even if + * you destroy the error handler first! + */ +class PQXX_LIBEXPORT errorhandler +{ +public: + explicit errorhandler(connection &); + virtual ~errorhandler(); + + /// Define in subclass: receive an error or warning message from the + /// database. + /** + * @return Whether the same error message should also be passed to the + * remaining, older errorhandlers. + */ + virtual bool operator()(char const msg[]) noexcept = 0; + + errorhandler() = delete; + errorhandler(errorhandler const &) = delete; + errorhandler &operator=(errorhandler const &) = delete; + +private: + connection *m_home; + + friend class internal::gate::errorhandler_connection; + void unregister() noexcept; +}; + + +/// An error handler that suppresses any previously registered error handlers. +class quiet_errorhandler : public errorhandler +{ +public: + /// Suppress error notices. + quiet_errorhandler(connection &conn) : errorhandler{conn} {} + + /// Revert to previous handling of error notices. + virtual bool operator()(char const[]) noexcept override { return false; } +}; + +//@} +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/except b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/except new file mode 100644 index 000000000..e5dd508bf --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/except @@ -0,0 +1,8 @@ +/** libpqxx exception classes. + * + * pqxx::sql_error, pqxx::broken_connection, pqxx::in_doubt_error, ... + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/except.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/except.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/except.hxx new file mode 100644 index 000000000..24f959437 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/except.hxx @@ -0,0 +1,447 @@ +/* Definition of libpqxx exception classes. + * + * pqxx::sql_error, pqxx::broken_connection, pqxx::in_doubt_error, ... + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/except instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. 
+ */ +#ifndef PQXX_H_EXCEPT +#define PQXX_H_EXCEPT + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include + + +namespace pqxx +{ +/** + * @addtogroup exception Exception classes + * + * These exception classes follow, roughly, the two-level hierarchy defined by + * the PostgreSQL SQLSTATE error codes (see Appendix A of the PostgreSQL + * documentation corresponding to your server version). This is not a complete + * mapping though. There are other differences as well, e.g. the error code + * for `statement_completion_unknown` has a separate status in libpqxx as + * @ref in_doubt_error, and `too_many_connections` is classified as a + * `broken_connection` rather than a subtype of `insufficient_resources`. + * + * @see http://www.postgresql.org/docs/9.4/interactive/errcodes-appendix.html + * + * @{ + */ + +/// Run-time failure encountered by libpqxx, similar to std::runtime_error. +struct PQXX_LIBEXPORT failure : std::runtime_error +{ + explicit failure(std::string const &); +}; + + +/// Exception class for lost or failed backend connection. +/** + * @warning When this happens on Unix-like systems, you may also get a SIGPIPE + * signal. That signal aborts the program by default, so if you wish to be + * able to continue after a connection breaks, be sure to disarm this signal. + * + * If you're working on a Unix-like system, see the manual page for + * `signal` (2) on how to deal with SIGPIPE. The easiest way to make this + * signal harmless is to make your program ignore it: + * + * ```cxx + * #include + * + * int main() + * { + * signal(SIGPIPE, SIG_IGN); + * // ... + * ``` + */ +struct PQXX_LIBEXPORT broken_connection : failure +{ + broken_connection(); + explicit broken_connection(std::string const &); +}; + + +/// The caller attempted to set a variable to null, which is not allowed. +struct PQXX_LIBEXPORT variable_set_to_null : failure +{ + variable_set_to_null(); + explicit variable_set_to_null(std::string const &); +}; + + +/// Exception class for failed queries. +/** Carries, in addition to a regular error message, a copy of the failed query + * and (if available) the SQLSTATE value accompanying the error. + */ +class PQXX_LIBEXPORT sql_error : public failure +{ + /// Query string. Empty if unknown. + std::string const m_query; + /// SQLSTATE string describing the error type, if known; or empty string. + std::string const m_sqlstate; + +public: + explicit sql_error( + std::string const &whatarg = "", std::string const &Q = "", + char const sqlstate[] = nullptr); + virtual ~sql_error() noexcept override; + + /// The query whose execution triggered the exception + [[nodiscard]] PQXX_PURE std::string const &query() const noexcept; + + /// SQLSTATE error code if known, or empty string otherwise. + [[nodiscard]] PQXX_PURE std::string const &sqlstate() const noexcept; +}; + + +/// "Help, I don't know whether transaction was committed successfully!" +/** Exception that might be thrown in rare cases where the connection to the + * database is lost while finishing a database transaction, and there's no way + * of telling whether it was actually executed by the backend. In this case + * the database is left in an indeterminate (but consistent) state, and only + * manual inspection will tell which is the case. + */ +struct PQXX_LIBEXPORT in_doubt_error : failure +{ + explicit in_doubt_error(std::string const &); +}; + + +/// The backend saw itself forced to roll back the ongoing transaction. 
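+/** Subclasses such as @ref serialization_failure and @ref deadlock_detected
+ * signal transient conflicts, so a common response is to retry the entire
+ * transaction a bounded number of times.  A sketch; the statement and the
+ * retry count are illustrative:
+ *
+ * ```cxx
+ * for (int attempt{0}; attempt < 3; ++attempt)
+ *   try
+ *   {
+ *     pqxx::work tx{conn};
+ *     tx.exec0("UPDATE account SET balance = balance - 10 WHERE id = 1");
+ *     tx.commit();
+ *     break;
+ *   }
+ *   catch (pqxx::serialization_failure const &) {}  // Transient; try again.
+ *   catch (pqxx::deadlock_detected const &) {}      // Transient; try again.
+ * ```
+ */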
+struct PQXX_LIBEXPORT transaction_rollback : sql_error +{ + explicit transaction_rollback( + std::string const &whatarg, std::string const &q = "", + char const sqlstate[] = nullptr); +}; + + +/// Transaction failed to serialize. Please retry it. +/** Can only happen at transaction isolation levels REPEATABLE READ and + * SERIALIZABLE. + * + * The current transaction cannot be committed without violating the guarantees + * made by its isolation level. This is the effect of a conflict with another + * ongoing transaction. The transaction may still succeed if you try to + * perform it again. + */ +struct PQXX_LIBEXPORT serialization_failure : transaction_rollback +{ + explicit serialization_failure( + std::string const &whatarg, std::string const &q, + char const sqlstate[] = nullptr); +}; + + +/// We can't tell whether our last statement succeeded. +struct PQXX_LIBEXPORT statement_completion_unknown : transaction_rollback +{ + explicit statement_completion_unknown( + std::string const &whatarg, std::string const &q, + char const sqlstate[] = nullptr); +}; + + +/// The ongoing transaction has deadlocked. Retrying it may help. +struct PQXX_LIBEXPORT deadlock_detected : transaction_rollback +{ + explicit deadlock_detected( + std::string const &whatarg, std::string const &q, + char const sqlstate[] = nullptr); +}; + + +/// Internal error in libpqxx library +struct PQXX_LIBEXPORT internal_error : std::logic_error +{ + explicit internal_error(std::string const &); +}; + + +/// Error in usage of libpqxx library, similar to std::logic_error +struct PQXX_LIBEXPORT usage_error : std::logic_error +{ + explicit usage_error(std::string const &); +}; + + +/// Invalid argument passed to libpqxx, similar to std::invalid_argument +struct PQXX_LIBEXPORT argument_error : std::invalid_argument +{ + explicit argument_error(std::string const &); +}; + + +/// Value conversion failed, e.g. when converting "Hello" to int. +struct PQXX_LIBEXPORT conversion_error : std::domain_error +{ + explicit conversion_error(std::string const &); +}; + + +/// Could not convert value to string: not enough buffer space. +struct PQXX_LIBEXPORT conversion_overrun : conversion_error +{ + explicit conversion_overrun(std::string const &); +}; + + +/// Something is out of range, similar to std::out_of_range +struct PQXX_LIBEXPORT range_error : std::out_of_range +{ + explicit range_error(std::string const &); +}; + + +/// Query returned an unexpected number of rows. +struct PQXX_LIBEXPORT unexpected_rows : public range_error +{ + explicit unexpected_rows(std::string const &msg) : range_error{msg} {} +}; + + +/// Database feature not supported in current setup. +struct PQXX_LIBEXPORT feature_not_supported : sql_error +{ + explicit feature_not_supported( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +/// Error in data provided to SQL statement. 
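+/** Like the other @ref sql_error subclasses in this file, it carries the
+ * failed query and, when known, the SQLSTATE code.  A sketch of logging both;
+ * the statement is illustrative:
+ *
+ * ```cxx
+ * try
+ * {
+ *   tx.exec("SELECT 1 / 0");
+ * }
+ * catch (pqxx::sql_error const &e)
+ * {
+ *   std::cerr << e.what() << '\n'
+ *             << "Query:    " << e.query() << '\n'
+ *             << "SQLSTATE: " << e.sqlstate() << '\n';
+ * }
+ * ```
+ */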
+struct PQXX_LIBEXPORT data_exception : sql_error +{ + explicit data_exception( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT integrity_constraint_violation : sql_error +{ + explicit integrity_constraint_violation( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT restrict_violation : integrity_constraint_violation +{ + explicit restrict_violation( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + integrity_constraint_violation{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT not_null_violation : integrity_constraint_violation +{ + explicit not_null_violation( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + integrity_constraint_violation{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT foreign_key_violation : integrity_constraint_violation +{ + explicit foreign_key_violation( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + integrity_constraint_violation{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT unique_violation : integrity_constraint_violation +{ + explicit unique_violation( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + integrity_constraint_violation{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT check_violation : integrity_constraint_violation +{ + explicit check_violation( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + integrity_constraint_violation{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT invalid_cursor_state : sql_error +{ + explicit invalid_cursor_state( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT invalid_sql_statement_name : sql_error +{ + explicit invalid_sql_statement_name( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT invalid_cursor_name : sql_error +{ + explicit invalid_cursor_name( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT syntax_error : sql_error +{ + /// Approximate position in string where error occurred, or -1 if unknown. 
+ int const error_position; + + explicit syntax_error( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr, int pos = -1) : + sql_error{err, Q, sqlstate}, error_position{pos} + {} +}; + +struct PQXX_LIBEXPORT undefined_column : syntax_error +{ + explicit undefined_column( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + syntax_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT undefined_function : syntax_error +{ + explicit undefined_function( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + syntax_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT undefined_table : syntax_error +{ + explicit undefined_table( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + syntax_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT insufficient_privilege : sql_error +{ + explicit insufficient_privilege( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +/// Resource shortage on the server +struct PQXX_LIBEXPORT insufficient_resources : sql_error +{ + explicit insufficient_resources( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT disk_full : insufficient_resources +{ + explicit disk_full( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + insufficient_resources{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT out_of_memory : insufficient_resources +{ + explicit out_of_memory( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + insufficient_resources{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT too_many_connections : broken_connection +{ + explicit too_many_connections(std::string const &err) : + broken_connection{err} + {} +}; + +/// PL/pgSQL error +/** Exceptions derived from this class are errors from PL/pgSQL procedures. + */ +struct PQXX_LIBEXPORT plpgsql_error : sql_error +{ + explicit plpgsql_error( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +/// Exception raised in PL/pgSQL procedure +struct PQXX_LIBEXPORT plpgsql_raise : plpgsql_error +{ + explicit plpgsql_raise( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + plpgsql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT plpgsql_no_data_found : plpgsql_error +{ + explicit plpgsql_no_data_found( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + plpgsql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT plpgsql_too_many_rows : plpgsql_error +{ + explicit plpgsql_too_many_rows( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + plpgsql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT blob_already_exists : failure +{ + explicit blob_already_exists(std::string const &); +}; + +/** + * @} + */ +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/field b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/field new file mode 100644 index 000000000..37cb69e84 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/field @@ -0,0 +1,8 @@ +/** pqxx::field class. 
+ * + * pqxx::field refers to a field in a query result. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/field.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/field.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/field.hxx new file mode 100644 index 000000000..b8b869fe4 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/field.hxx @@ -0,0 +1,542 @@ +/* Definitions for the pqxx::field class. + * + * pqxx::field refers to a field in a query result. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/field instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_FIELD +#define PQXX_H_FIELD + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include + +#include "pqxx/array.hxx" +#include "pqxx/composite.hxx" +#include "pqxx/result.hxx" +#include "pqxx/strconv.hxx" +#include "pqxx/types.hxx" + +namespace pqxx +{ +/// Reference to a field in a result set. +/** A field represents one entry in a row. It represents an actual value + * in the result set, and can be converted to various types. + */ +class PQXX_LIBEXPORT field +{ +public: + using size_type = field_size_type; + + /// Constructor. Do not call this yourself; libpqxx will do it for you. + /** Create field as reference to a field in a result set. + * @param r Row that this field is part of. + * @param c Column number of this field. + */ + [[deprecated( + "Do not construct fields yourself. Get them from the row.")]] field(row const &r, row_size_type c) noexcept; + + /// Constructor. Do not call this yourself; libpqxx will do it for you. + [[deprecated( + "Do not construct fields yourself. Get them from the " + "row.")]] field() noexcept = default; + + /** + * @name Comparison + */ + //@{ + // TODO: noexcept. Breaks ABI. + /// Byte-by-byte comparison of two fields (all nulls are considered equal) + /** @warning null handling is still open to discussion and change! + * + * Handling of null values differs from that in SQL where a comparison + * involving a null value yields null, so nulls are never considered equal + * to one another or even to themselves. + * + * Null handling also probably differs from the closest equivalent in C++, + * which is the NaN (Not-a-Number) value, a singularity comparable to + * SQL's null. This is because the builtin == operator demands that a == a. + * + * The usefulness of this operator is questionable. No interpretation + * whatsoever is imposed on the data; 0 and 0.0 are considered different, + * as are null vs. the empty string, or even different (but possibly + * equivalent and equally valid) encodings of the same Unicode character + * etc. + */ + [[nodiscard]] PQXX_PURE bool operator==(field const &) const; + + /// Byte-by-byte comparison (all nulls are considered equal) + /** @warning See operator==() for important information about this operator + */ + [[nodiscard]] PQXX_PURE bool operator!=(field const &rhs) const noexcept + { + return not operator==(rhs); + } + //@} + + /** + * @name Column information + */ + //@{ + /// Column name. + [[nodiscard]] PQXX_PURE char const *name() const &; + + /// Column type. 
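+  /** A quick illustration (sketch; `f` is assumed to be a pqxx::field taken
+   * from a query result):
+   *
+   * ```cxx
+   * std::cout << "Column " << f.name() << " has type oid " << f.type()
+   *           << ".\n";
+   * ```
+   */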
+ [[nodiscard]] oid PQXX_PURE type() const; + + /// What table did this column come from? + [[nodiscard]] PQXX_PURE oid table() const; + + /// Return row number. The first row is row 0, the second is row 1, etc. + PQXX_PURE constexpr row_size_type num() const noexcept { return col(); } + + /// What column number in its originating table did this column come from? + [[nodiscard]] PQXX_PURE row_size_type table_column() const; + //@} + + /** + * @name Content access + */ + //@{ + /// Read as `string_view`, or an empty one if null. + /** The result only remains usable while the data for the underlying + * @ref result exists. Once all `result` objects referring to that data have + * been destroyed, the `string_view` will no longer point to valid memory. + */ + [[nodiscard]] PQXX_PURE std::string_view view() const & + { + return std::string_view(c_str(), size()); + } + + /// Read as plain C string. + /** Since the field's data is stored internally in the form of a + * zero-terminated C string, this is the fastest way to read it. Use the + * to() or as() functions to convert the string to other types such as + * `int`, or to C++ strings. + * + * Do not use this for BYTEA values, or other binary values. To read those, + * convert the value to your desired type using `to()` or `as()`. For + * example: `f.as>()`. + */ + [[nodiscard]] PQXX_PURE char const *c_str() const &; + + /// Is this field's value null? + [[nodiscard]] PQXX_PURE bool is_null() const noexcept; + + /// Return number of bytes taken up by the field's value. + [[nodiscard]] PQXX_PURE size_type size() const noexcept; + + /// Read value into obj; or if null, leave obj untouched and return `false`. + /** This can be used with optional types (except pointers other than C-style + * strings). + */ + template + auto to(T &obj) const -> typename std::enable_if_t< + (not std::is_pointer::value or std::is_same::value), + bool> + { + if (is_null()) + { + return false; + } + else + { + auto const bytes{c_str()}; + from_string(bytes, obj); + return true; + } + } + + /// Read field as a composite value, write its components into `fields`. + /** @warning This is still experimental. It may change or be replaced. + * + * Returns whether the field was null. If it was, it will not touch the + * values in `fields`. + */ + template bool composite_to(T &...fields) const + { + if (is_null()) + { + return false; + } + else + { + parse_composite(m_home.m_encoding, view(), fields...); + return true; + } + } + + /// Read value into obj; or leave obj untouched and return `false` if null. + template bool operator>>(T &obj) const { return to(obj); } + + /// Read value into obj; or if null, use default value and return `false`. + /** This can be used with `std::optional`, as well as with standard smart + * pointer types, but not with raw pointers. If the conversion from a + * PostgreSQL string representation allocates a pointer (e.g. using `new`), + * then the object's later deallocation should be baked in as well, right + * from the point where the object is created. So if you want a pointer, use + * a smart pointer, not a raw pointer. + * + * There is one exception, of course: C-style strings. Those are just + * pointers to the field's internal text data. 
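+   *
+   * A minimal usage sketch (assumes `f` is a pqxx::field):
+   *
+   * ```cxx
+   * int n;
+   * if (not f.to(n, -1))
+   *   std::cout << "Field was null, so n now holds the default, -1.\n";
+   * ```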
+ */ + template + auto to(T &obj, T const &default_value) const -> typename std::enable_if_t< + (not std::is_pointer::value or std::is_same::value), + bool> + { + bool const null{is_null()}; + if (null) + obj = default_value; + else + obj = from_string(this->view()); + return not null; + } + + /// Return value as object of given type, or default value if null. + /** Note that unless the function is instantiated with an explicit template + * argument, the Default value's type also determines the result type. + */ + template T as(T const &default_value) const + { + if (is_null()) + return default_value; + else + return from_string(this->view()); + } + + /// Return value as object of given type, or throw exception if null. + /** Use as `as>()` or `as()` as + * an alternative to `get()`; this is disabled for use with raw pointers + * (other than C-strings) because storage for the value can't safely be + * allocated here + */ + template T as() const + { + if (is_null()) + { + if constexpr (not nullness::has_null) + internal::throw_null_conversion(type_name); + else + return nullness::null(); + } + else + { + return from_string(this->view()); + } + } + + /// Return value wrapped in some optional type (empty for nulls). + /** Use as `get()` as before to obtain previous behavior, or specify + * container type with `get()` + */ + template class O = std::optional> + constexpr O get() const + { + return as>(); + } + + // TODO: constexpr noexcept, once array_parser constructor gets those. + /// Parse the field as an SQL array. + /** Call the parser to retrieve values (and structure) from the array. + * + * Make sure the @ref result object stays alive until parsing is finished. If + * you keep the @ref row of `field` object alive, it will keep the @ref + * result object alive as well. + */ + array_parser as_array() const & + { + return array_parser{c_str(), m_home.m_encoding}; + } + //@} + + +protected: + constexpr result const &home() const noexcept { return m_home; } + constexpr result::size_type idx() const noexcept { return m_row; } + constexpr row_size_type col() const noexcept { return m_col; } + + // TODO: Create gates. + friend class pqxx::result; + friend class pqxx::row; + field( + result const &r, result_size_type row_num, row_size_type col_num) noexcept + : + m_col{col_num}, m_home{r}, m_row{row_num} + {} + + /** + * You'd expect this to be unsigned, but due to the way reverse iterators + * are related to regular iterators, it must be allowed to underflow to -1. + */ + row_size_type m_col; + +private: + result m_home; + result::size_type m_row; +}; + + +template<> inline bool field::to(std::string &obj) const +{ + bool const null{is_null()}; + if (not null) + obj = std::string{view()}; + return not null; +} + + +template<> +inline bool field::to( + std::string &obj, std::string const &default_value) const +{ + bool const null{is_null()}; + if (null) + obj = default_value; + else + obj = std::string{view()}; + return not null; +} + + +/// Specialization: `to(char const *&)`. +/** The buffer has the same lifetime as the data in this result (i.e. of this + * result object, or the last remaining one copied from it etc.), so take care + * not to use it after the last result object referring to this query result is + * destroyed. 
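+ *
+ * A sketch of the caveat (assumes `r` is a pqxx::result):
+ *
+ * ```cxx
+ * char const *p = nullptr;
+ * r[0][0].to(p);  // p now points into r's internal storage.
+ * // Use p only while r (or another result copied from it) is still alive.
+ * ```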
+ */ +template<> inline bool field::to(char const *&obj) const +{ + bool const null{is_null()}; + if (not null) + obj = c_str(); + return not null; +} + + +template<> inline bool field::to(std::string_view &obj) const +{ + bool const null{is_null()}; + if (not null) + obj = view(); + return not null; +} + + +template<> +inline bool field::to( + std::string_view &obj, std::string_view const &default_value) const +{ + bool const null{is_null()}; + if (null) + obj = default_value; + else + obj = view(); + return not null; +} + + +template<> inline std::string_view field::as() const +{ + if (is_null()) + PQXX_UNLIKELY + internal::throw_null_conversion(type_name); + return view(); +} + + +template<> +inline std::string_view +field::as(std::string_view const &default_value) const +{ + return is_null() ? default_value : view(); +} + + +template<> inline bool field::to(zview &obj) const +{ + bool const null{is_null()}; + if (not null) + obj = zview{c_str(), size()}; + return not null; +} + + +template<> +inline bool field::to(zview &obj, zview const &default_value) const +{ + bool const null{is_null()}; + if (null) + obj = default_value; + else + obj = zview{c_str(), size()}; + return not null; +} + + +template<> inline zview field::as() const +{ + if (is_null()) + PQXX_UNLIKELY + internal::throw_null_conversion(type_name); + return zview{c_str(), size()}; +} + + +template<> inline zview field::as(zview const &default_value) const +{ + return is_null() ? default_value : zview{c_str(), size()}; +} + + +template> +class field_streambuf : public std::basic_streambuf +{ +public: + using char_type = CHAR; + using traits_type = TRAITS; + using int_type = typename traits_type::int_type; + using pos_type = typename traits_type::pos_type; + using off_type = typename traits_type::off_type; + using openmode = std::ios::openmode; + using seekdir = std::ios::seekdir; + + explicit field_streambuf(field const &f) : m_field{f} { initialize(); } + +protected: + virtual int sync() override { return traits_type::eof(); } + + virtual pos_type seekoff(off_type, seekdir, openmode) override + { + return traits_type::eof(); + } + virtual pos_type seekpos(pos_type, openmode) override + { + return traits_type::eof(); + } + virtual int_type overflow(int_type) override { return traits_type::eof(); } + virtual int_type underflow() override { return traits_type::eof(); } + +private: + field const &m_field; + + int_type initialize() + { + auto g{static_cast(const_cast(m_field.c_str()))}; + this->setg(g, g, g + std::size(m_field)); + return int_type(std::size(m_field)); + } +}; + + +/// Input stream that gets its data from a result field +/** Use this class exactly as you would any other istream to read data from a + * field. All formatting and streaming operations of `std::istream` are + * supported. What you'll typically want to use, however, is the fieldstream + * alias (which defines a @ref basic_fieldstream for `char`). This is similar + * to how e.g. `std::ifstream` relates to `std::basic_ifstream`. + * + * This class has only been tested for the char type (and its default traits). 
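+ *
+ * A usage sketch (assumes `f` is a pqxx::field holding numeric text):
+ *
+ * ```cxx
+ * pqxx::fieldstream s{f};
+ * int n = 0;
+ * s >> n;
+ * ```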
+ */ +template> +class basic_fieldstream : public std::basic_istream +{ + using super = std::basic_istream; + +public: + using char_type = CHAR; + using traits_type = TRAITS; + using int_type = typename traits_type::int_type; + using pos_type = typename traits_type::pos_type; + using off_type = typename traits_type::off_type; + + basic_fieldstream(field const &f) : super{nullptr}, m_buf{f} + { + super::init(&m_buf); + } + +private: + field_streambuf m_buf; +}; + +using fieldstream = basic_fieldstream; + +/// Write a result field to any type of stream +/** This can be convenient when writing a field to an output stream. More + * importantly, it lets you write a field to e.g. a `stringstream` which you + * can then use to read, format and convert the field in ways that to() does + * not support. + * + * Example: parse a field into a variable of the nonstandard + * "long long" type. + * + * ```cxx + * extern result R; + * long long L; + * stringstream S; + * + * // Write field's string into S + * S << R[0][0]; + * + * // Parse contents of S into L + * S >> L; + * ``` + */ +template +inline std::basic_ostream & +operator<<(std::basic_ostream &s, field const &value) +{ + s.write(value.c_str(), std::streamsize(std::size(value))); + return s; +} + + +/// Convert a field's value to type `T`. +/** Unlike the "regular" `from_string`, this knows how to deal with null + * values. + */ +template inline T from_string(field const &value) +{ + if (value.is_null()) + { + if constexpr (nullness::has_null) + return nullness::null(); + else + internal::throw_null_conversion(type_name); + } + else + { + return from_string(value.view()); + } +} + + +/// Convert a field's value to `nullptr_t`. +/** Yes, you read that right. This conversion does nothing useful. It always + * returns `nullptr`. + * + * Except... what if the field is not null? In that case, this throws + * @ref conversion_error. + */ +template<> +inline std::nullptr_t from_string(field const &value) +{ + if (not value.is_null()) + throw conversion_error{ + "Extracting non-null field into nullptr_t variable."}; + return nullptr; +} + + +/// Convert a field to a string. +template<> PQXX_LIBEXPORT std::string to_string(field const &value); +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/array-composite.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/array-composite.hxx new file mode 100644 index 000000000..d2b6603e5 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/array-composite.hxx @@ -0,0 +1,305 @@ +#if !defined(PQXX_ARRAY_COMPOSITE_HXX) +# define PQXX_ARRAY_COMPOSITE_HXX + +# include + +# include "pqxx/strconv.hxx" + +namespace pqxx::internal +{ +// Find the end of a double-quoted string. +/** `input[pos]` must be the opening double quote. + * + * Returns the offset of the first position after the closing quote. + */ +inline std::size_t scan_double_quoted_string( + char const input[], std::size_t size, std::size_t pos, + pqxx::internal::glyph_scanner_func *scan) +{ + // XXX: find_char<'"', '\\'>(). + auto next{scan(input, size, pos)}; + bool at_quote{false}; + for (pos = next, next = scan(input, size, pos); pos < size; + pos = next, next = scan(input, size, pos)) + { + if (at_quote) + { + if (next - pos == 1 and input[pos] == '"') + { + // We just read a pair of double quotes. Carry on. + at_quote = false; + } + else + { + // We just read one double quote, and now we're at a character that's + // not a second double quote. 
Ergo, that last character was the + // closing double quote and this is the position right after it. + return pos; + } + } + else if (next - pos == 1) + { + switch (input[pos]) + { + case '\\': + // Backslash escape. Skip ahead by one more character. + pos = next; + next = scan(input, size, pos); + break; + + case '"': + // This is either the closing double quote, or the first of a pair of + // double quotes. + at_quote = true; + break; + } + } + else + { + // Multibyte character. Carry on. + } + } + if (not at_quote) + throw argument_error{ + "Missing closing double-quote: " + std::string{input}}; + return pos; +} + + +/// Un-quote and un-escape a double-quoted SQL string. +inline std::string parse_double_quoted_string( + char const input[], std::size_t end, std::size_t pos, + pqxx::internal::glyph_scanner_func *scan) +{ + std::string output; + // Maximum output size is same as the input size, minus the opening and + // closing quotes. Or in the extreme opposite case, the real number could be + // half that. Usually it'll be a pretty close estimate. + output.reserve(std::size_t(end - pos - 2)); + + for (auto here{scan(input, end, pos)}, next{scan(input, end, here)}; + here < end - 1; here = next, next = scan(input, end, here)) + { + // A backslash here is always an escape. So is a double-quote, since we're + // inside the double-quoted string. In either case, we can just ignore the + // escape character and use the next character. This is the one redeeming + // feature of SQL's escaping system. + if ((next - here == 1) and (input[here] == '\\' or input[here] == '"')) + { + // Skip escape. + here = next; + next = scan(input, end, here); + } + output.append(input + here, input + next); + } + return output; +} + + +/// Find the end of an unquoted string in an array or composite-type value. +/** Stops when it gets to the end of the input; or when it sees any of the + * characters in STOP which has not been escaped. + * + * For array values, STOP is a comma, a semicolon, or a closing brace. For + * a value of a composite type, STOP is a comma or a closing parenthesis. + */ +template +inline std::size_t scan_unquoted_string( + char const input[], std::size_t size, std::size_t pos, + pqxx::internal::glyph_scanner_func *scan) +{ + bool at_backslash{false}; + auto next{scan(input, size, pos)}; + while ((pos < size) and + ((next - pos) > 1 or at_backslash or ((input[pos] != STOP) and ...))) + { + pos = next; + next = scan(input, size, pos); + at_backslash = + ((not at_backslash) and ((next - pos) == 1) and (input[pos] == '\\')); + } + return pos; +} + + +/// Parse an unquoted array entry or cfield of a composite-type field. +inline std::string parse_unquoted_string( + char const input[], std::size_t end, std::size_t pos, + pqxx::internal::glyph_scanner_func *scan) +{ + std::string output; + bool at_backslash{false}; + output.reserve(end - pos); + for (auto next{scan(input, end, pos)}; pos < end; + pos = next, next = scan(input, end, pos)) + { + at_backslash = + ((not at_backslash) and ((next - pos) == 1) and (input[pos] == '\\')); + if (not at_backslash) + output.append(input + pos, next - pos); + } + return output; +} + + +/// Parse a field of a composite-type value. +/** `T` is the C++ type of the field we're parsing, and `index` is its + * zero-based number. + * + * Strip off the leading parenthesis or bracket yourself before parsing. + * However, this function will parse the lcosing parenthesis or bracket. + * + * After a successful parse, `pos` will point at `std::end(text)`. 
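+ *
+ * By way of illustration (an example input, not from the upstream docs): the
+ * composite literal `(42,"he said ""hi""",)` parses as the integer 42, the
+ * string `he said "hi"`, and a null third field.  At the user level the same
+ * text can be unpacked via @ref field::composite_to :
+ *
+ * ```cxx
+ * int n; std::string s; std::optional<int> extra;
+ * f.composite_to(n, s, extra);  // f: a pqxx::field holding that literal
+ * ```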
+ * + * For the purposes of parsing, ranges and arrays count as compositve values, + * so this function supports parsing those. If you specifically need a closing + * parenthesis, check afterwards that `text` did not end in a bracket instead. + * + * @param index Index of the current field, zero-based. It will increment for + * the next field. + * @param input Full input text for the entire composite-type value. + * @param pos Starting position (in `input`) of the field that we're parsing. + * After parsing, this will point at the beginning of the next field if + * there is one, or one position past the last character otherwise. + * @param field Destination for the parsed value. + * @param scan Glyph scanning function for the relevant encoding type. + * @param last_field Number of the last field in the value (zero-based). When + * parsing the last field, this will equal `index`. + */ +template +inline void parse_composite_field( + std::size_t &index, std::string_view input, std::size_t &pos, T &field, + glyph_scanner_func *scan, std::size_t last_field) +{ + assert(index <= last_field); + auto next{scan(std::data(input), std::size(input), pos)}; + if ((next - pos) != 1) + throw conversion_error{"Non-ASCII character in composite-type syntax."}; + + // Expect a field. + switch (input[pos]) + { + case ',': + case ')': + case ']': + // The field is empty, i.e, null. + if constexpr (nullness::has_null) + field = nullness::null(); + else + throw conversion_error{ + "Can't read composite field " + to_string(index) + ": C++ type " + + type_name + " does not support nulls."}; + break; + + case '"': { + auto const stop{scan_double_quoted_string( + std::data(input), std::size(input), pos, scan)}; + auto const text{ + parse_double_quoted_string(std::data(input), stop, pos, scan)}; + field = from_string(text); + pos = stop; + } + break; + + default: { + auto const stop{scan_unquoted_string<',', ')', ']'>( + std::data(input), std::size(input), pos, scan)}; + auto const text{parse_unquoted_string(std::data(input), stop, pos, scan)}; + field = from_string(text); + pos = stop; + } + break; + } + + // Expect a comma or a closing parenthesis. + next = scan(std::data(input), std::size(input), pos); + + if ((next - pos) != 1) + throw conversion_error{ + "Unexpected non-ASCII character after composite field: " + + std::string{input}}; + + if (index < last_field) + { + if (input[pos] != ',') + throw conversion_error{ + "Found '" + std::string{input[pos]} + + "' in composite value where comma was expected: " + std::data(input)}; + } + else + { + if (input[pos] == ',') + throw conversion_error{ + "Composite value contained more fields than the expected " + + to_string(last_field) + ": " + std::data(input)}; + if (input[pos] != ')' and input[pos] != ']') + throw conversion_error{ + "Composite value has unexpected characters where closing parenthesis " + "was expected: " + + std::string{input}}; + if (next != std::size(input)) + throw conversion_error{ + "Composite value has unexpected text after closing parenthesis: " + + std::string{input}}; + } + + pos = next; + ++index; +} + + +/// Conservatively estimate buffer size needed for a composite field. +template +inline std::size_t size_composite_field_buffer(T const &field) +{ + if constexpr (is_unquoted_safe) + { + // Safe to copy, without quotes or escaping. Drop the terminating zero. + return size_buffer(field) - 1; + } + else + { + // + Opening quote. + // + Field budget. + // - Terminating zero. + // + Escaping for each byte in the field's string representation. 
+ // - Escaping for terminating zero. + // + Closing quote. + return 1 + 2 * (size_buffer(field) - 1) + 1; + } +} + + +template +inline void write_composite_field(char *&pos, char *end, T const &field) +{ + if constexpr (is_unquoted_safe) + { + // No need for quoting or escaping. Convert it straight into its final + // place in the buffer, and "backspace" the trailing zero. + pos = string_traits::into_buf(pos, end, field) - 1; + } + else + { + // The field may need escaping, which means we need an intermediate buffer. + // To avoid allocating that at run time, we use the end of the buffer that + // we have. + auto const budget{size_buffer(field)}; + *pos++ = '"'; + + // Now escape buf into its final position. + for (char const c : string_traits::to_buf(end - budget, end, field)) + { + if ((c == '"') or (c == '\\')) + *pos++ = '\\'; + + *pos++ = c; + } + + *pos++ = '"'; + } + + *pos++ = ','; +} +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/callgate.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/callgate.hxx new file mode 100644 index 000000000..42f7703e3 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/callgate.hxx @@ -0,0 +1,70 @@ +#ifndef PQXX_H_CALLGATE +#define PQXX_H_CALLGATE + +/* +Here's what a typical gate class definition looks like: + +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE @gateclass@ : callgate<@host@> +{ + friend class @client@; + + @gateclass@(reference x) : super(x) {} + + // Methods here. Use home() to access the host-class object. +}; +} // namespace pqxx::internal::gate +*/ + +namespace pqxx::internal +{ +/// Base class for call gates. +/** + * A call gate defines a limited, private interface on the host class that + * specified client classes can access. + * + * The metaphor works as follows: the gate stands in front of a "home," which + * is really a class, and only lets specific friends in. + * + * To implement a call gate that gives client C access to host H, + * * derive a gate class from callgate; + * * make the gate class a friend of H; + * * make C a friend of the gate class; and + * * implement "stuff C can do with H" as private members in the gate class. + * + * This special kind of "gated" friendship gives C private access to H, but + * only through an expressly limited interface. The gate class can access its + * host object as home(). + * + * Keep gate classes entirely stateless. They should be ultra-lightweight + * wrappers for their host classes, and be optimized away as much as possible + * by the compiler. Once you start adding state, you're on a slippery slope + * away from the pure, clean, limited interface pattern that gate classes are + * meant to implement. + * + * Ideally, all member functions of the gate class should be one-liners passing + * calls straight on to the host class. It can be useful however to break this + * rule temporarily during inter-class refactoring. + */ +template class PQXX_PRIVATE callgate +{ +protected: + /// This class, to keep constructors easy. + using super = callgate; + /// A reference to the host class. Helps keep constructors easy. + using reference = HOME &; + + callgate(reference x) : m_home(x) {} + + /// The home object. The gate class has full "private" access. 
+ reference home() const noexcept { return m_home; } + +private: + reference m_home; +}; +} // namespace pqxx::internal + +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/concat.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/concat.hxx new file mode 100644 index 000000000..cd28bde7c --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/concat.hxx @@ -0,0 +1,45 @@ +#if !defined(PQXX_CONCAT_HXX) +# define PQXX_CONCAT_HXX + +# include +# include + +# include "pqxx/strconv.hxx" + +namespace pqxx::internal +{ +/// Convert item to a string, write it into [here, end). +template +void render_item(TYPE const &item, char *&here, char *end) +{ + here = string_traits::into_buf(here, end, item) - 1; +} + + +// C++20: Support non-random_access_range ranges. +/// Efficiently combine a bunch of items into one big string. +/** Use this as an optimised version of string concatentation. It takes just + * about any type; it will represent each item as a string according to its + * @ref string_traits. + * + * This is a simpler, more specialised version of @ref separated_list for a + * statically known series of items, possibly of different types. + */ +template +[[nodiscard]] inline std::string concat(TYPE... item) +{ + std::string buf; + // Size to accommodate string representations of all inputs, minus their + // terminating zero bytes. + buf.resize(size_buffer(item...)); + + char *const data{buf.data()}; + char *here = data; + char *end = data + std::size(buf); + (render_item(item, here, end), ...); + + buf.resize(static_cast(here - data)); + return buf; +} +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/conversions.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/conversions.hxx new file mode 100644 index 000000000..1df4fdead --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/conversions.hxx @@ -0,0 +1,1188 @@ +#include +#include +#include +#include +#include +#include + +#if defined(PQXX_HAVE_SPAN) && __has_include() +# include +#endif + +#include +#include +#include + +#include "pqxx/types.hxx" +#include "pqxx/util.hxx" + + +/* Internal helpers for string conversion, and conversion implementations. + * + * Do not include this header directly. The libpqxx headers do it for you. + */ +namespace pqxx::internal +{ +/// Convert a number in [0, 9] to its ASCII digit. +inline constexpr char number_to_digit(int i) noexcept +{ + return static_cast(i + '0'); +} + + +/// Compute numeric value of given textual digit (assuming that it is a digit). +constexpr int digit_to_number(char c) noexcept +{ + return c - '0'; +} + + +/// Summarize buffer overrun. +/** Don't worry about the exact parameter types: the sizes will be reasonably + * small, and nonnegative. + */ +std::string PQXX_LIBEXPORT +state_buffer_overrun(int have_bytes, int need_bytes); + + +template +inline std::string state_buffer_overrun(HAVE have_bytes, NEED need_bytes) +{ + return state_buffer_overrun( + static_cast(have_bytes), static_cast(need_bytes)); +} + + +/// Throw exception for attempt to convert null to given type. +[[noreturn]] PQXX_LIBEXPORT void +throw_null_conversion(std::string const &type); + + +/// Deliberately nonfunctional conversion traits for `char` types. +/** There are no string conversions for `char` and its signed and unsigned + * variants. 
Such a conversion would be dangerously ambiguous: should we treat + * it as text, or as a small integer? It'd be an open invitation for bugs. + * + * But the error message when you get this wrong is very cryptic. So, we + * derive dummy @ref string_traits implementations from this dummy type, and + * ensure that the compiler disallows their use. The compiler error message + * will at least contain a hint of the root of the problem. + */ +template struct disallowed_ambiguous_char_conversion +{ + static char *into_buf(char *, char *, CHAR_TYPE) = delete; + static constexpr zview + to_buf(char *, char *, CHAR_TYPE const &) noexcept = delete; + + static constexpr std::size_t + size_buffer(CHAR_TYPE const &) noexcept = delete; + static CHAR_TYPE from_string(std::string_view) = delete; +}; + + +template PQXX_LIBEXPORT extern std::string to_string_float(T); + + +/// Generic implementation for into_buf, on top of to_buf. +template +inline char *generic_into_buf(char *begin, char *end, T const &value) +{ + zview const text{string_traits::to_buf(begin, end, value)}; + auto const space{end - begin}; + // Include the trailing zero. + auto const len = std::size(text) + 1; + if (internal::cmp_greater(len, space)) + throw conversion_overrun{ + "Not enough buffer space to insert " + type_name + ". " + + state_buffer_overrun(space, len)}; + std::memmove(begin, text.data(), len); + return begin + len; +} + + +/// String traits for builtin integral types (though not bool). +template struct integral_traits +{ + static PQXX_LIBEXPORT T from_string(std::string_view text); + static PQXX_LIBEXPORT zview to_buf(char *begin, char *end, T const &value); + static PQXX_LIBEXPORT char *into_buf(char *begin, char *end, T const &value); + + static constexpr std::size_t size_buffer(T const &) noexcept + { + /** Includes a sign if needed; the number of base-10 digits which the type + * can reliably represent; the one extra base-10 digit which the type can + * only partially represent; and the terminating zero. + */ + return std::is_signed_v + std::numeric_limits::digits10 + 1 + 1; + } +}; + + +/// String traits for builtin floating-point types. +template struct float_traits +{ + static PQXX_LIBEXPORT T from_string(std::string_view text); + static PQXX_LIBEXPORT zview to_buf(char *begin, char *end, T const &value); + static PQXX_LIBEXPORT char *into_buf(char *begin, char *end, T const &value); + + // Return a nonnegative integral value's number of decimal digits. + static constexpr std::size_t digits10(std::size_t value) noexcept + { + if (value < 10) + return 1; + else + return 1 + digits10(value / 10); + } + + static constexpr std::size_t size_buffer(T const &) noexcept + { + using lims = std::numeric_limits; + // See #328 for a detailed discussion on the maximum number of digits. + // + // In a nutshell: for the big cases, the scientific notation is always + // the shortest one, and therefore the one that to_chars will pick. + // + // So... How long can the scientific notation get? 1 (for sign) + 1 (for + // decimal point) + 1 (for 'e') + 1 (for exponent sign) + max_digits10 + + // max number of digits in the exponent + 1 (terminating zero). + // + // What's the max number of digits in the exponent? It's the max number of + // digits out of the most negative exponent and the most positive one. + // + // The longest positive exponent is easy: 1 + ceil(log10(max_exponent10)). + // (The extra 1 is because 10^n takes up 1 + n digits, not n.) 
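+    // (Worked example, for illustration: with double, max_digits10 is 17 and
+    // max_exponent10 is 308, so the positive-exponent term is
+    // digits10(308) == 3; the full budget computed below comes out to
+    // 1 + 1 + 17 + 1 + 1 + 3 + 1 = 25 bytes.)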
+ // + // The longest negative exponent is a bit harder: min_exponent10 gives us + // the smallest power of 10 which a normalised version of T can represent. + // But the smallest denormalised power of 10 that T can represent is + // another max_digits10 powers of 10 below that. + // needs a minus sign. + // + // All this stuff messes with my head a bit because it's on the order of + // log10(log10(n)). It's easy to get the number of logs wrong. + auto const max_pos_exp{digits10(lims::max_exponent10)}; + // Really want std::abs(lims::min_exponent10), but MSVC 2017 apparently has + // problems with std::abs. So we use -lims::min_exponent10 instead. + auto const max_neg_exp{ + digits10(lims::max_digits10 - lims::min_exponent10)}; + return 1 + // Sign. + 1 + // Decimal point. + std::numeric_limits::max_digits10 + // Mantissa digits. + 1 + // Exponent "e". + 1 + // Exponent sign. + // Spell this weirdly to stop Windows compilers from reading this as + // a call to their "max" macro when NOMINMAX is not defined. + (std::max)(max_pos_exp, max_neg_exp) + // Exponent digits. + 1; // Terminating zero. + } +}; +} // namespace pqxx::internal + + +namespace pqxx +{ +/// The built-in arithmetic types do not have inherent null values. +template +struct nullness>> : no_null +{}; + + +template<> struct string_traits : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> +struct string_traits + : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> struct string_traits : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> struct string_traits : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> struct string_traits : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> +struct string_traits : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> +struct string_traits : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> +struct string_traits + : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> struct string_traits : internal::float_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> struct string_traits : internal::float_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> +struct string_traits : internal::float_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; + + +template<> struct string_traits +{ + static PQXX_LIBEXPORT bool from_string(std::string_view text); + + static constexpr zview to_buf(char *, char *, bool const &value) noexcept + { + return value ? "true"_zv : "false"_zv; + } + + static char *into_buf(char *begin, char *end, bool const &value) + { + return pqxx::internal::generic_into_buf(begin, end, value); + } + + static constexpr std::size_t size_buffer(bool const &) noexcept { return 6; } +}; + + +/// We don't support conversion to/from `char` types. +/** Why are these disallowed? Because they are ambiguous. It's not inherently + * clear whether we should treat values of these types as text or as small + * integers. Either choice would lead to bugs. + */ +template<> +struct string_traits + : internal::disallowed_ambiguous_char_conversion +{}; + +/// We don't support conversion to/from `char` types. +/** Why are these disallowed? 
Because they are ambiguous. It's not inherently + * clear whether we should treat values of these types as text or as small + * integers. Either choice would lead to bugs. + */ +template<> +struct string_traits + : internal::disallowed_ambiguous_char_conversion +{}; + +/// We don't support conversion to/from `char` types. +/** Why are these disallowed? Because they are ambiguous. It's not inherently + * clear whether we should treat values of these types as text or as small + * integers. Either choice would lead to bugs. + */ +template<> +struct string_traits + : internal::disallowed_ambiguous_char_conversion +{}; + + +template<> inline constexpr bool is_unquoted_safe{true}; + + +template struct nullness> +{ + static constexpr bool has_null = true; + /// Technically, you could have an optional of an always-null type. + static constexpr bool always_null = nullness::always_null; + static constexpr bool is_null(std::optional const &v) noexcept + { + return ((not v.has_value()) or pqxx::is_null(*v)); + } + static constexpr std::optional null() { return {}; } +}; + + +template +inline constexpr format param_format(std::optional const &value) +{ + return param_format(*value); +} + + +template struct string_traits> +{ + static char *into_buf(char *begin, char *end, std::optional const &value) + { + return string_traits::into_buf(begin, end, *value); + } + + static zview to_buf(char *begin, char *end, std::optional const &value) + { + if (value.has_value()) + return string_traits::to_buf(begin, end, *value); + else + return {}; + } + + static std::optional from_string(std::string_view text) + { + return std::optional{ + std::in_place, string_traits::from_string(text)}; + } + + static std::size_t size_buffer(std::optional const &value) noexcept + { + return pqxx::size_buffer(value.value()); + } +}; + + +template +inline constexpr bool is_unquoted_safe>{is_unquoted_safe}; + + +template struct nullness> +{ + static constexpr bool has_null = (nullness::has_null or ...); + static constexpr bool always_null = (nullness::always_null and ...); + static constexpr bool is_null(std::variant const &value) noexcept + { + return std::visit( + [](auto const &i) noexcept { + return nullness>::is_null(i); + }, + value); + } + + // We don't support `null()` for `std::variant`. + /** It would be technically possible to have a `null` in the case where just + * one of the types has a null, but it gets complicated and arbitrary. + */ + static constexpr std::variant null() = delete; +}; + + +template struct string_traits> +{ + static char * + into_buf(char *begin, char *end, std::variant const &value) + { + return std::visit( + [begin, end](auto const &i) { + return string_traits>::into_buf(begin, end, i); + }, + value); + } + static zview to_buf(char *begin, char *end, std::variant const &value) + { + return std::visit( + [begin, end](auto const &i) { + return string_traits>::to_buf(begin, end, i); + }, + value); + } + static std::size_t size_buffer(std::variant const &value) noexcept + { + return std::visit( + [](auto const &i) noexcept { return pqxx::size_buffer(i); }, value); + } + + /** There's no from_string for std::variant. We could have one with a rule + * like "pick the first type which fits the value," but we'd have to look + * into how natural that API feels to users. 
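+   *
+   * For example (illustrative):
+   * `pqxx::to_string(std::variant<int, std::string>{42})` yields "42",
+   * whereas `pqxx::from_string<std::variant<int, std::string>>("42")`
+   * deliberately fails to compile.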
+ */ + static std::variant from_string(std::string_view) = delete; +}; + + +template +inline constexpr format param_format(std::variant const &value) +{ + return std::visit([](auto &v) { return param_format(v); }, value); +} + + +template +inline constexpr bool is_unquoted_safe>{ + (is_unquoted_safe and ...)}; + + +template inline T from_string(std::stringstream const &text) +{ + return from_string(text.str()); +} + + +template<> struct string_traits +{ + static char *into_buf(char *, char *, std::nullptr_t) = delete; + + static constexpr zview + to_buf(char *, char *, std::nullptr_t const &) noexcept + { + return {}; + } + + static constexpr std::size_t size_buffer(std::nullptr_t = nullptr) noexcept + { + return 0; + } + static std::nullptr_t from_string(std::string_view) = delete; +}; + + +template<> struct string_traits +{ + static char *into_buf(char *, char *, std::nullopt_t) = delete; + + static constexpr zview + to_buf(char *, char *, std::nullopt_t const &) noexcept + { + return {}; + } + + static constexpr std::size_t size_buffer(std::nullopt_t) noexcept + { + return 0; + } + static std::nullopt_t from_string(std::string_view) = delete; +}; + + +template<> struct string_traits +{ + static char *into_buf(char *, char *, std::monostate) = delete; + + static constexpr zview + to_buf(char *, char *, std::monostate const &) noexcept + { + return {}; + } + + static constexpr std::size_t size_buffer(std::monostate) noexcept + { + return 0; + } + static std::monostate from_string(std::string_view) = delete; +}; + + +template<> inline constexpr bool is_unquoted_safe{true}; + + +template<> struct nullness +{ + static constexpr bool has_null = true; + static constexpr bool always_null = false; + static constexpr bool is_null(char const *t) noexcept + { + return t == nullptr; + } + static constexpr char const *null() noexcept { return nullptr; } +}; + + +/// String traits for C-style string ("pointer to char const"). +template<> struct string_traits +{ + static char const *from_string(std::string_view text) { return text.data(); } + + static zview to_buf(char *begin, char *end, char const *const &value) + { + return generic_to_buf(begin, end, value); + } + + static char *into_buf(char *begin, char *end, char const *const &value) + { + auto const space{end - begin}; + // Count the trailing zero, even though std::strlen() and friends don't. + auto const len{std::strlen(value) + 1}; + if (space < ptrdiff_t(len)) + throw conversion_overrun{ + "Could not copy string: buffer too small. " + + pqxx::internal::state_buffer_overrun(space, len)}; + std::memmove(begin, value, len); + return begin + len; + } + + static std::size_t size_buffer(char const *const &value) noexcept + { + return std::strlen(value) + 1; + } +}; + + +template<> struct nullness +{ + static constexpr bool has_null = true; + static constexpr bool always_null = false; + static constexpr bool is_null(char const *t) noexcept + { + return t == nullptr; + } + static constexpr char const *null() { return nullptr; } +}; + + +/// String traits for non-const C-style string ("pointer to char"). 
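+/** For illustration (not upstream documentation): `pqxx::to_string(buf)`
+ * works for a `char *buf` just as it does for `char const *`, but
+ * `pqxx::from_string<char *>(text)` is deliberately deleted, since handing
+ * out a mutable pointer into the input would break const-safety.
+ */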
+template<> struct string_traits +{ + static char *into_buf(char *begin, char *end, char *const &value) + { + return string_traits::into_buf(begin, end, value); + } + static zview to_buf(char *begin, char *end, char *const &value) + { + return string_traits::to_buf(begin, end, value); + } + static std::size_t size_buffer(char *const &value) noexcept + { + return string_traits::size_buffer(value); + } + + /// Don't allow conversion to this type since it breaks const-safety. + static char *from_string(std::string_view) = delete; +}; + + +template struct nullness : no_null +{}; + + +/// String traits for C-style string constant ("array of char"). +/** @warning This assumes that every array-of-char is a C-style string literal. + * So, it must include a trailing zero. and it must have static duration. + */ +template struct string_traits +{ + static constexpr zview + to_buf(char *, char *, char const (&value)[N]) noexcept + { + return zview{value, N - 1}; + } + + static char *into_buf(char *begin, char *end, char const (&value)[N]) + { + if (internal::cmp_less(end - begin, size_buffer(value))) + throw conversion_overrun{ + "Could not convert char[] to string: too long for buffer."}; + std::memcpy(begin, value, N); + return begin + N; + } + static constexpr std::size_t size_buffer(char const (&)[N]) noexcept + { + return N; + } + + /// Don't allow conversion to this type. + static void from_string(std::string_view) = delete; +}; + + +template<> struct nullness : no_null +{}; + + +template<> struct string_traits +{ + static std::string from_string(std::string_view text) + { + return std::string{text}; + } + + static char *into_buf(char *begin, char *end, std::string const &value) + { + if (internal::cmp_greater_equal(std::size(value), end - begin)) + throw conversion_overrun{ + "Could not convert string to string: too long for buffer."}; + // Include the trailing zero. + value.copy(begin, std::size(value)); + begin[std::size(value)] = '\0'; + return begin + std::size(value) + 1; + } + + static zview to_buf(char *begin, char *end, std::string const &value) + { + return generic_to_buf(begin, end, value); + } + + static std::size_t size_buffer(std::string const &value) noexcept + { + return std::size(value) + 1; + } +}; + + +/// There's no real null for `std::string_view`. +/** I'm not sure how clear-cut this is: a `string_view` may have a null + * data pointer, which is analogous to a null `char` pointer. + */ +template<> struct nullness : no_null +{}; + + +/// String traits for `string_view`. +template<> struct string_traits +{ + static constexpr std::size_t + size_buffer(std::string_view const &value) noexcept + { + return std::size(value) + 1; + } + + static char *into_buf(char *begin, char *end, std::string_view const &value) + { + if (internal::cmp_greater_equal(std::size(value), end - begin)) + throw conversion_overrun{ + "Could not store string_view: too long for buffer."}; + value.copy(begin, std::size(value)); + begin[std::size(value)] = '\0'; + return begin + std::size(value) + 1; + } + + /// Don't convert to this type; it has nowhere to store its contents. + static std::string_view from_string(std::string_view) = delete; +}; + + +template<> struct nullness : no_null +{}; + + +/// String traits for `zview`. 
+template<> struct string_traits +{ + static constexpr std::size_t + size_buffer(std::string_view const &value) noexcept + { + return std::size(value) + 1; + } + + static char *into_buf(char *begin, char *end, zview const &value) + { + auto const size{std::size(value)}; + if (internal::cmp_less_equal(end - begin, std::size(value))) + throw conversion_overrun{"Not enough buffer space to store this zview."}; + value.copy(begin, size); + begin[size] = '\0'; + return begin + size + 1; + } + + static std::string_view to_buf(char *begin, char *end, zview const &value) + { + return {into_buf(begin, end, value), std::size(value)}; + } + + /// Don't convert to this type; it has nowhere to store its contents. + static zview from_string(std::string_view) = delete; +}; + + +template<> struct nullness : no_null +{}; + + +template<> struct string_traits +{ + static std::size_t size_buffer(std::stringstream const &) = delete; + + static std::stringstream from_string(std::string_view text) + { + std::stringstream stream; + stream.write(text.data(), std::streamsize(std::size(text))); + return stream; + } + + static char *into_buf(char *, char *, std::stringstream const &) = delete; + static std::string_view + to_buf(char *, char *, std::stringstream const &) = delete; +}; + + +template<> struct nullness +{ + static constexpr bool has_null = true; + static constexpr bool always_null = true; + static constexpr bool is_null(std::nullptr_t const &) noexcept + { + return true; + } + static constexpr std::nullptr_t null() noexcept { return nullptr; } +}; + + +template<> struct nullness +{ + static constexpr bool has_null = true; + static constexpr bool always_null = true; + static constexpr bool is_null(std::nullopt_t const &) noexcept + { + return true; + } + static constexpr std::nullopt_t null() noexcept { return std::nullopt; } +}; + + +template<> struct nullness +{ + static constexpr bool has_null = true; + static constexpr bool always_null = true; + static constexpr bool is_null(std::monostate const &) noexcept + { + return true; + } + static constexpr std::monostate null() noexcept { return {}; } +}; + + +template struct nullness> +{ + static constexpr bool has_null = true; + static constexpr bool always_null = false; + static constexpr bool is_null(std::unique_ptr const &t) noexcept + { + return not t or pqxx::is_null(*t); + } + static constexpr std::unique_ptr null() { return {}; } +}; + + +template +struct string_traits> +{ + static std::unique_ptr from_string(std::string_view text) + { + return std::make_unique(string_traits::from_string(text)); + } + + static char * + into_buf(char *begin, char *end, std::unique_ptr const &value) + { + return string_traits::into_buf(begin, end, *value); + } + + static zview + to_buf(char *begin, char *end, std::unique_ptr const &value) + { + if (value) + return string_traits::to_buf(begin, end, *value); + else + return {}; + } + + static std::size_t + size_buffer(std::unique_ptr const &value) noexcept + { + return pqxx::size_buffer(*value.get()); + } +}; + + +template +inline format param_format(std::unique_ptr const &value) +{ + return param_format(*value); +} + + +template +inline constexpr bool is_unquoted_safe>{ + is_unquoted_safe}; + + +template struct nullness> +{ + static constexpr bool has_null = true; + static constexpr bool always_null = false; + static constexpr bool is_null(std::shared_ptr const &t) noexcept + { + return not t or pqxx::is_null(*t); + } + static constexpr std::shared_ptr null() { return {}; } +}; + + +template struct string_traits> +{ + 
static std::shared_ptr from_string(std::string_view text) + { + return std::make_shared(string_traits::from_string(text)); + } + + static zview to_buf(char *begin, char *end, std::shared_ptr const &value) + { + return string_traits::to_buf(begin, end, *value); + } + static char * + into_buf(char *begin, char *end, std::shared_ptr const &value) + { + return string_traits::into_buf(begin, end, *value); + } + static std::size_t size_buffer(std::shared_ptr const &value) noexcept + { + return pqxx::size_buffer(*value); + } +}; + + +template format param_format(std::shared_ptr const &value) +{ + return param_format(*value); +} + + +template +inline constexpr bool is_unquoted_safe>{ + is_unquoted_safe}; + + +template<> +struct nullness> + : no_null> +{}; + + +#if defined(PQXX_HAVE_CONCEPTS) +template struct nullness : no_null +{}; + + +template inline constexpr format param_format(DATA const &) +{ + return format::binary; +} + + +template struct string_traits +{ + static std::size_t size_buffer(DATA const &value) noexcept + { + return internal::size_esc_bin(std::size(value)); + } + + static zview to_buf(char *begin, char *end, DATA const &value) + { + return generic_to_buf(begin, end, value); + } + + static char *into_buf(char *begin, char *end, DATA const &value) + { + auto const budget{size_buffer(value)}; + if (internal::cmp_less(end - begin, budget)) + throw conversion_overrun{ + "Not enough buffer space to escape binary data."}; + internal::esc_bin(value, begin); + return begin + budget; + } + + static DATA from_string(std::string_view text) + { + auto const size{pqxx::internal::size_unesc_bin(std::size(text))}; + std::basic_string buf; + buf.resize(size); + pqxx::internal::unesc_bin(text, reinterpret_cast(buf.data())); + return buf; + } +}; +#endif // PQXX_HAVE_CONCEPTS + + +template<> struct string_traits> +{ + static std::size_t + size_buffer(std::basic_string const &value) noexcept + { + return internal::size_esc_bin(std::size(value)); + } + + static zview + to_buf(char *begin, char *end, std::basic_string const &value) + { + return generic_to_buf(begin, end, value); + } + + static char * + into_buf(char *begin, char *end, std::basic_string const &value) + { + auto const budget{size_buffer(value)}; + if (internal::cmp_less(end - begin, budget)) + throw conversion_overrun{ + "Not enough buffer space to escape binary data."}; + internal::esc_bin(value, begin); + return begin + budget; + } + + static std::basic_string from_string(std::string_view text) + { + auto const size{pqxx::internal::size_unesc_bin(std::size(text))}; + std::basic_string buf; + buf.resize(size); + pqxx::internal::unesc_bin(text, reinterpret_cast(buf.data())); + return buf; + } +}; + + +template<> +inline constexpr format param_format(std::basic_string const &) +{ + return format::binary; +} + + +template<> +struct nullness> + : no_null> +{}; + + +template<> struct string_traits> +{ + static std::size_t + size_buffer(std::basic_string_view const &value) noexcept + { + return internal::size_esc_bin(std::size(value)); + } + + static zview to_buf( + char *begin, char *end, std::basic_string_view const &value) + { + return generic_to_buf(begin, end, value); + } + + static char *into_buf( + char *begin, char *end, std::basic_string_view const &value) + { + auto const budget{size_buffer(value)}; + if (internal::cmp_less(end - begin, budget)) + throw conversion_overrun{ + "Not enough buffer space to escape binary data."}; + internal::esc_bin(value, begin); + return begin + budget; + } + + // There's no from_string, because 
there's nobody to hold the data. +}; + +template<> +inline constexpr format param_format(std::basic_string_view const &) +{ + return format::binary; +} +} // namespace pqxx + + +namespace pqxx::internal +{ +/// String traits for SQL arrays. +template struct array_string_traits +{ +private: + using elt_type = strip_t>; + using elt_traits = string_traits; + static constexpr zview s_null{"NULL"}; + +public: + static zview to_buf(char *begin, char *end, Container const &value) + { + return generic_to_buf(begin, end, value); + } + + static char *into_buf(char *begin, char *end, Container const &value) + { + std::size_t const budget{size_buffer(value)}; + if (internal::cmp_less(end - begin, budget)) + throw conversion_overrun{ + "Not enough buffer space to convert array to string."}; + + char *here = begin; + *here++ = '{'; + + bool nonempty{false}; + for (auto const &elt : value) + { + if (is_null(elt)) + { + s_null.copy(here, std::size(s_null)); + here += std::size(s_null); + } + else if constexpr (is_sql_array) + { + // Render nested array in-place. Then erase the trailing zero. + here = elt_traits::into_buf(here, end, elt) - 1; + } + else if constexpr (is_unquoted_safe) + { + // No need to quote or escape. Just convert the value straight into + // its place in the array, and "backspace" the trailing zero. + here = elt_traits::into_buf(here, end, elt) - 1; + } + else + { + *here++ = '"'; + + // Use the tail end of the destination buffer as an intermediate + // buffer. + auto const elt_budget{pqxx::size_buffer(elt)}; + for (char const c : elt_traits::to_buf(end - elt_budget, end, elt)) + { + if (c == '\\' or c == '"') + *here++ = '\\'; + *here++ = c; + } + *here++ = '"'; + } + *here++ = array_separator; + nonempty = true; + } + + // Erase that last comma, if present. + if (nonempty) + here--; + + *here++ = '}'; + *here++ = '\0'; + + return here; + } + + static std::size_t size_buffer(Container const &value) noexcept + { + if constexpr (is_unquoted_safe) + return 3 + std::accumulate( + std::begin(value), std::end(value), std::size_t{}, + [](std::size_t acc, elt_type const &elt) { + return acc + + (pqxx::is_null(elt) ? + std::size(s_null) : + elt_traits::size_buffer(elt)) - + 1; + }); + else + return 3 + std::accumulate( + std::begin(value), std::end(value), std::size_t{}, + [](std::size_t acc, elt_type const &elt) { + // Opening and closing quotes, plus worst-case escaping, + // but don't count the trailing zeroes. + std::size_t const elt_size{ + pqxx::is_null(elt) ? std::size(s_null) : + elt_traits::size_buffer(elt) - 1}; + return acc + 2 * elt_size + 2; + }); + } + + // We don't yet support parsing of array types using from_string. Doing so + // would require a reference to the connection. +}; +} // namespace pqxx::internal + + +namespace pqxx +{ +template +struct nullness> : no_null> +{}; + + +template +struct string_traits> + : internal::array_string_traits> +{}; + + +/// We don't know how to pass array params in binary format, so pass as text. +template +inline constexpr format param_format(std::vector const &) +{ + return format::text; +} + + +/// A `std::vector` is a binary string. Other vectors are not. +template +inline constexpr format param_format(std::vector const &) +{ + return format::binary; +} + + +template inline constexpr bool is_sql_array>{true}; + + +template +struct nullness> : no_null> +{}; + + +template +struct string_traits> + : internal::array_string_traits> +{}; + + +/// We don't know how to pass array params in binary format, so pass as text. 
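+/** As an illustration (not from the upstream docs) of the text form:
+ * `pqxx::to_string(std::vector<int>{1, 2, 3})` renders the SQL array literal
+ * `{1,2,3}`, and string elements come out double-quoted, e.g. `{"a","b c"}`.
+ */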
+template +inline constexpr format param_format(std::array const &) +{ + return format::text; +} + + +/// An array of `std::byte` is a binary string. +template +inline constexpr format param_format(std::array const &) +{ + return format::binary; +} + + +template +inline constexpr bool is_sql_array>{true}; +} // namespace pqxx + + +namespace pqxx +{ +template inline std::string to_string(T const &value) +{ + if (is_null(value)) + throw conversion_error{ + "Attempt to convert null " + type_name + " to a string."}; + + std::string buf; + // We can't just reserve() space; modifying the terminating zero leads to + // undefined behaviour. + buf.resize(size_buffer(value)); + auto const data{buf.data()}; + auto const end{ + string_traits::into_buf(data, data + std::size(buf), value)}; + buf.resize(static_cast(end - data - 1)); + return buf; +} + + +template<> inline std::string to_string(float const &value) +{ + return internal::to_string_float(value); +} +template<> inline std::string to_string(double const &value) +{ + return internal::to_string_float(value); +} +template<> inline std::string to_string(long double const &value) +{ + return internal::to_string_float(value); +} +template<> inline std::string to_string(std::stringstream const &value) +{ + return value.str(); +} + + +template inline void into_string(T const &value, std::string &out) +{ + if (is_null(value)) + throw conversion_error{ + "Attempt to convert null " + type_name + " to a string."}; + + // We can't just reserve() data; modifying the terminating zero leads to + // undefined behaviour. + out.resize(size_buffer(value) + 1); + auto const data{out.data()}; + auto const end{ + string_traits::into_buf(data, data + std::size(out), value)}; + out.resize(static_cast(end - data - 1)); +} +} // namespace pqxx diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/encoding_group.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/encoding_group.hxx new file mode 100644 index 000000000..e17736e5b --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/encoding_group.hxx @@ -0,0 +1,60 @@ +/** Enum type for supporting encodings in libpqxx + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ENCODING_GROUP +#define PQXX_H_ENCODING_GROUP + +#include + +namespace pqxx::internal +{ +// Types of encodings supported by PostgreSQL, see +// https://www.postgresql.org/docs/current/static/multibyte.html#CHARSET-TABLE +enum class encoding_group +{ + // Handles all single-byte fixed-width encodings + MONOBYTE, + + // Multibyte encodings. + // Many of these can embed ASCII-like bytes inside multibyte characters, + // notably Big5, SJIS, SHIFT_JIS_2004, GP18030, GBK, JOHAB, UHC. + BIG5, + EUC_CN, + // TODO: Merge EUC_JP and EUC_JIS_2004? + EUC_JP, + EUC_JIS_2004, + EUC_KR, + EUC_TW, + GB18030, + GBK, + JOHAB, + MULE_INTERNAL, + // TODO: Merge SJIS and SHIFT_JIS_2004? + SJIS, + SHIFT_JIS_2004, + UHC, + UTF8, +}; + + +// TODO:: Can we just use string_view now? +/// Function type: "find the end of the current glyph." +/** This type of function takes a text buffer, and a location in that buffer, + * and returns the location one byte past the end of the current glyph. + * + * The start offset marks the beginning of the current glyph. It must fall + * within the buffer. 
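+ *
+ * As an illustrative sketch (not from the upstream header; `scan` and `buf`
+ * are hypothetical names for a scanner obtained elsewhere and a text buffer),
+ * stepping through a buffer glyph by glyph looks roughly like:
+ *
+ *     std::size_t here{0};
+ *     while (here < std::size(buf))
+ *       here = scan(std::data(buf), std::size(buf), here);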
+ * + * There are multiple different glyph scanner implementations, for different + * kinds of encodings. + */ +using glyph_scanner_func = + std::size_t(char const buffer[], std::size_t buffer_len, std::size_t start); +} // namespace pqxx::internal + +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/encodings.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/encodings.hxx new file mode 100644 index 000000000..ba7fecc70 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/encodings.hxx @@ -0,0 +1,90 @@ +/** Internal string encodings support for libpqxx + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ENCODINGS +#define PQXX_H_ENCODINGS + +#include "pqxx/internal/encoding_group.hxx" + +#include +#include + + +namespace pqxx::internal +{ +char const *name_encoding(int encoding_id); + +/// Convert libpq encoding enum or encoding name to its libpqxx group. +encoding_group enc_group(int /* libpq encoding ID */); +encoding_group enc_group(std::string_view); + + +/// Look up the glyph scanner function for a given encoding group. +/** To identify the glyph boundaries in a buffer, call this to obtain the + * scanner function appropriate for the buffer's encoding. Then, repeatedly + * call the scanner function to find the glyphs. + */ +PQXX_LIBEXPORT glyph_scanner_func *get_glyph_scanner(encoding_group); + + +// TODO: For ASCII search, treat UTF8/EUC_*/MULE_INTERNAL as MONOBYTE. + +/// Find any of the ASCII characters `NEEDLE` in `haystack`. +/** Scans through `haystack` until it finds a single-byte character that + * matches any value in `NEEDLE`. + * + * If it finds one, returns its offset. If not, returns the end of the + * haystack. + */ +template +inline std::size_t find_char( + glyph_scanner_func *scanner, std::string_view haystack, + std::size_t here = 0u) +{ + auto const sz{std::size(haystack)}; + auto const data{std::data(haystack)}; + while (here < sz) + { + auto next{scanner(data, sz, here)}; + // (For some reason gcc had a problem with a right-fold here. But clang + // was fine.) + if ((... or (data[here] == NEEDLE))) + { + // Also check against a multibyte character starting with a bytes which + // just happens to match one of the ASCII bytes we're looking for. It'd + // be cleaner to check that first, but either works. So, let's apply the + // most selective filter first and skip this check in almost all cases. + if (next == here + 1) + return here; + } + + // Nope, no hit. Move on. + here = next; + } + return sz; +} + + +/// Iterate over the glyphs in a buffer. +/** Scans the glyphs in the buffer, and for each, passes its begin and its + * one-past-end pointers to `callback`. 
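+ *
+ * An illustrative sketch (not from the upstream header; `buf` and `handle`
+ * are hypothetical):
+ *
+ *     for_glyphs(
+ *       encoding_group::UTF8,
+ *       [](char const *gbegin, char const *gend) { handle(gbegin, gend); },
+ *       std::data(buf), std::size(buf));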
+ */ +template +inline void for_glyphs( + encoding_group enc, CALLABLE callback, char const buffer[], + std::size_t buffer_len, std::size_t start = 0) +{ + auto const scan{get_glyph_scanner(enc)}; + for (std::size_t here = start, next; here < buffer_len; here = next) + { + next = scan(buffer, buffer_len, here); + callback(buffer + here, buffer + next); + } +} +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-errorhandler.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-errorhandler.hxx new file mode 100644 index 000000000..ffc12a6cf --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-errorhandler.hxx @@ -0,0 +1,26 @@ +#include + +namespace pqxx +{ +class connection; +class errorhandler; +} // namespace pqxx + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_errorhandler : callgate +{ + friend class pqxx::errorhandler; + + connection_errorhandler(reference x) : super(x) {} + + void register_errorhandler(errorhandler *h) + { + home().register_errorhandler(h); + } + void unregister_errorhandler(errorhandler *h) + { + home().unregister_errorhandler(h); + } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-largeobject.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-largeobject.hxx new file mode 100644 index 000000000..49feaf9e6 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-largeobject.hxx @@ -0,0 +1,35 @@ +#include + +#include +#include + +namespace pqxx +{ +class blob; +class largeobject; +} // namespace pqxx + + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_largeobject : callgate +{ + friend class pqxx::blob; + friend class pqxx::largeobject; + + connection_largeobject(reference x) : super(x) {} + + pq::PGconn *raw_connection() const { return home().raw_connection(); } +}; + + +class PQXX_PRIVATE const_connection_largeobject : callgate +{ + friend class pqxx::blob; + friend class pqxx::largeobject; + + const_connection_largeobject(reference x) : super(x) {} + + std::string error_message() const { return home().err_msg(); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-notification_receiver.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-notification_receiver.hxx new file mode 100644 index 000000000..0bcb2db17 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-notification_receiver.hxx @@ -0,0 +1,29 @@ +#include + +#include "pqxx/connection.hxx" + + +namespace pqxx +{ +class notification_receiver; +} + + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_notification_receiver : callgate +{ + friend class pqxx::notification_receiver; + + connection_notification_receiver(reference x) : super(x) {} + + void add_receiver(notification_receiver *receiver) + { + home().add_receiver(receiver); + } + void remove_receiver(notification_receiver *receiver) noexcept + { + home().remove_receiver(receiver); + } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-pipeline.hxx 
b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-pipeline.hxx new file mode 100644 index 000000000..c6ae6e17a --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-pipeline.hxx @@ -0,0 +1,23 @@ +#include "pqxx/internal/libpq-forward.hxx" +#include + +#include "pqxx/pipeline.hxx" + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_pipeline : callgate +{ + friend class pqxx::pipeline; + + connection_pipeline(reference x) : super(x) {} + + void start_exec(char const query[]) { home().start_exec(query); } + pqxx::internal::pq::PGresult *get_result() { return home().get_result(); } + void cancel_query() { home().cancel_query(); } + + bool consume_input() noexcept { return home().consume_input(); } + bool is_busy() const noexcept { return home().is_busy(); } + + int encoding_id() { return home().encoding_id(); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-sql_cursor.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-sql_cursor.hxx new file mode 100644 index 000000000..51a889844 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-sql_cursor.hxx @@ -0,0 +1,19 @@ +#include + +namespace pqxx::internal +{ +class sql_cursor; +} + + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_sql_cursor : callgate +{ + friend class pqxx::internal::sql_cursor; + + connection_sql_cursor(reference x) : super(x) {} + + result exec(char const query[]) { return home().exec(query); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-stream_from.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-stream_from.hxx new file mode 100644 index 000000000..8961e7146 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-stream_from.hxx @@ -0,0 +1,15 @@ +#include + +#include "pqxx/connection.hxx" + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_stream_from : callgate +{ + friend class pqxx::stream_from; + + connection_stream_from(reference x) : super{x} {} + + auto read_copy_line() { return home().read_copy_line(); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-stream_to.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-stream_to.hxx new file mode 100644 index 000000000..a6974fb21 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-stream_to.hxx @@ -0,0 +1,17 @@ +#include + +#include "pqxx/stream_to.hxx" + + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_stream_to : callgate +{ + friend class pqxx::stream_to; + + connection_stream_to(reference x) : super(x) {} + + void write_copy_line(std::string_view line) { home().write_copy_line(line); } + void end_copy_write() { home().end_copy_write(); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-transaction.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-transaction.hxx new file mode 100644 index 000000000..74d659253 --- /dev/null +++ 
b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/connection-transaction.hxx @@ -0,0 +1,44 @@ +#include + +namespace pqxx +{ +class connection; +} + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_transaction : callgate +{ + friend class pqxx::transaction_base; + + connection_transaction(reference x) : super(x) {} + + template result exec(STRING query, std::string_view desc) + { + return home().exec(query, desc); + } + + void register_transaction(transaction_base *t) + { + home().register_transaction(t); + } + void unregister_transaction(transaction_base *t) noexcept + { + home().unregister_transaction(t); + } + + auto read_copy_line() { return home().read_copy_line(); } + void write_copy_line(std::string_view line) { home().write_copy_line(line); } + void end_copy_write() { home().end_copy_write(); } + + result exec_prepared(zview statement, internal::c_params const &args) + { + return home().exec_prepared(statement, args); + } + + result exec_params(zview query, internal::c_params const &args) + { + return home().exec_params(query, args); + } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/errorhandler-connection.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/errorhandler-connection.hxx new file mode 100644 index 000000000..5560cedec --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/errorhandler-connection.hxx @@ -0,0 +1,13 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE errorhandler_connection : callgate +{ + friend class pqxx::connection; + + errorhandler_connection(reference x) : super(x) {} + + void unregister() noexcept { home().unregister(); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/icursor_iterator-icursorstream.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/icursor_iterator-icursorstream.hxx new file mode 100644 index 000000000..296d22145 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/icursor_iterator-icursorstream.hxx @@ -0,0 +1,24 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE icursor_iterator_icursorstream : callgate +{ + friend class pqxx::icursorstream; + + icursor_iterator_icursorstream(reference x) : super(x) {} + + icursor_iterator::difference_type pos() const noexcept + { + return home().pos(); + } + + icursor_iterator *get_prev() { return home().m_prev; } + void set_prev(icursor_iterator *i) { home().m_prev = i; } + + icursor_iterator *get_next() { return home().m_next; } + void set_next(icursor_iterator *i) { home().m_next = i; } + + void fill(result const &r) { home().fill(r); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/icursorstream-icursor_iterator.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/icursorstream-icursor_iterator.hxx new file mode 100644 index 000000000..56056d5ef --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/icursorstream-icursor_iterator.hxx @@ -0,0 +1,32 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE icursorstream_icursor_iterator : callgate +{ + friend class pqxx::icursor_iterator; + + icursorstream_icursor_iterator(reference x) : super(x) {} + + void 
insert_iterator(icursor_iterator *i) noexcept + { + home().insert_iterator(i); + } + + void remove_iterator(icursor_iterator *i) const noexcept + { + home().remove_iterator(i); + } + + icursorstream::size_type forward() { return home().forward(); } + icursorstream::size_type forward(icursorstream::size_type n) + { + return home().forward(n); + } + + void service_iterators(icursorstream::difference_type p) + { + home().service_iterators(p); + } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/result-connection.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/result-connection.hxx new file mode 100644 index 000000000..daa0808c0 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/result-connection.hxx @@ -0,0 +1,14 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE result_connection : callgate +{ + friend class pqxx::connection; + + result_connection(reference x) : super(x) {} + + operator bool() const { return bool(home()); } + bool operator!() const { return not home(); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/result-creation.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/result-creation.hxx new file mode 100644 index 000000000..3d9205f2c --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/result-creation.hxx @@ -0,0 +1,24 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE result_creation : callgate +{ + friend class pqxx::connection; + friend class pqxx::pipeline; + + result_creation(reference x) : super(x) {} + + static result create( + internal::pq::PGresult *rhs, std::shared_ptr const &query, + encoding_group enc) + { + return result(rhs, query, enc); + } + + void check_status(std::string_view desc = ""sv) const + { + return home().check_status(desc); + } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/result-pipeline.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/result-pipeline.hxx new file mode 100644 index 000000000..3ebe436d2 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/result-pipeline.hxx @@ -0,0 +1,16 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE result_pipeline : callgate +{ + friend class pqxx::pipeline; + + result_pipeline(reference x) : super(x) {} + + std::shared_ptr query_ptr() const + { + return home().query_ptr(); + } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/result-sql_cursor.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/result-sql_cursor.hxx new file mode 100644 index 000000000..78b450739 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/result-sql_cursor.hxx @@ -0,0 +1,13 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE result_sql_cursor : callgate +{ + friend class pqxx::internal::sql_cursor; + + result_sql_cursor(reference x) : super(x) {} + + char const *cmd_status() const noexcept { return home().cmd_status(); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/transaction-sql_cursor.hxx 
b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/transaction-sql_cursor.hxx new file mode 100644 index 000000000..4ed78dc93 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/transaction-sql_cursor.hxx @@ -0,0 +1,10 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE transaction_sql_cursor : callgate +{ + friend class pqxx::internal::sql_cursor; + transaction_sql_cursor(reference x) : super(x) {} +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/transaction-transaction_focus.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/transaction-transaction_focus.hxx new file mode 100644 index 000000000..ca7939a99 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/gates/transaction-transaction_focus.hxx @@ -0,0 +1,30 @@ +#include + +#include "pqxx/transaction_base.hxx" + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE transaction_transaction_focus : callgate +{ + friend class pqxx::transaction_focus; + + transaction_transaction_focus(reference x) : super(x) {} + + void register_focus(transaction_focus *focus) + { + home().register_focus(focus); + } + void unregister_focus(transaction_focus *focus) noexcept + { + home().unregister_focus(focus); + } + void register_pending_error(zview error) + { + home().register_pending_error(error); + } + void register_pending_error(std::string &&error) + { + home().register_pending_error(std::move(error)); + } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/header-post.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/header-post.hxx new file mode 100644 index 000000000..ff6bf8986 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/header-post.hxx @@ -0,0 +1,22 @@ +/* Compiler deficiency workarounds for compiling libpqxx headers. + * + * To be included at the end of each libpqxx header, in order to restore the + * client program's settings. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +// NO GUARDS HERE! This code should be executed every time! + +#if defined(_MSC_VER) +# pragma warning(pop) // Restore compiler's warning state +#endif + +#if !defined(PQXX_HEADER_PRE) +# error "Include pqxx/internal/header-post.hxx AFTER its 'pre' counterpart." +#endif + +#undef PQXX_HEADER_PRE diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/header-pre.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/header-pre.hxx new file mode 100644 index 000000000..abc1a398d --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/header-pre.hxx @@ -0,0 +1,169 @@ +/* Compiler settings for compiling libpqxx headers, and workarounds for all. + * + * Include this before including any other libpqxx headers from within libpqxx. + * And to balance it out, also include header-post.hxx at the end of the batch + * of headers. + * + * The public libpqxx headers (e.g. ``) include this already; + * there's no need to do this from within an application. + * + * Include this file at the highest aggregation level possible to avoid nesting + * and to keep things simple. 
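+ *
+ * The resulting pattern, sketched here with a placeholder header name, is the
+ * same one the public convenience headers use:
+ *
+ *     #include "pqxx/internal/header-pre.hxx"
+ *     #include "pqxx/some-header.hxx"
+ *     #include "pqxx/internal/header-post.hxx"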
+ * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ + +// NO GUARD HERE! This part should be included every time this file is. +#if defined(_MSC_VER) + +// Save compiler's warning state, and set warning level 4 for maximum +// sensitivity to warnings. +# pragma warning(push, 4) + +// Visual C++ generates some entirely unreasonable warnings. Disable them. +// Copy constructor could not be generated. +# pragma warning(disable : 4511) +// Assignment operator could not be generated. +# pragma warning(disable : 4512) +// Can't expose outside classes without exporting them. Except the MSVC docs +// say please ignore the warning if it's a standard library class. +# pragma warning(disable : 4251) +// Can't derive library classes from outside classes without exporting them. +// Except the MSVC docs say please ignore the warning if the parent class is +// in the standard library. +# pragma warning(disable : 4275) +// Can't inherit from non-exported class. +# pragma warning(disable : 4275) + +#endif // _MSC_VER + + +#if defined(PQXX_HEADER_PRE) +# error "Avoid nesting #include of pqxx/internal/header-pre.hxx." +#endif + +#define PQXX_HEADER_PRE + + +// Workarounds & definitions that need to be included even in library's headers +#include "pqxx/config-public-compiler.h" + +// Enable ISO-646 alternative operaotr representations: "and" instead of "&&" +// etc. on older compilers. C++20 removes this header. +#if __has_include() +# include +#endif + + +#if defined(PQXX_HAVE_GCC_PURE) +/// Declare function "pure": no side effects, only reads globals and its args. +# define PQXX_PURE __attribute__((pure)) +#else +# define PQXX_PURE /* pure */ +#endif + + +#if defined(__GNUC__) +/// Tell the compiler to optimise a function for size, not speed. +# define PQXX_COLD __attribute__((cold)) +#else +# define PQXX_COLD /* cold */ +#endif + + +// Workarounds for Windows +#ifdef _WIN32 + +/* For now, export DLL symbols if _DLL is defined. This is done automatically + * by the compiler when linking to the dynamic version of the runtime library, + * according to "gzh" + */ +# if defined(PQXX_SHARED) && !defined(PQXX_LIBEXPORT) +# define PQXX_LIBEXPORT __declspec(dllimport) +# endif // PQXX_SHARED && !PQXX_LIBEXPORT + + +// Workarounds for Microsoft Visual C++ +# ifdef _MSC_VER + +// Suppress vtables on abstract classes. +# define PQXX_NOVTABLE __declspec(novtable) + +// Automatically link with the appropriate libpq (static or dynamic, debug or +// release). The default is to use the release DLL. Define PQXX_PQ_STATIC to +// link to a static version of libpq, and _DEBUG to link to a debug version. +// The two may be combined. +# if defined(PQXX_AUTOLINK) +# if defined(PQXX_PQ_STATIC) +# ifdef _DEBUG +# pragma comment(lib, "libpqd") +# else +# pragma comment(lib, "libpq") +# endif +# else +# ifdef _DEBUG +# pragma comment(lib, "libpqddll") +# else +# pragma comment(lib, "libpqdll") +# endif +# endif +# endif + +// If we're not compiling libpqxx itself, automatically link with the +// appropriate libpqxx library. To link with the libpqxx DLL, define +// PQXX_SHARED; the default is to link with the static library. A static link +// is the recommended practice. +// +// The preprocessor macro PQXX_INTERNAL is used to detect whether we +// are compiling the libpqxx library itself. 
When you compile the library +// yourself using your own project file, make sure to include this macro. +# if defined(PQXX_AUTOLINK) && !defined(PQXX_INTERNAL) +# ifdef PQXX_SHARED +# ifdef _DEBUG +# pragma comment(lib, "libpqxxD") +# else +# pragma comment(lib, "libpqxx") +# endif +# else // !PQXX_SHARED +# ifdef _DEBUG +# pragma comment(lib, "libpqxx_staticD") +# else +# pragma comment(lib, "libpqxx_static") +# endif +# endif +# endif + +# endif // _MSC_VER + +#elif defined(PQXX_HAVE_GCC_VISIBILITY) // !_WIN32 + +# define PQXX_LIBEXPORT __attribute__((visibility("default"))) +# define PQXX_PRIVATE __attribute__((visibility("hidden"))) + +#endif // PQXX_HAVE_GCC_VISIBILITY + + +#ifndef PQXX_LIBEXPORT +# define PQXX_LIBEXPORT /* libexport */ +#endif + +#ifndef PQXX_PRIVATE +# define PQXX_PRIVATE /* private */ +#endif + +#ifndef PQXX_NOVTABLE +# define PQXX_NOVTABLE /* novtable */ +#endif + +// C++20: Assume support. +#if defined(PQXX_HAVE_LIKELY) +# define PQXX_LIKELY [[likely]] +# define PQXX_UNLIKELY [[unlikely]] +#else +# define PQXX_LIKELY /* [[likely]] */ +# define PQXX_UNLIKELY /* [[unlikely]] */ +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/ignore-deprecated-post.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/ignore-deprecated-post.hxx new file mode 100644 index 000000000..cebcf0594 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/ignore-deprecated-post.hxx @@ -0,0 +1,15 @@ +/// End a code block started by "ignore-deprecated-pre.hxx". + +#if !defined(PQXX_IGNORING_DEPRECATED) +# error "Ended an 'ignore-deprecated' block while none was active." +#endif + +#if defined(__GNUC__) +# pragma GCC diagnostic pop +#endif // __GNUC__ + +#ifdef _MSC_VER +# pragma warning(pop) +#endif + +#undef PQXX_IGNORING_DEPRECATED diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/ignore-deprecated-pre.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/ignore-deprecated-pre.hxx new file mode 100644 index 000000000..8ac57afaa --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/ignore-deprecated-pre.hxx @@ -0,0 +1,28 @@ +/** Start a block of deprecated code which may call other deprecated code. + * + * Most compilers will emit warnings when deprecated code is invoked from + * non-deprecated code. But some compilers (notably gcc) will always emit the + * warning even when the calling code is also deprecated. + * + * This header starts a block where those warnings are suppressed. It can be + * included inside a code block. + * + * Always match the #include with a closing #include of + * "ignore-deprecated-post.hxx". To avoid mistakes, keep the enclosed area as + * small as possible. + */ +#if defined(PQXX_IGNORING_DEPRECATED) +# error "Started an 'ignore-deprecated' block inside another." 
+#endif + +#define PQXX_IGNORING_DEPRECATED + +#if defined(__GNUC__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#endif // __GNUC__ + +#ifdef _MSC_VER +# pragma warning(push) +# pragma warning(disable : 4996) +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/libpq-forward.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/libpq-forward.hxx new file mode 100644 index 000000000..9e74f79ec --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/libpq-forward.hxx @@ -0,0 +1,31 @@ +/** Minimal forward declarations of libpq types needed in libpqxx headers. + * + * DO NOT INCLUDE THIS FILE when building client programs. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +extern "C" +{ + struct pg_conn; + struct pg_result; + struct pgNotify; +} + +/// Forward declarations of libpq types as needed in libpqxx headers. +namespace pqxx::internal::pq +{ +using PGconn = pg_conn; +using PGresult = pg_result; +using PGnotify = pgNotify; +using PQnoticeProcessor = void (*)(void *, char const *); +} // namespace pqxx::internal::pq + +namespace pqxx +{ +/// PostgreSQL database row identifier. +using oid = unsigned int; +} // namespace pqxx diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/result_iter.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/result_iter.hxx new file mode 100644 index 000000000..1fa1f7d8a --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/result_iter.hxx @@ -0,0 +1,124 @@ +/** Result loops. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_RESULT_ITER +#define PQXX_H_RESULT_ITER + +#include + +#include "pqxx/strconv.hxx" + +namespace pqxx +{ +class result; +} // namespace pqxx + + +namespace pqxx::internal +{ +// C++20: Replace with generator? +/// Iterator for looped unpacking of a result. +template class result_iter +{ +public: + using value_type = std::tuple; + + /// Construct an "end" iterator. + result_iter() = default; + + explicit result_iter(result const &home) : + m_home{&home}, m_size{std::size(home)} + { + if (not std::empty(home)) + read(); + } + result_iter(result_iter const &) = default; + + result_iter &operator++() + { + m_index++; + if (m_index >= m_size) + m_home = nullptr; + else + read(); + return *this; + } + + /// Comparison only works for comparing to end(). 
+ bool operator==(result_iter const &rhs) const + { + return m_home == rhs.m_home; + } + bool operator!=(result_iter const &rhs) const { return not(*this == rhs); } + + value_type const &operator*() const { return m_value; } + +private: + void read() { (*m_home)[m_index].convert(m_value); } + + result const *m_home{nullptr}; + result::size_type m_index{0}; + result::size_type m_size; + value_type m_value; +}; + + +template class result_iteration +{ +public: + using iterator = result_iter; + explicit result_iteration(result const &home) : m_home{home} + { + constexpr auto tup_size{sizeof...(TYPE)}; + if (home.columns() != tup_size) + throw usage_error{internal::concat( + "Tried to extract ", to_string(tup_size), + " field(s) from a result with ", to_string(home.columns()), + " column(s).")}; + } + iterator begin() const + { + if (std::size(m_home) == 0) + return end(); + else + return iterator{m_home}; + } + iterator end() const { return {}; } + +private: + pqxx::result const &m_home; +}; +} // namespace pqxx::internal + + +template inline auto pqxx::result::iter() const +{ + return pqxx::internal::result_iteration{*this}; +} + + +template +inline void pqxx::result::for_each(CALLABLE &&func) const +{ + using args_tuple = internal::args_t; + constexpr auto sz{std::tuple_size_v}; + static_assert( + sz > 0, + "Callback for for_each must take parameters, one for each column in the " + "result."); + + auto const cols{this->columns()}; + if (sz != cols) + throw usage_error{internal::concat( + "Callback to for_each takes ", sz, "parameter", (sz == 1) ? "" : "s", + ", but result set has ", cols, "field", (cols == 1) ? "" : "s", ".")}; + + using pass_tuple = pqxx::internal::strip_types_t; + for (auto const r : *this) std::apply(func, r.as_tuple()); +} +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/result_iterator.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/result_iterator.hxx new file mode 100644 index 000000000..3f27a1d3f --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/result_iterator.hxx @@ -0,0 +1,389 @@ +/* Definitions for the pqxx::result class and support classes. + * + * pqxx::result represents the set of result rows from a database query. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/result instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_RESULT_ITERATOR +#define PQXX_H_RESULT_ITERATOR + +#include "pqxx/row.hxx" + + +/* Result iterator. + * + * Don't include this header from your own application; it is included for you + * by other libpqxx headers. + */ + +namespace pqxx +{ +/// Iterator for rows in a result. Use as result::const_iterator. +/** A result, once obtained, cannot be modified. Therefore there is no + * plain iterator type for result. However its const_iterator type can be + * used to inspect its rows without changing them. + */ +class PQXX_LIBEXPORT const_result_iterator : public row +{ +public: + using iterator_category = std::random_access_iterator_tag; + using value_type = row const; + using pointer = row const *; + using reference = row; + using size_type = result_size_type; + using difference_type = result_difference_type; + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + /// Create an iterator, but in an unusable state. 
+ const_result_iterator() noexcept = default; + /// Copy an iterator. + const_result_iterator(const_result_iterator const &) noexcept = default; + /// Move an iterator. + const_result_iterator(const_result_iterator &&) noexcept = default; + + /// Begin iterating a @ref row. + const_result_iterator(row const &t) noexcept : row{t} {} +#include "pqxx/internal/ignore-deprecated-post.hxx" + + /** + * @name Dereferencing operators + * + * An iterator "points to" its own row, which is also itself. This makes it + * easy to address a @ref result as a two-dimensional container, without + * going through the intermediate step of dereferencing the iterator. It + * makes the interface similar to C pointer/array semantics. + * + * IIRC Alex Stepanov, the inventor of the STL, once remarked that having + * this as standard behaviour for pointers would be useful in some + * algorithms. So even if this makes me look foolish, I would seem to be in + * distinguished company. + */ + //@{ + /// Dereference the iterator. + [[nodiscard]] pointer operator->() const { return this; } + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + /// Dereference the iterator. + [[nodiscard]] reference operator*() const { return *this; } +#include "pqxx/internal/ignore-deprecated-post.hxx" + //@} + + /** + * @name Field access + */ + //@{ + using row::back; + using row::front; + using row::operator[]; + using row::at; + using row::rownumber; + //@} + + /** + * @name Manipulations + */ + //@{ + const_result_iterator &operator=(const_result_iterator const &rhs) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + row::operator=(rhs); +#include "pqxx/internal/ignore-deprecated-post.hxx" + return *this; + } + + const_result_iterator &operator=(const_result_iterator &&rhs) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + row::operator=(std::move(rhs)); +#include "pqxx/internal/ignore-deprecated-post.hxx" + return *this; + } + + const_result_iterator operator++(int); + const_result_iterator &operator++() + { + ++m_index; + return *this; + } + const_result_iterator operator--(int); + const_result_iterator &operator--() + { + --m_index; + return *this; + } + + const_result_iterator &operator+=(difference_type i) + { + m_index += i; + return *this; + } + const_result_iterator &operator-=(difference_type i) + { + m_index -= i; + return *this; + } + + /// Interchange two iterators in an exception-safe manner. 
+ void swap(const_result_iterator &other) noexcept + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + row::swap(other); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + //@} + + /** + * @name Comparisons + */ + //@{ + [[nodiscard]] bool operator==(const_result_iterator const &i) const + { + return m_index == i.m_index; + } + [[nodiscard]] bool operator!=(const_result_iterator const &i) const + { + return m_index != i.m_index; + } + [[nodiscard]] bool operator<(const_result_iterator const &i) const + { + return m_index < i.m_index; + } + [[nodiscard]] bool operator<=(const_result_iterator const &i) const + { + return m_index <= i.m_index; + } + [[nodiscard]] bool operator>(const_result_iterator const &i) const + { + return m_index > i.m_index; + } + [[nodiscard]] bool operator>=(const_result_iterator const &i) const + { + return m_index >= i.m_index; + } + //@} + + /** + * @name Arithmetic operators + */ + //@{ + [[nodiscard]] inline const_result_iterator operator+(difference_type) const; + friend const_result_iterator + operator+(difference_type, const_result_iterator const &); + [[nodiscard]] inline const_result_iterator operator-(difference_type) const; + [[nodiscard]] inline difference_type + operator-(const_result_iterator const &) const; + //@} + +private: + friend class pqxx::result; + const_result_iterator(pqxx::result const *r, result_size_type i) noexcept : + row{*r, i, r->columns()} + {} +}; + + +/// Reverse iterator for result. Use as result::const_reverse_iterator. +class PQXX_LIBEXPORT const_reverse_result_iterator + : private const_result_iterator +{ +public: + using super = const_result_iterator; + using iterator_type = const_result_iterator; + using iterator_type::difference_type; + using iterator_type::iterator_category; + using iterator_type::pointer; + using value_type = iterator_type::value_type; + using reference = iterator_type::reference; + + /// Create an iterator, but in an unusable state. + const_reverse_result_iterator() = default; + /// Copy an iterator. + const_reverse_result_iterator(const_reverse_result_iterator const &rhs) = + default; + /// Copy a reverse iterator from a regular iterator. + explicit const_reverse_result_iterator(const_result_iterator const &rhs) : + const_result_iterator{rhs} + { + super::operator--(); + } + + /// Move a regular iterator into a reverse iterator. + explicit const_reverse_result_iterator(const_result_iterator const &&rhs) : + const_result_iterator{std::move(rhs)} + { + super::operator--(); + } + + /// Return the underlying "regular" iterator (as per standard library). + [[nodiscard]] PQXX_PURE const_result_iterator base() const noexcept; + + /** + * @name Dereferencing operators + */ + //@{ + /// Dereference iterator. + using const_result_iterator::operator->; + /// Dereference iterator. 
+ using const_result_iterator::operator*; + //@} + + /** + * @name Field access + */ + //@{ + using const_result_iterator::back; + using const_result_iterator::front; + using const_result_iterator::operator[]; + using const_result_iterator::at; + using const_result_iterator::rownumber; + //@} + + /** + * @name Manipulations + */ + //@{ + const_reverse_result_iterator & + operator=(const_reverse_result_iterator const &r) + { + iterator_type::operator=(r); + return *this; + } + const_reverse_result_iterator &operator=(const_reverse_result_iterator &&r) + { + iterator_type::operator=(std::move(r)); + return *this; + } + const_reverse_result_iterator &operator++() + { + iterator_type::operator--(); + return *this; + } + const_reverse_result_iterator operator++(int); + const_reverse_result_iterator &operator--() + { + iterator_type::operator++(); + return *this; + } + const_reverse_result_iterator operator--(int); + const_reverse_result_iterator &operator+=(difference_type i) + { + iterator_type::operator-=(i); + return *this; + } + const_reverse_result_iterator &operator-=(difference_type i) + { + iterator_type::operator+=(i); + return *this; + } + + void swap(const_reverse_result_iterator &other) noexcept + { + const_result_iterator::swap(other); + } + //@} + + /** + * @name Arithmetic operators + */ + //@{ + [[nodiscard]] const_reverse_result_iterator + operator+(difference_type i) const + { + return const_reverse_result_iterator(base() - i); + } + [[nodiscard]] const_reverse_result_iterator operator-(difference_type i) + { + return const_reverse_result_iterator(base() + i); + } + [[nodiscard]] difference_type + operator-(const_reverse_result_iterator const &rhs) const + { + return rhs.const_result_iterator::operator-(*this); + } + //@} + + /** + * @name Comparisons + */ + //@{ + [[nodiscard]] bool + operator==(const_reverse_result_iterator const &rhs) const noexcept + { + return iterator_type::operator==(rhs); + } + [[nodiscard]] bool + operator!=(const_reverse_result_iterator const &rhs) const noexcept + { + return not operator==(rhs); + } + + [[nodiscard]] bool operator<(const_reverse_result_iterator const &rhs) const + { + return iterator_type::operator>(rhs); + } + [[nodiscard]] bool operator<=(const_reverse_result_iterator const &rhs) const + { + return iterator_type::operator>=(rhs); + } + [[nodiscard]] bool operator>(const_reverse_result_iterator const &rhs) const + { + return iterator_type::operator<(rhs); + } + [[nodiscard]] bool operator>=(const_reverse_result_iterator const &rhs) const + { + return iterator_type::operator<=(rhs); + } + //@} +}; + + +inline const_result_iterator +const_result_iterator::operator+(result::difference_type o) const +{ + return {&m_result, size_type(result::difference_type(m_index) + o)}; +} + +inline const_result_iterator +operator+(result::difference_type o, const_result_iterator const &i) +{ + return i + o; +} + +inline const_result_iterator +const_result_iterator::operator-(result::difference_type o) const +{ + return {&m_result, result_size_type(result::difference_type(m_index) - o)}; +} + +inline result::difference_type +const_result_iterator::operator-(const const_result_iterator &i) const +{ + return result::difference_type(num() - i.num()); +} + +inline const_result_iterator result::end() const noexcept +{ + return {this, size()}; +} + + +inline const_result_iterator result::cend() const noexcept +{ + return end(); +} + + +inline const_reverse_result_iterator +operator+(result::difference_type n, const_reverse_result_iterator const &i) +{ + 
return const_reverse_result_iterator{i.base() - n}; +} + +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/sql_cursor.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/sql_cursor.hxx new file mode 100644 index 000000000..a26d06306 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/sql_cursor.hxx @@ -0,0 +1,118 @@ +/** Internal wrapper for SQL cursors. Supports higher-level cursor classes. + * + * DO NOT INCLUDE THIS FILE DIRECTLY. Other headers include it for you. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_SQL_CURSOR +#define PQXX_H_SQL_CURSOR + +namespace pqxx::internal +{ +/// Cursor with SQL positioning semantics. +/** Thin wrapper around an SQL cursor, with SQL's ideas of positioning. + * + * SQL cursors have pre-increment/pre-decrement semantics, with on either end + * of the result set a special position that does not repesent a row. This + * class models SQL cursors for the purpose of implementing more C++-like + * semantics on top. + * + * Positions of actual rows are numbered starting at 1. Position 0 exists but + * does not refer to a row. There is a similar non-row position at the end of + * the result set. + * + * Don't use this at home. You deserve better. Use the stateles_cursor + * instead. + */ +class PQXX_LIBEXPORT sql_cursor : public cursor_base +{ +public: + sql_cursor( + transaction_base &t, std::string_view query, std::string_view cname, + cursor_base::access_policy ap, cursor_base::update_policy up, + cursor_base::ownership_policy op, bool hold); + + sql_cursor( + transaction_base &t, std::string_view cname, + cursor_base::ownership_policy op); + + ~sql_cursor() noexcept { close(); } + + result fetch(difference_type rows, difference_type &displacement); + result fetch(difference_type rows) + { + difference_type d = 0; + return fetch(rows, d); + } + difference_type move(difference_type rows, difference_type &displacement); + difference_type move(difference_type rows) + { + difference_type d = 0; + return move(rows, d); + } + + /// Current position, or -1 for unknown + /** + * The starting position, just before the first row, counts as position zero. + * + * Position may be unknown if (and only if) this cursor was adopted, and has + * never hit its starting position (position zero). + */ + difference_type pos() const noexcept { return m_pos; } + + /// End position, or -1 for unknown + /** + * Returns the final position, just after the last row in the result set. The + * starting position, just before the first row, counts as position zero. + * + * End position is unknown until it is encountered during use. + */ + difference_type endpos() const noexcept { return m_endpos; } + + /// Return zero-row result for this cursor. + result const &empty_result() const noexcept { return m_empty_result; } + + void close() noexcept; + +private: + difference_type adjust(difference_type hoped, difference_type actual); + static std::string stridestring(difference_type); + /// Initialize cached empty result. Call only at beginning or end! + void init_empty_result(transaction_base &); + + /// Connection in which this cursor lives. 
+ connection &m_home; + + /// Zero-row result from this cursor (or plain empty one if cursor is + /// adopted) + result m_empty_result; + + result m_cached_current_row; + + /// Is this cursor adopted (as opposed to created by this cursor object)? + bool m_adopted; + + /// Will this cursor object destroy its SQL cursor when it dies? + cursor_base::ownership_policy m_ownership; + + /// At starting position (-1), somewhere in the middle (0), or past end (1) + int m_at_end; + + /// Position, or -1 for unknown + difference_type m_pos; + + /// End position, or -1 for unknown + difference_type m_endpos = -1; +}; + + +PQXX_LIBEXPORT result_size_type obtain_stateless_cursor_size(sql_cursor &); +PQXX_LIBEXPORT result stateless_cursor_retrieve( + sql_cursor &, result::difference_type size, + result::difference_type begin_pos, result::difference_type end_pos); +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/statement_parameters.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/statement_parameters.hxx new file mode 100644 index 000000000..b078bf6e0 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/statement_parameters.hxx @@ -0,0 +1,131 @@ +/** Common implementation for statement parameter lists. + * + * These are used for both prepared statements and parameterized statements. + * + * DO NOT INCLUDE THIS FILE DIRECTLY. Other headers include it for you. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_STATEMENT_PARAMETER +#define PQXX_H_STATEMENT_PARAMETER + +#include +#include +#include +#include +#include + +#include "pqxx/binarystring.hxx" +#include "pqxx/strconv.hxx" +#include "pqxx/util.hxx" + + +namespace pqxx::internal +{ +template +constexpr inline auto const iterator_identity{ + [](decltype(*std::declval()) x) { return x; }}; + + +/// Marker type: pass a dynamically-determined number of statement parameters. +/** @deprecated Use @ref params instead. + * + * Normally when invoking a prepared or parameterised statement, the number + * of parameters is known at compile time. For instance, + * `t.exec_prepared("foo", 1, "x");` executes statement `foo` with two + * parameters, an `int` and a C string. + * + * But sometimes you may want to pass a number of parameters known only at run + * time. In those cases, a @ref dynamic_params encodes a dynamically + * determined number of parameters. You can mix these with regular, static + * parameter lists, and you can re-use them for multiple statement invocations. + * + * A dynamic_params object does not store copies of its parameters, so make + * sure they remain accessible until you've executed the statement. + * + * The ACCESSOR is an optional callable (such as a lambda). If you pass an + * accessor `a`, then each parameter `p` goes into your statement as `a(p)`. + */ +template)> +class dynamic_params +{ +public: + /// Wrap a sequence of pointers or iterators. + constexpr dynamic_params(IT begin, IT end) : + m_begin(begin), m_end(end), m_accessor(iterator_identity) + {} + + /// Wrap a sequence of pointers or iterators. + /** This version takes an accessor callable. If you pass an accessor `acc`, + * then any parameter `p` will go into the statement's parameter list as + * `acc(p)`. 
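+ *
+ * An illustrative sketch (the names are hypothetical, and @ref params is the
+ * preferred replacement for new code):
+ *
+ *     std::vector<std::pair<std::string, int>> rows{{"a", 1}, {"b", 2}};
+ *     auto get_second{[](auto const &r) { return r.second; }};
+ *     dynamic_params with_ints{std::begin(rows), std::end(rows), get_second};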
+ */ + constexpr dynamic_params(IT begin, IT end, ACCESSOR &acc) : + m_begin(begin), m_end(end), m_accessor(acc) + {} + + /// Wrap a container. + template + explicit constexpr dynamic_params(C &container) : + dynamic_params(std::begin(container), std::end(container)) + {} + + /// Wrap a container. + /** This version takes an accessor callable. If you pass an accessor `acc`, + * then any parameter `p` will go into the statement's parameter list as + * `acc(p)`. + */ + template + explicit constexpr dynamic_params(C &container, ACCESSOR &acc) : + dynamic_params(std::begin(container), std::end(container), acc) + {} + + constexpr IT begin() const noexcept { return m_begin; } + constexpr IT end() const noexcept { return m_end; } + + constexpr auto access(decltype(*std::declval()) value) const + -> decltype(std::declval()(value)) + { + return m_accessor(value); + } + +private: + IT const m_begin, m_end; + ACCESSOR m_accessor = iterator_identity; +}; + + +/// Internal type: encode statement parameters. +/** Compiles arguments for prepared statements and parameterised queries into + * a format that can be passed into libpq. + * + * Objects of this type are meant to be short-lived: a `c_params` lives and + * dies entirely within the call to execute. So, for example, if you pass in a + * non-null pointer as a parameter, @ref params may simply use that pointer as + * a parameter value, without arranging longer-term storage for the data to + * which it points. All values referenced by parameters must remain "live" + * until the parameterised or prepared statement has been executed. + */ +struct PQXX_LIBEXPORT c_params +{ + c_params() = default; + /// Copying these objects is pointless and expensive. Don't do it. + c_params(c_params const &) = delete; + c_params(c_params &&) = default; + + /// Pre-allocate storage for `n` parameters. + void reserve(std::size_t n) &; + + /// As used by libpq: pointers to parameter values. + std::vector values; + /// As used by libpq: lengths of non-null arguments, in bytes. + std::vector lengths; + /// As used by libpq: effectively boolean "is this a binary parameter?" + std::vector formats; +}; +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/stream_iterator.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/stream_iterator.hxx new file mode 100644 index 000000000..f240dcfa7 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/stream_iterator.hxx @@ -0,0 +1,105 @@ +/** Stream iterators. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_STREAM_ITERATOR +#define PQXX_H_STREAM_ITERATOR + +#include + +namespace pqxx +{ +class stream_from; +} + + +namespace pqxx::internal +{ +// C++20: Replace with generator? +/// Input iterator for stream_from. +/** Just barely enough to support range-based "for" loops. Don't assume that + * any of the usual behaviour works beyond that. + */ +template class stream_input_iterator +{ +public: + using value_type = std::tuple; + + /// Construct an "end" iterator. 
+ stream_input_iterator() = default; + + explicit stream_input_iterator(stream_from &home) : m_home(&home) + { + advance(); + } + stream_input_iterator(stream_input_iterator const &) = default; + + stream_input_iterator &operator++() + { + advance(); + return *this; + } + + value_type const &operator*() const { return m_value; } + + /// Comparison only works for comparing to end(). + bool operator==(stream_input_iterator const &rhs) const + { + return m_home == rhs.m_home; + } + /// Comparison only works for comparing to end(). + bool operator!=(stream_input_iterator const &rhs) const + { + return not(*this == rhs); + } + +private: + void advance() + { + if (m_home == nullptr) + throw usage_error{"Moving stream_from iterator beyond end()."}; + if (not((*m_home) >> m_value)) + m_home = nullptr; + } + + stream_from *m_home{nullptr}; + value_type m_value; +}; + + +// C++20: Replace with generator? +/// Iteration over a @ref stream_from. +template class stream_input_iteration +{ +public: + using iterator = stream_input_iterator; + explicit stream_input_iteration(stream_from &home) : m_home{home} {} + iterator begin() const { return iterator{m_home}; } + iterator end() const { return {}; } + +private: + stream_from &m_home; +}; + + +// C++20: Replace with generator? +/// Iteration over a @ref stream_from, deleting it once done. +template class owning_stream_input_iteration +{ +public: + using iterator = stream_input_iterator; + explicit owning_stream_input_iteration(std::unique_ptr &&home) : + m_home{std::move(home)} + {} + iterator begin() const { return iterator{*m_home.get()}; } + iterator end() const { return {}; } + +private: + std::unique_ptr m_home; +}; +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/wait.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/wait.hxx new file mode 100644 index 000000000..7a82e6553 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/internal/wait.hxx @@ -0,0 +1,18 @@ +#if !defined(PQXX_WAIT_HXX) +# define PQXX_WAIT_HXX + +namespace pqxx::internal +{ +/// Wait. +/** This is normally `std::this_thread::sleep_for()`. But MinGW's `thread` + * header doesn't work, so we must be careful about including it. + */ +void PQXX_LIBEXPORT wait_for(unsigned int microseconds); + + +/// Wait for a socket to be ready for reading/writing, or timeout. +PQXX_LIBEXPORT void wait_fd( + int fd, bool for_read, bool for_write, unsigned seconds = 1, + unsigned microseconds = 0); +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/isolation b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/isolation new file mode 100644 index 000000000..1b801329b --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/isolation @@ -0,0 +1,8 @@ +/** Transaction isolation levels. + * + * Policies and traits describing SQL transaction isolation levels + */ +// Actual definitions in .hxx file so editors and such recognize file type. 
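+// An illustrative sketch (not part of the upstream header): client code picks
+// an isolation level through the transaction class template, for example:
+//
+//     pqxx::connection conn;
+//     pqxx::transaction<pqxx::serializable> tx{conn};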
+#include "pqxx/internal/header-pre.hxx" +#include "pqxx/isolation.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/isolation.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/isolation.hxx new file mode 100644 index 000000000..0698c6ab4 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/isolation.hxx @@ -0,0 +1,75 @@ +/* Definitions for transaction isolation levels, and such. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/isolation instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ISOLATION +#define PQXX_H_ISOLATION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/util.hxx" + +namespace pqxx +{ +/// Should a transaction be read-only, or read-write? +/** No, this is not an isolation level. So it really doesn't belong here. + * But it's not really worth a separate header. + */ +enum class write_policy +{ + read_only, + read_write +}; + + +/// Transaction isolation levels. +/** These are as defined in the SQL standard. But there are a few notes + * specific to PostgreSQL. + * + * First, postgres does not support "read uncommitted." The lowest level you + * can get is "read committed," which is better. PostgreSQL is built on the + * MVCC paradigm, which guarantees "read committed" isolation without any + * additional performance overhead, so there was no point in providing the + * lower level. + * + * Second, "repeatable read" also makes more isolation guarantees than the + * standard requires. According to the standard, this level prevents "dirty + * reads" and "nonrepeatable reads," but not "phantom reads." In postgres, + * it actually prevents all three. + * + * Third, "serializable" is only properly supported starting at postgres 9.1. + * If you request "serializable" isolation on an older backend, you will get + * the same isolation as in "repeatable read." It's better than the + * "repeatable read" defined in the SQL standard, but not a complete + * implementation of the standard's "serializable" isolation level. + * + * In general, a lower isolation level will allow more surprising interactions + * between ongoing transactions, but improve performance. A higher level + * gives you more protection from subtle concurrency bugs, but sometimes it + * may not be possible to complete your transaction without avoiding paradoxes + * in the data. In that case a transaction may fail, and the application will + * have to re-do the whole thing based on the latest state of the database. + * (If you want to retry your code in that situation, have a look at the + * transactor framework.) + * + * Study the levels and design your application with the right level in mind. + */ +enum isolation_level +{ + // PostgreSQL only has the better isolation levels. + // read_uncommitted, + + read_committed, + repeatable_read, + serializable, +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/largeobject b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/largeobject new file mode 100644 index 000000000..1f2f94790 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/largeobject @@ -0,0 +1,8 @@ +/** Large Objects interface. 
+ * + * Supports direct access to large objects, as well as through I/O streams + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/largeobject.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/largeobject.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/largeobject.hxx new file mode 100644 index 000000000..ebafc51d8 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/largeobject.hxx @@ -0,0 +1,735 @@ +/* Large Objects interface. Deprecated; use blob instead. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/largeobject instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_LARGEOBJECT +#define PQXX_H_LARGEOBJECT + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include + +#include "pqxx/dbtransaction.hxx" + + +namespace pqxx +{ +/// Identity of a large object. +/** @deprecated Use the @ref blob class instead. + * + * Encapsulates the identity of a large object. + * + * A largeobject must be accessed only from within a backend transaction, but + * the object's identity remains valid as long as the object exists. + */ +class PQXX_LIBEXPORT largeobject +{ +public: + using size_type = large_object_size_type; + + /// Refer to a nonexistent large object (similar to what a null pointer + /// does). + [[deprecated("Use blob instead.")]] largeobject() noexcept = default; + + /// Create new large object. + /** @param t Backend transaction in which the object is to be created. + */ + [[deprecated("Use blob instead.")]] explicit largeobject(dbtransaction &t); + + /// Wrap object with given oid. + /** Convert combination of a transaction and object identifier into a + * large object identity. Does not affect the database. + * @param o Object identifier for the given object. + */ + [[deprecated("Use blob instead.")]] explicit largeobject(oid o) noexcept : + m_id{o} + {} + + /// Import large object from a local file. + /** Creates a large object containing the data found in the given file. + * @param t Backend transaction in which the large object is to be created. + * @param file A filename on the client program's filesystem. + */ + [[deprecated("Use blob instead.")]] largeobject( + dbtransaction &t, std::string_view file); + + /// Take identity of an opened large object. + /** Copy identity of already opened large object. Note that this may be done + * as an implicit conversion. + * @param o Already opened large object to copy identity from. + */ + [[deprecated("Use blob instead.")]] largeobject( + largeobjectaccess const &o) noexcept; + + /// Object identifier. + /** The number returned by this function identifies the large object in the + * database we're connected to (or oid_none is returned if we refer to the + * null object). + */ + [[nodiscard]] oid id() const noexcept { return m_id; } + + /** + * @name Identity comparisons + * + * These operators compare the object identifiers of large objects. This has + * nothing to do with the objects' actual contents; use them only for keeping + * track of containers of references to large objects and such. 
+ */ + //@{ + /// Compare object identities + /** @warning Only valid between large objects in the same database. */ + [[nodiscard]] bool operator==(largeobject const &other) const + { + return m_id == other.m_id; + } + /// Compare object identities + /** @warning Only valid between large objects in the same database. */ + [[nodiscard]] bool operator!=(largeobject const &other) const + { + return m_id != other.m_id; + } + /// Compare object identities + /** @warning Only valid between large objects in the same database. */ + [[nodiscard]] bool operator<=(largeobject const &other) const + { + return m_id <= other.m_id; + } + /// Compare object identities + /** @warning Only valid between large objects in the same database. */ + [[nodiscard]] bool operator>=(largeobject const &other) const + { + return m_id >= other.m_id; + } + /// Compare object identities + /** @warning Only valid between large objects in the same database. */ + [[nodiscard]] bool operator<(largeobject const &other) const + { + return m_id < other.m_id; + } + /// Compare object identities + /** @warning Only valid between large objects in the same database. */ + [[nodiscard]] bool operator>(largeobject const &other) const + { + return m_id > other.m_id; + } + //@} + + /// Export large object's contents to a local file + /** Writes the data stored in the large object to the given file. + * @param t Transaction in which the object is to be accessed + * @param file A filename on the client's filesystem + */ + void to_file(dbtransaction &t, std::string_view file) const; + + /// Delete large object from database + /** Unlike its low-level equivalent cunlink, this will throw an exception if + * deletion fails. + * @param t Transaction in which the object is to be deleted + */ + void remove(dbtransaction &t) const; + +protected: + PQXX_PURE static internal::pq::PGconn * + raw_connection(dbtransaction const &T); + + PQXX_PRIVATE std::string reason(connection const &, int err) const; + +private: + oid m_id = oid_none; +}; + + +/// Accessor for large object's contents. +/** @deprecated Use the `blob` class instead. + */ +class PQXX_LIBEXPORT largeobjectaccess : private largeobject +{ +public: + using largeobject::size_type; + using off_type = size_type; + using pos_type = size_type; + + /// Open mode: `in`, `out` (can be combined using "bitwise or"). + /** According to the C++ standard, these should be in `std::ios_base`. We + * take them from derived class `std::ios` instead, which is easier on the + * eyes. + * + * Historical note: taking it from std::ios was originally a workaround for a + * problem with gcc 2.95. + */ + using openmode = std::ios::openmode; + + /// Default open mode: in, out, binary. + static constexpr auto default_mode{ + std::ios::in | std::ios::out | std::ios::binary}; + + /// Seek direction: `beg`, `cur`, `end`. + using seekdir = std::ios::seekdir; + + /// Create new large object and open it. + /** + * @param t Backend transaction in which the object is to be created. + * @param mode Access mode, defaults to ios_base::in | ios_base::out | + * ios_base::binary. + */ + [[deprecated("Use blob instead.")]] explicit largeobjectaccess( + dbtransaction &t, openmode mode = default_mode); + + /// Open large object with given oid. + /** Convert combination of a transaction and object identifier into a + * large object identity. Does not affect the database. + * @param t Transaction in which the object is to be accessed. + * @param o Object identifier for the given object. 
+ * @param mode Access mode, defaults to ios_base::in | ios_base::out | + * ios_base::binary. + */ + [[deprecated("Use blob instead.")]] largeobjectaccess( + dbtransaction &t, oid o, openmode mode = default_mode); + + /// Open given large object. + /** Open a large object with the given identity for reading and/or writing. + * @param t Transaction in which the object is to be accessed. + * @param o Identity for the large object to be accessed. + * @param mode Access mode, defaults to ios_base::in | ios_base::out | + * ios_base::binary. + */ + [[deprecated("Use blob instead.")]] largeobjectaccess( + dbtransaction &t, largeobject o, openmode mode = default_mode); + + /// Import large object from a local file and open it. + /** Creates a large object containing the data found in the given file. + * @param t Backend transaction in which the large object is to be created. + * @param file A filename on the client program's filesystem. + * @param mode Access mode, defaults to ios_base::in | ios_base::out. + */ + [[deprecated("Use blob instead.")]] largeobjectaccess( + dbtransaction &t, std::string_view file, openmode mode = default_mode); + + ~largeobjectaccess() noexcept { close(); } + + /// Object identifier. + /** The number returned by this function uniquely identifies the large object + * in the context of the database we're connected to. + */ + using largeobject::id; + + /// Export large object's contents to a local file. + /** Writes the data stored in the large object to the given file. + * @param file A filename on the client's filesystem. + */ + void to_file(std::string_view file) const + { + largeobject::to_file(m_trans, file); + } + + using largeobject::to_file; + + /** + * @name High-level access to object contents. + */ + //@{ + /// Write data to large object. + /** @warning The size of a write is currently limited to 2GB. + * + * @param buf Data to write. + * @param len Number of bytes from Buf to write. + */ + void write(char const buf[], std::size_t len); + + /// Write string to large object. + /** If not all bytes could be written, an exception is thrown. + * @param buf Data to write; no terminating zero is written. + */ + void write(std::string_view buf) { write(std::data(buf), std::size(buf)); } + + /// Read data from large object. + /** Throws an exception if an error occurs while reading. + * @param buf Location to store the read data in. + * @param len Number of bytes to try and read. + * @return Number of bytes read, which may be less than the number requested + * if the end of the large object is reached. + */ + size_type read(char buf[], std::size_t len); + + /// Seek in large object's data stream. + /** Throws an exception if an error occurs. + * @return The new position in the large object + */ + size_type seek(size_type dest, seekdir dir); + + /// Report current position in large object's data stream. + /** Throws an exception if an error occurs. + * @return The current position in the large object. + */ + [[nodiscard]] size_type tell() const; + //@} + + /** + * @name Low-level access to object contents. + * + * These functions provide a more "C-like" access interface, returning + * special values instead of throwing exceptions on error. These functions + * are generally best avoided in favour of the high-level access functions, + * which behave more like C++ functions should. + * + * Due to libpq's underlying API, some operations are limited to "int" + * sizes, typically 2 GB, even though a large object can grow much larger. 
+ */ + //@{ + /// Seek in large object's data stream. + /** Does not throw exception in case of error; inspect return value and + * `errno` instead. + * @param dest Offset to go to. + * @param dir Origin to which dest is relative: ios_base::beg (from beginning + * of the object), ios_base::cur (from current access position), or + * ios_base;:end (from end of object). + * @return New position in large object, or -1 if an error occurred. + */ + pos_type cseek(off_type dest, seekdir dir) noexcept; + + /// Write to large object's data stream. + /** Does not throw exception in case of error; inspect return value and + * `errno` instead. + * @param buf Data to write. + * @param len Number of bytes to write. + * @return Number of bytes actually written, or -1 if an error occurred. + */ + off_type cwrite(char const buf[], std::size_t len) noexcept; + + /// Read from large object's data stream. + /** Does not throw exception in case of error; inspect return value and + * `errno` instead. + * @param buf Area where incoming bytes should be stored. + * @param len Number of bytes to read. + * @return Number of bytes actually read, or -1 if an error occurred.. + */ + off_type cread(char buf[], std::size_t len) noexcept; + + /// Report current position in large object's data stream. + /** Does not throw exception in case of error; inspect return value and + * `errno` instead. + * @return Current position in large object, of -1 if an error occurred. + */ + [[nodiscard]] pos_type ctell() const noexcept; + //@} + + /** + * @name Error/warning output + */ + //@{ + /// Issue message to transaction's notice processor. + void process_notice(zview) noexcept; + //@} + + using largeobject::remove; + + using largeobject::operator==; + using largeobject::operator!=; + using largeobject::operator<; + using largeobject::operator<=; + using largeobject::operator>; + using largeobject::operator>=; + + largeobjectaccess() = delete; + largeobjectaccess(largeobjectaccess const &) = delete; + largeobjectaccess operator=(largeobjectaccess const &) = delete; + +private: + PQXX_PRIVATE std::string reason(int err) const; + internal::pq::PGconn *raw_connection() const + { + return largeobject::raw_connection(m_trans); + } + + PQXX_PRIVATE void open(openmode mode); + void close() noexcept; + + dbtransaction &m_trans; + int m_fd = -1; +}; + + +/// Streambuf to use large objects in standard I/O streams. +/** @deprecated Access large objects directly using the @ref blob class. + * + * The standard streambuf classes provide uniform access to data storage such + * as files or string buffers, so they can be accessed using standard input or + * output streams. This streambuf implementation provided similar access to + * large objects, so they could be read and written using the same stream + * classes. + * + * This functionality was considered too fragile and complex, so it has been + * replaced with a single, much simpler class. + */ +template> +class largeobject_streambuf : public std::basic_streambuf +{ + using size_type = largeobject::size_type; + +public: + using char_type = CHAR; + using traits_type = TRAITS; + using int_type = typename traits_type::int_type; + using pos_type = typename traits_type::pos_type; + using off_type = typename traits_type::off_type; + using openmode = largeobjectaccess::openmode; + using seekdir = largeobjectaccess::seekdir; + + /// Default open mode: in, out, binary. 
+ static constexpr auto default_mode{ + std::ios::in | std::ios::out | std::ios::binary}; + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + [[deprecated("Use blob instead.")]] largeobject_streambuf( + dbtransaction &t, largeobject o, openmode mode = default_mode, + size_type buf_size = 512) : + m_bufsize{buf_size}, m_obj{t, o, mode}, m_g{nullptr}, m_p{nullptr} + { + initialize(mode); + } +#include "pqxx/internal/ignore-deprecated-post.hxx" + + [[deprecated("Use blob instead.")]] largeobject_streambuf( + dbtransaction &t, oid o, openmode mode = default_mode, + size_type buf_size = 512) : + m_bufsize{buf_size}, m_obj{t, o, mode}, m_g{nullptr}, m_p{nullptr} + { + initialize(mode); + } + + virtual ~largeobject_streambuf() noexcept + { + delete[] m_p; + delete[] m_g; + } + + /// For use by large object stream classes. + void process_notice(zview const &s) { m_obj.process_notice(s); } + +protected: + virtual int sync() override + { + // setg() sets eback, gptr, egptr. + this->setg(this->eback(), this->eback(), this->egptr()); + return overflow(eof()); + } + + virtual pos_type seekoff(off_type offset, seekdir dir, openmode) override + { + return adjust_eof(m_obj.cseek(largeobjectaccess::off_type(offset), dir)); + } + + virtual pos_type seekpos(pos_type pos, openmode) override + { + largeobjectaccess::pos_type const newpos{ + m_obj.cseek(largeobjectaccess::off_type(pos), std::ios::beg)}; + return adjust_eof(newpos); + } + + virtual int_type overflow(int_type ch) override + { + auto *const pp{this->pptr()}; + if (pp == nullptr) + return eof(); + auto *const pb{this->pbase()}; + int_type res{0}; + + if (pp > pb) + { + auto const write_sz{pp - pb}; + auto const written_sz{ + m_obj.cwrite(pb, static_cast(pp - pb))}; + if (internal::cmp_less_equal(written_sz, 0)) + throw internal_error{ + "pqxx::largeobject: write failed " + "(is transaction still valid on write or flush?), " + "libpq reports error"}; + else if (write_sz != written_sz) + throw internal_error{ + "pqxx::largeobject: write failed " + "(is transaction still valid on write or flush?), " + + std::to_string(written_sz) + "/" + std::to_string(write_sz) + + " bytes written"}; + auto const out{adjust_eof(written_sz)}; + + if constexpr (std::is_arithmetic_v) + res = check_cast(out, "largeobject position"sv); + else + res = int_type(out); + } + this->setp(m_p, m_p + m_bufsize); + + // Write that one more character, if it's there. + if (ch != eof()) + { + *this->pptr() = static_cast(ch); + this->pbump(1); + } + return res; + } + + virtual int_type overflow() { return overflow(eof()); } + + virtual int_type underflow() override + { + if (this->gptr() == nullptr) + return eof(); + auto *const eb{this->eback()}; + auto const res{adjust_eof( + m_obj.cread(this->eback(), static_cast(m_bufsize)))}; + this->setg( + eb, eb, eb + (res == eof() ? 0 : static_cast(res))); + return (res == eof() or res == 0) ? eof() : traits_type::to_int_type(*eb); + } + +private: + /// Shortcut for traits_type::eof(). + static int_type eof() { return traits_type::eof(); } + + /// Helper: change error position of -1 to EOF (probably a no-op). + template static std::streampos adjust_eof(INTYPE pos) + { + bool const at_eof{pos == -1}; + if constexpr (std::is_arithmetic_v) + { + return check_cast( + (at_eof ? eof() : pos), "large object seek"sv); + } + else + { + return std::streampos(at_eof ? 
eof() : pos); + } + } + + void initialize(openmode mode) + { + if ((mode & std::ios::in) != 0) + { + m_g = new char_type[unsigned(m_bufsize)]; + this->setg(m_g, m_g, m_g); + } + if ((mode & std::ios::out) != 0) + { + m_p = new char_type[unsigned(m_bufsize)]; + this->setp(m_p, m_p + m_bufsize); + } + } + + size_type const m_bufsize; + largeobjectaccess m_obj; + + /// Get & put buffers. + char_type *m_g, *m_p; +}; + + +/// Input stream that gets its data from a large object. +/** @deprecated Access large objects directly using the @ref blob class. + * + * This class worked like any other istream, but to read data from a large + * object. It supported all formatting and streaming operations of + * `std::istream`. + * + * This functionality was considered too fragile and complex, so it has been + * replaced with a single, much simpler class. + */ +template> +class basic_ilostream : public std::basic_istream +{ + using super = std::basic_istream; + +public: + using char_type = CHAR; + using traits_type = TRAITS; + using int_type = typename traits_type::int_type; + using pos_type = typename traits_type::pos_type; + using off_type = typename traits_type::off_type; + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + /// Create a basic_ilostream. + /** + * @param t Transaction in which this stream is to exist. + * @param o Large object to access. + * @param buf_size Size of buffer to use internally (optional). + */ + [[deprecated("Use blob instead.")]] basic_ilostream( + dbtransaction &t, largeobject o, largeobject::size_type buf_size = 512) : + super{nullptr}, + m_buf{t, o, std::ios::in | std::ios::binary, buf_size} + { + super::init(&m_buf); + } +#include "pqxx/internal/ignore-deprecated-post.hxx" + + /// Create a basic_ilostream. + /** + * @param t Transaction in which this stream is to exist. + * @param o Identifier of a large object to access. + * @param buf_size Size of buffer to use internally (optional). + */ + [[deprecated("Use blob instead.")]] basic_ilostream( + dbtransaction &t, oid o, largeobject::size_type buf_size = 512) : + super{nullptr}, + m_buf{t, o, std::ios::in | std::ios::binary, buf_size} + { + super::init(&m_buf); + } + +private: + largeobject_streambuf m_buf; +}; + +using ilostream = basic_ilostream; + + +/// Output stream that writes data back to a large object. +/** @deprecated Access large objects directly using the @ref blob class. + * + * This worked like any other ostream, but to write data to a large object. + * It supported all formatting and streaming operations of `std::ostream`. + * + * This functionality was considered too fragile and complex, so it has been + * replaced with a single, much simpler class. + */ +template> +class basic_olostream : public std::basic_ostream +{ + using super = std::basic_ostream; + +public: + using char_type = CHAR; + using traits_type = TRAITS; + using int_type = typename traits_type::int_type; + using pos_type = typename traits_type::pos_type; + using off_type = typename traits_type::off_type; + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + /// Create a basic_olostream. + /** + * @param t transaction in which this stream is to exist. + * @param o a large object to access. + * @param buf_size size of buffer to use internally (optional). 
+ */ + [[deprecated("Use blob instead.")]] basic_olostream( + dbtransaction &t, largeobject o, largeobject::size_type buf_size = 512) : + super{nullptr}, + m_buf{t, o, std::ios::out | std::ios::binary, buf_size} + { + super::init(&m_buf); + } +#include "pqxx/internal/ignore-deprecated-post.hxx" + + /// Create a basic_olostream. + /** + * @param t transaction in which this stream is to exist. + * @param o a large object to access. + * @param buf_size size of buffer to use internally (optional). + */ + [[deprecated("Use blob instead.")]] basic_olostream( + dbtransaction &t, oid o, largeobject::size_type buf_size = 512) : + super{nullptr}, + m_buf{t, o, std::ios::out | std::ios::binary, buf_size} + { + super::init(&m_buf); + } + + ~basic_olostream() + { + try + { + m_buf.pubsync(); + m_buf.pubsync(); + } + catch (std::exception const &e) + { + m_buf.process_notice(e.what()); + } + } + +private: + largeobject_streambuf m_buf; +}; + +using olostream = basic_olostream; + + +/// Stream that reads and writes a large object. +/** @deprecated Access large objects directly using the @ref blob class. + * + * This worked like a std::iostream, but to read data from, or write data to, a + * large object. It supported all formatting and streaming operations of + * `std::iostream`. + * + * This functionality was considered too fragile and complex, so it has been + * replaced with a single, much simpler class. + */ +template> +class basic_lostream : public std::basic_iostream +{ + using super = std::basic_iostream; + +public: + using char_type = CHAR; + using traits_type = TRAITS; + using int_type = typename traits_type::int_type; + using pos_type = typename traits_type::pos_type; + using off_type = typename traits_type::off_type; + + /// Create a basic_lostream. + /** + * @param t Transaction in which this stream is to exist. + * @param o Large object to access. + * @param buf_size Size of buffer to use internally (optional). + */ + [[deprecated("Use blob instead.")]] basic_lostream( + dbtransaction &t, largeobject o, largeobject::size_type buf_size = 512) : + super{nullptr}, + m_buf{ + t, o, std::ios::in | std::ios::out | std::ios::binary, buf_size} + { + super::init(&m_buf); + } + + /// Create a basic_lostream. + /** + * @param t Transaction in which this stream is to exist. + * @param o Large object to access. + * @param buf_size Size of buffer to use internally (optional). + */ + [[deprecated("Use blob instead.")]] basic_lostream( + dbtransaction &t, oid o, largeobject::size_type buf_size = 512) : + super{nullptr}, + m_buf{ + t, o, std::ios::in | std::ios::out | std::ios::binary, buf_size} + { + super::init(&m_buf); + } + + ~basic_lostream() + { + try + { + m_buf.pubsync(); + m_buf.pubsync(); + } + catch (std::exception const &e) + { + m_buf.process_notice(e.what()); + } + } + +private: + largeobject_streambuf m_buf; +}; + +using lostream = basic_lostream; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/nontransaction b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/nontransaction new file mode 100644 index 000000000..bb5b79724 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/nontransaction @@ -0,0 +1,8 @@ +/** pqxx::nontransaction class. + * + * pqxx::nontransaction provides nontransactional database access. + */ +// Actual definitions in .hxx file so editors and such recognize file type. 
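// Usage sketch for the deprecated large-object API defined above.  It compiles
// with -Wdeprecated-declarations warnings; new code should prefer pqxx::blob.
// Function names and the stored text are illustrative only.
#include <ios>
#include <string>
#include <pqxx/pqxx>

pqxx::oid store_note(pqxx::dbtransaction &tx, std::string const &text)
{
  // Create a fresh large object in the current transaction and write to it.
  pqxx::largeobjectaccess lo{tx};
  lo.write(text);
  return lo.id();
}

std::string load_note(pqxx::dbtransaction &tx, pqxx::oid id)
{
  // Open the existing object read-only and drain it in chunks.
  pqxx::largeobjectaccess lo{tx, id, std::ios::in | std::ios::binary};
  char buf[4096];
  std::string out;
  for (auto n{lo.read(buf, sizeof buf)}; n > 0; n = lo.read(buf, sizeof buf))
    out.append(buf, static_cast<std::size_t>(n));
  return out;
}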
+#include "pqxx/internal/header-pre.hxx" +#include "pqxx/nontransaction.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/nontransaction.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/nontransaction.hxx new file mode 100644 index 000000000..c50715594 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/nontransaction.hxx @@ -0,0 +1,76 @@ +/* Definition of the pqxx::nontransaction class. + * + * pqxx::nontransaction provides nontransactional database access + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/nontransaction instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_NONTRANSACTION +#define PQXX_H_NONTRANSACTION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/connection.hxx" +#include "pqxx/result.hxx" +#include "pqxx/transaction.hxx" + +namespace pqxx +{ +using namespace std::literals; + +/// Simple "transaction" class offering no transactional integrity. +/** + * @ingroup transactions + * + * nontransaction, like transaction or any other transaction_base-derived + * class, provides access to a database through a connection. Unlike its + * siblings, however, nontransaction does not maintain any kind of + * transactional integrity. This may be useful eg. for read-only access to the + * database that does not require a consistent, atomic view on its data; or for + * operations that are not allowed within a backend transaction, such as + * creating tables. + * + * For queries that update the database, however, a real transaction is likely + * to be faster unless the transaction consists of only a single record update. + * + * Also, you can keep a nontransaction open for as long as you like. Actual + * back-end transactions are limited in lifespan, and will sometimes fail just + * because they took too long to execute or were left idle for too long. This + * will not happen with a nontransaction (although the connection may still + * time out, e.g. when the network is unavailable for a very long time). + * + * Any query executed in a nontransaction is committed immediately, and neither + * commit() nor abort() has any effect. + * + * Database features that require a backend transaction, such as cursors or + * large objects, will not work in a nontransaction. + */ +class PQXX_LIBEXPORT nontransaction final : public transaction_base +{ +public: + /// Constructor. + /** Create a "dummy" transaction. + * @param c Connection in which this "transaction" will operate. + * @param tname Optional tname for the transaction, beginning with a letter + * and containing only letters and digits. 
+ */ + nontransaction(connection &c, std::string_view tname = ""sv) : + transaction_base{c, tname, std::shared_ptr{}} + { + register_transaction(); + } + + virtual ~nontransaction() override { close(); } + +private: + virtual void do_commit() override {} +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/notification b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/notification new file mode 100644 index 000000000..a0bd1c73e --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/notification @@ -0,0 +1,8 @@ +/** pqxx::notification_receiver functor interface. + * + * pqxx::notification_receiver handles incoming notifications. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/notification.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/notification.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/notification.hxx new file mode 100644 index 000000000..b59b8567a --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/notification.hxx @@ -0,0 +1,94 @@ +/* Definition of the pqxx::notification_receiver functor interface. + * + * pqxx::notification_receiver handles incoming notifications. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/notification instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_NOTIFICATION +#define PQXX_H_NOTIFICATION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include + +#include "pqxx/types.hxx" + + +namespace pqxx +{ +/// "Observer" base class for notifications. +/** @addtogroup notification Notifications and Receivers + * + * To listen on a notification issued using the NOTIFY command, derive your own + * class from notification_receiver and define its function-call operator to + * perform whatever action you wish to take when the given notification + * arrives. Then create an object of that class and pass it to your connection. + * DO NOT use raw SQL to listen for notifications, or your attempts to listen + * won't be resumed when a connection fails--and you'll have no way to notice. + * + * Notifications never arrive inside a transaction, not even in a + * nontransaction. Therefore, you are free to open a transaction of your own + * inside your receiver's function invocation operator. + * + * Notifications you are listening for may arrive anywhere within libpqxx code, + * but be aware that **PostgreSQL defers notifications occurring inside + * transactions.** (This was done for excellent reasons; just think about what + * happens if the transaction where you happen to handle an incoming + * notification is later rolled back for other reasons). So if you're keeping + * a transaction open, don't expect any of your receivers on the same + * connection to be notified. + * + * (For very similar reasons, outgoing notifications are also not sent until + * the transaction that sends them commits.) + * + * Multiple receivers on the same connection may listen on a notification of + * the same name. An incoming notification is processed by invoking all + * receivers (zero or more) of the same name. 
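// Usage sketch for the receiver interface documented above: derive from
// notification_receiver, override the call operator, and keep the object alive
// while you wait.  This assumes connection::await_notification() as present in
// this libpqxx version; the "ping" channel is illustrative only.
#include <iostream>
#include <string>
#include <string_view>
#include <pqxx/pqxx>

class print_receiver final : public pqxx::notification_receiver
{
public:
  print_receiver(pqxx::connection &c, std::string_view channel) :
    pqxx::notification_receiver{c, channel}
  {}

  // Called for every notification on our channel.
  void operator()(std::string const &payload, int backend_pid) override
  {
    std::cout << "got '" << payload << "' from backend " << backend_pid << '\n';
  }
};

void wait_for_ping(pqxx::connection &conn)
{
  print_receiver receiver{conn, "ping"}; // Registers a LISTEN on "ping".
  conn.await_notification();             // Blocks until something arrives.
}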
+ */ +class PQXX_LIBEXPORT PQXX_NOVTABLE notification_receiver +{ +public: + /// Register the receiver with a connection. + /** + * @param c Connnection to operate on. + * @param channel Name of the notification to listen for. + */ + notification_receiver(connection &c, std::string_view channel); + /// Register the receiver with a connection. + notification_receiver(notification_receiver const &) = delete; + /// Register the receiver with a connection. + notification_receiver &operator=(notification_receiver const &) = delete; + /// Deregister the receiver. + virtual ~notification_receiver(); + + /// The channel that this receiver listens on. + [[nodiscard]] std::string const &channel() const & { return m_channel; } + + // TODO: Change API to take payload as zview instead of string ref. + /// Overridable: action to invoke when notification arrives. + /** + * @param payload An optional string that may have been passed to the NOTIFY + * command. + * @param backend_pid Process ID of the database backend process that served + * our connection when the notification arrived. The actual process ID + * behind the connection may have changed by the time this method is called. + */ + virtual void operator()(std::string const &payload, int backend_pid) = 0; + +protected: + connection &conn() const noexcept { return m_conn; } + +private: + connection &m_conn; + std::string m_channel; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/params b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/params new file mode 100644 index 000000000..4098782aa --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/params @@ -0,0 +1,8 @@ +/** Helper classes for passing statement parameters. + * + * Use these for prepared statements and parameterised statements. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/params.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/params.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/params.hxx new file mode 100644 index 000000000..2d29cdfed --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/params.hxx @@ -0,0 +1,383 @@ +/* Helpers for prepared statements and parameterised statements. + * + * See the connection class for more about such statements. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_PARAMS +#define PQXX_H_PARAMS + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include + +#include "pqxx/internal/concat.hxx" +#include "pqxx/internal/statement_parameters.hxx" +#include "pqxx/types.hxx" + + +/// @deprecated The new @ref params class replaces all of this. +namespace pqxx::prepare +{ +/// Pass a number of statement parameters only known at runtime. +/** @deprecated Use @ref params instead. + * + * When you call any of the `exec_params` functions, the number of arguments + * is normally known at compile time. This helper function supports the case + * where it is not. + * + * Use this function to pass a variable number of parameters, based on a + * sequence ranging from `begin` to `end` exclusively. 
+ * + * The technique combines with the regular static parameters. You can use it + * to insert dynamic parameter lists in any place, or places, among the call's + * parameters. You can even insert multiple dynamic sequences. + * + * @param begin A pointer or iterator for iterating parameters. + * @param end A pointer or iterator for iterating parameters. + * @return An object representing the parameters. + */ +template +[[deprecated("Use the params class instead.")]] constexpr inline auto +make_dynamic_params(IT begin, IT end) +{ + return pqxx::internal::dynamic_params(begin, end); +} + + +/// Pass a number of statement parameters only known at runtime. +/** @deprecated Use @ref params instead. + * + * When you call any of the `exec_params` functions, the number of arguments + * is normally known at compile time. This helper function supports the case + * where it is not. + * + * Use this function to pass a variable number of parameters, based on a + * container of parameter values. + * + * The technique combines with the regular static parameters. You can use it + * to insert dynamic parameter lists in any place, or places, among the call's + * parameters. You can even insert multiple dynamic containers. + * + * @param container A container of parameter values. + * @return An object representing the parameters. + */ +template +[[deprecated("Use the params class instead.")]] constexpr inline auto +make_dynamic_params(C const &container) +{ + using IT = typename C::const_iterator; +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return pqxx::internal::dynamic_params{container}; +#include "pqxx/internal/ignore-deprecated-post.hxx" +} + + +/// Pass a number of statement parameters only known at runtime. +/** @deprecated Use @ref params instead. + * + * When you call any of the `exec_params` functions, the number of arguments + * is normally known at compile time. This helper function supports the case + * where it is not. + * + * Use this function to pass a variable number of parameters, based on a + * container of parameter values. + * + * The technique combines with the regular static parameters. You can use it + * to insert dynamic parameter lists in any place, or places, among the call's + * parameters. You can even insert multiple dynamic containers. + * + * @param container A container of parameter values. + * @param accessor For each parameter `p`, pass `accessor(p)`. + * @return An object representing the parameters. + */ +template +[[deprecated("Use the params class instead.")]] constexpr inline auto +make_dynamic_params(C &container, ACCESSOR accessor) +{ + using IT = decltype(std::begin(container)); +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return pqxx::internal::dynamic_params{container, accessor}; +#include "pqxx/internal/ignore-deprecated-post.hxx" +} +} // namespace pqxx::prepare + + +namespace pqxx +{ +/// Generate parameter placeholders for use in an SQL statement. +/** When you want to pass parameters to a prepared statement or a parameterised + * statement, you insert placeholders into the SQL. During invocation, the + * database replaces those with the respective parameter values you passed. + * + * The placeholders look like `$1` (for the first parameter value), `$2` (for + * the second), and so on. You can just write those directly in your + * statement. But for those rare cases where it becomes difficult to track + * which number a placeholder should have, you can use a `placeholders` object + * to count and generate them in order. 
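// Usage sketch for the placeholder generator described above: emit "$1", "$2",
// ... in step with dynamically assembled SQL.  The "item" table and filter
// columns are illustrative only.
#include <string>
#include <vector>
#include <pqxx/pqxx>

std::string build_filter(std::vector<std::string> const &columns)
{
  pqxx::placeholders<unsigned int> name;
  std::string sql{"SELECT * FROM item WHERE true"};
  for (auto const &col : columns)
  {
    // get() yields the current placeholder ("$1", then "$2", ...).
    sql += " AND " + col + " = " + name.get();
    name.next();
  }
  return sql;
}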
+ */ +template class placeholders +{ +public: + /// Maximum number of parameters we support. + static inline constexpr unsigned int max_params{ + (std::numeric_limits::max)()}; + + placeholders() + { + static constexpr auto initial{"$1\0"sv}; + initial.copy(std::data(m_buf), std::size(initial)); + } + + /// Read an ephemeral version of the current placeholder text. + /** @warning Changing the current placeholder number will overwrite this. + * Use the view immediately, or lose it. + */ + constexpr zview view() const &noexcept + { + return zview{std::data(m_buf), m_len}; + } + + /// Read the current placeholder text, as a `std::string`. + /** This will be slightly slower than converting to a `zview`. With most + * C++ implementations however, until you get into ridiculous numbers of + * parameters, the string will benefit from the Short String Optimization, or + * SSO. + */ + std::string get() const { return std::string(std::data(m_buf), m_len); } + + /// Move on to the next parameter. + void next() & + { + if (m_current >= max_params) + throw range_error{pqxx::internal::concat( + "Too many parameters in one statement: limit is ", max_params, ".")}; + ++m_current; + if (m_current % 10 == 0) + { + // Carry the 1. Don't get too clever for this relatively rare + // case, just rewrite the entire number. Leave the $ in place + // though. + char *const data{std::data(m_buf)}; + char *const end{string_traits::into_buf( + data + 1, data + std::size(m_buf), m_current)}; + // (Subtract because we don't include the trailing zero.) + m_len = check_cast(end - data, "placeholders counter") - 1; + } + else + { + PQXX_LIKELY + // Shortcut for the common case: just increment that last digit. + ++m_buf[m_len - 1]; + } + } + + /// Return the current placeholder number. The initial placeholder is 1. + COUNTER count() const noexcept { return m_current; } + +private: + /// Current placeholder number. Starts at 1. + COUNTER m_current = 1; + + /// Length of the current placeholder string, not including trailing zero. + COUNTER m_len = 2; + + /// Text buffer where we render the placeholders, with a trailing zero. + /** We keep reusing this for every subsequent placeholder, just because we + * don't like string allocations. + * + * Maximum length is the maximum base-10 digits that COUNTER can fully + * represent, plus 1 more for the extra digit that it can only partially + * fill up, plus room for the dollar sign and the trailing zero. + */ + std::array::digits10 + 3> m_buf; +}; + + +/// Build a parameter list for a parameterised or prepared statement. +/** When calling a parameterised statement or a prepared statement, you can + * pass parameters into the statement directly in the invocation, as + * additional arguments to `exec_prepared` or `exec_params`. But in + * complex cases, sometimes that's just not convenient. + * + * In those situations, you can create a `params` and append your parameters + * into that, one by one. Then you pass the `params` to `exec_prepared` or + * `exec_params`. + * + * Combinations also work: if you have a `params` containing a string + * parameter, and you call `exec_params` with an `int` argument followed by + * your `params`, you'll be passing the `int` as the first parameter and + * the string as the second. You can even insert a `params` in a `params`, + * or pass two `params` objects to a statement. + */ +class PQXX_LIBEXPORT params +{ +public: + params() = default; + + /// Pre-populate a `params` with `args`. Feel free to add more later. 
+ template constexpr params(Args &&...args) + { + reserve(sizeof...(args)); + append_pack(std::forward(args)...); + } + + /// Pre-allocate room for at least `n` parameters. + /** This is not needed, but it may improve efficiency. + * + * Reserve space if you're going to add parameters individually, and you've + * got some idea of how many there are going to be. It may save some + * memory re-allocations. + */ + void reserve(std::size_t n) &; + + // C++20: constexpr. + /// Get the number of parameters currently in this `params`. + [[nodiscard]] auto size() const noexcept { return m_params.size(); } + + // C++20: Use the vector's ssize() directly and go noexcept+constexpr. + /// Get the number of parameters (signed). + /** Unlike `size()`, this is not yet `noexcept`. That's because C++17's + * `std::vector` does not have a `ssize()` member function. These member + * functions are `noexcept`, but `std::size()` and `std::ssize()` are + * not. + */ + [[nodiscard]] auto ssize() const { return pqxx::internal::ssize(m_params); } + + /// Append a null value. + void append() &; + + /// Append a non-null zview parameter. + /** The underlying data must stay valid for as long as the `params` + * remains active. + */ + void append(zview) &; + + /// Append a non-null string parameter. + /** Copies the underlying data into internal storage. For best efficiency, + * use the @ref zview variant if you can, or `std::move()` + */ + void append(std::string const &) &; + + /// Append a non-null string parameter. + void append(std::string &&) &; + + /// Append a non-null binary parameter. + /** The underlying data must stay valid for as long as the `params` + * remains active. + */ + void append(std::basic_string_view) &; + + /// Append a non-null binary parameter. + /** Copies the underlying data into internal storage. For best efficiency, + * use the `std::basic_string_view` variant if you can, or + * `std::move()`. + */ + void append(std::basic_string const &) &; + +#if defined(PQXX_HAVE_CONCEPTS) + /// Append a non-null binary parameter. + /** The `data` object must stay in place and unchanged, for as long as the + * `params` remains active. + */ + template void append(DATA const &data) & + { + append( + std::basic_string_view{std::data(data), std::size(data)}); + } +#endif // PQXX_HAVE_CONCEPTS + + /// Append a non-null binary parameter. + void append(std::basic_string &&) &; + + /// @deprecated Append binarystring parameter. + /** The binarystring must stay valid for as long as the `params` remains + * active. + */ + void append(binarystring const &value) &; + + /// Append all parameters from value. + template + void append(pqxx::internal::dynamic_params const &value) & + { + for (auto ¶m : value) append(value.access(param)); + } + + void append(params const &value) &; + + void append(params &&value) &; + + /// Append a non-null parameter, converting it to its string + /// representation. + template void append(TYPE const &value) & + { + // TODO: Pool storage for multiple string conversions in one buffer? + if constexpr (nullness>::always_null) + { + ignore_unused(value); + m_params.emplace_back(); + } + else if (is_null(value)) + { + m_params.emplace_back(); + } + else + { + m_params.emplace_back(entry{to_string(value)}); + } + } + + /// Append all elements of `range` as parameters. 
+ template void append_multi(RANGE const &range) & + { +#if defined(PQXX_HAVE_CONCEPTS) + if constexpr (std::ranges::sized_range) + reserve(std::size(*this) + std::size(range)); +#endif + for (auto &value : range) append(value); + } + + /// For internal use: Generate a `params` object for use in calls. + /** The params object encapsulates the pointers which we will need to pass + * to libpq when calling a parameterised or prepared statement. + * + * The pointers in the params will refer to storage owned by either the + * params object, or the caller. This is not a problem because a + * `c_params` object is guaranteed to live only while the call is going on. + * As soon as we climb back out of that call tree, we're done with that + * data. + */ + pqxx::internal::c_params make_c_params() const; + +private: + /// Recursively append a pack of params. + template + void append_pack(Arg &&arg, More &&...args) + { + this->append(std::forward(arg)); + // Recurse for remaining args. + append_pack(std::forward(args)...); + } + + /// Terminating case: append an empty parameter pack. It's not hard BTW. + constexpr void append_pack() noexcept {} + + // The way we store a parameter depends on whether it's binary or text + // (most types are text), and whether we're responsible for storing the + // contents. + using entry = std::variant< + std::nullptr_t, zview, std::string, std::basic_string_view, + std::basic_string>; + std::vector m_params; + + static constexpr std::string_view s_overflow{ + "Statement parameter length overflow."sv}; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/pipeline b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/pipeline new file mode 100644 index 000000000..bf828843a --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/pipeline @@ -0,0 +1,8 @@ +/** pqxx::pipeline class. + * + * Throughput-optimized query interface. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/pipeline.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/pipeline.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/pipeline.hxx new file mode 100644 index 000000000..049dcdd58 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/pipeline.hxx @@ -0,0 +1,237 @@ +/* Definition of the pqxx::pipeline class. + * + * Throughput-optimized mechanism for executing queries. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/pipeline instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_PIPELINE +#define PQXX_H_PIPELINE + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include + +#include "pqxx/transaction_base.hxx" + + +namespace pqxx +{ +// TODO: libpq 14 introduced a similar "pipeline mode." Can we use that? + +/// Processes several queries in FIFO manner, optimized for high throughput. +/** Use a pipeline if you want to keep doing useful work while your queries are + * executing. Result retrieval is decoupled from execution request; queries + * "go in at the front" and results "come out the back." 
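// Usage sketch for the params class defined above: build the parameter list
// separately, then hand it to exec_params().  The "employee" table and the use
// of std::optional for a nullable column are illustrative only.
#include <optional>
#include <string>
#include <pqxx/pqxx>

pqxx::result insert_employee(
  pqxx::transaction_base &tx, std::string const &name,
  std::optional<int> manager_id)
{
  pqxx::params args;
  args.append(name);       // Becomes $1.
  args.append(manager_id); // Becomes $2; an empty optional turns into NULL.
  return tx.exec_params(
    "INSERT INTO employee (name, manager_id) VALUES ($1, $2) RETURNING id",
    args);
}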
+ * + * Actually, you can retrieve the results in any order if you want, but it may + * lead to surprising "time travel" effects if any of the queries fails. In + * particular, syntax errors in the queries can confuse things and show up too + * early in the stream of results. + * + * Generally, if any of the queries fails, it will throw an exception at the + * point where you request its result. But it may happen earlier, especially + * if you request results out of chronological order. + * + * @warning While a pipeline is active, you cannot execute queries, open + * streams, etc. on the same transaction. A transaction can have at most one + * object of a type derived from @ref pqxx::transaction_focus active on it at a + * time. + */ +class PQXX_LIBEXPORT pipeline : public transaction_focus +{ +public: + /// Identifying numbers for queries. + using query_id = long; + + pipeline(pipeline const &) = delete; + pipeline &operator=(pipeline const &) = delete; + + /// Start a pipeline. + explicit pipeline(transaction_base &t) : transaction_focus{t, s_classname} + { + init(); + } + /// Start a pipeline. Assign it a name, for more helpful error messages. + pipeline(transaction_base &t, std::string_view tname) : + transaction_focus{t, s_classname, tname} + { + init(); + } + + /// Close the pipeline. + ~pipeline() noexcept; + + /// Add query to the pipeline. + /** Queries accumulate in the pipeline, which sends them to the backend in a + * batch separated by semicolons. The queries you insert must not use this + * trick themselves, or the pipeline will get hopelessly confused! + * + * @return Identifier for this query, unique only within this pipeline. + */ + query_id insert(std::string_view) &; + + /// Wait for all ongoing or pending operations to complete, and detach. + /** Detaches from the transaction when done. + * + * This does not produce the queries' results, so it may not report any + * errors which may have occurred in their execution. To be sure that your + * statements succeeded, call @ref retrieve until the pipeline is empty. + */ + void complete(); + + /// Forget all ongoing or pending operations and retrieved results. + /** Queries already sent to the backend may still be completed, depending + * on implementation and timing. + * + * Any error state (unless caused by an internal error) will also be cleared. + * This is mostly useful in a nontransaction, since a backend transaction is + * aborted automatically when an error occurs. + * + * Detaches from the transaction when done. + */ + void flush(); + + /// Cancel ongoing query, if any. + /** May cancel any or all of the queries that have been inserted at this + * point whose results have not yet been retrieved. If the pipeline lives in + * a backend transaction, that transaction may be left in a nonfunctional + * state in which it can only be aborted. + * + * Therefore, either use this function in a nontransaction, or abort the + * transaction after calling it. + */ + void cancel(); + + /// Is result for given query available? + [[nodiscard]] bool is_finished(query_id) const; + + /// Retrieve result for given query. + /** If the query failed for whatever reason, this will throw an exception. + * The function will block if the query has not finished yet. + * @warning If results are retrieved out-of-order, i.e. in a different order + * than the one in which their queries were inserted, errors may "propagate" + * to subsequent queries. 
+ */ + result retrieve(query_id qid) + { + return retrieve(m_queries.find(qid)).second; + } + + /// Retrieve oldest unretrieved result (possibly wait for one). + /** @return The query's identifier and its result set. */ + std::pair retrieve(); + + [[nodiscard]] bool empty() const noexcept { return std::empty(m_queries); } + + /// Set maximum number of queries to retain before issuing them to the + /// backend. + /** The pipeline will perform better if multiple queries are issued at once, + * but retaining queries until the results are needed (as opposed to issuing + * them to the backend immediately) may negate any performance benefits the + * pipeline can offer. + * + * Recommended practice is to set this value no higher than the number of + * queries you intend to insert at a time. + * @param retain_max A nonnegative "retention capacity;" passing zero will + * cause queries to be issued immediately + * @return Old retention capacity + */ + int retain(int retain_max = 2) &; + + + /// Resume retained query emission. Harmless when not needed. + void resume() &; + +private: + struct PQXX_PRIVATE Query + { + explicit Query(std::string_view q) : + query{std::make_shared(q)} + {} + + std::shared_ptr query; + result res; + }; + + using QueryMap = std::map; + + void init(); + void attach(); + void detach(); + + /// Upper bound to query id's. + static constexpr query_id qid_limit() noexcept + { + // Parenthesise this to work around an eternal Visual C++ problem: + // Without the extra parentheses, unless NOMINMAX is defined, the + // preprocessor will mistake this "max" for its annoying built-in macro + // of the same name. + return (std::numeric_limits::max)(); + } + + /// Create new query_id. + PQXX_PRIVATE query_id generate_id(); + + bool have_pending() const noexcept + { + return m_issuedrange.second != m_issuedrange.first; + } + + PQXX_PRIVATE void issue(); + + /// The given query failed; never issue anything beyond that. + void set_error_at(query_id qid) noexcept + { + PQXX_UNLIKELY + if (qid < m_error) + m_error = qid; + } + + /// Throw pqxx::internal_error. + [[noreturn]] PQXX_PRIVATE void internal_error(std::string const &err); + + PQXX_PRIVATE bool obtain_result(bool expect_none = false); + + PQXX_PRIVATE void obtain_dummy(); + PQXX_PRIVATE void get_further_available_results(); + PQXX_PRIVATE void check_end_results(); + + /// Receive any results that happen to be available; it's not urgent. + PQXX_PRIVATE void receive_if_available(); + + /// Receive results, up to stop if possible. + PQXX_PRIVATE void receive(pipeline::QueryMap::const_iterator stop); + std::pair retrieve(pipeline::QueryMap::iterator); + + QueryMap m_queries; + std::pair m_issuedrange; + int m_retain = 0; + int m_num_waiting = 0; + query_id m_q_id = 0; + + /// Is there a "dummy query" pending? + bool m_dummy_pending = false; + + /// Point at which an error occurred; no results beyond it will be available + query_id m_error = qid_limit(); + + /// Encoding. + /** We store this in the object to avoid the risk of exceptions at awkward + * moments. + */ + internal::encoding_group m_encoding; + + static constexpr std::string_view s_classname{"pipeline"}; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/pqxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/pqxx new file mode 100644 index 000000000..17a8eaa9c --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/pqxx @@ -0,0 +1,28 @@ +/// Convenience header: include all libpqxx definitions. 
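// Usage sketch for the pipeline above: queue independent queries, then collect
// their results; errors surface at retrieval time.  Table names are
// illustrative only.
#include <iostream>
#include <pqxx/pqxx>

void run_batch(pqxx::transaction_base &tx)
{
  pqxx::pipeline pipe{tx};
  auto const q1{pipe.insert("SELECT count(*) FROM employee")};
  auto const q2{pipe.insert("SELECT count(*) FROM orders")};

  // retrieve() blocks until the given query has finished.
  std::cout << "employees: " << pipe.retrieve(q1)[0][0].as<long>() << '\n'
            << "orders:    " << pipe.retrieve(q2)[0][0].as<long>() << '\n';
}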
+#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/array.hxx" +#include "pqxx/binarystring.hxx" +#include "pqxx/blob.hxx" +#include "pqxx/connection.hxx" +#include "pqxx/cursor.hxx" +#include "pqxx/errorhandler.hxx" +#include "pqxx/except.hxx" +#include "pqxx/largeobject.hxx" +#include "pqxx/nontransaction.hxx" +#include "pqxx/notification.hxx" +#include "pqxx/params.hxx" +#include "pqxx/pipeline.hxx" +#include "pqxx/prepared_statement.hxx" +#include "pqxx/result.hxx" +#include "pqxx/internal/result_iterator.hxx" +#include "pqxx/internal/result_iter.hxx" +#include "pqxx/robusttransaction.hxx" +#include "pqxx/row.hxx" +#include "pqxx/stream_from.hxx" +#include "pqxx/stream_to.hxx" +#include "pqxx/subtransaction.hxx" +#include "pqxx/transaction.hxx" +#include "pqxx/transactor.hxx" + +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/prepared_statement b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/prepared_statement new file mode 100644 index 000000000..674be7090 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/prepared_statement @@ -0,0 +1,3 @@ +/// @deprecated Include @c instead. + +#include "params.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/prepared_statement.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/prepared_statement.hxx new file mode 100644 index 000000000..674be7090 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/prepared_statement.hxx @@ -0,0 +1,3 @@ +/// @deprecated Include @c instead. + +#include "params.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/range b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/range new file mode 100644 index 000000000..11985eca4 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/range @@ -0,0 +1,6 @@ +/** Client-side support for SQL range types. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/range.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/range.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/range.hxx new file mode 100644 index 000000000..dc480e4b7 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/range.hxx @@ -0,0 +1,515 @@ +#ifndef PQXX_H_RANGE +#define PQXX_H_RANGE + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include + +#include "pqxx/internal/array-composite.hxx" +#include "pqxx/internal/concat.hxx" + +namespace pqxx +{ +/// An _unlimited_ boundary value to a @ref pqxx::range. +/** Use this as a lower or upper bound for a range if the range should extend + * to infinity on that side. + * + * An unlimited boundary is always inclusive of "infinity" values, if the + * range's value type supports them. + */ +struct no_bound +{ + template constexpr bool extends_down_to(TYPE const &) const + { + return true; + } + template constexpr bool extends_up_to(TYPE const &) const + { + return true; + } +}; + + +/// An _inclusive_ boundary value to a @ref pqxx::range. +/** Use this as a lower or upper bound for a range if the range should include + * the value. 
+ */ +template class inclusive_bound +{ +public: + inclusive_bound() = delete; + explicit inclusive_bound(TYPE const &value) : m_value{value} + { + if (is_null(value)) + throw argument_error{"Got null value as an inclusive range bound."}; + } + + [[nodiscard]] constexpr TYPE const &get() const &noexcept { return m_value; } + + // TODO: constexpr and/or noexcept if underlying operator supports it. + /// Would this bound, as a lower bound, include value? + [[nodiscard]] bool extends_down_to(TYPE const &value) const + { + return not(value < m_value); + } + + // TODO: constexpr and/or noexcept if underlying operator supports it. + /// Would this bound, as an upper bound, include value? + [[nodiscard]] bool extends_up_to(TYPE const &value) const + { + return not(m_value < value); + } + +private: + TYPE m_value; +}; + + +/// An _exclusive_ boundary value to a @ref pqxx::range. +/** Use this as a lower or upper bound for a range if the range should _not_ + * include the value. + */ +template class exclusive_bound +{ +public: + exclusive_bound() = delete; + explicit exclusive_bound(TYPE const &value) : m_value{value} + { + if (is_null(value)) + throw argument_error{"Got null value as an exclusive range bound."}; + } + + [[nodiscard]] constexpr TYPE const &get() const &noexcept { return m_value; } + + // TODO: constexpr and/or noexcept if underlying operator supports it. + /// Would this bound, as a lower bound, include value? + [[nodiscard]] bool extends_down_to(TYPE const &value) const + { + return m_value < value; + } + + // TODO: constexpr and/or noexcept if underlying operator supports it. + /// Would this bound, as an upper bound, include value? + [[nodiscard]] bool extends_up_to(TYPE const &value) const + { + return value < m_value; + } + +private: + TYPE m_value; +}; + + +/// A range boundary value. +/** A range bound is either no bound at all; or an inclusive bound; or an + * exclusive bound. Pass one of the three to the constructor. + */ +template class range_bound +{ +public: + range_bound() = delete; + // TODO: constexpr and/or noexcept if underlying constructor supports it. + range_bound(no_bound) : m_bound{} {} + // TODO: constexpr and/or noexcept if underlying constructor supports it. + range_bound(inclusive_bound const &bound) : m_bound{bound} {} + // TODO: constexpr and/or noexcept if underlying constructor supports it. + range_bound(exclusive_bound const &bound) : m_bound{bound} {} + // TODO: constexpr and/or noexcept if underlying constructor supports it. + range_bound(range_bound const &) = default; + // TODO: constexpr and/or noexcept if underlying constructor supports it. + range_bound(range_bound &&) = default; + + // TODO: constexpr and/or noexcept if underlying operators support it. + bool operator==(range_bound const &rhs) const + { + if (this->is_limited()) + return ( + rhs.is_limited() and (this->is_inclusive() == rhs.is_inclusive()) and + (*this->value() == *rhs.value())); + else + return not rhs.is_limited(); + } + + // TODO: constexpr and/or noexcept if underlying operator supports it. + bool operator!=(range_bound const &rhs) const { return not(*this == rhs); } + range_bound &operator=(range_bound const &) = default; + range_bound &operator=(range_bound &&) = default; + + /// Is this a finite bound? + constexpr bool is_limited() const noexcept + { + return not std::holds_alternative(m_bound); + } + + /// Is this boundary an inclusive one? 
+ constexpr bool is_inclusive() const noexcept + { + return std::holds_alternative>(m_bound); + } + + /// Is this boundary an exclusive one? + constexpr bool is_exclusive() const noexcept + { + return std::holds_alternative>(m_bound); + } + + // TODO: constexpr/noexcept if underlying function supports it. + /// Would this bound, as a lower bound, include `value`? + bool extends_down_to(TYPE const &value) const + { + return std::visit( + [&value](auto const &bound) { return bound.extends_down_to(value); }, + m_bound); + } + + // TODO: constexpr/noexcept if underlying function supports it. + /// Would this bound, as an upper bound, include `value`? + bool extends_up_to(TYPE const &value) const + { + return std::visit( + [&value](auto const &bound) { return bound.extends_up_to(value); }, + m_bound); + } + + /// Return bound value, or `nullptr` if it's not limited. + [[nodiscard]] constexpr TYPE const *value() const &noexcept + { + return std::visit( + [](auto const &bound) noexcept { + using bound_t = std::decay_t; + if constexpr (std::is_same_v) + return static_cast(nullptr); + else + return &bound.get(); + }, + m_bound); + } + +private: + std::variant, exclusive_bound> m_bound; +}; + + +// C++20: Concepts for comparisons, construction, etc. +/// A C++ equivalent to PostgreSQL's range types. +/** You can use this as a client-side representation of a "range" in SQL. + * + * PostgreSQL defines several range types, differing in the data type over + * which they range. You can also define your own range types. + * + * Usually you'll want the server to deal with ranges. But on occasions where + * you need to work with them client-side, you may want to use @ref + * pqxx::range. (In cases where all you do is pass them along to the server + * though, it's not worth the complexity. In that case you might as well treat + * ranges as just strings.) + * + * For documentation on PostgreSQL's range types, see: + * https://www.postgresql.org/docs/current/rangetypes.html + * + * The value type must be copyable and default-constructible, and support the + * less-than (`<`) and equals (`==`) comparisons. Value initialisation must + * produce a consistent value. + */ +template class range +{ +public: + /// Create a range. + /** For each of the two bounds, pass a @ref no_bound, @ref inclusive_bound, + * or + * @ref exclusive_bound. + */ + range(range_bound lower, range_bound upper) : + m_lower{lower}, m_upper{upper} + { + if ( + lower.is_limited() and upper.is_limited() and + (*upper.value() < *lower.value())) + throw range_error{internal::concat( + "Range's lower bound (", *lower.value(), + ") is greater than its upper bound (", *upper.value(), ").")}; + } + + // TODO: constexpr and/or noexcept if underlying constructor supports it. + /// Create an empty range. + /** SQL has a separate literal to denote an empty range, but any range which + * encompasses no values is an empty range. + */ + range() : + m_lower{exclusive_bound{TYPE{}}}, + m_upper{exclusive_bound{TYPE{}}} + {} + + // TODO: constexpr and/or noexcept if underlying operators support it. + bool operator==(range const &rhs) const + { + return (this->lower_bound() == rhs.lower_bound() and + this->upper_bound() == rhs.upper_bound()) or + (this->empty() and rhs.empty()); + } + + // TODO: constexpr and/or noexcept if underlying operator supports it. 
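To make the bound semantics above concrete, a small sketch using only what this header declares:

```cxx
#include <cassert>
#include <pqxx/range>

int main()
{
  pqxx::range_bound<int> open{pqxx::no_bound{}};
  pqxx::range_bound<int> from_zero{pqxx::inclusive_bound<int>{0}};
  pqxx::range_bound<int> below_ten{pqxx::exclusive_bound<int>{10}};

  assert(not open.is_limited());            // unlimited bound
  assert(from_zero.extends_down_to(0));     // inclusive: 0 itself is in range
  assert(below_ten.extends_up_to(9));       // exclusive: 9 is in range...
  assert(not below_ten.extends_up_to(10));  // ...but 10 is not
}
```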
+ bool operator!=(range const &rhs) const { return !(*this == rhs); } + + range(range const &) = default; + range(range &&) = default; + range &operator=(range const &) = default; + range &operator=(range &&) = default; + + // TODO: constexpr and/or noexcept if underlying operator supports it. + /// Is this range clearly empty? + /** An empty range encompasses no values. + * + * It is possible to "fool" this. For example, if your range is of an + * integer type and has exclusive bounds of 0 and 1, it encompasses no values + * but its `empty()` will return false. The PostgreSQL implementation, by + * contrast, will notice that it is empty. Similar things can happen for + * floating-point types, but with more subtleties and edge cases. + */ + bool empty() const + { + return (m_lower.is_exclusive() or m_upper.is_exclusive()) and + m_lower.is_limited() and m_upper.is_limited() and + not(*m_lower.value() < *m_upper.value()); + } + + // TODO: constexpr and/or noexcept if underlying functions support it. + /// Does this range encompass `value`? + bool contains(TYPE value) const + { + return m_lower.extends_down_to(value) and m_upper.extends_up_to(value); + } + + // TODO: constexpr and/or noexcept if underlying operators support it. + /// Does this range encompass all of `other`? + /** This function is not particularly smart. It does not know, for example, + * that integer ranges `[0,9]` and `[0,10)` contain the same values. + */ + bool contains(range const &other) const + { + return (*this & other) == other; + } + + [[nodiscard]] constexpr range_bound const & + lower_bound() const &noexcept + { + return m_lower; + } + [[nodiscard]] constexpr range_bound const & + upper_bound() const &noexcept + { + return m_upper; + } + + // TODO: constexpr and/or noexcept if underlying operators support it. + /// Intersection of two ranges. + /** Returns a range describing those values which are in both ranges. + */ + range operator&(range const &other) const + { + range_bound lower{no_bound{}}; + if (not this->lower_bound().is_limited()) + lower = other.lower_bound(); + else if (not other.lower_bound().is_limited()) + lower = this->lower_bound(); + else if (*this->lower_bound().value() < *other.lower_bound().value()) + lower = other.lower_bound(); + else if (*other.lower_bound().value() < *this->lower_bound().value()) + lower = this->lower_bound(); + else if (this->lower_bound().is_exclusive()) + lower = this->lower_bound(); + else + lower = other.lower_bound(); + + range_bound upper{no_bound{}}; + if (not this->upper_bound().is_limited()) + upper = other.upper_bound(); + else if (not other.upper_bound().is_limited()) + upper = this->upper_bound(); + else if (*other.upper_bound().value() < *this->upper_bound().value()) + upper = other.upper_bound(); + else if (*this->upper_bound().value() < *other.upper_bound().value()) + upper = this->upper_bound(); + else if (this->upper_bound().is_exclusive()) + upper = this->upper_bound(); + else + upper = other.upper_bound(); + + if ( + lower.is_limited() and upper.is_limited() and + (*upper.value() < *lower.value())) + return {}; + else + return {lower, upper}; + } + + /// Convert to another base type. 
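A brief client-side sketch of the range operations documented above (hypothetical values, not taken from the library's own documentation):

```cxx
#include <cassert>
#include <pqxx/range>

int main()
{
  using int_range = pqxx::range<int>;

  // [0, 10): includes 0, excludes 10.
  int_range decade{pqxx::inclusive_bound<int>{0}, pqxx::exclusive_bound<int>{10}};
  // [5, infinity).
  int_range from_five{pqxx::inclusive_bound<int>{5}, pqxx::no_bound{}};

  assert(decade.contains(0) and not decade.contains(10));
  assert(not decade.empty());

  // Intersection keeps the values present in both ranges: [5, 10).
  int_range const overlap = decade & from_five;
  assert(overlap.contains(5) and not overlap.contains(4));
  assert(decade.contains(overlap));          // [0,10) encompasses [5,10)
}
```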
+ template operator range() const + { + range_bound lower{no_bound{}}, upper{no_bound{}}; + if (lower_bound().is_inclusive()) + lower = inclusive_bound{*lower_bound().value()}; + else if (lower_bound().is_exclusive()) + lower = exclusive_bound{*lower_bound().value()}; + + if (upper_bound().is_inclusive()) + upper = inclusive_bound{*upper_bound().value()}; + else if (upper_bound().is_exclusive()) + upper = exclusive_bound{*upper_bound().value()}; + + return {lower, upper}; + } + +private: + range_bound m_lower, m_upper; +}; + + +/// String conversions for a @ref range type. +/** Conversion assumes that either your client encoding is UTF-8, or the values + * are pure ASCII. + */ +template struct string_traits> +{ + [[nodiscard]] static inline zview + to_buf(char *begin, char *end, range const &value) + { + return generic_to_buf(begin, end, value); + } + + static inline char * + into_buf(char *begin, char *end, range const &value) + { + if (value.empty()) + { + if ((end - begin) <= internal::ssize(s_empty)) + throw conversion_overrun{s_overrun.c_str()}; + char *here = begin + s_empty.copy(begin, std::size(s_empty)); + *here++ = '\0'; + return here; + } + else + { + if (end - begin < 4) + throw conversion_overrun{s_overrun.c_str()}; + char *here = begin; + *here++ = + (static_cast(value.lower_bound().is_inclusive() ? '[' : '(')); + TYPE const *lower{value.lower_bound().value()}; + // Convert bound (but go back to overwrite that trailing zero). + if (lower != nullptr) + here = string_traits::into_buf(here, end, *lower) - 1; + *here++ = ','; + TYPE const *upper{value.upper_bound().value()}; + // Convert bound (but go back to overwrite that trailing zero). + if (upper != nullptr) + here = string_traits::into_buf(here, end, *upper) - 1; + if ((end - here) < 2) + throw conversion_overrun{s_overrun.c_str()}; + *here++ = + static_cast(value.upper_bound().is_inclusive() ? ']' : ')'); + *here++ = '\0'; + return here; + } + } + + [[nodiscard]] static inline range from_string(std::string_view text) + { + if (std::size(text) < 3) + throw pqxx::conversion_error{err_bad_input(text)}; + bool left_inc{false}; + switch (text[0]) + { + case '[': left_inc = true; break; + + case '(': break; + + case 'e': + case 'E': + if ( + (std::size(text) != std::size(s_empty)) or + (text[1] != 'm' and text[1] != 'M') or + (text[2] != 'p' and text[2] != 'P') or + (text[3] != 't' and text[3] != 'T') or + (text[4] != 'y' and text[4] != 'Y')) + throw pqxx::conversion_error{err_bad_input(text)}; + return {}; + break; + + default: throw pqxx::conversion_error{err_bad_input(text)}; + } + + auto scan{internal::get_glyph_scanner(internal::encoding_group::UTF8)}; + // The field parser uses this to track which field it's parsing, and + // when not to expect a field separator. + std::size_t index{0}; + // The last field we expect to see. + static constexpr std::size_t last{1}; + // Current parsing position. We skip the opening parenthesis or bracket. + std::size_t pos{1}; + // The string may leave out either bound to indicate that it's unlimited. + std::optional lower, upper; + // We reuse the same field parser we use for composite values and arrays. + internal::parse_composite_field(index, text, pos, lower, scan, last); + internal::parse_composite_field(index, text, pos, upper, scan, last); + + // We need one more character: the closing parenthesis or bracket. 
+ if (pos != std::size(text)) + throw pqxx::conversion_error{err_bad_input(text)}; + char const closing{text[pos - 1]}; + if (closing != ')' and closing != ']') + throw pqxx::conversion_error{err_bad_input(text)}; + bool const right_inc{closing == ']'}; + + range_bound lower_bound{no_bound{}}, upper_bound{no_bound{}}; + if (lower) + { + if (left_inc) + lower_bound = inclusive_bound{*lower}; + else + lower_bound = exclusive_bound{*lower}; + } + if (upper) + { + if (right_inc) + upper_bound = inclusive_bound{*upper}; + else + upper_bound = exclusive_bound{*upper}; + } + + return {lower_bound, upper_bound}; + } + + [[nodiscard]] static inline constexpr std::size_t + size_buffer(range const &value) noexcept + { + TYPE const *lower{value.lower_bound().value()}, + *upper{value.upper_bound().value()}; + std::size_t const lsz{ + lower == nullptr ? 0 : string_traits::size_buffer(*lower) - 1}, + usz{upper == nullptr ? 0 : string_traits::size_buffer(*upper) - 1}; + + if (value.empty()) + return std::size(s_empty) + 1; + else + return 1 + lsz + 1 + usz + 2; + } + +private: + static constexpr zview s_empty{"empty"_zv}; + static constexpr auto s_overrun{"Not enough space in buffer for range."_zv}; + + /// Compose error message for invalid range input. + static std::string err_bad_input(std::string_view text) + { + return internal::concat("Invalid range input: '", text, "'"); + } +}; + + +/// A range type does not have an innate null value. +template struct nullness> : no_null> +{}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/result b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/result new file mode 100644 index 000000000..523394b72 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/result @@ -0,0 +1,16 @@ +/** pqxx::result class and support classes. + * + * pqxx::result represents the set of result rows from a database query. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/result.hxx" + +// Now include some types which depend on result, but which the user will +// expect to see defined after including this header. +#include "pqxx/internal/result_iterator.hxx" +#include "pqxx/field.hxx" +#include "pqxx/internal/result_iter.hxx" + +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/result.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/result.hxx new file mode 100644 index 000000000..6c41cc096 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/result.hxx @@ -0,0 +1,335 @@ +/* Definitions for the pqxx::result class and support classes. + * + * pqxx::result represents the set of result rows from a database query. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/result instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_RESULT +#define PQXX_H_RESULT + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include +#include + +#include "pqxx/except.hxx" +#include "pqxx/types.hxx" +#include "pqxx/util.hxx" +#include "pqxx/zview.hxx" + +#include "pqxx/internal/encodings.hxx" + + +namespace pqxx::internal +{ +// TODO: Make noexcept (but breaks ABI). 
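The string conversions above let ranges round-trip through their PostgreSQL text form. A sketch (the exact rendered text is up to the library):

```cxx
#include <cassert>
#include <string>
#include <pqxx/range>
#include <pqxx/strconv>

int main()
{
  // Parse PostgreSQL's text form of a range...
  auto const r = pqxx::from_string<pqxx::range<int>>("[0,10)");
  assert(r.contains(0) and not r.contains(10));

  // ...and render a range back to text.
  std::string const text = pqxx::to_string(r);
  assert(not text.empty());            // typically "[0,10)"
}
```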
+PQXX_LIBEXPORT void clear_result(pq::PGresult const *); +} // namespace pqxx::internal + + +namespace pqxx::internal::gate +{ +class result_connection; +class result_creation; +class result_pipeline; +class result_row; +class result_sql_cursor; +} // namespace pqxx::internal::gate + + +namespace pqxx +{ +/// Result set containing data returned by a query or command. +/** This behaves as a container (as defined by the C++ standard library) and + * provides random access const iterators to iterate over its rows. You can + * also access a row by indexing a `result R` by the row's zero-based + * number: + * + * + * for (result::size_type i=0; i < std::size(R); ++i) Process(R[i]); + * + * + * Result sets in libpqxx are lightweight, reference-counted wrapper objects + * which are relatively small and cheap to copy. Think of a result object as + * a "smart pointer" to an underlying result set. + * + * @warning The result set that a result object points to is not thread-safe. + * If you copy a result object, it still refers to the same underlying result + * set. So never copy, destroy, query, or otherwise access a result while + * another thread may be copying, destroying, querying, or otherwise accessing + * the same result set--even if it is doing so through a different result + * object! + */ +class PQXX_LIBEXPORT result +{ +public: + using size_type = result_size_type; + using difference_type = result_difference_type; + using reference = row; + using const_iterator = const_result_iterator; + using pointer = const_iterator; + using iterator = const_iterator; + using const_reverse_iterator = const_reverse_result_iterator; + using reverse_iterator = const_reverse_iterator; + + result() noexcept : + m_data{make_data_pointer()}, + m_query{}, + m_encoding{internal::encoding_group::MONOBYTE} + {} + + result(result const &rhs) noexcept = default; + result(result &&rhs) noexcept = default; + + /// Assign one result to another. + /** Copying results is cheap: it copies only smart pointers, but the actual + * data stays in the same place. + */ + result &operator=(result const &rhs) noexcept = default; + + /// Assign one result to another, invaliding the old one. + result &operator=(result &&rhs) noexcept = default; + + /** + * @name Comparisons + * + * You can compare results for equality. Beware: this is a very strict, + * dumb comparison. The smallest difference between two results (such as a + * string "Foo" versus a string "foo") will make them unequal. + */ + //@{ + /// Compare two results for equality. + [[nodiscard]] bool operator==(result const &) const noexcept; + /// Compare two results for inequality. + [[nodiscard]] bool operator!=(result const &rhs) const noexcept + { + return not operator==(rhs); + } + //@} + + /// Iterate rows, reading them directly into a tuple of "TYPE...". + /** Converts the fields to values of the given respective types. + * + * Use this only with a ranged "for" loop. The iteration produces + * std::tuple which you can "unpack" to a series of `auto` + * variables. 
+ */ + template auto iter() const; + + [[nodiscard]] const_reverse_iterator rbegin() const; + [[nodiscard]] const_reverse_iterator crbegin() const; + [[nodiscard]] const_reverse_iterator rend() const; + [[nodiscard]] const_reverse_iterator crend() const; + + [[nodiscard]] const_iterator begin() const noexcept; + [[nodiscard]] const_iterator cbegin() const noexcept; + [[nodiscard]] inline const_iterator end() const noexcept; + [[nodiscard]] inline const_iterator cend() const noexcept; + + [[nodiscard]] reference front() const noexcept; + [[nodiscard]] reference back() const noexcept; + + [[nodiscard]] PQXX_PURE size_type size() const noexcept; + [[nodiscard]] PQXX_PURE bool empty() const noexcept; + [[nodiscard]] size_type capacity() const noexcept { return size(); } + + /// Exchange two `result` values in an exception-safe manner. + /** If the swap fails, the two values will be exactly as they were before. + * + * The swap is not necessarily thread-safe. + */ + void swap(result &) noexcept; + + /// Index a row by number. + /** This returns a @ref row object. Generally you should not keep the row + * around as a variable, but if you do, make sure that your variable is a + * `row`, not a `row&`. + */ + [[nodiscard]] row operator[](size_type i) const noexcept; + +#if defined(PQXX_HAVE_MULTIDIMENSIONAL_SUBSCRIPT) + // TODO: If C++23 will let us, also accept string for the column. + [[nodiscard]] field + operator[](size_type row_num, row_size_type col_num) const noexcept; +#endif + + /// Index a row by number, but check that the row number is valid. + row at(size_type) const; + + /// Index a field by row number and column number. + field at(size_type, row_size_type) const; + + /// Let go of the result's data. + /** Use this if you need to deallocate the result data earlier than you can + * destroy the `result` object itself. + * + * Multiple `result` objects can refer to the same set of underlying data. + * The underlying data will be deallocated once all `result` objects that + * refer to it are cleared or destroyed. + */ + void clear() noexcept + { + m_data.reset(); + m_query = nullptr; + } + + /** + * @name Column information + */ + //@{ + /// Number of columns in result. + [[nodiscard]] PQXX_PURE row_size_type columns() const noexcept; + + /// Number of given column (throws exception if it doesn't exist). + [[nodiscard]] row_size_type column_number(zview name) const; + + /// Name of column with this number (throws exception if it doesn't exist) + [[nodiscard]] char const *column_name(row_size_type number) const &; + + /// Return column's type, as an OID from the system catalogue. + [[nodiscard]] oid column_type(row_size_type col_num) const; + + /// Return column's type, as an OID from the system catalogue. + [[nodiscard]] oid column_type(zview col_name) const + { + return column_type(column_number(col_name)); + } + + /// What table did this column come from? + [[nodiscard]] oid column_table(row_size_type col_num) const; + + /// What table did this column come from? + [[nodiscard]] oid column_table(zview col_name) const + { + return column_table(column_number(col_name)); + } + + /// What column in its table did this column come from? + [[nodiscard]] row_size_type table_column(row_size_type col_num) const; + + /// What column in its table did this column come from? 
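For orientation, a minimal sketch of working with a `result` as described above (assumes a hypothetical table `employee(name text, salary integer)`):

```cxx
#include <iostream>
#include <string>
#include <pqxx/pqxx>

int main()
{
  pqxx::connection cx;
  pqxx::work tx{cx};

  pqxx::result const r = tx.exec("SELECT name, salary FROM employee");
  std::cout << std::size(r) << " rows, " << r.columns() << " columns\n";

  // Random-access indexing, as with a standard container.
  for (pqxx::result::size_type i = 0; i < std::size(r); ++i)
    std::cout << r[i]["name"].c_str() << " earns "
              << r[i]["salary"].as<int>() << '\n';

  // Or unpack each row into typed variables with iter<...>().
  for (auto const [name, salary] : r.iter<std::string, int>())
    std::cout << name << ": " << salary << '\n';

  tx.commit();
}
```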
+ [[nodiscard]] row_size_type table_column(zview col_name) const + { + return table_column(column_number(col_name)); + } + //@} + + /// Query that produced this result, if available (empty string otherwise) + [[nodiscard]] PQXX_PURE std::string const &query() const &noexcept; + + /// If command was an `INSERT` of 1 row, return oid of the inserted row. + /** @return Identifier of inserted row if exactly one row was inserted, or + * @ref oid_none otherwise. + */ + [[nodiscard]] PQXX_PURE oid inserted_oid() const; + + /// If command was `INSERT`, `UPDATE`, or `DELETE`: number of affected rows. + /** @return Number of affected rows if last command was `INSERT`, `UPDATE`, + * or `DELETE`; zero for all other commands. + */ + [[nodiscard]] PQXX_PURE size_type affected_rows() const; + + // C++20: Concept like std::invocable, but without specifying param types. + /// Run `func` on each row, passing the row's fields as parameters. + /** Goes through the rows from first to last. You provide a callable `func`. + * + * For each row in the `result`, `for_each` will call `func`. It converts + * the row's fields to the types of `func`'s parameters, and pass them to + * `func`. + * + * (Therefore `func` must have a _single_ signature. It can't be a generic + * lambda, or an object of a class with multiple overloaded function call + * operators. Otherwise, `for_each` will have no way to detect a parameter + * list without ambiguity.) + * + * If any of your parameter types is `std::string_view`, it refers to the + * underlying storage of this `result`. + * + * If any of your parameter types is a reference type, its argument will + * refer to a temporary value which only lives for the duration of that + * single invocation to `func`. If the reference is an lvalue reference, it + * must be `const`. + * + * For example, this queries employee names and salaries from the database + * and prints how much each would like to earn instead: + * ```cxx + * tx.exec("SELECT name, salary FROM employee").for_each( + * [](std::string_view name, float salary){ + * std::cout << name << " would like " << salary * 2 << ".\n"; + * }) + * ``` + * + * If `func` throws an exception, processing stops at that point and + * propagates the exception. + * + * @throws usage_error if `func`'s number of parameters does not match the + * number of columns in this result. + */ + template inline void for_each(CALLABLE &&func) const; + +private: + using data_pointer = std::shared_ptr; + + /// Underlying libpq result set. + data_pointer m_data; + + /// Factory for data_pointer. + static data_pointer + make_data_pointer(internal::pq::PGresult const *res = nullptr) noexcept + { + return {res, internal::clear_result}; + } + + friend class pqxx::internal::gate::result_pipeline; + PQXX_PURE std::shared_ptr query_ptr() const noexcept + { + return m_query; + } + + /// Query string. + std::shared_ptr m_query; + + internal::encoding_group m_encoding; + + static std::string const s_empty_string; + + friend class pqxx::field; + // TODO: noexcept. Breaks ABI. + PQXX_PURE char const *get_value(size_type row, row_size_type col) const; + // TODO: noexcept. Breaks ABI. 
+ PQXX_PURE bool get_is_null(size_type row, row_size_type col) const; + PQXX_PURE + field_size_type get_length(size_type, row_size_type) const noexcept; + + friend class pqxx::internal::gate::result_creation; + result( + internal::pq::PGresult *rhs, std::shared_ptr query, + internal::encoding_group enc); + + PQXX_PRIVATE void check_status(std::string_view desc = ""sv) const; + + friend class pqxx::internal::gate::result_connection; + friend class pqxx::internal::gate::result_row; + bool operator!() const noexcept { return m_data.get() == nullptr; } + operator bool() const noexcept { return m_data.get() != nullptr; } + + [[noreturn]] PQXX_PRIVATE void + throw_sql_error(std::string const &Err, std::string const &Query) const; + PQXX_PRIVATE PQXX_PURE int errorposition() const; + PQXX_PRIVATE std::string status_error() const; + + friend class pqxx::internal::gate::result_sql_cursor; + PQXX_PURE char const *cmd_status() const noexcept; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/robusttransaction b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/robusttransaction new file mode 100644 index 000000000..04b71d7cc --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/robusttransaction @@ -0,0 +1,8 @@ +/** pqxx::robusttransaction class. + * + * pqxx::robusttransaction is a slower but safer transaction class. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/robusttransaction.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/robusttransaction.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/robusttransaction.hxx new file mode 100644 index 000000000..faf6dbf5e --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/robusttransaction.hxx @@ -0,0 +1,120 @@ +/* Definition of the pqxx::robusttransaction class. + * + * pqxx::robusttransaction is a slower but safer transaction class. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/robusttransaction instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ROBUSTTRANSACTION +#define PQXX_H_ROBUSTTRANSACTION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/dbtransaction.hxx" + +namespace pqxx::internal +{ +/// Helper base class for the @ref robusttransaction class template. +class PQXX_LIBEXPORT PQXX_NOVTABLE basic_robusttransaction + : public dbtransaction +{ +public: + virtual ~basic_robusttransaction() override = 0; + +protected: + basic_robusttransaction( + connection &c, zview begin_command, std::string_view tname); + basic_robusttransaction(connection &c, zview begin_command); + +private: + using IDType = unsigned long; + + std::string m_conn_string; + std::string m_xid; + int m_backendpid = -1; + + void init(zview begin_command); + + // @warning This function will become `final`. + virtual void do_commit() override; +}; +} // namespace pqxx::internal + + +namespace pqxx +{ +/** + * @ingroup transactions + * + * @{ + */ + +/// Slightly slower, better-fortified version of transaction. +/** Requires PostgreSQL 10 or better. 
+ * + * robusttransaction is similar to transaction, but spends more time and effort + * to deal with the hopefully rare case that the connection to the backend is + * lost just while it's trying to commit. In such cases, the client does not + * know whether the backend (on the other side of the broken connection) + * managed to commit the transaction. + * + * When this happens, robusttransaction tries to reconnect to the database and + * figure out what happened. + * + * This service level was made optional since you may not want to pay the + * overhead where it is not necessary. Certainly the use of this class makes + * no sense for local connections, or for transactions that read the database + * but never modify it, or for noncritical database manipulations. + * + * Besides being slower, it's also more complex. Which means that in practice + * a robusttransaction could actually fail more instead of less often than a + * normal transaction. What robusttransaction tries to achieve is to give you + * certainty, not just be more successful per se. + */ +template +class robusttransaction final : public internal::basic_robusttransaction +{ +public: + /** Create robusttransaction of given name. + * @param c Connection inside which this robusttransaction should live. + * @param tname optional human-readable name for this transaction. + */ + robusttransaction(connection &c, std::string_view tname) : + internal::basic_robusttransaction{ + c, pqxx::internal::begin_cmd, + tname} + {} + + /** Create robusttransaction of given name. + * @param c Connection inside which this robusttransaction should live. + * @param tname optional human-readable name for this transaction. + */ + robusttransaction(connection &c, std::string &&tname) : + internal::basic_robusttransaction{ + c, pqxx::internal::begin_cmd, + std::move(tname)} + {} + + /** Create robusttransaction of given name. + * @param c Connection inside which this robusttransaction should live. + */ + explicit robusttransaction(connection &c) : + internal::basic_robusttransaction{ + c, pqxx::internal::begin_cmd} + {} + + virtual ~robusttransaction() noexcept override { close(); } +}; + +/** + * @} + */ +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/row b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/row new file mode 100644 index 000000000..62a950ac8 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/row @@ -0,0 +1,11 @@ +/** pqxx::row class. + * + * pqxx::row refers to a row in a result. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/result.hxx" +#include "pqxx/row.hxx" + +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/row.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/row.hxx new file mode 100644 index 000000000..5be5132e3 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/row.hxx @@ -0,0 +1,561 @@ +/* Definitions for the pqxx::result class and support classes. + * + * pqxx::result represents the set of result rows from a database query. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/result instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. 
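A usage sketch for the class described above (hypothetical `account` table; apart from the extra commit-time bookkeeping it behaves like a normal transaction):

```cxx
#include <pqxx/pqxx>

int main()
{
  pqxx::connection cx;

  // Pays extra overhead to find out what actually happened if the
  // connection drops exactly while committing.
  pqxx::robusttransaction<> tx{cx, "transfer"};
  tx.exec0("UPDATE account SET balance = balance - 100 WHERE id = 1");
  tx.exec0("UPDATE account SET balance = balance + 100 WHERE id = 2");
  tx.commit();
}
```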
+ */ +#ifndef PQXX_H_ROW +#define PQXX_H_ROW + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/except.hxx" +#include "pqxx/field.hxx" +#include "pqxx/result.hxx" + +#include "pqxx/internal/concat.hxx" + +namespace pqxx::internal +{ +template class result_iter; +} // namespace pqxx::internal + + +namespace pqxx +{ +/// Reference to one row in a result. +/** A row represents one row (also called a row) in a query result set. + * It also acts as a container mapping column numbers or names to field + * values (see below): + * + * ```cxx + * cout << row["date"].c_str() << ": " << row["name"].c_str() << endl; + * ``` + * + * The row itself acts like a (non-modifyable) container, complete with its + * own const_iterator and const_reverse_iterator. + */ +class PQXX_LIBEXPORT row +{ +public: + using size_type = row_size_type; + using difference_type = row_difference_type; + using const_iterator = const_row_iterator; + using iterator = const_iterator; + using reference = field; + using pointer = const_row_iterator; + using const_reverse_iterator = const_reverse_row_iterator; + using reverse_iterator = const_reverse_iterator; + + row() noexcept = default; + row(row &&) noexcept = default; + row(row const &) noexcept = default; + row &operator=(row const &) noexcept = default; + row &operator=(row &&) noexcept = default; + + /** + * @name Comparison + */ + //@{ + [[nodiscard]] PQXX_PURE bool operator==(row const &) const noexcept; + [[nodiscard]] bool operator!=(row const &rhs) const noexcept + { + return not operator==(rhs); + } + //@} + + [[nodiscard]] const_iterator begin() const noexcept; + [[nodiscard]] const_iterator cbegin() const noexcept; + [[nodiscard]] const_iterator end() const noexcept; + [[nodiscard]] const_iterator cend() const noexcept; + + /** + * @name Field access + */ + //@{ + [[nodiscard]] reference front() const noexcept; + [[nodiscard]] reference back() const noexcept; + + // TODO: noexcept. Breaks ABI. + [[nodiscard]] const_reverse_row_iterator rbegin() const; + // TODO: noexcept. Breaks ABI. + [[nodiscard]] const_reverse_row_iterator crbegin() const; + // TODO: noexcept. Breaks ABI. + [[nodiscard]] const_reverse_row_iterator rend() const; + // TODO: noexcept. Breaks ABI. + [[nodiscard]] const_reverse_row_iterator crend() const; + + [[nodiscard]] reference operator[](size_type) const noexcept; + /** Address field by name. + * @warning This is much slower than indexing by number, or iterating. + */ + [[nodiscard]] reference operator[](zview col_name) const; + + reference at(size_type) const; + /** Address field by name. + * @warning This is much slower than indexing by number, or iterating. + */ + reference at(zview col_name) const; + + [[nodiscard]] constexpr size_type size() const noexcept + { + return m_end - m_begin; + } + + [[deprecated("Swap iterators, not rows.")]] void swap(row &) noexcept; + + /// Row number, assuming this is a real row and not end()/rend(). + [[nodiscard]] constexpr result::size_type rownumber() const noexcept + { + return m_index; + } + + /** + * @name Column information + */ + //@{ + /// Number of given column (throws exception if it doesn't exist). + [[nodiscard]] size_type column_number(zview col_name) const; + + /// Return a column's type. + [[nodiscard]] oid column_type(size_type) const; + + /// Return a column's type. + [[nodiscard]] oid column_type(zview col_name) const + { + return column_type(column_number(col_name)); + } + + /// What table did this column come from? 
+ [[nodiscard]] oid column_table(size_type col_num) const; + + /// What table did this column come from? + [[nodiscard]] oid column_table(zview col_name) const + { + return column_table(column_number(col_name)); + } + + /// What column number in its table did this result column come from? + /** A meaningful answer can be given only if the column in question comes + * directly from a column in a table. If the column is computed in any + * other way, a logic_error will be thrown. + * + * @param col_num a zero-based column number in this result set + * @return a zero-based column number in originating table + */ + [[nodiscard]] size_type table_column(size_type) const; + + /// What column number in its table did this result column come from? + [[nodiscard]] size_type table_column(zview col_name) const + { + return table_column(column_number(col_name)); + } + //@} + + [[nodiscard]] constexpr result::size_type num() const noexcept + { + return rownumber(); + } + + /** Produce a slice of this row, containing the given range of columns. + * + * @deprecated I haven't heard of anyone caring about row slicing at all in + * at least the last 15 years. Yet it adds complexity, so unless anyone + * files a bug explaining why they really need this feature, I'm going to + * remove it. Even if they do, the feature may need an update. + * + * The slice runs from the range's starting column to the range's end + * column, exclusive. It looks just like a normal result row, except + * slices can be empty. + */ + [[deprecated("Row slicing is going away. File a bug if you need it.")]] row + slice(size_type sbegin, size_type send) const; + + /// Is this a row without fields? Can only happen to a slice. + [[nodiscard, deprecated("Row slicing is going away.")]] PQXX_PURE bool + empty() const noexcept; + + /// Extract entire row's values into a tuple. + /** Converts to the types of the tuple's respective fields. + */ + template void to(Tuple &t) const + { + check_size(std::tuple_size_v); + convert(t); + } + + template std::tuple as() const + { + check_size(sizeof...(TYPE)); + using seq = std::make_index_sequence; + return get_tuple>(seq{}); + } + +protected: + friend class const_row_iterator; + friend class result; + row(result const &r, result_size_type index, size_type cols) noexcept; + + /// Throw @ref usage_error if row size is not `expected`. + void check_size(size_type expected) const + { + if (size() != expected) + throw usage_error{internal::concat( + "Tried to extract ", expected, " field(s) from a row of ", size(), + ".")}; + } + + /// Convert to a given tuple of values, don't check sizes. + /** We need this for cases where we have a full tuple of field types, but + * not a parameter pack. + */ + template TUPLE as_tuple() const + { + using seq = std::make_index_sequence>; + return get_tuple(seq{}); + } + + template friend class pqxx::internal::result_iter; + /// Convert entire row to tuple fields, without checking row size. + template void convert(Tuple &t) const + { + extract_fields(t, std::make_index_sequence>{}); + } + + friend class field; + + /// Result set of which this is one row. + result m_result; + + /// Row number. + /** + * You'd expect this to be unsigned, but due to the way reverse iterators + * are related to regular iterators, it must be allowed to underflow to -1. + */ + result::size_type m_index = 0; + + // TODO: Remove m_begin and (if possible) m_end when we remove slice(). + /// First column in slice. This row ignores lower-numbered columns. 
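A short sketch of field access and whole-row extraction as documented above:

```cxx
#include <iostream>
#include <string>
#include <pqxx/pqxx>

int main()
{
  pqxx::connection cx;
  pqxx::work tx{cx};

  pqxx::row const r = tx.exec1("SELECT 'libpqxx'::text AS label, 42 AS answer");

  // Access fields by name (slower) or by position.
  std::cout << r["label"].c_str() << " / " << r[1].as<int>() << '\n';

  // Convert the whole row into typed values at once.
  auto const [label, answer] = r.as<std::string, int>();
  std::cout << label << " -> " << answer << '\n';
}
```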
+ size_type m_begin = 0; + /// End column in slice. This row only sees lower-numbered columns. + size_type m_end = 0; + +private: + template + void extract_fields(Tuple &t, std::index_sequence) const + { + (extract_value(t), ...); + } + + template + void extract_value(Tuple &t) const; + + /// Convert row's values as a new tuple. + template + auto get_tuple(std::index_sequence) const + { + return std::make_tuple(get_field()...); + } + + /// Extract and convert a field. + template auto get_field() const + { + return (*this)[index].as>(); + } +}; + + +/// Iterator for fields in a row. Use as row::const_iterator. +class PQXX_LIBEXPORT const_row_iterator : public field +{ +public: + using iterator_category = std::random_access_iterator_tag; + using value_type = field const; + using pointer = field const *; + using size_type = row_size_type; + using difference_type = row_difference_type; + using reference = field; + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + const_row_iterator() = default; +#include "pqxx/internal/ignore-deprecated-post.hxx" + const_row_iterator(row const &t, row_size_type c) noexcept : + field{t.m_result, t.m_index, c} + {} + const_row_iterator(field const &F) noexcept : field{F} {} + const_row_iterator(const_row_iterator const &) noexcept = default; + const_row_iterator(const_row_iterator &&) noexcept = default; + + /** + * @name Dereferencing operators + */ + //@{ + [[nodiscard]] constexpr pointer operator->() const noexcept { return this; } + [[nodiscard]] reference operator*() const noexcept { return {*this}; } + //@} + + /** + * @name Manipulations + */ + //@{ + const_row_iterator &operator=(const_row_iterator const &) noexcept = default; + const_row_iterator &operator=(const_row_iterator &&) noexcept = default; + + // TODO: noexcept. Breaks ABI. + const_row_iterator operator++(int); + const_row_iterator &operator++() noexcept + { + ++m_col; + return *this; + } + // TODO: noexcept. Breaks ABI. 
+ const_row_iterator operator--(int); + const_row_iterator &operator--() noexcept + { + --m_col; + return *this; + } + + const_row_iterator &operator+=(difference_type i) noexcept + { + m_col = size_type(difference_type(m_col) + i); + return *this; + } + const_row_iterator &operator-=(difference_type i) noexcept + { + m_col = size_type(difference_type(m_col) - i); + return *this; + } + //@} + + /** + * @name Comparisons + */ + //@{ + [[nodiscard]] constexpr bool + operator==(const_row_iterator const &i) const noexcept + { + return col() == i.col(); + } + [[nodiscard]] constexpr bool + operator!=(const_row_iterator const &i) const noexcept + { + return col() != i.col(); + } + [[nodiscard]] constexpr bool + operator<(const_row_iterator const &i) const noexcept + { + return col() < i.col(); + } + [[nodiscard]] constexpr bool + operator<=(const_row_iterator const &i) const noexcept + { + return col() <= i.col(); + } + [[nodiscard]] constexpr bool + operator>(const_row_iterator const &i) const noexcept + { + return col() > i.col(); + } + [[nodiscard]] constexpr bool + operator>=(const_row_iterator const &i) const noexcept + { + return col() >= i.col(); + } + //@} + + /** + * @name Arithmetic operators + */ + //@{ + [[nodiscard]] inline const_row_iterator + operator+(difference_type) const noexcept; + + friend const_row_iterator + operator+(difference_type, const_row_iterator const &) noexcept; + + [[nodiscard]] inline const_row_iterator + operator-(difference_type) const noexcept; + [[nodiscard]] inline difference_type + operator-(const_row_iterator const &) const noexcept; + //@} +}; + + +/// Reverse iterator for a row. Use as row::const_reverse_iterator. +class PQXX_LIBEXPORT const_reverse_row_iterator : private const_row_iterator +{ +public: + using super = const_row_iterator; + using iterator_type = const_row_iterator; + using iterator_type::difference_type; + using iterator_type::iterator_category; + using iterator_type::pointer; + using value_type = iterator_type::value_type; + using reference = iterator_type::reference; + + const_reverse_row_iterator() noexcept = default; + const_reverse_row_iterator(const_reverse_row_iterator const &) noexcept = + default; + const_reverse_row_iterator(const_reverse_row_iterator &&) noexcept = default; + + explicit const_reverse_row_iterator(super const &rhs) noexcept : + const_row_iterator{rhs} + { + super::operator--(); + } + + [[nodiscard]] PQXX_PURE iterator_type base() const noexcept; + + /** + * @name Dereferencing operators + */ + //@{ + using iterator_type::operator->; + using iterator_type::operator*; + //@} + + /** + * @name Manipulations + */ + //@{ + const_reverse_row_iterator & + operator=(const_reverse_row_iterator const &r) noexcept + { + iterator_type::operator=(r); + return *this; + } + const_reverse_row_iterator operator++() noexcept + { + iterator_type::operator--(); + return *this; + } + // TODO: noexcept. Breaks ABI. + const_reverse_row_iterator operator++(int); + const_reverse_row_iterator &operator--() noexcept + { + iterator_type::operator++(); + return *this; + } + const_reverse_row_iterator operator--(int); + // TODO: noexcept. Breaks ABI. 
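The iterators above make a row's fields directly iterable; a small sketch:

```cxx
#include <iostream>
#include <pqxx/pqxx>

int main()
{
  pqxx::connection cx;
  pqxx::work tx{cx};
  pqxx::row const r = tx.exec1("SELECT 1 AS a, 2 AS b, 3 AS c");

  // Forward iteration over the fields of the row.
  for (auto const &f : r) std::cout << f.name() << '=' << f.c_str() << ' ';
  std::cout << '\n';

  // Reverse iteration via row::const_reverse_iterator.
  for (auto it = r.rbegin(); it != r.rend(); ++it)
    std::cout << it->name() << '=' << it->c_str() << ' ';
  std::cout << '\n';
}
```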
+ const_reverse_row_iterator &operator+=(difference_type i) noexcept + { + iterator_type::operator-=(i); + return *this; + } + const_reverse_row_iterator &operator-=(difference_type i) noexcept + { + iterator_type::operator+=(i); + return *this; + } + //@} + + /** + * @name Arithmetic operators + */ + //@{ + [[nodiscard]] const_reverse_row_iterator + operator+(difference_type i) const noexcept + { + return const_reverse_row_iterator{base() - i}; + } + [[nodiscard]] const_reverse_row_iterator + operator-(difference_type i) noexcept + { + return const_reverse_row_iterator{base() + i}; + } + [[nodiscard]] difference_type + operator-(const_reverse_row_iterator const &rhs) const noexcept + { + return rhs.const_row_iterator::operator-(*this); + } + //@} + + /** + * @name Comparisons + */ + //@{ + [[nodiscard]] bool + operator==(const_reverse_row_iterator const &rhs) const noexcept + { + return iterator_type::operator==(rhs); + } + [[nodiscard]] bool + operator!=(const_reverse_row_iterator const &rhs) const noexcept + { + return !operator==(rhs); + } + + [[nodiscard]] constexpr bool + operator<(const_reverse_row_iterator const &rhs) const noexcept + { + return iterator_type::operator>(rhs); + } + [[nodiscard]] constexpr bool + operator<=(const_reverse_row_iterator const &rhs) const noexcept + { + return iterator_type::operator>=(rhs); + } + [[nodiscard]] constexpr bool + operator>(const_reverse_row_iterator const &rhs) const noexcept + { + return iterator_type::operator<(rhs); + } + [[nodiscard]] constexpr bool + operator>=(const_reverse_row_iterator const &rhs) const noexcept + { + return iterator_type::operator<=(rhs); + } + //@} +}; + + +const_row_iterator +const_row_iterator::operator+(difference_type o) const noexcept +{ + // TODO:: More direct route to home().columns()? + return { + row{home(), idx(), home().columns()}, + size_type(difference_type(col()) + o)}; +} + +inline const_row_iterator operator+( + const_row_iterator::difference_type o, const_row_iterator const &i) noexcept +{ + return i + o; +} + +inline const_row_iterator +const_row_iterator::operator-(difference_type o) const noexcept +{ + // TODO:: More direct route to home().columns()? + return { + row{home(), idx(), home().columns()}, + size_type(difference_type(col()) - o)}; +} + +inline const_row_iterator::difference_type +const_row_iterator::operator-(const_row_iterator const &i) const noexcept +{ + return difference_type(num() - i.num()); +} + + +template +inline void row::extract_value(Tuple &t) const +{ + using field_type = strip_t(t))>; + field const f{m_result, m_index, index}; + std::get(t) = from_string(f); +} +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/separated_list b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/separated_list new file mode 100644 index 000000000..1bdf51c6a --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/separated_list @@ -0,0 +1,6 @@ +/** Helper similar to Python's @c str.join(). + */ +// Actual definitions in .hxx file so editors and such recognize file type. 
+#include "pqxx/internal/header-pre.hxx" +#include "pqxx/separated_list.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/separated_list.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/separated_list.hxx new file mode 100644 index 000000000..d4230ea08 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/separated_list.hxx @@ -0,0 +1,142 @@ +/* Helper similar to Python's `str.join()`. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/separated_list instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_SEPARATED_LIST +#define PQXX_H_SEPARATED_LIST + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include + +#include "pqxx/strconv.hxx" + +// C++20: Simplify using std::ranges::range. +// C++20: Optimise buffer allocation using random_access_range/iterator. +namespace pqxx +{ +/** + * @defgroup utility Utility functions + */ +//@{ + +/// Represent sequence of values as a string, joined by a given separator. +/** + * Use this to turn e.g. the numbers 1, 2, and 3 into a string "1, 2, 3". + * + * @param sep separator string (to be placed between items) + * @param begin beginning of items sequence + * @param end end of items sequence + * @param access functor defining how to dereference sequence elements + */ +template +[[nodiscard]] inline std::string +separated_list(std::string_view sep, ITER begin, ITER end, ACCESS access) +{ + if (end == begin) + return {}; + auto next{begin}; + ++next; + if (next == end) + return to_string(access(begin)); + + // From here on, we've got at least 2 elements -- meaning that we need sep. + using elt_type = strip_t; + using traits = string_traits; + + std::size_t budget{0}; + for (ITER cnt{begin}; cnt != end; ++cnt) + budget += traits::size_buffer(access(cnt)); + budget += + static_cast(std::distance(begin, end)) * std::size(sep); + + std::string result; + result.resize(budget); + + char *const data{result.data()}; + char *here{data}; + char *stop{data + budget}; + here = traits::into_buf(here, stop, access(begin)) - 1; + for (++begin; begin != end; ++begin) + { + here += sep.copy(here, std::size(sep)); + here = traits::into_buf(here, stop, access(begin)) - 1; + } + result.resize(static_cast(here - data)); + return result; +} + + +/// Render sequence as a string, using given separator between items. +template +[[nodiscard]] inline std::string +separated_list(std::string_view sep, ITER begin, ITER end) +{ + return separated_list(sep, begin, end, [](ITER i) { return *i; }); +} + + +/// Render items in a container as a string, using given separator. +template +[[nodiscard]] inline auto +separated_list(std::string_view sep, CONTAINER const &c) + /* + Always std::string; necessary because SFINAE doesn't work with the + contents of function bodies, so the check for iterability has to be in + the signature. + */ + -> typename std::enable_if< + (not std::is_void::value and + not std::is_void::value), + std::string>::type +{ + return separated_list(sep, std::begin(c), std::end(c)); +} + + +/// Render items in a tuple as a string, using given separator. 
+template< + typename TUPLE, std::size_t INDEX = 0, typename ACCESS, + typename std::enable_if< + (INDEX == std::tuple_size::value - 1), int>::type = 0> +[[nodiscard]] inline std::string separated_list( + std::string_view /* sep */, TUPLE const &t, ACCESS const &access) +{ + return to_string(access(&std::get(t))); +} + +template< + typename TUPLE, std::size_t INDEX = 0, typename ACCESS, + typename std::enable_if< + (INDEX < std::tuple_size::value - 1), int>::type = 0> +[[nodiscard]] inline std::string +separated_list(std::string_view sep, TUPLE const &t, ACCESS const &access) +{ + std::string out{to_string(access(&std::get(t)))}; + out.append(sep); + out.append(separated_list(sep, t, access)); + return out; +} + +template< + typename TUPLE, std::size_t INDEX = 0, + typename std::enable_if< + (INDEX <= std::tuple_size::value), int>::type = 0> +[[nodiscard]] inline std::string +separated_list(std::string_view sep, TUPLE const &t) +{ + // TODO: Optimise allocation. + return separated_list(sep, t, [](TUPLE const &tup) { return *tup; }); +} +//@} +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/strconv b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/strconv new file mode 100644 index 000000000..aa2c40ed5 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/strconv @@ -0,0 +1,6 @@ +/** String conversion definitions. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/strconv.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/strconv.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/strconv.hxx new file mode 100644 index 000000000..863711228 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/strconv.hxx @@ -0,0 +1,468 @@ +/* String conversion definitions. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/stringconv instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_STRCONV +#define PQXX_H_STRCONV + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include +#include +#include +#include + +#if __has_include() +# include +#endif + +#if defined(PQXX_HAVE_RANGES) && __has_include() +# include +#endif + +#include "pqxx/except.hxx" +#include "pqxx/util.hxx" +#include "pqxx/zview.hxx" + + +namespace pqxx::internal +{ +/// Attempt to demangle @c std::type_info::name() to something human-readable. +PQXX_LIBEXPORT std::string demangle_type_name(char const[]); +} // namespace pqxx::internal + + +namespace pqxx +{ +/** + * @defgroup stringconversion String conversion + * + * The PostgreSQL server accepts and represents data in string form. It has + * its own formats for various data types. The string conversions define how + * various C++ types translate to and from their respective PostgreSQL text + * representations. + * + * Each conversion is defined by a specialisations of @c string_traits. 
It + * gets complicated if you want top performance, but until you do, all you + * really need to care about when converting values between C++ in-memory + * representations such as @c int and the postgres string representations is + * the @c pqxx::to_string and @c pqxx::from_string functions. + * + * If you need to convert a type which is not supported out of the box, you'll + * need to define your own specialisations for these templates, similar to the + * ones defined here and in `pqxx/conversions.hxx`. Any conversion code which + * "sees" your specialisation will now support your conversion. In particular, + * you'll be able to read result fields into a variable of the new type. + * + * There is a macro to help you define conversions for individual enumeration + * types. The conversion will represent enumeration values as numeric strings. + */ +//@{ + +/// A human-readable name for a type, used in error messages and such. +/** Actually this may not always be very user-friendly. It uses + * @c std::type_info::name(). On gcc-like compilers we try to demangle its + * output. Visual Studio produces human-friendly names out of the box. + * + * This variable is not inline. Inlining it gives rise to "memory leak" + * warnings from asan, the address sanitizer, possibly from use of + * @c std::type_info::name. + */ +template +std::string const type_name{internal::demangle_type_name(typeid(TYPE).name())}; + + +/// Traits describing a type's "null value," if any. +/** Some C++ types have a special value or state which correspond directly to + * SQL's NULL. + * + * The @c nullness traits describe whether it exists, and whether a particular + * value is null. + */ +template struct nullness +{ + /// Does this type have a null value? + static bool has_null; + + /// Is this type always null? + static bool always_null; + + /// Is @c value a null? + static bool is_null(TYPE const &value); + + /// Return a null value. + /** Don't use this in generic code to compare a value and see whether it is + * null. Some types may have multiple null values which do not compare as + * equal, or may define a null value which is not equal to anything including + * itself, like in SQL. + */ + [[nodiscard]] static TYPE null(); +}; + + +/// Nullness traits describing a type which does not have a null value. +template struct no_null +{ + /// Does @c TYPE have a "built-in null value"? + /** For example, a pointer can equal @c nullptr, which makes a very natural + * representation of an SQL null value. For such types, the code sometimes + * needs to make special allowances. + * + * for most types, such as @c int or @c std::string, there is no built-in + * null. If you want to represent an SQL null value for such a type, you + * would have to wrap it in something that does have a null value. For + * example, you could use @c std::optional for "either an @c int or a + * null value." + */ + static constexpr bool has_null = false; + + /// Are all values of this type null? + /** There are a few special C++ types which are always null - mainly + * @c std::nullptr_t. + */ + static constexpr bool always_null = false; + + /// Does a given value correspond to an SQL null value? + /** Most C++ types, such as @c int or @c std::string, have no inherent null + * value. But some types such as C-style string pointers do have a natural + * equivalent to an SQL null. + */ + [[nodiscard]] static constexpr bool is_null(TYPE const &) noexcept + { + return false; + } +}; + + +/// Traits class for use in string conversions. 
+/** Specialize this template for a type for which you wish to add to_string + * and from_string support. + * + * String conversions are not meant to work for nulls. Check for null before + * converting a value of @c TYPE to a string, or vice versa. + */ +template struct string_traits +{ + /// Return a @c string_view representing value, plus terminating zero. + /** Produces a @c string_view containing the PostgreSQL string representation + * for @c value. + * + * Uses the space from @c begin to @c end as a buffer, if needed. The + * returned string may lie somewhere in that buffer, or it may be a + * compile-time constant, or it may be null if value was a null value. Even + * if the string is stored in the buffer, its @c begin() may or may not be + * the same as @c begin. + * + * The @c string_view is guaranteed to be valid as long as the buffer from + * @c begin to @c end remains accessible and unmodified. + * + * @throws pqxx::conversion_overrun if the provided buffer space may not be + * enough. For maximum performance, this is a conservative estimate. It may + * complain about a buffer which is actually large enough for your value, if + * an exact check gets too expensive. + */ + [[nodiscard]] static inline zview + to_buf(char *begin, char *end, TYPE const &value); + + /// Write value's string representation into buffer at @c begin. + /** Assumes that value is non-null. + * + * Writes value's string representation into the buffer, starting exactly at + * @c begin, and ensuring a trailing zero. Returns the address just beyond + * the trailing zero, so the caller could use it as the @c begin for another + * call to @c into_buf writing a next value. + */ + static inline char *into_buf(char *begin, char *end, TYPE const &value); + + /// Parse a string representation of a @c TYPE value. + /** Throws @c conversion_error if @c value does not meet the expected format + * for a value of this type. + */ + [[nodiscard]] static inline TYPE from_string(std::string_view text); + + // C++20: Can we make these all constexpr? + /// Estimate how much buffer space is needed to represent value. + /** The estimate may be a little pessimistic, if it saves time. + * + * The estimate includes the terminating zero. + */ + [[nodiscard]] static inline std::size_t + size_buffer(TYPE const &value) noexcept; +}; + + +/// Nullness: Enums do not have an inherent null value. +template +struct nullness>> : no_null +{}; +} // namespace pqxx + + +namespace pqxx::internal +{ +/// Helper class for defining enum conversions. +/** The conversion will convert enum values to numeric strings, and vice versa. + * + * To define a string conversion for an enum type, derive a @c string_traits + * specialisation for the enum from this struct. + * + * There's usually an easier way though: the @c PQXX_DECLARE_ENUM_CONVERSION + * macro. Use @c enum_traits manually only if you need to customise your + * traits type in more detail. 
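+ *
+ * As a rough, hedged sketch (the `colour` enum here is hypothetical, not part
+ * of libpqxx), the manual route amounts to the same two specialisations that
+ * the macro would generate:
+ *
+ * ```cxx
+ * enum class colour { red, green, blue };
+ *
+ * namespace pqxx
+ * {
+ * template<> struct string_traits<colour> : internal::enum_traits<colour> {};
+ * template<> inline std::string const type_name<colour>{"colour"};
+ * } // namespace pqxx
+ * ```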
+ */ +template struct enum_traits +{ + using impl_type = std::underlying_type_t; + using impl_traits = string_traits; + + [[nodiscard]] static constexpr zview + to_buf(char *begin, char *end, ENUM const &value) + { + return impl_traits::to_buf(begin, end, to_underlying(value)); + } + + static constexpr char *into_buf(char *begin, char *end, ENUM const &value) + { + return impl_traits::into_buf(begin, end, to_underlying(value)); + } + + [[nodiscard]] static ENUM from_string(std::string_view text) + { + return static_cast(impl_traits::from_string(text)); + } + + [[nodiscard]] static std::size_t size_buffer(ENUM const &value) noexcept + { + return impl_traits::size_buffer(to_underlying(value)); + } + +private: + // C++23: Replace with std::to_underlying. + static constexpr impl_type to_underlying(ENUM const &value) noexcept + { + return static_cast(value); + } +}; +} // namespace pqxx::internal + + +/// Macro: Define a string conversion for an enum type. +/** This specialises the @c pqxx::string_traits template, so use it in the + * @c ::pqxx namespace. + * + * For example: + * + * #include + * #include + * enum X { xa, xb }; + * namespace pqxx { PQXX_DECLARE_ENUM_CONVERSION(x); } + * int main() { std::cout << pqxx::to_string(xa) << std::endl; } + */ +#define PQXX_DECLARE_ENUM_CONVERSION(ENUM) \ + template<> struct string_traits : pqxx::internal::enum_traits \ + {}; \ + template<> inline std::string const type_name { #ENUM } + + +namespace pqxx +{ +/// Parse a value in postgres' text format as a TYPE. +/** If the form of the value found in the string does not match the expected + * type, e.g. if a decimal point is found when converting to an integer type, + * the conversion fails. Overflows (e.g. converting "9999999999" to a 16-bit + * C++ type) are also treated as errors. If in some cases this behaviour + * should be inappropriate, convert to something bigger such as @c long @c int + * first and then truncate the resulting value. + * + * Only the simplest possible conversions are supported. Fancy features like + * hexadecimal or octal, spurious signs, or exponent notation won't work. + * Whitespace is not stripped away. Only the kinds of strings that come out of + * PostgreSQL and out of to_string() can be converted. + */ +template +[[nodiscard]] inline TYPE from_string(std::string_view text) +{ + return string_traits::from_string(text); +} + + +/// "Convert" a std::string_view to a std::string_view. +/** Just returns its input. + * + * @warning Of course the result is only valid for as long as the original + * string remains valid! Never access the string referenced by the return + * value after the original has been destroyed. + */ +template<> +[[nodiscard]] inline std::string_view from_string(std::string_view text) +{ + return text; +} + + +/// Attempt to convert postgres-generated string to given built-in object. +/** This is like the single-argument form of the function, except instead of + * returning the value, it sets @c value. + * + * You may find this more convenient in that it infers the type you want from + * the argument you pass. But there are disadvantages: it requires an + * assignment operator, and it may be less efficient. + */ +template inline void from_string(std::string_view text, T &value) +{ + value = from_string(text); +} + + +/// Convert a value to a readable string that PostgreSQL will understand. +/** The conversion does no special formatting, and ignores any locale settings. 
+ * The resulting string will be human-readable and in a format suitable for use + * in SQL queries. It won't have niceties such as "thousands separators" + * though. + */ +template inline std::string to_string(TYPE const &value); + + +/// Convert multiple values to strings inside a single buffer. +/** There must be enough room for all values, or this will throw + * @c conversion_overrun. You can obtain a conservative estimate of the buffer + * space required by calling @c size_buffer() on the values. + * + * The @c std::string_view results may point into the buffer, so don't assume + * that they will remain valid after you destruct or move the buffer. + */ +template +[[nodiscard]] inline std::vector +to_buf(char *here, char const *end, TYPE... value) +{ + return {[&here, end](auto v) { + auto begin = here; + here = string_traits::into_buf(begin, end, v); + // Exclude the trailing zero out of the string_view. + auto len{static_cast(here - begin) - 1}; + return std::string_view{begin, len}; + }(value)...}; +} + +/// Convert a value to a readable string that PostgreSQL will understand. +/** This variant of to_string can sometimes save a bit of time in loops, by + * re-using a std::string for multiple conversions. + */ +template +inline void into_string(TYPE const &value, std::string &out); + + +/// Is @c value null? +template +[[nodiscard]] inline constexpr bool is_null(TYPE const &value) noexcept +{ + return nullness>::is_null(value); +} + + +/// Estimate how much buffer space is needed to represent values as a string. +/** The estimate may be a little pessimistic, if it saves time. It also + * includes room for a terminating zero after each value. + */ +template +[[nodiscard]] inline std::size_t size_buffer(TYPE const &...value) noexcept +{ + return (string_traits>::size_buffer(value) + ...); +} + + +/// Does this type translate to an SQL array? +/** Specialisations may override this to be true for container types. + * + * This may not always be a black-and-white choice. For instance, a + * @c std::string is a container, but normally it translates to an SQL string, + * not an SQL array. + */ +template inline constexpr bool is_sql_array{false}; + + +/// Can we use this type in arrays and composite types without quoting them? +/** Define this as @c true only if values of @c TYPE can never contain any + * special characters that might need escaping or confuse the parsing of array + * or composite * types, such as commas, quotes, parentheses, braces, newlines, + * and so on. + * + * When converting a value of such a type to a string in an array or a field in + * a composite type, we do not need to add quotes, nor escape any special + * characters. + * + * This is just an optimisation, so it defaults to @c false to err on the side + * of slow correctness. + */ +template inline constexpr bool is_unquoted_safe{false}; + + +/// Element separator between SQL array elements of this type. +template inline constexpr char array_separator{','}; + + +/// What's the preferred format for passing non-null parameters of this type? +/** This affects how we pass parameters of @c TYPE when calling parameterised + * statements or prepared statements. + * + * Generally we pass parameters in text format, but binary strings are the + * exception. We also pass nulls in binary format, so this function need not + * handle null values. + */ +template inline constexpr format param_format(TYPE const &) +{ + return format::text; +} + + +/// Implement @c string_traits::to_buf by calling @c into_buf. 
+/** When you specialise @c string_traits for a new type, most of the time its + * @c to_buf implementation has no special optimisation tricks and just writes + * its text into the buffer it receives from the caller, starting at the + * beginning. + * + * In that common situation, you can implement @c to_buf as just a call to + * @c generic_to_buf. It will call @c into_buf and return the right result for + * @c to_buf. + */ +template +inline zview generic_to_buf(char *begin, char *end, TYPE const &value) +{ + using traits = string_traits; + // The trailing zero does not count towards the zview's size, so subtract 1 + // from the result we get from into_buf(). + if (is_null(value)) + return {}; + else + return {begin, traits::into_buf(begin, end, value) - begin - 1}; +} + + +#if defined(PQXX_HAVE_CONCEPTS) +/// Concept: Binary string, akin to @c std::string for binary data. +/** Any type that satisfies this concept can represent an SQL BYTEA value. + * + * A @c binary has a @c begin(), @c end(), @c size(), and @data(). Each byte + * is a @c std::byte, and they must all be laid out contiguously in memory so + * we can reference them by a pointer. + */ +template +concept binary = std::ranges::contiguous_range and + std::is_same_v>, std::byte>; +#endif +//@} +} // namespace pqxx + + +#include "pqxx/internal/conversions.hxx" +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/stream_from b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/stream_from new file mode 100644 index 000000000..972762443 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/stream_from @@ -0,0 +1,8 @@ +/** pqxx::stream_from class. + * + * pqxx::stream_from enables optimized batch reads from a database table. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/stream_from.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/stream_from.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/stream_from.hxx new file mode 100644 index 000000000..ff4a93d2e --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/stream_from.hxx @@ -0,0 +1,361 @@ +/* Definition of the pqxx::stream_from class. + * + * pqxx::stream_from enables optimized batch reads from a database table. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/stream_from instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_STREAM_FROM +#define PQXX_H_STREAM_FROM + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include + +#include "pqxx/connection.hxx" +#include "pqxx/except.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/internal/encoding_group.hxx" +#include "pqxx/internal/stream_iterator.hxx" +#include "pqxx/separated_list.hxx" +#include "pqxx/transaction_focus.hxx" + + +namespace pqxx +{ +class transaction_base; + + +/// Pass this to a `stream_from` constructor to stream table contents. +/** @deprecated Use @ref stream_from::table() instead. + */ +constexpr from_table_t from_table; +/// Pass this to a `stream_from` constructor to stream query results. +/** @deprecated Use stream_from::query() instead. 
+ */ +constexpr from_query_t from_query; + + +/// Stream data from the database. +/** For larger data sets, retrieving data this way is likely to be faster than + * executing a query and then iterating and converting the rows fields. You + * will also be able to start processing before all of the data has come in. + * + * There are also downsides. Not all kinds of query will work in a stream. + * But straightforward `SELECT` and `UPDATE ... RETURNING` queries should work. + * This function makes use of @ref pqxx::stream_from, which in turn uses + * PostgreSQL's `COPY` command, so see the documentation for those to get the + * full details. + * + * There are other downsides. If there stream encounters an error, it may + * leave the entire connection in an unusable state, so you'll have to give the + * whole thing up. Finally, opening a stream puts the connection in a special + * state, so you won't be able to do many other things with the connection or + * the transaction while the stream is open. + * + * There are two ways of starting a stream: you stream either all rows in a + * table (using one of the factories, `table()` or `raw_table()`), or the + * results of a query (using the `query()` factory). + * + * Usually you'll want the `stream` convenience wrapper in + * @ref transaction_base, * so you don't need to deal with this class directly. + * + * @warning While a stream is active, you cannot execute queries, open a + * pipeline, etc. on the same transaction. A transaction can have at most one + * object of a type derived from @ref pqxx::transaction_focus active on it at a + * time. + */ +class PQXX_LIBEXPORT stream_from : transaction_focus +{ +public: + using raw_line = + std::pair>, std::size_t>; + + /// Factory: Execute query, and stream the results. + /** The query can be a SELECT query or a VALUES query; or it can be an + * UPDATE, INSERT, or DELETE with a RETURNING clause. + * + * The query is executed as part of a COPY statement, so there are additional + * restrictions on what kind of query you can use here. See the PostgreSQL + * documentation for the COPY command: + * + * https://www.postgresql.org/docs/current/sql-copy.html + */ + static stream_from query(transaction_base &tx, std::string_view q) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return {tx, from_query, q}; +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /** + * @name Streaming data from tables + * + * You can use `stream_from` to read a table's contents. This is a quick + * and easy way to read a table, but it comes with limitations. It cannot + * stream from a view, only from a table. It does not support conditions. + * And there are no guarantees about ordering. If you need any of those + * things, consider streaming from a query instead. + */ + //@{ + + /// Factory: Stream data from a pre-quoted table and columns. + /** Use this factory if you need to create multiple streams using the same + * table path and/or columns list, and you want to save a bit of work on + * composing the internal SQL statement for starting the stream. It lets you + * compose the string representations for the table path and the columns + * list, so you can compute these once and then re-use them later. + * + * @param tx The transaction within which the stream will operate. + * @param path Name or path for the table upon which the stream will + * operate. If any part of the table path may contain special + * characters or be case-sensitive, quote the path using + * pqxx::connection::quote_table(). 
+ * @param columns Columns which the stream will read. They should be + * comma-separated and, if needed, quoted. You can produce the string + * using pqxx::connection::quote_columns(). If you omit this argument, + * the stream will read all columns in the table, in schema order. + */ + static stream_from raw_table( + transaction_base &tx, std::string_view path, + std::string_view columns = ""sv); + + /// Factory: Stream data from a given table. + /** This is the convenient way to stream from a table. + */ + static stream_from table( + transaction_base &tx, table_path path, + std::initializer_list columns = {}); + //@} + + /// Execute query, and stream over the results. + /** @deprecated Use factory function @ref query instead. + */ + [[deprecated("Use query() factory instead.")]] stream_from( + transaction_base &, from_query_t, std::string_view query); + + /// Stream all rows in table, all columns. + /** @deprecated Use factories @ref table or @ref raw_table instead. + */ + [[deprecated("Use table() or raw_table() factory instead.")]] stream_from( + transaction_base &, from_table_t, std::string_view table); + + /// Stream given columns from all rows in table. + /** @deprecated Use factories @ref table or @ref raw_table instead. + */ + template + [[deprecated("Use table() or raw_table() factory instead.")]] stream_from( + transaction_base &, from_table_t, std::string_view table, + Iter columns_begin, Iter columns_end); + + /// Stream given columns from all rows in table. + /** @deprecated Use factory function @ref query instead. + */ + template + [[deprecated("Use table() or raw_table() factory instead.")]] stream_from( + transaction_base &tx, from_table_t, std::string_view table, + Columns const &columns); + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + /// @deprecated Use factories @ref table or @ref raw_table instead. + [[deprecated("Use the from_table_t overload instead.")]] stream_from( + transaction_base &tx, std::string_view table) : + stream_from{tx, from_table, table} + {} +#include "pqxx/internal/ignore-deprecated-post.hxx" + + /// @deprecated Use factories @ref table or @ref raw_table instead. + template + [[deprecated("Use the from_table_t overload instead.")]] stream_from( + transaction_base &tx, std::string_view table, Columns const &columns) : + stream_from{tx, from_table, table, columns} + {} + + /// @deprecated Use factories @ref table or @ref raw_table instead. + template + [[deprecated("Use the from_table_t overload instead.")]] stream_from( + transaction_base &, std::string_view table, Iter columns_begin, + Iter columns_end); + + ~stream_from() noexcept; + + /// May this stream still produce more data? + [[nodiscard]] constexpr operator bool() const noexcept + { + return not m_finished; + } + /// Has this stream produced all the data it is going to produce? + [[nodiscard]] constexpr bool operator!() const noexcept + { + return m_finished; + } + + /// Finish this stream. Call this before continuing to use the connection. + /** Consumes all remaining lines, and closes the stream. + * + * This may take a while if you're abandoning the stream before it's done, so + * skip it in error scenarios where you're not planning to use the connection + * again afterwards. + */ + void complete(); + + /// Read one row into a tuple. + /** Converts the row's fields into the fields making up the tuple. + * + * For a column which can contain nulls, be sure to give the corresponding + * tuple field a type which can be null. 
For example, to read a field as + * `int` when it may contain nulls, read it as `std::optional`. + * Using `std::shared_ptr` or `std::unique_ptr` will also work. + */ + template stream_from &operator>>(Tuple &); + + /// Doing this with a `std::variant` is going to be horrifically borked. + template + stream_from &operator>>(std::variant &) = delete; + + /// Iterate over this stream. Supports range-based "for" loops. + /** Produces an input iterator over the stream. + * + * Do not call this yourself. Use it like "for (auto data : stream.iter())". + */ + template [[nodiscard]] auto iter() & + { + return pqxx::internal::stream_input_iteration{*this}; + } + + /// Read a row. Return fields as views, valid until you read the next row. + /** Returns `nullptr` when there are no more rows to read. Do not attempt + * to read any further rows after that. + * + * Do not access the vector, or the storage referenced by the views, after + * closing or completing the stream, or after attempting to read a next row. + * + * A @ref pqxx::zview is like a `std::string_view`, but with the added + * guarantee that if its data pointer is non-null, the string is followed by + * a terminating zero (which falls just outside the view itself). + * + * If any of the views' data pointer is null, that means that the + * corresponding SQL field is null. + * + * @warning The return type may change in the future, to support C++20 + * coroutine-based usage. + */ + std::vector const *read_row() &; + + /// Read a raw line of text from the COPY command. + /** @warning Do not use this unless you really know what you're doing. */ + raw_line get_raw_line(); + +private: + // TODO: Clean up this signature once we cull the deprecated constructors. + /// @deprecated + stream_from( + transaction_base &tx, std::string_view table, std::string_view columns, + from_table_t); + + // TODO: Clean up this signature once we cull the deprecated constructors. + /// @deprecated + stream_from( + transaction_base &, std::string_view unquoted_table, + std::string_view columns, from_table_t, int); + + template + void extract_fields(Tuple &t, std::index_sequence) const + { + (extract_value(t), ...); + } + + pqxx::internal::glyph_scanner_func *m_glyph_scanner; + + /// Current row's fields' text, combined into one reusable string. + std::string m_row; + + /// The current row's fields. + std::vector m_fields; + + bool m_finished = false; + + void close(); + + template + void extract_value(Tuple &) const; + + /// Read a line of COPY data, write `m_row` and `m_fields`. 
+ void parse_line(); +}; + + +template +inline stream_from::stream_from( + transaction_base &tx, from_table_t, std::string_view table_name, + Columns const &columns) : + stream_from{ + tx, from_table, table_name, std::begin(columns), std::end(columns)} +{} + + +template +inline stream_from::stream_from( + transaction_base &tx, from_table_t, std::string_view table, + Iter columns_begin, Iter columns_end) : + stream_from{ + tx, table, separated_list(",", columns_begin, columns_end), + from_table, 1} +{} + + +template inline stream_from &stream_from::operator>>(Tuple &t) +{ + if (m_finished) + return *this; + static constexpr auto tup_size{std::tuple_size_v}; + m_fields.reserve(tup_size); + parse_line(); + if (m_finished) + return *this; + + if (std::size(m_fields) != tup_size) + throw usage_error{internal::concat( + "Tried to extract ", tup_size, " field(s) from a stream of ", + std::size(m_fields), ".")}; + + extract_fields(t, std::make_index_sequence{}); + return *this; +} + + +template +inline void stream_from::extract_value(Tuple &t) const +{ + using field_type = strip_t(t))>; + using nullity = nullness; + assert(index < std::size(m_fields)); + if constexpr (nullity::always_null) + { + if (std::data(m_fields[index]) != nullptr) + throw conversion_error{"Streaming non-null value into null field."}; + } + else if (std::data(m_fields[index]) == nullptr) + { + if constexpr (nullity::has_null) + std::get(t) = nullity::null(); + else + internal::throw_null_conversion(type_name); + } + else + { + // Don't ever try to convert a non-null value to nullptr_t! + std::get(t) = from_string(m_fields[index]); + } +} +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/stream_to b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/stream_to new file mode 100644 index 000000000..8760cf1f4 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/stream_to @@ -0,0 +1,8 @@ +/** pqxx::stream_to class. + * + * pqxx::stream_to enables optimized batch updates to a database table. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/stream_to.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/stream_to.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/stream_to.hxx new file mode 100644 index 000000000..2a49d8f85 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/stream_to.hxx @@ -0,0 +1,455 @@ +/* Definition of the pqxx::stream_to class. + * + * pqxx::stream_to enables optimized batch updates to a database table. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/stream_to.hxx instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_STREAM_TO +#define PQXX_H_STREAM_TO + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/separated_list.hxx" +#include "pqxx/transaction_base.hxx" + + +namespace pqxx +{ +/// Efficiently write data directly to a database table. +/** If you wish to insert rows of data into a table, you can compose INSERT + * statements and execute them. But it's slow and tedious, and you need to + * worry about quoting and escaping the data. 
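+ *
+ * As a minimal sketch of the flow described below (the table, the columns,
+ * and the open connection `conn` are hypothetical, not part of libpqxx):
+ *
+ * ```cxx
+ * pqxx::work tx{conn};
+ * auto stream{pqxx::stream_to::table(tx, {"inventory"}, {"id", "name"})};
+ * stream.write_values(1, "box of bolts");
+ * stream.write_values(2, std::optional<std::string>{});  // Null "name" field.
+ * stream.complete();
+ * tx.commit();
+ * ```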
+ * + * If you're just inserting a single row, it probably won't matter much. You + * can use prepared or parameterised statements to take care of the escaping + * for you. But if you're inserting large numbers of rows you will want + * something better. + * + * Inserting rows one by one using INSERT statements involves a lot of + * pointless overhead, especially when you are working with a remote database + * server over the network. You may end up sending each row over the network + * as a separate query, and waiting for a reply. Do it "in bulk" using + * `stream_to`, and you may find that it goes many times faster. Sometimes + * you gain orders of magnitude in speed. + * + * Here's how it works: you create a `stream_to` stream to start writing to + * your table. You will probably want to specify the columns. Then, you + * feed your data into the stream one row at a time. And finally, you call the + * stream's @ref complete function to tell it to finalise the operation, wait + * for completion, and check for errors. + * + * (You _must_ complete the stream before committing or aborting the + * transaction. The connection is in a special state while the stream is + * active, where it can't process commands, and can't commit or abort a + * transaction.) + * + * So how do you feed a row of data into the stream? There's several ways, but + * the preferred one is to call its @ref write_values. Pass the field values + * as arguments. Doesn't matter what type they are, as long as libpqxx knows + * how to convert them to PostgreSQL's text format: `int`, `std::string` or + * `std:string_view`, `float` and `double`, `bool`... lots of basic types + * are supported. If some of the values are null, feel free to use + * `std::optional`, `std::shared_ptr`, or `std::unique_ptr`. + * + * The arguments' types don't even have to match the fields' SQL types. If you + * want to insert an `int` into a `DECIMAL` column, that's your choice -- it + * will produce a `DECIMAL` value which happens to be integral. Insert a + * `float` into a `VARCHAR` column? That's fine, you'll get a string whose + * contents happen to read like a number. And so on. You can even insert + * different types of value in the same column on different rows. If you have + * a code path where a particular field is always null, just insert `nullptr`. + * + * There is another way to insert rows: the `<<` ("shift-left") operator. + * It's not as fast and it doesn't support variable arguments: each row must be + * either a `std::tuple` or something iterable, such as a `std::vector`, or + * anything else with a `begin()` and `end()`. + * + * @warning While a stream is active, you cannot execute queries, open a + * pipeline, etc. on the same transaction. A transaction can have at most one + * object of a type derived from @ref pqxx::transaction_focus active on it at a + * time. + */ +class PQXX_LIBEXPORT stream_to : transaction_focus +{ +public: + /// Stream data to a pre-quoted table and columns. + /** This factory can be useful when it's not convenient to provide the + * columns list in the form of a `std::initializer_list`, or when the list + * of columns is simply not known at compile time. + * + * Also use this if you need to create multiple streams using the same table + * path and/or columns list, and you want to save a bit of work on composing + * the internal SQL statement for starting the stream. 
It lets you compose + * the string representations for the table path and the columns list, so you + * can compute these once and then re-use them later. + * + * @param tx The transaction within which the stream will operate. + * @param path Name or path for the table upon which the stream will + * operate. If any part of the table path may contain special + * characters or be case-sensitive, quote the path using + * pqxx::connection::quote_table(). + * @param columns Columns to which the stream will write. They should be + * comma-separated and, if needed, quoted. You can produce the string + * using pqxx::connection::quote_columns(). If you omit this argument, + * the stream will write all columns in the table, in schema order. + */ + static stream_to raw_table( + transaction_base &tx, std::string_view path, std::string_view columns = "") + { + return {tx, path, columns}; + } + + /// Create a `stream_to` writing to a named table and columns. + /** Use this to stream data to a table, where the list of columns is known at + * compile time. + * + * @param tx The transaction within which the stream will operate. + * @param path A @ref table_path designating the target table. + * @param columns Optionally, the columns to which the stream should write. + * If you do not pass this, the stream will write to all columns in the + * table, in schema order. + */ + static stream_to table( + transaction_base &tx, table_path path, + std::initializer_list columns = {}) + { + auto const &conn{tx.conn()}; + return raw_table(tx, conn.quote_table(path), conn.quote_columns(columns)); + } + +#if defined(PQXX_HAVE_CONCEPTS) + /// Create a `stream_to` writing to a named table and columns. + /** Use this version to stream data to a table, when the list of columns is + * not known at compile time. + * + * @param tx The transaction within which the stream will operate. + * @param path A @ref table_path designating the target table. + * @param columns The columns to which the stream should write. + */ + template + static stream_to + table(transaction_base &tx, table_path path, COLUMNS const &columns) + { + auto const &conn{tx.conn()}; + return stream_to::raw_table( + tx, conn.quote_table(path), tx.conn().quote_columns(columns)); + } + + /// Create a `stream_to` writing to a named table and columns. + /** Use this version to stream data to a table, when the list of columns is + * not known at compile time. + * + * @param tx The transaction within which the stream will operate. + * @param path A @ref table_path designating the target table. + * @param columns The columns to which the stream should write. + */ + template + static stream_to + table(transaction_base &tx, std::string_view path, COLUMNS const &columns) + { + return stream_to::raw_table(tx, path, tx.conn().quote_columns(columns)); + } +#endif // PQXX_HAVE_CONCEPTS + + /// Create a stream, without specifying columns. + /** @deprecated Use @ref table or @ref raw_table as a factory. + * + * Fields will be inserted in whatever order the columns have in the + * database. + * + * You'll probably want to specify the columns, so that the mapping between + * your data fields and the table is explicit in your code, and not hidden + * in an "implicit contract" between your code and your schema. + */ + [[deprecated("Use table() or raw_table() factory.")]] stream_to( + transaction_base &tx, std::string_view table_name) : + stream_to{tx, table_name, ""sv} + {} + + /// Create a stream, specifying column names as a container of strings. 
+ /** @deprecated Use @ref table or @ref raw_table as a factory. + */ + template + [[deprecated("Use table() or raw_table() factory.")]] stream_to( + transaction_base &, std::string_view table_name, Columns const &columns); + + /// Create a stream, specifying column names as a sequence of strings. + /** @deprecated Use @ref table or @ref raw_table as a factory. + */ + template + [[deprecated("Use table() or raw_table() factory.")]] stream_to( + transaction_base &, std::string_view table_name, Iter columns_begin, + Iter columns_end); + + ~stream_to() noexcept; + + /// Does this stream still need to @ref complete()? + [[nodiscard]] constexpr operator bool() const noexcept + { + return not m_finished; + } + /// Has this stream been through its concluding @c complete()? + [[nodiscard]] constexpr bool operator!() const noexcept + { + return m_finished; + } + + /// Complete the operation, and check for errors. + /** Always call this to close the stream in an orderly fashion, even after + * an error. (In the case of an error, abort the transaction afterwards.) + * + * The only circumstance where it's safe to skip this is after an error, if + * you're discarding the entire connection. + */ + void complete(); + + /// Insert a row of data. + /** Returns a reference to the stream, so you can chain the calls. + * + * The @c row can be a tuple, or any type that can be iterated. Each + * item becomes a field in the row, in the same order as the columns you + * specified when creating the stream. + * + * If you don't already happen to have your fields in the form of a tuple or + * container, prefer @c write_values. It's faster and more convenient. + */ + template stream_to &operator<<(Row const &row) + { + write_row(row); + return *this; + } + + /// Stream a `stream_from` straight into a `stream_to`. + /** This can be useful when copying between different databases. If the + * source and the destination are on the same database, you'll get better + * performance doing it all in a regular query. + */ + stream_to &operator<<(stream_from &); + + /// Insert a row of data, given in the form of a @c std::tuple or container. + /** The @c row can be a tuple, or any type that can be iterated. Each + * item becomes a field in the row, in the same order as the columns you + * specified when creating the stream. + * + * The preferred way to insert a row is @c write_values. + */ + template void write_row(Row const &row) + { + fill_buffer(row); + write_buffer(); + } + + /// Insert values as a row. + /** This is the recommended way of inserting data. Pass your field values, + * of any convertible type. + */ + template void write_values(Ts const &...fields) + { + fill_buffer(fields...); + write_buffer(); + } + +private: + /// Stream a pre-quoted table name and columns list. + stream_to( + transaction_base &tx, std::string_view path, std::string_view columns); + + bool m_finished = false; + + /// Reusable buffer for a row. Saves doing an allocation for each row. + std::string m_buffer; + + /// Reusable buffer for converting/escaping a field. + std::string m_field_buf; + + /// Glyph scanner, for parsing the client encoding. + internal::glyph_scanner_func *m_scanner; + + /// Write a row of raw text-format data into the destination table. + void write_raw_line(std::string_view); + + /// Write a row of data from @c m_buffer into the destination table. + /** Resets the buffer for the next row. + */ + void write_buffer(); + + /// COPY encoding for a null field, plus subsequent separator. 
+ static constexpr std::string_view null_field{"\\N\t"}; + + /// Estimate buffer space needed for a field which is always null. + template + static std::enable_if_t::always_null, std::size_t> + estimate_buffer(T const &) + { + return std::size(null_field); + } + + /// Estimate buffer space needed for field f. + /** The estimate is not very precise. We don't actually know how much space + * we'll need once the escaping comes in. + */ + template + static std::enable_if_t::always_null, std::size_t> + estimate_buffer(T const &field) + { + return is_null(field) ? std::size(null_field) : size_buffer(field); + } + + /// Append escaped version of @c data to @c m_buffer, plus a tab. + void escape_field_to_buffer(std::string_view data); + + /// Append string representation for @c f to @c m_buffer. + /** This is for the general case, where the field may contain a value. + * + * Also appends a tab. The tab is meant to be a separator, not a terminator, + * so if you write any fields at all, you'll end up with one tab too many + * at the end of the buffer. + */ + template + std::enable_if_t::always_null> + append_to_buffer(Field const &f) + { + // We append each field, terminated by a tab. That will leave us with + // one tab too many, assuming we write any fields at all; we remove that + // at the end. + if (is_null(f)) + { + // Easy. Append null and tab in one go. + m_buffer.append(null_field); + } + else + { + // Convert f into m_buffer. + + using traits = string_traits; + auto const budget{estimate_buffer(f)}; + auto const offset{std::size(m_buffer)}; + + if constexpr (std::is_arithmetic_v) + { + // Specially optimised for "safe" types, which never need any + // escaping. Convert straight into m_buffer. + + // The budget we get from size_buffer() includes room for the trailing + // zero, which we must remove. But we're also inserting tabs between + // fields, so we re-purpose the extra byte for that. + auto const total{offset + budget}; + m_buffer.resize(total); + auto const data{m_buffer.data()}; + char *const end{traits::into_buf(data + offset, data + total, f)}; + *(end - 1) = '\t'; + // Shrink to fit. Keep the tab though. + m_buffer.resize(static_cast(end - data)); + } + else if constexpr ( + std::is_same_v or + std::is_same_v or + std::is_same_v) + { + // This string may need escaping. + m_field_buf.resize(budget); + escape_field_to_buffer(f); + } + else + { + // This field needs to be converted to a string, and after that, + // escaped as well. + m_field_buf.resize(budget); + auto const data{m_field_buf.data()}; + escape_field_to_buffer( + traits::to_buf(data, data + std::size(m_field_buf), f)); + } + } + } + + /// Append string representation for a null field to @c m_buffer. + /** This special case is for types which are always null. + * + * Also appends a tab. The tab is meant to be a separator, not a terminator, + * so if you write any fields at all, you'll end up with one tab too many + * at the end of the buffer. + */ + template + std::enable_if_t::always_null> + append_to_buffer(Field const &) + { + m_buffer.append(null_field); + } + + /// Write raw COPY line into @c m_buffer, based on a container of fields. + template + std::enable_if_t> + fill_buffer(Container const &c) + { + // To avoid unnecessary allocations and deallocations, we run through c + // twice: once to determine how much buffer space we may need, and once to + // actually write it into the buffer. 
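+    // (The reserve() below is only an optimisation: if the estimate turns out
+    // to be too small, the string will still grow as needed on append.)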
+ std::size_t budget{0}; + for (auto const &f : c) budget += estimate_buffer(f); + m_buffer.reserve(budget); + for (auto const &f : c) append_to_buffer(f); + } + + /// Estimate how many buffer bytes we need to write tuple. + template + static std::size_t + budget_tuple(Tuple const &t, std::index_sequence) + { + return (estimate_buffer(std::get(t)) + ...); + } + + /// Write tuple of fields to @c m_buffer. + template + void append_tuple(Tuple const &t, std::index_sequence) + { + (append_to_buffer(std::get(t)), ...); + } + + /// Write raw COPY line into @c m_buffer, based on a tuple of fields. + template void fill_buffer(std::tuple const &t) + { + using indexes = std::make_index_sequence; + + m_buffer.reserve(budget_tuple(t, indexes{})); + append_tuple(t, indexes{}); + } + + /// Write raw COPY line into @c m_buffer, based on varargs fields. + template void fill_buffer(const Ts &...fields) + { + (..., append_to_buffer(fields)); + } + + constexpr static std::string_view s_classname{"stream_to"}; +}; + + +template +inline stream_to::stream_to( + transaction_base &tx, std::string_view table_name, Columns const &columns) : + stream_to{tx, table_name, std::begin(columns), std::end(columns)} +{} + + +template +inline stream_to::stream_to( + transaction_base &tx, std::string_view table_name, Iter columns_begin, + Iter columns_end) : + stream_to{ + tx, + tx.quote_name( + table_name, + separated_list(",", columns_begin, columns_end, [&tx](auto col) { + return tx.quote_name(*col); + }))} +{} +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/subtransaction b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/subtransaction new file mode 100644 index 000000000..e0d154903 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/subtransaction @@ -0,0 +1,8 @@ +/** pqxx::subtransaction class. + * + * pqxx::subtransaction is a nested transaction, i.e. one inside a transaction. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/subtransaction.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/subtransaction.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/subtransaction.hxx new file mode 100644 index 000000000..e66b7a7a8 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/subtransaction.hxx @@ -0,0 +1,96 @@ +/* Definition of the pqxx::subtransaction class. + * + * pqxx::subtransaction is a nested transaction, i.e. one within a transaction. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/subtransaction instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_SUBTRANSACTION +#define PQXX_H_SUBTRANSACTION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/dbtransaction.hxx" + +namespace pqxx +{ +/** + * @ingroup transactions + */ +/// "Transaction" nested within another transaction +/** A subtransaction can be executed inside a backend transaction, or inside + * another subtransaction. This can be useful when, for example, statements in + * a transaction may harmlessly fail and you don't want them to abort the + * entire transaction. 
Here's an example of how a temporary table may be + * dropped before re-creating it, without failing if the table did not exist: + * + * ```cxx + * void do_job(connection &C) + * { + * string const temptable = "fleetingtable"; + * + * work W(C, "do_job"); + * do_firstpart(W); + * + * // Attempt to delete our temporary table if it already existed. + * try + * { + * subtransaction S(W, "droptemp"); + * S.exec0("DROP TABLE " + temptable); + * S.commit(); + * } + * catch (undefined_table const &) + * { + * // Table did not exist. Which is what we were hoping to achieve anyway. + * // Carry on without regrets. + * } + * + * // S may have gone into a failed state and been destroyed, but the + * // upper-level transaction W is still fine. We can continue to use it. + * W.exec0("CREATE TEMP TABLE " + temptable + "(bar integer, splat + * varchar)"); + * + * do_lastpart(W); + * } + * ``` + * + * (This is just an example. If you really wanted to do drop a table without + * an error if it doesn't exist, you'd use DROP TABLE IF EXISTS.) + * + * There are no isolation levels inside a transaction. They are not needed + * because all actions within the same backend transaction are always performed + * sequentially anyway. + * + * @warning While the subtransaction is "live," you cannot execute queries or + * open streams etc. on its parent transaction. A transaction can have at most + * one object of a type derived from @ref pqxx::transaction_focus active on it + * at a time. + */ +class PQXX_LIBEXPORT subtransaction : public transaction_focus, + public dbtransaction +{ +public: + /// Nest a subtransaction nested in another transaction. + explicit subtransaction(dbtransaction &t, std::string_view tname = ""sv); + + /// Nest a subtransaction in another subtransaction. + explicit subtransaction(subtransaction &t, std::string_view name = ""sv); + + virtual ~subtransaction() noexcept override; + +private: + std::string quoted_name() const + { + return quote_name(transaction_focus::name()); + } + virtual void do_commit() override; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/time b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/time new file mode 100644 index 000000000..85df05744 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/time @@ -0,0 +1,6 @@ +/** Date/time string conversions. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/time.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/time.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/time.hxx new file mode 100644 index 000000000..effed05e0 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/time.hxx @@ -0,0 +1,88 @@ +/** Support for date/time values. + * + * At the moment this supports dates, but not times. + */ +#ifndef PQXX_H_TIME +#define PQXX_H_TIME + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include + +#include "pqxx/internal/concat.hxx" +#include "pqxx/strconv.hxx" + + +#if defined(PQXX_HAVE_YEAR_MONTH_DAY) + +namespace pqxx +{ +using namespace std::literals; + +template<> +struct nullness + : no_null +{}; + + +/// String representation for a Gregorian date in ISO-8601 format. +/** @warning Experimental. There may still be design problems, particularly + * when it comes to BC years. 
+ * + * PostgreSQL supports a choice of date formats, but libpqxx does not. The + * other formats in turn support a choice of "month before day" versus "day + * before month," meaning that it's not necessarily known which format a given + * date is supposed to be. So I repeat: ISO-8601-style format only! + * + * Invalid dates will not convert. This includes February 29 on non-leap + * years, which is why it matters that `year_month_day` represents a + * _Gregorian_ date. + * + * The range of years is limited. At the time of writing, PostgreSQL 14 + * supports years from 4713 BC to 294276 AD inclusive, and C++20 supports + * a range of 32767 BC to 32767 AD inclusive. So in practice, years must fall + * between 4713 BC and 32767 AD, inclusive. + * + * @warning Support for BC (or BCE) years is still experimental. I still need + * confirmation on this issue: it looks as if C++ years are astronomical years, + * which means they have a Year Zero. Regular BC/AD years do not have a year + * zero, so the year 1 AD follows directly after 1 BC. + * + * So, what to our calendars (and to PostgreSQL) is the year "0001 BC" seems to + * count as year "0" in a `std::chrono::year_month_day`. The year 0001 AD is + * still equal to 1 as you'd expect, and all AD years work normally, but all + * years before then are shifted by one. For instance, the year 543 BC would + * be -542 in C++. + */ +template<> struct PQXX_LIBEXPORT string_traits +{ + [[nodiscard]] static zview + to_buf(char *begin, char *end, std::chrono::year_month_day const &value) + { + return generic_to_buf(begin, end, value); + } + + static char * + into_buf(char *begin, char *end, std::chrono::year_month_day const &value); + + [[nodiscard]] static std::chrono::year_month_day + from_string(std::string_view text); + + [[nodiscard]] static std::size_t + size_buffer(std::chrono::year_month_day const &) noexcept + { + static_assert(int{(std::chrono::year::min)()} >= -99999); + static_assert(int{(std::chrono::year::max)()} <= 99999); + return 5 + 1 + 2 + 1 + 2 + std::size(s_bc) + 1; + } + +private: + /// The "BC" suffix for years before 1 AD. + static constexpr std::string_view s_bc{" BC"sv}; +}; +} // namespace pqxx +#endif // PQXX_HAVE_YEAR_MONTH_DAY +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transaction b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transaction new file mode 100644 index 000000000..a7ae39d43 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transaction @@ -0,0 +1,8 @@ +/** pqxx::transaction class. + * + * pqxx::transaction represents a standard database transaction. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/transaction.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transaction.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transaction.hxx new file mode 100644 index 000000000..e90917e38 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transaction.hxx @@ -0,0 +1,108 @@ +/* Definition of the pqxx::transaction class. + * pqxx::transaction represents a standard database transaction. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/transaction instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. 
If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_TRANSACTION +#define PQXX_H_TRANSACTION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/dbtransaction.hxx" + +namespace pqxx::internal +{ +/// Helper base class for the @ref transaction class template. +class PQXX_LIBEXPORT basic_transaction : public dbtransaction +{ +protected: + basic_transaction( + connection &c, zview begin_command, std::string_view tname); + basic_transaction(connection &c, zview begin_command, std::string &&tname); + basic_transaction(connection &c, zview begin_command); + + virtual ~basic_transaction() noexcept override = 0; + +private: + virtual void do_commit() override; +}; +} // namespace pqxx::internal + + +namespace pqxx +{ +/** + * @ingroup transactions + */ +//@{ + +/// Standard back-end transaction, templatised on isolation level. +/** This is the type you'll normally want to use to represent a transaction on + * the database. + * + * Usage example: double all wages. + * + * ```cxx + * extern connection C; + * work T(C); + * try + * { + * T.exec0("UPDATE employees SET wage=wage*2"); + * T.commit(); // NOTE: do this inside try block + * } + * catch (exception const &e) + * { + * cerr << e.what() << endl; + * T.abort(); // Usually not needed; same happens when T's life ends. + * } + * ``` + */ +template< + isolation_level ISOLATION = isolation_level::read_committed, + write_policy READWRITE = write_policy::read_write> +class transaction final : public internal::basic_transaction +{ +public: + /// Begin a transaction. + /** + * @param c Connection for this transaction to operate on. + * @param tname Optional name for transaction. Must begin with a letter and + * may contain letters and digits only. + */ + transaction(connection &c, std::string_view tname) : + internal::basic_transaction{ + c, internal::begin_cmd, tname} + {} + + /// Begin a transaction. + /** + * @param c Connection for this transaction to operate on. + * may contain letters and digits only. + */ + explicit transaction(connection &c) : + internal::basic_transaction{ + c, internal::begin_cmd} + {} + + virtual ~transaction() noexcept override { close(); } +}; + + +/// The default transaction type. +using work = transaction<>; + +/// Read-only transaction. +using read_transaction = + transaction; + +//@} +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transaction_base b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transaction_base new file mode 100644 index 000000000..c39219aac --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transaction_base @@ -0,0 +1,9 @@ +/** Base for the transaction classes. + * + * pqxx::transaction_base defines the interface for any abstract class that + * represents a database transaction. + */ +// Actual definitions in .hxx file so editors and such recognize file type. 
+#include "pqxx/internal/header-pre.hxx" +#include "pqxx/transaction_base.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transaction_base.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transaction_base.hxx new file mode 100644 index 000000000..4363cc56a --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transaction_base.hxx @@ -0,0 +1,810 @@ +/* Common code and definitions for the transaction classes. + * + * pqxx::transaction_base defines the interface for any abstract class that + * represents a database transaction. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/transaction_base instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_TRANSACTION_BASE +#define PQXX_H_TRANSACTION_BASE + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include + +/* End-user programs need not include this file, unless they define their own + * transaction classes. This is not something the typical program should want + * to do. + * + * However, reading this file is worthwhile because it defines the public + * interface for the available transaction classes such as transaction and + * nontransaction. + */ + +#include "pqxx/connection.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/internal/encoding_group.hxx" +#include "pqxx/isolation.hxx" +#include "pqxx/result.hxx" +#include "pqxx/row.hxx" +#include "pqxx/stream_from.hxx" +#include "pqxx/util.hxx" + +namespace pqxx::internal::gate +{ +class transaction_subtransaction; +class transaction_sql_cursor; +class transaction_stream_to; +class transaction_transaction_focus; +} // namespace pqxx::internal::gate + + +namespace pqxx +{ +using namespace std::literals; + + +class transaction_focus; + + +/** + * @defgroup transactions Transaction classes + * + * All database access goes through instances of these classes. + * However, not all implementations of this interface need to provide full + * transactional integrity. + * + * Several implementations of this interface are shipped with libpqxx, + * including the plain transaction class, the entirely unprotected + * nontransaction, and the more cautious robusttransaction. + */ + +/// Interface definition (and common code) for "transaction" classes. +/** + * @ingroup transactions + * + * Abstract base class for all transaction types. + */ +class PQXX_LIBEXPORT PQXX_NOVTABLE transaction_base +{ +public: + transaction_base() = delete; + transaction_base(transaction_base const &) = delete; + transaction_base(transaction_base &&) = delete; + transaction_base &operator=(transaction_base const &) = delete; + transaction_base &operator=(transaction_base &&) = delete; + + virtual ~transaction_base() = 0; + + /// Commit the transaction. + /** Make the effects of this transaction definite. If you destroy a + * transaction without invoking its @ref commit() first, that will implicitly + * abort it. (For the @ref nontransaction class though, "commit" and "abort" + * really don't do anything, hence its name.) + * + * There is, however, a minute risk that you might lose your connection to + * the database at just the wrong moment here. 
In that case, libpqxx may be + * unable to determine whether the database was able to complete the + * transaction, or had to roll it back. In that scenario, @ref commit() will + * throw an in_doubt_error. There is a different transaction class called + * @ref robusttransaction which takes some special precautions to reduce this + * risk. + */ + void commit(); + + /// Abort the transaction. + /** No special effort is required to call this function; it will be called + * implicitly when the transaction is destructed. + */ + void abort(); + + /** + * @ingroup escaping-functions + * + * Use these when writing SQL queries that incorporate C++ values as SQL + * constants. + * + * The functions you see here are just convenience shortcuts to the same + * functions on the connection object. + */ + //@{ + /// Escape string for use as SQL string literal in this transaction. + template [[nodiscard]] auto esc(ARGS &&...args) const + { + return conn().esc(std::forward(args)...); + } + + /// Escape binary data for use as SQL string literal in this transaction. + /** Raw, binary data is treated differently from regular strings. Binary + * strings are never interpreted as text, so they may safely include byte + * values or byte sequences that don't happen to represent valid characters + * in the character encoding being used. + * + * The binary string does not stop at the first zero byte, as is the case + * with textual strings. Instead, it may contain zero bytes anywhere. If + * it happens to contain bytes that look like quote characters, or other + * things that can disrupt their use in SQL queries, they will be replaced + * with special escape sequences. + */ + template [[nodiscard]] auto esc_raw(ARGS &&...args) const + { + return conn().esc_raw(std::forward(args)...); + } + + /// Unescape binary data, e.g. from a table field or notification payload. + /** Takes a binary string as escaped by PostgreSQL, and returns a restored + * copy of the original binary data. + */ + [[nodiscard, deprecated("Use unesc_bin() instead.")]] std::string + unesc_raw(zview text) const + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return conn().unesc_raw(text); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Unescape binary data, e.g. from a table field or notification payload. + /** Takes a binary string as escaped by PostgreSQL, and returns a restored + * copy of the original binary data. + */ + [[nodiscard]] std::basic_string unesc_bin(zview text) + { + return conn().unesc_bin(text); + } + + /// Unescape binary data, e.g. from a table field or notification payload. + /** Takes a binary string as escaped by PostgreSQL, and returns a restored + * copy of the original binary data. + */ + [[nodiscard, deprecated("Use unesc_bin() instead.")]] std::string + unesc_raw(char const *text) const + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return conn().unesc_raw(text); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Unescape binary data, e.g. from a table field or notification payload. + /** Takes a binary string as escaped by PostgreSQL, and returns a restored + * copy of the original binary data. + */ + [[nodiscard]] std::basic_string unesc_bin(char const text[]) + { + return conn().unesc_bin(text); + } + + /// Represent object as SQL string, including quoting & escaping. + /** Nulls are recognized and represented as SQL nulls. 
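+ *
+ * A brief sketch of typical use, assuming an open transaction `tx` and a
+ * hypothetical `users` table:
+ *
+ * ```cxx
+ * std::string name{"d'Arcy"};
+ * tx.exec0("INSERT INTO users(name) VALUES (" + tx.quote(name) + ")");
+ * ```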
*/ + template [[nodiscard]] std::string quote(T const &t) const + { + return conn().quote(t); + } + + [[deprecated( + "Use std::basic_string instead of binarystring.")]] std::string + quote(binarystring const &t) const + { + return conn().quote(t.bytes_view()); + } + + /// Binary-escape and quote a binary string for use as an SQL constant. + [[deprecated("Use quote(std::basic_string_view).")]] std::string + quote_raw(unsigned char const bin[], std::size_t len) const + { + return quote(binary_cast(bin, len)); + } + + /// Binary-escape and quote a binary string for use as an SQL constant. + [[deprecated("Use quote(std::basic_string_view).")]] std::string + quote_raw(zview bin) const; + +#if defined(PQXX_HAVE_CONCEPTS) + /// Binary-escape and quote a binary string for use as an SQL constant. + /** For binary data you can also just use @ref quote(data). */ + template + [[nodiscard]] std::string quote_raw(DATA const &data) const + { + return conn().quote_raw(data); + } +#endif + + /// Escape an SQL identifier for use in a query. + [[nodiscard]] std::string quote_name(std::string_view identifier) const + { + return conn().quote_name(identifier); + } + + /// Escape string for literal LIKE match. + [[nodiscard]] std::string + esc_like(std::string_view bin, char escape_char = '\\') const + { + return conn().esc_like(bin, escape_char); + } + //@} + + /** + * @name Command execution + * + * There are many functions for executing (or "performing") a command (or + * "query"). This is the most fundamental thing you can do with the library, + * and you always do it from a transaction class. + * + * Command execution can throw many types of exception, including sql_error, + * broken_connection, and many sql_error subtypes such as + * feature_not_supported or insufficient_privilege. But any exception thrown + * by the C++ standard library may also occur here. All exceptions you will + * see libpqxx throw are derived from std::exception. + * + * One unusual feature in libpqxx is that you can give your query a name or + * description. This does not mean anything to the database, but sometimes + * it can help libpqxx produce more helpful error messages, making problems + * in your code easier to debug. + * + * Many of the execution functions used to accept a `desc` argument, a + * human-readable description of the statement for use in error messages. + * This could make failures easier to debug. Future versions will use + * C++20's `std::source_location` to identify the failing statement. + */ + //@{ + + /// Execute a command. + /** + * @param query Query or command to execute. + * @param desc Optional identifier for query, to help pinpoint SQL errors. + * @return A result set describing the query's or command's result. + */ + [[deprecated("The desc parameter is going away.")]] result + exec(std::string_view query, std::string_view desc); + + /// Execute a command. + /** + * @param query Query or command to execute. + * @return A result set describing the query's or command's result. + */ + result exec(std::string_view query) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return exec(query, std::string_view{}); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Execute a command. + /** + * @param query Query or command to execute. + * @param desc Optional identifier for query, to help pinpoint SQL errors. + * @return A result set describing the query's or command's result. 
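+ *
+ * As a sketch, a typical call through the plain string overload looks like
+ * this (the `users` table is hypothetical, and `std::cout` is used purely
+ * for illustration):
+ *
+ * ```cxx
+ * pqxx::result r{tx.exec("SELECT id, name FROM users")};
+ * for (auto const &row : r)
+ *   std::cout << row["name"].as<std::string>() << '\n';
+ * ```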
+ */ + [[deprecated( + "Pass your query as a std::string_view, not stringstream.")]] result + exec(std::stringstream const &query, std::string_view desc) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return exec(query.str(), desc); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Execute command, which should return zero rows of data. + /** Works like @ref exec, but fails if the result contains data. It still + * returns a result, however, which may contain useful metadata. + * + * @throw unexpected_rows If the query returned the wrong number of rows. + */ + [[deprecated("The desc parameter is going away.")]] result + exec0(zview query, std::string_view desc) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return exec_n(0, query, desc); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Execute command, which should return zero rows of data. + /** Works like @ref exec, but fails if the result contains data. It still + * returns a result, however, which may contain useful metadata. + * + * @throw unexpected_rows If the query returned the wrong number of rows. + */ + result exec0(zview query) { return exec_n(0, query); } + + /// Execute command returning a single row of data. + /** Works like @ref exec, but requires the result to contain exactly one row. + * The row can be addressed directly, without the need to find the first row + * in a result set. + * + * @throw unexpected_rows If the query returned the wrong number of rows. + */ + [[deprecated("The desc parameter is going away.")]] row + exec1(zview query, std::string_view desc) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return exec_n(1, query, desc).front(); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Execute command returning a single row of data. + /** Works like @ref exec, but requires the result to contain exactly one row. + * The row can be addressed directly, without the need to find the first row + * in a result set. + * + * @throw unexpected_rows If the query returned the wrong number of rows. + */ + row exec1(zview query) { return exec_n(1, query).front(); } + + /// Execute command, expect given number of rows. + /** Works like @ref exec, but checks that the result has exactly the expected + * number of rows. + * + * @throw unexpected_rows If the query returned the wrong number of rows. + */ + [[deprecated("The desc parameter is going away.")]] result + exec_n(result::size_type rows, zview query, std::string_view desc); + + /// Execute command, expect given number of rows. + /** Works like @ref exec, but checks that the result has exactly the expected + * number of rows. + * + * @throw unexpected_rows If the query returned the wrong number of rows. + */ + result exec_n(result::size_type rows, zview query) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return exec_n(rows, query, std::string_view{}); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Perform query, expecting exactly 1 row with 1 field, and convert it. + /** This is convenience shorthand for querying exactly one value from the + * database. It returns that value, converted to the type you specify. 
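+ *
+ * For instance, to read a single number (a sketch; the `users` table is
+ * hypothetical):
+ *
+ * ```cxx
+ * auto const n = tx.query_value<long>("SELECT count(*) FROM users");
+ * ```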
+ */ + template + [[deprecated("The desc parameter is going away.")]] TYPE + query_value(zview query, std::string_view desc) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + row const r{exec1(query, desc)}; +#include "pqxx/internal/ignore-deprecated-post.hxx" + if (std::size(r) != 1) + throw usage_error{internal::concat( + "Queried single value from result with ", std::size(r), " columns.")}; + return r[0].as(); + } + + /// Perform query, expecting exactly 1 row with 1 field, and convert it. + /** This is convenience shorthand for querying exactly one value from the + * database. It returns that value, converted to the type you specify. + */ + template TYPE query_value(zview query) + { + row const r{exec1(query)}; + if (std::size(r) != 1) + throw usage_error{internal::concat( + "Queried single value from result with ", std::size(r), " columns.")}; + return r[0].as(); + } + + /// Execute a query, and loop over the results row by row. + /** Converts the rows to `std::tuple`, of the column types you specify. + * + * Use this with a range-based "for" loop. It executes the query, and + * directly maps the resulting rows onto a `std::tuple` of the types you + * specify. It starts before all the data from the server is in, so if your + * network connection to the server breaks while you're iterating, you'll get + * an exception partway through. + * + * The stream lives entirely within the lifetime of the transaction. Make + * sure you destroy the stream before you destroy the transaction. Either + * iterate the stream all the way to the end, or destroy first the stream + * and then the transaction without touching either in any other way. Until + * the stream has finished, the transaction is in a special state where it + * cannot execute queries. + * + * As a special case, tuple may contain `std::string_view` fields, but the + * strings to which they point will only remain valid until you extract the + * next row. After that, the memory holding the string may be overwritten or + * deallocated. + * + * If any of the columns can be null, and the C++ type to which it translates + * does not have a null value, wrap the type in `std::optional` (or if + * you prefer, `std::shared_ptr` or `std::unique_ptr)`. These templates + * do recognise null values, and libpqxx will know how to convert to them. + * + * The connection is in a special state until the iteration finishes. So if + * it does not finish due to a `break` or a `return` or an exception, then + * the entire connection becomes effectively unusable. + * + * Querying in this way is faster than the `exec()` methods for larger + * results (but slower for small ones). You can start processing rows before + * the full result is in. Also, `stream()` scales better in terms of memory + * usage. Where @ref exec() reads the entire result into memory at once, + * `stream()` will read and process one row at at a time. + * + * Your query executes as part of a COPY command, not as a stand-alone query, + * so there are limitations to what you can do in the query. It can be + * either a SELECT or VALUES query; or an INSERT, UPDATE, or DELETE with a + * RETURNING clause. See the documentation for PostgreSQL's COPY command for + * the details: + * + * https://www.postgresql.org/docs/current/sql-copy.html + * + * Iterating in this way does require each of the field types you pass to be + * default-constructible, copy-constructible, and assignable. These + * requirements may be loosened once libpqxx moves on to C++20. 
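+ *
+ * A short sketch of the intended pattern; the `users` table and the
+ * `process()` function are hypothetical:
+ *
+ * ```cxx
+ * for (auto [id, name] :
+ *      tx.stream<int, std::string_view>("SELECT id, name FROM users"))
+ *   process(id, name);
+ * ```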
+ */ + template + [[nodiscard]] auto stream(std::string_view query) & + { + // Tricky: std::make_unique() supports constructors but not RVO functions. + return pqxx::internal::owning_stream_input_iteration{ + std::unique_ptr{ + new stream_from{stream_from::query(*this, query)}}}; + } + + // C++20: Concept like std::invocable, but without specifying param types. + /// Perform a streaming query, and for each result row, call `func`. + /** Here, `func` can be a function, a `std::function`, a lambda, or an + * object that supports the function call operator. Of course `func` must + * have an unambiguous signature; it can't be overloaded or generic. + * + * The `for_each` function executes `query` in a stream using + * @ref pqxx::stream_from. Every time a row of data comes in from the + * server, it converts the row's fields to the types of `func`'s respective + * parameters, and calls `func` with those values. + * + * This will not work for all queries, but straightforward `SELECT` and + * `UPDATE ... RETURNING` queries should work. Consult the documentation for + * @ref pqxx::stream_from and PostgreSQL's underlying `COPY` command for the + * full details. + * + * Streaming a query like this is likely to be slower than the @ref exec() + * functions for small result sets, but faster for large result sets. So if + * performance matters, you'll want to use `for_each` if you query large + * amounts of data, but not if you do lots of queries with small outputs. + */ + template + inline auto for_each(std::string_view query, CALLABLE &&func) + { + using param_types = + pqxx::internal::strip_types_t>; + param_types const *const sample{nullptr}; + auto data_stream{stream_like(query, sample)}; + for (auto const &fields : data_stream) std::apply(func, fields); + } + + /** + * @name Parameterized statements + * + * You'll often need parameters in the queries you execute: "select the + * car with this licence plate." If the parameter is a string, you need to + * quote it and escape any special characters inside it, or it may become a + * target for an SQL injection attack. If it's an integer (for example), + * you need to convert it to a string, but in the database's format, without + * locale-specific niceties like "," separators between the thousands. + * + * Parameterised statements are an easier and safer way to do this. They're + * like prepared statements, but for a single use. You don't need to name + * them, and you don't need to prepare them first. + * + * Your query will include placeholders like `$1` and `$2` etc. in the places + * where you want the arguments to go. Then, you pass the argument values + * and the actual query is constructed for you. + * + * Pass the exact right number of parameters, and in the right order. The + * parameters in the query don't have to be neatly ordered from `$1` to + * `$2` to `$3` - but you must pass the argument for `$1` first, the one + * for `$2` second, etc. + * + * @warning Beware of "nul" bytes. Any string you pass as a parameter will + * end at the first char with value zero. If you pass a string that contains + * a zero byte, the last byte in the value will be the one just before the + * zero. + */ + //@{ + /// Execute an SQL statement with parameters. + template result exec_params(zview query, Args &&...args) + { + params pp(args...); + return internal_exec_params(query, pp.make_c_params()); + } + + // Execute parameterised statement, expect a single-row result. + /** @throw unexpected_rows if the result does not consist of exactly one row. 
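+ *
+ * For example, looking up the car with a given licence plate (a sketch; the
+ * `cars` table and the `plate` variable are hypothetical):
+ *
+ * ```cxx
+ * pqxx::row r{tx.exec_params1(
+ *   "SELECT make, model FROM cars WHERE licence_plate = $1", plate)};
+ * ```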
+ */ + template row exec_params1(zview query, Args &&...args) + { + return exec_params_n(1, query, std::forward(args)...).front(); + } + + // Execute parameterised statement, expect a result with zero rows. + /** @throw unexpected_rows if the result contains rows. + */ + template result exec_params0(zview query, Args &&...args) + { + return exec_params_n(0, query, std::forward(args)...); + } + + // Execute parameterised statement, expect exactly a given number of rows. + /** @throw unexpected_rows if the result contains the wrong number of rows. + */ + template + result exec_params_n(std::size_t rows, zview query, Args &&...args) + { + auto const r{exec_params(query, std::forward(args)...)}; + check_rowcount_params(rows, std::size(r)); + return r; + } + //@} + + /** + * @name Prepared statements + * + * These are very similar to parameterised statements. The difference is + * that you prepare them in advance, giving them identifying names. You can + * then call them by these names, passing in the argument values appropriate + * for that call. + * + * You prepare a statement on the connection, using + * @ref pqxx::connection::prepare(). But you then call the statement in a + * transaction, using the functions you see here. + * + * Never try to prepare, execute, or unprepare a prepared statement manually + * using direct SQL queries when you also use the libpqxx equivalents. For + * any given statement, either prepare, manage, and execute it through the + * dedicated libpqxx functions; or do it all directly in SQL. Don't mix the + * two, or the code may get confused. + * + * See \ref prepared for a full discussion. + * + * @warning Beware of "nul" bytes. Any string you pass as a parameter will + * end at the first char with value zero. If you pass a string that contains + * a zero byte, the last byte in the value will be the one just before the + * zero. If you need a zero byte, you're dealing with binary strings, not + * regular strings. Represent binary strings on the SQL side as `BYTEA` + * (or as large objects). On the C++ side, use types like + * `std::basic_string` or `std::basic_string_view` + * or (in C++20) `std::vector`. Also, consider large objects on + * the SQL side and @ref blob on the C++ side. + */ + //@{ + + /// Execute a prepared statement, with optional arguments. + template + result exec_prepared(zview statement, Args &&...args) + { + params pp(args...); + return internal_exec_prepared(statement, pp.make_c_params()); + } + + /// Execute a prepared statement, and expect a single-row result. + /** @throw pqxx::unexpected_rows if the result was not exactly 1 row. + */ + template + row exec_prepared1(zview statement, Args &&...args) + { + return exec_prepared_n(1, statement, std::forward(args)...).front(); + } + + /// Execute a prepared statement, and expect a result with zero rows. + /** @throw pqxx::unexpected_rows if the result contained rows. + */ + template + result exec_prepared0(zview statement, Args &&...args) + { + return exec_prepared_n(0, statement, std::forward(args)...); + } + + /// Execute a prepared statement, expect a result with given number of rows. + /** @throw pqxx::unexpected_rows if the result did not contain exactly the + * given number of rows. 
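+ *
+ * A sketch of the overall pattern, assuming a connection `cx` and a
+ * hypothetical `users` table:
+ *
+ * ```cxx
+ * cx.prepare("find_user", "SELECT id FROM users WHERE name = $1");
+ * pqxx::work tx{cx};
+ * pqxx::row r{tx.exec_prepared1("find_user", "alice")};
+ * ```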
+ */ + template + result + exec_prepared_n(result::size_type rows, zview statement, Args &&...args) + { + auto const r{exec_prepared(statement, std::forward(args)...)}; + check_rowcount_prepared(statement, rows, std::size(r)); + return r; + } + + //@} + + /** + * @name Error/warning output + */ + //@{ + /// Have connection process a warning message. + void process_notice(char const msg[]) const { m_conn.process_notice(msg); } + /// Have connection process a warning message. + void process_notice(zview msg) const { m_conn.process_notice(msg); } + //@} + + /// The connection in which this transaction lives. + [[nodiscard]] constexpr connection &conn() const noexcept { return m_conn; } + + /// Set session variable using SQL "SET" command. + /** @deprecated To set a transaction-local variable, execute an SQL `SET` + * command. To set a session variable, use the connection's + * @ref set_session_var function. + * + * @warning When setting a string value, you must make sure that the string + * is "safe." If you call @ref quote() on the string, it will return a + * safely escaped and quoted version for use as an SQL literal. + * + * @warning This function executes SQL. Do not try to set or get variables + * while a pipeline or table stream is active. + * + * @param var The variable to set. + * @param value The new value to store in the variable. This can be any SQL + * expression. + */ + [[deprecated( + "Set transaction-local variables using SQL SET statements.")]] void + set_variable(std::string_view var, std::string_view value); + + /// Read session variable using SQL "SHOW" command. + /** @warning This executes SQL. Do not try to set or get variables while a + * pipeline or table stream is active. + */ + [[deprecated("Read variables using SQL SHOW statements.")]] std::string + get_variable(std::string_view); + + // C++20: constexpr. + /// Transaction name, if you passed one to the constructor; or empty string. + [[nodiscard]] std::string_view name() const &noexcept { return m_name; } + +protected: + /// Create a transaction (to be called by implementation classes only). + /** The name, if nonempty, must begin with a letter and may contain letters + * and digits only. + */ + transaction_base( + connection &c, std::string_view tname, + std::shared_ptr rollback_cmd) : + m_conn{c}, m_name{tname}, m_rollback_cmd{rollback_cmd} + {} + + /// Create a transaction (to be called by implementation classes only). + /** Its rollback command will be "ROLLBACK". + * + * The name, if nonempty, must begin with a letter and may contain letters + * and digits only. + */ + transaction_base(connection &c, std::string_view tname); + + /// Create a transaction (to be called by implementation classes only). + explicit transaction_base(connection &c); + + /// Register this transaction with the connection. + void register_transaction(); + + /// End transaction. To be called by implementing class' destructor. + void close() noexcept; + + /// To be implemented by derived implementation class: commit transaction. + virtual void do_commit() = 0; + + /// Transaction type-specific way of aborting a transaction. + /** @warning This will become "final", since this function can be called + * from the implementing class destructor. + */ + virtual void do_abort(); + + /// Set the rollback command. + void set_rollback_cmd(std::shared_ptr cmd) + { + m_rollback_cmd = cmd; + } + + /// Execute query on connection directly. 
+ result direct_exec(std::string_view, std::string_view desc = ""sv); + result + direct_exec(std::shared_ptr, std::string_view desc = ""sv); + +private: + enum class status + { + active, + aborted, + committed, + in_doubt + }; + + PQXX_PRIVATE void check_pending_error(); + + result + internal_exec_prepared(zview statement, internal::c_params const &args); + + result internal_exec_params(zview query, internal::c_params const &args); + + /// Throw unexpected_rows if prepared statement returned wrong no. of rows. + void check_rowcount_prepared( + zview statement, result::size_type expected_rows, + result::size_type actual_rows); + + /// Throw unexpected_rows if wrong row count from parameterised statement. + void + check_rowcount_params(std::size_t expected_rows, std::size_t actual_rows); + + /// Describe this transaction to humans, e.g. "transaction 'foo'". + [[nodiscard]] std::string description() const; + + friend class pqxx::internal::gate::transaction_transaction_focus; + PQXX_PRIVATE void register_focus(transaction_focus *); + PQXX_PRIVATE void unregister_focus(transaction_focus *) noexcept; + PQXX_PRIVATE void register_pending_error(zview) noexcept; + PQXX_PRIVATE void register_pending_error(std::string &&) noexcept; + + /// Like @ref stream(), but takes a tuple rather than a parameter pack. + template + auto stream_like(std::string_view query, std::tuple const *) + { + return stream(query); + } + + connection &m_conn; + + /// Current "focus": a pipeline, a nested transaction, a stream... + /** This pointer is used for only one purpose: sanity checks against mistakes + * such as opening one while another is still active. + */ + transaction_focus const *m_focus = nullptr; + + status m_status = status::active; + bool m_registered = false; + std::string m_name; + std::string m_pending_error; + + /// SQL command for aborting this type of transaction. + std::shared_ptr m_rollback_cmd; + + static constexpr std::string_view s_type_name{"transaction"sv}; +}; + + +// C++20: Can borrowed_range help? +/// Forbidden specialisation: underlying buffer immediately goes out of scope. +template<> +std::string_view transaction_base::query_value( + zview query, std::string_view desc) = delete; +/// Forbidden specialisation: underlying buffer immediately goes out of scope. +template<> +zview transaction_base::query_value( + zview query, std::string_view desc) = delete; + +} // namespace pqxx + + +namespace pqxx::internal +{ +/// The SQL command for starting a given type of transaction. +template +extern const zview begin_cmd; + +// These are not static members, so "constexpr" does not imply "inline". 
+template<> +inline constexpr zview begin_cmd{ + "BEGIN"_zv}; +template<> +inline constexpr zview begin_cmd{ + "BEGIN READ ONLY"_zv}; +template<> +inline constexpr zview begin_cmd{ + "BEGIN ISOLATION LEVEL REPEATABLE READ"_zv}; +template<> +inline constexpr zview begin_cmd{ + "BEGIN ISOLATION LEVEL REPEATABLE READ READ ONLY"_zv}; +template<> +inline constexpr zview begin_cmd{ + "BEGIN ISOLATION LEVEL SERIALIZABLE"_zv}; +template<> +inline constexpr zview begin_cmd{ + "BEGIN ISOLATION LEVEL SERIALIZABLE READ ONLY"_zv}; +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transaction_focus b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transaction_focus new file mode 100644 index 000000000..fe78a9bcc --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transaction_focus @@ -0,0 +1,7 @@ +/** + * Transaction focus: types which monopolise a transaction's attention. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/types.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transaction_focus.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transaction_focus.hxx new file mode 100644 index 000000000..0707e3cc4 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transaction_focus.hxx @@ -0,0 +1,89 @@ +/** Transaction focus: types which monopolise a transaction's attention. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_TRANSACTION_FOCUS +#define PQXX_H_TRANSACTION_FOCUS + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/util.hxx" + +namespace pqxx +{ +/// Base class for things that monopolise a transaction's attention. +/** You probably won't need to use this class. But it can be useful to _know_ + * that a given libpqxx class is derived from it. + * + * Pipelines, SQL statements, and data streams are examples of classes derived + * from `transaction_focus`. For any given transaction, only one object of + * such a class can be active at any given time. + */ +class PQXX_LIBEXPORT transaction_focus +{ +public: + transaction_focus( + transaction_base &t, std::string_view cname, std::string_view oname) : + m_trans{t}, m_classname{cname}, m_name{oname} + {} + + transaction_focus( + transaction_base &t, std::string_view cname, std::string &&oname) : + m_trans{t}, m_classname{cname}, m_name{std::move(oname)} + {} + + transaction_focus(transaction_base &t, std::string_view cname) : + m_trans{t}, m_classname{cname} + {} + + transaction_focus() = delete; + transaction_focus(transaction_focus const &) = delete; + transaction_focus &operator=(transaction_focus const &) = delete; + + /// Class name, for human consumption. + [[nodiscard]] constexpr std::string_view classname() const noexcept + { + return m_classname; + } + + /// Name for this object, if the caller passed one; empty string otherwise. + [[nodiscard]] std::string_view name() const &noexcept { return m_name; } + + [[nodiscard]] std::string description() const + { + return pqxx::internal::describe_object(m_classname, m_name); + } + + /// Can't move a transaction_focus. 
+ /** Moving the transaction_focus would break the transaction's reference back + * to the object. + */ + transaction_focus(transaction_focus &&) = delete; + + /// Can't move a transaction_focus. + /** Moving the transaction_focus would break the transaction's reference back + * to the object. + */ + transaction_focus &operator=(transaction_focus &&) = delete; + +protected: + void register_me(); + void unregister_me() noexcept; + void reg_pending_error(std::string const &) noexcept; + bool registered() const noexcept { return m_registered; } + + transaction_base &m_trans; + +private: + bool m_registered = false; + std::string_view m_classname; + std::string m_name; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transactor b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transactor new file mode 100644 index 000000000..29d1b9640 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transactor @@ -0,0 +1,8 @@ +/** pqxx::transactor class. + * + * pqxx::transactor is a framework-style wrapper for safe transactions. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/transactor.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transactor.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transactor.hxx new file mode 100644 index 000000000..eefd04ba1 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/transactor.hxx @@ -0,0 +1,147 @@ +/* Transactor framework, a wrapper for safely retryable transactions. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/transactor instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_TRANSACTOR +#define PQXX_H_TRANSACTOR + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include + +#include "pqxx/connection.hxx" +#include "pqxx/transaction.hxx" + +namespace pqxx +{ +/** + * @defgroup transactor Transactor framework + * + * Sometimes a transaction can fail for completely transient reasons, such as a + * conflict with another transaction in SERIALIZABLE isolation. The right way + * to handle those failures is often just to re-run the transaction from + * scratch. + * + * For example, your REST API might be handling each HTTP request in its own + * database transaction, and if this kind of transient failure happens, you + * simply want to "replay" the whole request, in a fresh transaction. + * + * You won't necessarily want to execute the exact same SQL commands with the + * exact same data. Some of your SQL statements may depend on state that can + * vary between retries. Data in the database may already have changed, for + * instance. So instead of dumbly replaying the SQL, you re-run the same + * application code that produced those SQL commands, from the start. + * + * The transactor framework makes it a little easier for you to do this safely, + * and avoid typical pitfalls. You encapsulate the work that you want to do + * into a callable that you pass to the @ref perform function. + * + * Here's how it works. 
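+ * In code, the pattern described below looks roughly like this (a sketch;
+ * the connection `cx` and the `jobs` table are hypothetical):
+ *
+ * ```cxx
+ * pqxx::connection cx;
+ * int const jobs = pqxx::perform([&cx] {
+ *   pqxx::work tx{cx};
+ *   int const n = tx.query_value<int>("SELECT count(*) FROM jobs");
+ *   tx.commit();
+ *   return n;
+ * });
+ * ```
+ *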
You write your transaction code as a lambda or + * function, which creates its own transaction object, does its work, and + * commits at the end. You pass that callback to @ref pqxx::perform, which + * runs it for you. + * + * If there's a failure inside your callback, there will be an exception. Your + * transaction object goes out of scope and gets destroyed, so that it aborts + * implicitly. Seeing this, @ref perform tries running your callback again. It + * stops doing that when the callback succeeds, or when it has failed too many + * times, or when there's an error that leaves the database in an unknown + * state, such as a lost connection just while we're waiting for the database + * to confirm a commit. It all depends on the type of exception. + * + * The callback takes no arguments. If you're using lambdas, the easy way to + * pass arguments is for the lambda to "capture" them from your variables. Or, + * if you're using functions, you may want to use `std::bind`. + * + * Once your callback succeeds, it can return a result, and @ref perform will + * return that result back to you. + */ +//@{ + +/// Simple way to execute a transaction with automatic retry. +/** + * Executes your transaction code as a callback. Repeats it until it completes + * normally, or it throws an error other than the few libpqxx-generated + * exceptions that the framework understands, or after a given number of failed + * attempts, or if the transaction ends in an "in-doubt" state. + * + * (An in-doubt state is one where libpqxx cannot determine whether the server + * finally committed a transaction or not. This can happen if the network + * connection to the server is lost just while we're waiting for its reply to + * a "commit" statement. The server may have completed the commit, or not, but + * it can't tell you because there's no longer a connection. + * + * Using this still takes a bit of care. If your callback makes use of data + * from the database, you'll probably have to query that data within your + * callback. If the attempt to perform your callback fails, and the framework + * tries again, you'll be in a new transaction and the data in the database may + * have changed under your feet. + * + * Also be careful about changing variables or data structures from within + * your callback. The run may still fail, and perhaps get run again. The + * ideal way to do it (in most cases) is to return your result from your + * callback, and change your program's data state only after @ref perform + * completes successfully. + * + * @param callback Transaction code that can be called with no arguments. + * @param attempts Maximum number of times to attempt performing callback. + * Must be greater than zero. + * @return Whatever your callback returns. + */ +template +inline auto perform(TRANSACTION_CALLBACK &&callback, int attempts = 3) + -> std::invoke_result_t +{ + if (attempts <= 0) + throw std::invalid_argument{ + "Zero or negative number of attempts passed to pqxx::perform()."}; + + for (; attempts > 0; --attempts) + { + try + { + return std::invoke(callback); + } + catch (in_doubt_error const &) + { + // Not sure whether transaction went through or not. The last thing in + // the world that we should do now is try again! + throw; + } + catch (statement_completion_unknown const &) + { + // Not sure whether our last statement succeeded. Don't risk running it + // again. + throw; + } + catch (broken_connection const &) + { + // Connection failed. 
May be worth retrying, if the transactor opens its + // own connection. + if (attempts <= 1) + throw; + continue; + } + catch (transaction_rollback const &) + { + // Some error that may well be transient, such as serialization failure + // or deadlock. Worth retrying. + if (attempts <= 1) + throw; + continue; + } + } + throw pqxx::internal_error{"No outcome reached on perform()."}; +} +} // namespace pqxx +//@} +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/types b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/types new file mode 100644 index 000000000..23a5caae1 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/types @@ -0,0 +1,7 @@ +/** + * Basic typedefs and forward declarations. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/types.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/types.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/types.hxx new file mode 100644 index 000000000..f95b598f8 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/types.hxx @@ -0,0 +1,173 @@ +/* Basic type aliases and forward declarations. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_TYPES +#define PQXX_H_TYPES + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include + +#if defined(PQXX_HAVE_CONCEPTS) && __has_include() +# include +#endif + + +namespace pqxx +{ +/// Number of rows in a result set. +using result_size_type = int; + +/// Difference between result sizes. +using result_difference_type = int; + +/// Number of fields in a row of database data. +using row_size_type = int; + +/// Difference between row sizes. +using row_difference_type = int; + +/// Number of bytes in a field of database data. +using field_size_type = std::size_t; + +/// Number of bytes in a large object. +using large_object_size_type = int64_t; + + +// Forward declarations, to help break compilation dependencies. +// These won't necessarily include all classes in libpqxx. +class binarystring; +class connection; +class const_result_iterator; +class const_reverse_result_iterator; +class const_reverse_row_iterator; +class const_row_iterator; +class dbtransaction; +class field; +class largeobjectaccess; +class notification_receiver; +struct range_error; +class result; +class row; +class stream_from; +class transaction_base; + +/// Marker for @ref stream_from constructors: "stream from table." +/** @deprecated Use @ref stream_from::table() instead. + */ +struct from_table_t +{}; + +/// Marker for @ref stream_from constructors: "stream from query." +/** @deprecated Use @ref stream_from::query() instead. + */ +struct from_query_t +{}; + + +/// Format code: is data text or binary? +/** Binary-compatible with libpq's format codes. + */ +enum class format : int +{ + text = 0, + binary = 1, +}; + + +/// Remove any constness, volatile, and reference-ness from a type. +/** @deprecated In C++20 we'll replace this with std::remove_cvref. + */ +template +using strip_t = std::remove_cv_t>; + + +#if defined(PQXX_HAVE_CONCEPTS) +/// The type of a container's elements. 
+/** At the time of writing there's a similar thing in `std::experimental`, + * which we may or may not end up using for this. + */ +template +using value_type = strip_t()))>; +#else // PQXX_HAVE_CONCEPTS +/// The type of a container's elements. +/** At the time of writing there's a similar thing in `std::experimental`, + * which we may or may not end up using for this. + */ +template +using value_type = strip_t()))>; +#endif // PQXX_HAVE_CONCEPTS + + +#if defined(PQXX_HAVE_CONCEPTS) +/// Concept: Any type that we can read as a string of `char`. +template +concept char_string = std::ranges::contiguous_range and + std::same_as < strip_t>, +char > ; + +/// Concept: Anything we can iterate to get things we can read as strings. +template +concept char_strings = + std::ranges::range and char_string>>; + +/// Concept: Anything we might want to treat as binary data. +template +concept potential_binary = std::ranges::contiguous_range and + (sizeof(value_type) == 1); +#endif // PQXX_HAVE_CONCEPTS + + +// C++20: Retire these compatibility definitions. +#if defined(PQXX_HAVE_CONCEPTS) + +/// Template argument type for a range. +/** This is a concept, so only available in C++20 or better. In pre-C++20 + * environments it's just an alias for @ref typename. + */ +# define PQXX_RANGE_ARG std::ranges::range + +/// Template argument type for @ref char_string. +/** This is a concept, so only available in C++20 or better. In pre-C++20 + * environments it's just an alias for @ref typename. + */ +# define PQXX_CHAR_STRING_ARG pqxx::char_string + +/// Template argument type for @ref char_strings +/** This is a concept, so only available in C++20 or better. In pre-C++20 + * environments it's just an alias for @ref typename. + */ +# define PQXX_CHAR_STRINGS_ARG pqxx::char_strings + +#else // PQXX_HAVE_CONCEPTS + +/// Template argument type for a range. +/** This is a concept, so only available in C++20 or better. In pre-C++20 + * environments it's just an alias for @ref typename. + */ +# define PQXX_RANGE_ARG typename + +/// Template argument type for @ref char_string. +/** This is a concept, so only available in C++20 or better. In pre-C++20 + * environments it's just an alias for @ref typename. + */ +# define PQXX_CHAR_STRING_ARG typename + +/// Template argument type for @ref char_strings +/** This is a concept, so only available in C++20 or better. In pre-C++20 + * environments it's just an alias for @ref typename. + */ +# define PQXX_CHAR_STRINGS_ARG typename + +#endif // PQXX_HAVE_CONCEPTS +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/util b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/util new file mode 100644 index 000000000..6d85ab611 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/util @@ -0,0 +1,6 @@ +/** Various utility definitions for libpqxx. + */ +// Actual definitions in .hxx file so editors and such recognize file type +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/util.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/util.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/util.hxx new file mode 100644 index 000000000..4aa5ecf57 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/util.hxx @@ -0,0 +1,521 @@ +/* Various utility definitions for libpqxx. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/util instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. 
+ * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_UTIL +#define PQXX_H_UTIL + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if __has_include() +# include +#endif + +#include "pqxx/except.hxx" +#include "pqxx/internal/encodings.hxx" +#include "pqxx/types.hxx" +#include "pqxx/version.hxx" + + +/// The home of all libpqxx classes, functions, templates, etc. +namespace pqxx +{} + +#include + + +/// Internal items for libpqxx' own use. Do not use these yourself. +namespace pqxx::internal +{ + +// C++20: Retire wrapper. +/// Same as `std::cmp_less`, or a workaround where that's not available. +template +inline constexpr bool cmp_less(LEFT lhs, RIGHT rhs) noexcept +{ +#if defined(PQXX_HAVE_CMP) + return std::cmp_less(lhs, rhs); +#else + // We need a variable just because lgtm.com gives off a false positive + // warning when we compare the values directly. It considers that a + // "self-comparison." + constexpr bool left_signed{std::is_signed_v}; + if constexpr (left_signed == std::is_signed_v) + return lhs < rhs; + else if constexpr (std::is_signed_v) + return (lhs <= 0) ? true : (std::make_unsigned_t(lhs) < rhs); + else + return (rhs <= 0) ? false : (lhs < std::make_unsigned_t(rhs)); +#endif +} + + +// C++20: Retire wrapper. +/// C++20 std::cmp_greater, or workaround if not available. +template +inline constexpr bool cmp_greater(LEFT lhs, RIGHT rhs) noexcept +{ +#if defined(PQXX_HAVE_CMP) + return std::cmp_greater(lhs, rhs); +#else + return cmp_less(rhs, lhs); +#endif +} + + +// C++20: Retire wrapper. +/// C++20 std::cmp_less_equal, or workaround if not available. +template +inline constexpr bool cmp_less_equal(LEFT lhs, RIGHT rhs) noexcept +{ +#if defined(PQXX_HAVE_CMP) + return std::cmp_less_equal(lhs, rhs); +#else + return not cmp_less(rhs, lhs); +#endif +} + + +// C++20: Retire wrapper. +/// C++20 std::cmp_greater_equal, or workaround if not available. +template +inline constexpr bool cmp_greater_equal(LEFT lhs, RIGHT rhs) noexcept +{ +#if defined(PQXX_HAVE_CMP) + return std::cmp_greater_equal(lhs, rhs); +#else + return not cmp_less(lhs, rhs); +#endif +} + + +/// Efficiently concatenate two strings. +/** This is a special case of concatenate(), needed because dependency + * management does not let us use that function here. + */ +[[nodiscard]] inline std::string cat2(std::string_view x, std::string_view y) +{ + std::string buf; + auto const xs{std::size(x)}, ys{std::size(y)}; + buf.resize(xs + ys); + x.copy(std::data(buf), xs); + y.copy(std::data(buf) + xs, ys); + return buf; +} +} // namespace pqxx::internal + + +namespace pqxx +{ +using namespace std::literals; + +/// Suppress compiler warning about an unused item. +template inline constexpr void ignore_unused(T &&...) noexcept +{} + + +/// Cast a numeric value to another type, or throw if it underflows/overflows. +/** Both types must be arithmetic types, and they must either be both integral + * or both floating-point types. 
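+ *
+ * For instance (a sketch; `items` is a hypothetical container):
+ *
+ * ```cxx
+ * // Throws pqxx::range_error if the size does not fit in an int.
+ * int const n{pqxx::check_cast<int>(std::size(items), "item count")};
+ * ```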
+ */ +template +inline TO check_cast(FROM value, std::string_view description) +{ + static_assert(std::is_arithmetic_v); + static_assert(std::is_arithmetic_v); + static_assert(std::is_integral_v == std::is_integral_v); + + // The rest of this code won't quite work for bool, but bool is trivially + // convertible to other arithmetic types as far as I can see. + if constexpr (std::is_same_v) + return static_cast(value); + + // Depending on our "if constexpr" conditions, this parameter may not be + // needed. Some compilers will warn. + ignore_unused(description); + + using from_limits = std::numeric_limits; + using to_limits = std::numeric_limits; + if constexpr (std::is_signed_v) + { + if constexpr (std::is_signed_v) + { + if (value < to_limits::lowest()) + throw range_error{internal::cat2("Cast underflow: "sv, description)}; + } + else + { + // FROM is signed, but TO is not. Treat this as a special case, because + // there may not be a good broader type in which the compiler can even + // perform our check. + if (value < 0) + throw range_error{internal::cat2( + "Casting negative value to unsigned type: "sv, description)}; + } + } + else + { + // No need to check: the value is unsigned so can't fall below the range + // of the TO type. + } + + if constexpr (std::is_integral_v) + { + using unsigned_from = std::make_unsigned_t; + using unsigned_to = std::make_unsigned_t; + constexpr auto from_max{static_cast((from_limits::max)())}; + constexpr auto to_max{static_cast((to_limits::max)())}; + if constexpr (from_max > to_max) + { + if (internal::cmp_greater(value, to_max)) + throw range_error{internal::cat2("Cast overflow: "sv, description)}; + } + } + else if constexpr ((from_limits::max)() > (to_limits::max)()) + { + if (value > (to_limits::max)()) + throw range_error{internal::cat2("Cast overflow: ", description)}; + } + + return static_cast(value); +} + + +/** Check library version at link time. + * + * Ensures a failure when linking an application against a radically + * different libpqxx version than the one against which it was compiled. + * + * Sometimes application builds fail in unclear ways because they compile + * using headers from libpqxx version X, but then link against libpqxx + * binary version Y. A typical scenario would be one where you're building + * against a libpqxx which you have built yourself, but a different version + * is installed on the system. + * + * The check_library_version template is declared for any library version, + * but only actually defined for the version of the libpqxx binary against + * which the code is linked. + * + * If the library binary is a different version than the one declared in + * these headers, then this call will fail to link: there will be no + * definition for the function with these exact template parameter values. + * There will be a definition, but the version in the parameter values will + * be different. + */ +inline PQXX_PRIVATE void check_version() noexcept +{ + // There is no particular reason to do this here in @ref connection, except + // to ensure that every meaningful libpqxx client will execute it. The call + // must be in the execution path somewhere or the compiler won't try to link + // it. We can't use it to initialise a global or class-static variable, + // because a smart compiler might resolve it at compile time. + // + // On the other hand, we don't want to make a useless function call too + // often for performance reasons. A local static variable is initialised + // only on the definition's first execution. 
Compilers will be well + // optimised for this behaviour, so there's a minimal one-time cost. + static auto const version_ok{internal::PQXX_VERSION_CHECK()}; + ignore_unused(version_ok); +} + + +/// Descriptor of library's thread-safety model. +/** This describes what the library knows about various risks to thread-safety. + */ +struct PQXX_LIBEXPORT thread_safety_model +{ + /// Is the underlying libpq build thread-safe? + bool safe_libpq = false; + + /// Is Kerberos thread-safe? + /** @warning Is currently always `false`. + * + * If your application uses Kerberos, all accesses to libpqxx or Kerberos + * must be serialized. Confine their use to a single thread, or protect it + * with a global lock. + */ + bool safe_kerberos = false; + + /// A human-readable description of any thread-safety issues. + std::string description; +}; + + +/// Describe thread safety available in this build. +[[nodiscard]] PQXX_LIBEXPORT thread_safety_model describe_thread_safety(); + + +#if defined(PQXX_HAVE_CONCEPTS) +# define PQXX_POTENTIAL_BINARY_ARG pqxx::potential_binary +#else +# define PQXX_POTENTIAL_BINARY_ARG typename +#endif + + +/// Cast binary data to a type that libpqxx will recognise as binary. +/** There are many different formats for storing binary data in memory. You + * may have yours as a `std::string`, or a `std::vector`, or one of + * many other types. + * + * But for libpqxx to recognise your data as binary, it needs to be a + * `std::basic_string`, or a `std::basic_string_view`; + * or in C++20 or better, any contiguous block of `std::byte`. + * + * Use `binary_cast` as a convenience helper to cast your data as a + * `std::basic_string_view`. + * + * @warning There are two things you should be aware of! First, the data must + * be contiguous in memory. In C++20 the compiler will enforce this, but in + * C++17 it's your own problem. Second, you must keep the object where you + * store the actual data alive for as long as you might use this function's + * return value. + */ +template +std::basic_string_view binary_cast(TYPE const &data) +{ + static_assert(sizeof(value_type) == 1); + return { + reinterpret_cast( + const_cast const *>( + std::data(data))), + std::size(data)}; +} + + +#if defined(PQXX_HAVE_CONCEPTS) +template +concept char_sized = (sizeof(CHAR) == 1); +# define PQXX_CHAR_SIZED_ARG char_sized +#else +# define PQXX_CHAR_SIZED_ARG typename +#endif + +/// Construct a type that libpqxx will recognise as binary. +/** Takes a data pointer and a size, without being too strict about their + * types, and constructs a `std::basic_string_view` pointing to + * the same data. + * + * This makes it a little easier to turn binary data, in whatever form you + * happen to have it, into binary data as libpqxx understands it. + */ +template +std::basic_string_view binary_cast(CHAR const *data, SIZE size) +{ + static_assert(sizeof(CHAR) == 1); + return { + reinterpret_cast(data), + check_cast(size, "binary data size")}; +} + + +/// The "null" oid. +constexpr oid oid_none{0}; +} // namespace pqxx + + +/// Private namespace for libpqxx's internal use; do not access. +/** This namespace hides definitions internal to libpqxx. These are not + * supposed to be used by client programs, and they may change at any time + * without notice. + * + * Conversely, if you find something in this namespace tremendously useful, by + * all means do lodge a request for its publication. + * + * @warning Here be dragons! 
+ */ +namespace pqxx::internal +{ +using namespace std::literals; + + +/// A safer and more generic replacement for `std::isdigit`. +/** Turns out `std::isdigit` isn't as easy to use as it sounds. It takes an + * `int`, but requires it to be nonnegative. Which means it's an outright + * liability on systems where `char` is signed. + */ +template inline constexpr bool is_digit(CHAR c) noexcept +{ + return (c >= '0') and (c <= '9'); +} + + +/// Describe an object for humans, based on class name and optional name. +/** Interprets an empty name as "no name given." + */ +[[nodiscard]] std::string +describe_object(std::string_view class_name, std::string_view name); + + +/// Check validity of registering a new "guest" in a "host." +/** The host might be e.g. a connection, and the guest a transaction. The + * host can only have one guest at a time, so it is an error to register a new + * guest while the host already has a guest. + * + * If the new registration is an error, this function throws a descriptive + * exception. + * + * Pass the old guest (if any) and the new guest (if any), for both, a type + * name (at least if the guest is not null), and optionally an object name + * (but which may be omitted if the caller did not assign one). + */ +void check_unique_register( + void const *old_guest, std::string_view old_class, std::string_view old_name, + void const *new_guest, std::string_view new_class, + std::string_view new_name); + + +/// Like @ref check_unique_register, but for un-registering a guest. +/** Pass the guest which was registered, as well as the guest which is being + * unregistered, so that the function can check that they are the same one. + */ +void check_unique_unregister( + void const *old_guest, std::string_view old_class, std::string_view old_name, + void const *new_guest, std::string_view new_class, + std::string_view new_name); + + +/// Compute buffer size needed to escape binary data for use as a BYTEA. +/** This uses the hex-escaping format. The return value includes room for the + * "\x" prefix. + */ +inline constexpr std::size_t size_esc_bin(std::size_t binary_bytes) noexcept +{ + return 2 + (2 * binary_bytes) + 1; +} + + +/// Compute binary size from the size of its escaped version. +/** Do not include a terminating zero in `escaped_bytes`. + */ +inline constexpr std::size_t size_unesc_bin(std::size_t escaped_bytes) noexcept +{ + return (escaped_bytes - 2) / 2; +} + + +// TODO: Use actual binary type for "data". +/// Hex-escape binary data into a buffer. +/** The buffer must be able to accommodate + * `size_esc_bin(std::size(binary_data))` bytes, and the function will write + * exactly that number of bytes into the buffer. This includes a trailing + * zero. + */ +void PQXX_LIBEXPORT +esc_bin(std::basic_string_view binary_data, char buffer[]) noexcept; + + +/// Hex-escape binary data into a std::string. +std::string PQXX_LIBEXPORT +esc_bin(std::basic_string_view binary_data); + + +/// Reconstitute binary data from its escaped version. +void PQXX_LIBEXPORT +unesc_bin(std::string_view escaped_data, std::byte buffer[]); + + +/// Reconstitute binary data from its escaped version. +std::basic_string + PQXX_LIBEXPORT unesc_bin(std::string_view escaped_data); + + +/// Transitional: std::ssize(), or custom implementation if not available. 
+template auto ssize(T const &c) +{ +#if defined(__cpp_lib_ssize) && __cplusplus >= __cpp_lib_ssize + return std::ssize(c); +#else + using signed_t = std::make_signed_t; + return static_cast(std::size(c)); +#endif // __cpp_lib_ssize +} + + +/// Helper for determining a function's parameter types. +/** This function has no definition. It's not meant to be actually called. + * It's just there for pattern-matching in the compiler, so we can use its + * hypothetical return value. + */ +template +std::tuple args_f(RETURN (&func)(ARGS...)); + + +/// Helper for determining a `std::function`'s parameter types. +/** This function has no definition. It's not meant to be actually called. + * It's just there for pattern-matching in the compiler, so we can use its + * hypothetical return value. + */ +template +std::tuple args_f(std::function const &); + + +/// Helper for determining a member function's parameter types. +/** This function has no definition. It's not meant to be actually called. + * It's just there for pattern-matching in the compiler, so we can use its + * hypothetical return value. + */ +template +std::tuple member_args_f(RETURN (CLASS::*)(ARGS...)); + + +/// Helper for determining a const member function's parameter types. +/** This function has no definition. It's not meant to be actually called. + * It's just there for pattern-matching in the compiler, so we can use its + * hypothetical return value. + */ +template +std::tuple member_args_f(RETURN (CLASS::*)(ARGS...) const); + + +/// Helper for determining a callable type's parameter types. +/** This specialisation should work for lambdas. + * + * This function has no definition. It's not meant to be actually called. + * It's just there for pattern-matching in the compiler, so we can use its + * hypothetical return value. + */ +template +auto args_f(CALLABLE const &f) + -> decltype(member_args_f(&CALLABLE::operator())); + + +/// A callable's parameter types, as a tuple. +template +using args_t = decltype(args_f(std::declval())); + + +/// Helper: Apply `strip_t` to each of a tuple type's component types. +/** This function has no definition. It is not meant to be called, only to be + * used to deduce the right types. + */ +template +std::tuple...> strip_types(std::tuple const &); + + +/// Take a tuple type and apply @ref strip_t to its component types. +template +using strip_types_t = decltype(strip_types(std::declval())); +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/version b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/version new file mode 100644 index 000000000..8dd5e48d4 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/version @@ -0,0 +1,7 @@ +/** libpqxx version info. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/version.hxx" +#include "pqxx/internal/header-post.hxx" + diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/version.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/version.hxx new file mode 100644 index 000000000..a159f1bed --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/version.hxx @@ -0,0 +1,55 @@ +/* Version info for libpqxx. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/version instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. 
If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_VERSION + +# if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +# endif + +/// Full libpqxx version string. +# define PQXX_VERSION "7.7.3" +/// Library ABI version. +# define PQXX_ABI "7.7" + +/// Major version number. +# define PQXX_VERSION_MAJOR 7 +/// Minor version number. +# define PQXX_VERSION_MINOR 7 + +# define PQXX_VERSION_CHECK check_pqxx_version_7_7 + +namespace pqxx::internal +{ +/// Library version check stub. +/** Helps detect version mismatches between libpqxx headers and the libpqxx + * library binary. + * + * Sometimes users run into trouble linking their code against libpqxx because + * they build their own libpqxx, but the system also has a different version + * installed. The declarations in the headers against which they compile their + * code will differ from the ones used to build the libpqxx version they're + * using, leading to confusing link errors. The solution is to generate a link + * error when the libpqxx binary is not the same version as the libpqxx headers + * used to compile the code. + * + * This function's definition is in the libpqxx binary, so it's based on the + * version as found in the binary. The headers contain a call to the function, + * whose name contains the libpqxx version as found in the headers. (The + * library build process will use its own local headers even if another version + * of the headers is installed on the system.) + * + * If the libpqxx binary was compiled for a different version than the user's + * code, linking will fail with an error: `check_pqxx_version_*_*` will not + * exist for the given version number. + */ +PQXX_LIBEXPORT int PQXX_VERSION_CHECK() noexcept; +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/zview b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/zview new file mode 100644 index 000000000..66ea2a625 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/zview @@ -0,0 +1,6 @@ +/** Zero-terminated string view class. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/zview.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/zview.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/zview.hxx new file mode 100644 index 000000000..36a779f51 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/include/pqxx/zview.hxx @@ -0,0 +1,163 @@ +/* Zero-terminated string view. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/zview instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ZVIEW +#define PQXX_H_ZVIEW + +#include +#include +#include + +#include "pqxx/types.hxx" + + +namespace pqxx +{ +/// Marker-type wrapper: zero-terminated `std::string_view`. +/** @warning Use this only if the underlying string is zero-terminated. + * + * When you construct a zview, you are promising that if the data pointer is + * non-null, the underlying string is zero-terminated. It otherwise behaves + * exactly like a std::string_view. 
+ * + * The terminating zero is not "in" the string, so it does not count as part of + * the view's length. + * + * The added guarantee lets the view be used as a C-style string, which often + * matters since libpqxx builds on top of a C library. For this reason, zview + * also adds a @ref c_str method. + */ +class zview : public std::string_view +{ +public: + constexpr zview() noexcept = default; + + /// Convenience overload: construct using pointer and signed length. + constexpr zview(char const text[], std::ptrdiff_t len) : + std::string_view{text, static_cast(len)} + {} + + /// Convenience overload: construct using pointer and signed length. + constexpr zview(char text[], std::ptrdiff_t len) : + std::string_view{text, static_cast(len)} + {} + + /// Explicitly promote a `string_view` to a `zview`. + explicit constexpr zview(std::string_view other) noexcept : + std::string_view{other} + {} + + /// Construct from any initialiser you might use for `std::string_view`. + /** @warning Only do this if you are sure that the string is zero-terminated. + */ + template + explicit constexpr zview(Args &&...args) : + std::string_view(std::forward(args)...) + {} + + // C++20: constexpr. + /// @warning There's an implicit conversion from `std::string`. + zview(std::string const &str) noexcept : + std::string_view{str.c_str(), str.size()} + {} + + /// Construct a `zview` from a C-style string. + /** @warning This scans the string to discover its length. So if you need to + * do it many times, it's probably better to create the `zview` once and + * re-use it. + */ + constexpr zview(char const str[]) : std::string_view{str} {} + + /// Construct a `zview` from a string literal. + /** A C++ string literal ("foo") normally looks a lot like a pointer to + * char const, but that's not really true. It's actually an array of char, + * which _devolves_ to a pointer when you pass it. + * + * For the purpose of creating a `zview` there is one big difference: if we + * know the array's size, we don't need to scan through the string in order + * to find out its length. + */ + template + constexpr zview(char const (&literal)[size]) : zview(literal, size - 1) + {} + + /// Either a null pointer, or a zero-terminated text buffer. + [[nodiscard]] constexpr char const *c_str() const &noexcept + { + return data(); + } +}; + + +/// Support @ref zview literals. +/** You can "import" this selectively into your namespace, without pulling in + * all of the @ref pqxx namespace: + * + * ```cxx + * using pqxx::operator"" _zv; + * ``` + */ +constexpr zview operator"" _zv(char const str[], std::size_t len) noexcept +{ + return zview{str, len}; +} +} // namespace pqxx + + +#if defined(PQXX_HAVE_CONCEPTS) +/// A zview is a view. +template<> inline constexpr bool std::ranges::enable_view{true}; + + +/// A zview is a borrowed range. +template<> +inline constexpr bool std::ranges::enable_borrowed_range{true}; + +namespace pqxx::internal +{ +/// Concept: T is a known zero-terminated string type. +/** There's no unified API for these string types. It's just a check for some + * known types. Any code that makes use of the concept will still have to + * support each of these individually. + */ +template +concept ZString = std::is_convertible_v < strip_t, +char const * > or std::is_convertible_v, zview> or + std::is_convertible_v; +} // namespace pqxx::internal +#endif // PQXX_HAVE_CONCEPTS + + +namespace pqxx::internal +{ +/// Get a raw C string pointer. 
+inline constexpr char const *as_c_string(char const str[]) noexcept +{ + return str; +} +/// Get a raw C string pointer. +template +inline constexpr char const *as_c_string(char (&str)[N]) noexcept +{ + return str; +} +/// Get a raw C string pointer. +inline constexpr char const *as_c_string(pqxx::zview str) noexcept +{ + return str.c_str(); +} +// C++20: Make this constexpr. +/// Get a raw C string pointer. +inline char const *as_c_string(std::string const &str) noexcept +{ + return str.c_str(); +} +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/lib/cmake/libpqxx/libpqxx-config-version.cmake b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/lib/cmake/libpqxx/libpqxx-config-version.cmake new file mode 100644 index 000000000..c47d6956d --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/lib/cmake/libpqxx/libpqxx-config-version.cmake @@ -0,0 +1,70 @@ +# This is a basic version file for the Config-mode of find_package(). +# It is used by write_basic_package_version_file() as input file for configure_file() +# to create a version-file which can be installed along a config.cmake file. +# +# The created file sets PACKAGE_VERSION_EXACT if the current version string and +# the requested version string are exactly the same and it sets +# PACKAGE_VERSION_COMPATIBLE if the current version is >= requested version, +# but only if the requested major version is the same as the current one. +# The variable CVF_VERSION must be set before calling configure_file(). + + +set(PACKAGE_VERSION "7.7.3") + +if(PACKAGE_VERSION VERSION_LESS PACKAGE_FIND_VERSION) + set(PACKAGE_VERSION_COMPATIBLE FALSE) +else() + + if("7.7.3" MATCHES "^([0-9]+)\\.") + set(CVF_VERSION_MAJOR "${CMAKE_MATCH_1}") + if(NOT CVF_VERSION_MAJOR VERSION_EQUAL 0) + string(REGEX REPLACE "^0+" "" CVF_VERSION_MAJOR "${CVF_VERSION_MAJOR}") + endif() + else() + set(CVF_VERSION_MAJOR "7.7.3") + endif() + + if(PACKAGE_FIND_VERSION_RANGE) + # both endpoints of the range must have the expected major version + math (EXPR CVF_VERSION_MAJOR_NEXT "${CVF_VERSION_MAJOR} + 1") + if (NOT PACKAGE_FIND_VERSION_MIN_MAJOR STREQUAL CVF_VERSION_MAJOR + OR ((PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "INCLUDE" AND NOT PACKAGE_FIND_VERSION_MAX_MAJOR STREQUAL CVF_VERSION_MAJOR) + OR (PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "EXCLUDE" AND NOT PACKAGE_FIND_VERSION_MAX VERSION_LESS_EQUAL CVF_VERSION_MAJOR_NEXT))) + set(PACKAGE_VERSION_COMPATIBLE FALSE) + elseif(PACKAGE_FIND_VERSION_MIN_MAJOR STREQUAL CVF_VERSION_MAJOR + AND ((PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "INCLUDE" AND PACKAGE_VERSION VERSION_LESS_EQUAL PACKAGE_FIND_VERSION_MAX) + OR (PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "EXCLUDE" AND PACKAGE_VERSION VERSION_LESS PACKAGE_FIND_VERSION_MAX))) + set(PACKAGE_VERSION_COMPATIBLE TRUE) + else() + set(PACKAGE_VERSION_COMPATIBLE FALSE) + endif() + else() + if(PACKAGE_FIND_VERSION_MAJOR STREQUAL CVF_VERSION_MAJOR) + set(PACKAGE_VERSION_COMPATIBLE TRUE) + else() + set(PACKAGE_VERSION_COMPATIBLE FALSE) + endif() + + if(PACKAGE_FIND_VERSION STREQUAL PACKAGE_VERSION) + set(PACKAGE_VERSION_EXACT TRUE) + endif() + endif() +endif() + + +# if the installed project requested no architecture check, don't perform the check +if("FALSE") + return() +endif() + +# if the installed or the using project don't have CMAKE_SIZEOF_VOID_P set, ignore it: +if("${CMAKE_SIZEOF_VOID_P}" STREQUAL "" OR "8" STREQUAL "") + return() +endif() + +# check that the installed version has the same 32/64bit-ness as the one which is currently searching: 
+if(NOT CMAKE_SIZEOF_VOID_P STREQUAL "8") + math(EXPR installedBits "8 * 8") + set(PACKAGE_VERSION "${PACKAGE_VERSION} (${installedBits}bit)") + set(PACKAGE_VERSION_UNSUITABLE TRUE) +endif() diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/lib/cmake/libpqxx/libpqxx-config.cmake b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/lib/cmake/libpqxx/libpqxx-config.cmake new file mode 100644 index 000000000..cb25a05f2 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/lib/cmake/libpqxx/libpqxx-config.cmake @@ -0,0 +1,4 @@ +include(CMakeFindDependencyMacro) +find_dependency(PostgreSQL) + +include("${CMAKE_CURRENT_LIST_DIR}/libpqxx-targets.cmake") diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/lib/cmake/libpqxx/libpqxx-targets-noconfig.cmake b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/lib/cmake/libpqxx/libpqxx-targets-noconfig.cmake new file mode 100644 index 000000000..980f46098 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/lib/cmake/libpqxx/libpqxx-targets-noconfig.cmake @@ -0,0 +1,19 @@ +#---------------------------------------------------------------- +# Generated CMake target import file. +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Import target "libpqxx::pqxx" for configuration "" +set_property(TARGET libpqxx::pqxx APPEND PROPERTY IMPORTED_CONFIGURATIONS NOCONFIG) +set_target_properties(libpqxx::pqxx PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES_NOCONFIG "CXX" + IMPORTED_LOCATION_NOCONFIG "${_IMPORT_PREFIX}/lib/libpqxx-7.7.a" + ) + +list(APPEND _IMPORT_CHECK_TARGETS libpqxx::pqxx ) +list(APPEND _IMPORT_CHECK_FILES_FOR_libpqxx::pqxx "${_IMPORT_PREFIX}/lib/libpqxx-7.7.a" ) + +# Commands beyond this point should not need to know the version. +set(CMAKE_IMPORT_FILE_VERSION) diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/lib/cmake/libpqxx/libpqxx-targets.cmake b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/lib/cmake/libpqxx/libpqxx-targets.cmake new file mode 100644 index 000000000..4716fb7b2 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/lib/cmake/libpqxx/libpqxx-targets.cmake @@ -0,0 +1,99 @@ +# Generated by CMake + +if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.6) + message(FATAL_ERROR "CMake >= 2.6.0 required") +endif() +cmake_policy(PUSH) +cmake_policy(VERSION 2.6...3.20) +#---------------------------------------------------------------- +# Generated CMake target import file. +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Protect against multiple inclusion, which would fail when already imported targets are added once more. 
+set(_targetsDefined) +set(_targetsNotDefined) +set(_expectedTargets) +foreach(_expectedTarget libpqxx::pqxx) + list(APPEND _expectedTargets ${_expectedTarget}) + if(NOT TARGET ${_expectedTarget}) + list(APPEND _targetsNotDefined ${_expectedTarget}) + endif() + if(TARGET ${_expectedTarget}) + list(APPEND _targetsDefined ${_expectedTarget}) + endif() +endforeach() +if("${_targetsDefined}" STREQUAL "${_expectedTargets}") + unset(_targetsDefined) + unset(_targetsNotDefined) + unset(_expectedTargets) + set(CMAKE_IMPORT_FILE_VERSION) + cmake_policy(POP) + return() +endif() +if(NOT "${_targetsDefined}" STREQUAL "") + message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_targetsDefined}\nTargets not yet defined: ${_targetsNotDefined}\n") +endif() +unset(_targetsDefined) +unset(_targetsNotDefined) +unset(_expectedTargets) + + +# Compute the installation prefix relative to this file. +get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +if(_IMPORT_PREFIX STREQUAL "/") + set(_IMPORT_PREFIX "") +endif() + +# Create imported target libpqxx::pqxx +add_library(libpqxx::pqxx STATIC IMPORTED) + +set_target_properties(libpqxx::pqxx PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include" + INTERFACE_LINK_LIBRARIES "/usr/lib/x86_64-linux-gnu/libpq.so" +) + +if(CMAKE_VERSION VERSION_LESS 2.8.12) + message(FATAL_ERROR "This file relies on consumers using CMake 2.8.12 or greater.") +endif() + +# Load information for each installed configuration. +get_filename_component(_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) +file(GLOB CONFIG_FILES "${_DIR}/libpqxx-targets-*.cmake") +foreach(f ${CONFIG_FILES}) + include(${f}) +endforeach() + +# Cleanup temporary variables. +set(_IMPORT_PREFIX) + +# Loop over all imported files and verify that they actually exist +foreach(target ${_IMPORT_CHECK_TARGETS} ) + foreach(file ${_IMPORT_CHECK_FILES_FOR_${target}} ) + if(NOT EXISTS "${file}" ) + message(FATAL_ERROR "The imported target \"${target}\" references the file + \"${file}\" +but this file does not exist. Possible reasons include: +* The file was deleted, renamed, or moved to another location. +* An install or uninstall procedure did not complete successfully. +* The installation package was faulty and contained + \"${CMAKE_CURRENT_LIST_FILE}\" +but not all the files it references. +") + endif() + endforeach() + unset(_IMPORT_CHECK_FILES_FOR_${target}) +endforeach() +unset(_IMPORT_CHECK_TARGETS) + +# This file does not depend on other imported targets which have +# been exported from the same project but in a separate export set. + +# Commands beyond this point should not need to know the version. 
+set(CMAKE_IMPORT_FILE_VERSION) +cmake_policy(POP) diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/lib/libpqxx-7.7.a b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/lib/libpqxx-7.7.a new file mode 100644 index 000000000..fb941debb Binary files /dev/null and b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/lib/libpqxx-7.7.a differ diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/lib/libpqxx.a b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/lib/libpqxx.a new file mode 120000 index 000000000..d9fcdab85 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/lib/libpqxx.a @@ -0,0 +1 @@ +libpqxx-7.7.a \ No newline at end of file diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/accessing-results.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/accessing-results.md new file mode 100644 index 000000000..920fb6f3b --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/accessing-results.md @@ -0,0 +1,157 @@ +Accessing results and result rows {#accessing-results} +--------------------------------- + +When you execute a query using one of the transaction `exec` functions, you +normally get a `result` object back. A `result` is a container of `row`s. + +(There are exceptions. The `exec1` functions expect exactly one row of data, +so they return just a `row`, not a full `result`.) + +Result objects are an all-or-nothing affair. The `exec` function waits until +it's received all the result data, and then gives it to you in the form of the +`result`. _(There is a faster, easier way of executing simple queries, so see +"streaming rows" below as well.)_ + +For example, your code might do: + +```cxx + pqxx::result r = tx.exec("SELECT * FROM mytable"); +``` + +Now, how do you access the data inside `r`? + +Result sets act as standard C++ containers of rows. Rows act as standard +C++ containers of fields. So the easiest way to go through them is: + +```cxx + for (auto const &row: r) + { + for (auto const &field: row) std::cout << field.c_str() << '\t'; + std::cout << '\n'; + } +``` + +But results and rows also support other kinds of access. Array-style +indexing, for instance, such as `r[rownum]`: + +```cxx + std::size_t const num_rows = std::size(r); + for (std::size_t rownum=0u; rownum < num_rows; ++rownum) + { + pqxx::row const row = r[rownum]; + std::size_t const num_cols = std::size(row); + for (std::size_t colnum=0u; colnum < num_cols; ++colnum) + { + pqxx::field const field = row[colnum]; + std::cout << field.c_str() << '\t'; + } + + std::cout << '\n'; + } +``` + +Every row in the result has the same number of columns, so you don't need to +look up the number of fields again for each one: + +```cxx + std::size_t const num_rows = std::size(r); + std::size_t const num_cols = r.columns(); + for (std::size_t rownum=0u; rownum < num_rows; ++rownum) + { + pqxx::row const row = r[rownum]; + for (std::size_t colnum=0u; colnum < num_cols; ++colnum) + { + pqxx::field const field = row[colnum]; + std::cout << field.c_str() << '\t'; + } + + std::cout << '\n'; + } +``` + +You can even address a field by indexing the `row` using the field's _name:_ + +```cxx + std::cout << row["salary"] << '\n'; +``` + +But try not to do that if speed matters, because looking up the column by name +takes time. At least you'd want to look up the column index before your loop +and then use numerical indexes inside the loop. 
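+
+For instance (a rough sketch, not one of the library's own examples: it assumes
+the usual `pqxx::result::column_number` lookup and an illustrative `employee`
+table), you could resolve the column number once, before the loop:
+
+```cxx
+    pqxx::result r = tx.exec("SELECT name, salary FROM employee");
+    // Look the column up by name just once...
+    auto const salary_col = r.column_number("salary");
+    for (auto const &row: r)
+      // ...and use the numerical index inside the loop.
+      std::cout << row[salary_col].as<long>() << '\n';
+```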
+
+For C++23 or better, there's also a two-dimensional array access operator:
+
+```cxx
+    for (std::size_t rownum=0u; rownum < num_rows; ++rownum)
+    {
+      for (std::size_t colnum=0u; colnum < num_cols; ++colnum)
+        std::cout << r[rownum, colnum].c_str() << '\t';
+      std::cout << '\n';
+    }
+```
+
+And of course you can use classic "begin/end" loops:
+
+```cxx
+    for (auto row = std::begin(r); row != std::end(r); row++)
+    {
+      for (auto field = std::begin(row); field != std::end(row); field++)
+        std::cout << field->c_str() << '\t';
+      std::cout << '\n';
+    }
+```
+
+Result sets are immutable, so all iterators on results and rows are actually
+`const_iterator`s. There are also `const_reverse_iterator` types, which
+iterate backwards from `rbegin()` to `rend()` exclusive.
+
+All these iterator types provide one extra bit of convenience that you won't
+normally find in C++ iterators: referential transparency. You don't need to
+dereference them to get to the row or field they refer to. That is, instead
+of `row->end()` you can also choose to say `row.end()`. Similarly, you
+may prefer `field.c_str()` over `field->c_str()`.
+
+This becomes really helpful with the array-indexing operator. With regular
+C++ iterators you would need ugly expressions like `(*row)[0]` or
+`row->operator[](0)`. With the iterator types defined by the result and
+row classes you can simply say `row[0]`.
+
+
+Streaming rows
+--------------
+
+There's another way to go through the rows coming out of a query. It's
+usually easier and faster, but there are drawbacks.
+
+**One,** you start getting rows before all the data has come in from the
+database. That speeds things up, but what happens if you lose your network
+connection while transferring the data? Your application may already have
+processed some of the data before finding out that the rest isn't coming. If
+that is a problem for your application, streaming may not be the right choice.
+
+**Two,** streaming only works for some types of query. The `stream()` function
+wraps your query in a PostgreSQL `COPY` command, and `COPY` only supports a few
+commands: `SELECT`, `VALUES`, or an `INSERT`, `UPDATE`, or `DELETE` with a
+`RETURNING` clause. See the `COPY` documentation here:
+https://www.postgresql.org/docs/current/sql-copy.html
+
+**Three,** when you convert a field to a "view" type (such as
+`std::string_view` or `std::basic_string_view<std::byte>`), the view points to
+underlying data which only stays valid until you iterate to the next row or
+exit the loop. So if you want to use that data for longer than a single
+iteration of the streaming loop, you'll have to store it somewhere yourself.
+
+Now for the good news. Streaming does make it very easy to query data and loop
+over it:
+
+```cxx
+    for (auto [id, name, x, y] :
+        tx.stream<int, std::string_view, float, float>(
+            "SELECT id, name, x, y FROM point"))
+      process(id + 1, "point-" + std::string{name}, x * 10.0, y * 10.0);
+```
+
+The conversion to C++ types (here `int`, `std::string_view`, and two `float`s)
+is built into the function. You never even see `row` objects, `field` objects,
+iterators, or conversion methods. You just put in your query and you receive
+your data.
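+
+If you do need a field's data to outlive the streaming loop, copy it into an
+owning type before moving on. A minimal sketch (assuming the same illustrative
+`point` table as above):
+
+```cxx
+    std::vector<std::string> names;
+    for (auto [id, name] :
+        tx.stream<int, std::string_view>("SELECT id, name FROM point"))
+      // Copy the view's characters; the view itself dies on the next iteration.
+      names.emplace_back(name);
+```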
diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/binary-data.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/binary-data.md
new file mode 100644
index 000000000..20da8dc0c
--- /dev/null
+++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/binary-data.md
@@ -0,0 +1,56 @@
+Binary data {#binary}
+===========
+
+The database has two ways of storing binary data: `BYTEA` is like a string, but
+containing bytes rather than text characters. And _large objects_ are more
+like a separate table containing binary objects.
+
+Generally you'll want to use `BYTEA` for reasonably-sized values, and large
+objects for very large values.
+
+That's the database side. On the C++ side, in libpqxx, all binary data must be
+either `std::basic_string<std::byte>` or `std::basic_string_view<std::byte>`;
+or if you're building in C++20 or better, anything that's a block of
+contiguous `std::byte` in memory.
+
+So for example, if you want to write a large object, you'd create a
+`pqxx::blob` object. And you might use that to write data in the form of
+`std::basic_string_view<std::byte>`.
+
+Your particular binary data may look different though. You may have it in a
+`std::string`, or a `std::vector<unsigned char>`, or a pointer to `char`
+accompanied by a size (which could be signed or unsigned, and of any of a few
+different widths). Sometimes that's your choice, or sometimes some other
+library will dictate what form it takes.
+
+So long as it's _basically_ still a block of bytes though, you can use
+`pqxx::binary_cast` to construct a `std::basic_string_view<std::byte>` from it.
+
+There are two forms of `binary_cast`. One takes a single argument that must
+support `std::data()` and `std::size()`:
+
+    std::string hi{"Hello binary world"};
+    my_blob.write(pqxx::binary_cast(hi));
+
+The other takes a pointer and a size:
+
+    char const greeting[] = "Hello binary world";
+    char const *hi = greeting;
+    my_blob.write(pqxx::binary_cast(hi, sizeof(greeting)));
+
+
+Caveats
+-------
+
+There are some restrictions on `binary_cast` that you must be aware of.
+
+First, your data must be of a type that gives us _bytes._ So: `char`,
+`unsigned char`, `signed char`, `int8_t`, `uint8_t`, or of course `std::byte`.
+You can't feed in a vector of `double`, or anything like that.
+
+Second, the data must be laid out as a contiguous block in memory. If there's
+no `std::data()` implementation for your type, it's not suitable.
+
+Third, `binary_cast` only constructs something like a `std::string_view`. It
+does not make a copy of your actual data. So, make sure that your data remains
+alive and in the same place while you're using it.
diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/datatypes.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/datatypes.md
new file mode 100644
index 000000000..bc14c8b90
--- /dev/null
+++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/datatypes.md
@@ -0,0 +1,373 @@
+Supporting additional data types {#datatypes}
+================================
+
+Communication with the database mostly happens in a text format. When you
+include an integer value in a query, you use `to_string` to convert it to that
+text format. When you get a query result field "as a float," it converts from
+the text format to a floating-point type. These conversions are everywhere in
+libpqxx.
+
+The conversion system supports many built-in types, but it is also extensible.
+You can "teach" libpqxx (in the scope of your own application) to convert +additional types of values to and from PostgreSQL's string format. + +This is massively useful, but it's not for the faint of heart. You'll need to +specialise some templates. And, **the API for doing this can change with any +major libpqxx release.** + + +Converting types +---------------- + +In your application, a conversion is driven entirely by a C++ type you specify. +The value's SQL type has nothing to do with it, nor is there anything in the +string that would identify its type. + +So, if you've SELECTed a 64-bit integer from the database, and you try to +convert it to a C++ "short," one of two things will happen: either the number +is small enough to fit in your `short` (and it just works), or else it throws a +conversion exception. + +Or, your database table might have a text column, but a given field may contain +a string that _looks_ just like a number. You can convert that value to an +integer type just fine. Or to a floating-point type. All that matters to the +conversion is the actual value, and the type. + +In some cases the templates for these conversions can tell the type from the +arguments you pass them: + + auto x = to_string(99); + +In other cases you may need to instantiate template explicitly: + + auto y = from_string("99"); + + +Supporting a new type +--------------------- + +Let's say you have some other SQL type which you want to be able to store in, +or retrieve from, the database. What would it take to support that? + +Sometimes you do not need _complete_ support. You might need a conversion _to_ +a string but not _from_ a string, for example. The conversion is defined at +compile time, so don't be too afraid to be incomplete. If you leave out one of +these steps, it's not going to crash at run time or mess up your data. The +worst that can happen is that your code won't build. + +So what do you need for a complete conversion? + +First off, of course, you need a C++ type. It may be your own, but it +doesn't have to be. It could be a type from a third-party library, or even one +from the standard library that libpqxx does not yet support. + +You also specialise the `pqxx::type_name` variable to specify the type's name. +This is important for all code which mentions your type in human-readable text, +such as error messages. + +Then, does your type have a built-in null value? You specialise the +`pqxx::nullness` template to specify the details. + +Finally, you specialise the `pqxx::string_traits` template. This is where you +define the actual conversions. + +Let's go through these steps one by one. + + +Your type +--------- + +You'll need a type for which the conversions are not yet defined, because the +C++ type is what determines the right conversion. One type, one set of +conversions. + +The type doesn't have to be one that you create. The conversion logic was +designed such that you can build it around any type. So you can just as +easily build a conversion for a type that's defined somewhere else. There's +no need to include any special methods or other members inside it. That's also +how libpqxx can support converting built-in types like `int`. + +By the way, if the type is an enum, you don't need to do any of this. Just +invoke the preprocessor macro `PQXX_DECLARE_ENUM_CONVERSION`, from the global +namespace near the top of your translation unit, and pass the type as an +argument. 
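+
+For example (a sketch; `colour` here is just an illustrative enum of your own,
+not something provided by libpqxx):
+
+    #include <pqxx/pqxx>
+
+    // Some enum of yours that you want to read and write as field values.
+    enum class colour { red, green, blue };
+
+    // Invoke the macro at global-namespace scope, near the top of the
+    // translation unit, before any code that converts "colour" values.
+    PQXX_DECLARE_ENUM_CONVERSION(colour);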
+ +The library also provides specialisations for `std::optional`, +`std::shared_ptr`, and `std::unique_ptr`. If you have conversions for +`T`, you'll also have conversions for those. + + +Specialise `type_name` +---------------------- + +When errors happen during conversion, libpqxx will compose error messages for +the user. Sometimes these will include the name of the type that's being +converted. + +To tell libpqxx the name of each type, there's a template variable called +`pqxx::type_name`. For any given type `T`, it should have a specialisation +that provides that `T`'s human-readable name: + + namespace pqxx + { + template<> std::string const type_name{"T"}; + } + +(Yes, this means that you need to define something inside the pqxx namespace. +Future versions of libpqxx may move this into a separate namespace.) + +Define this early on in your translation unit, before any code that might cause +libpqxx to need the name. That way, the libpqxx code which needs to know the +type's name can see your definition. + + +Specialise `nullness` +--------------------- + +A struct template `pqxx::nullness` defines whether your type has a natural +"null value" built in. If so, it also provides member functions for producing +and recognising null values. + +The simplest scenario is also the most common: most types don't have a null +value built in. In that case, derive your nullness traits from +`pqxx::no_null`: + + namespace pqxx + { + template<> struct nullness : pqxx::no_null {}; + } + +(Here again you're defining this in the pqxx namespace.) + +If your type does have a natural null value, the definition gets a little more +complex: + + namespace pqxx + { + template<> struct nullness + { + static constexpr bool has_null{true}; + static constexpr bool always_null{false}; + + static bool is_null(T const &value) + { + // Return whether "value" is null. + return ...; + } + + [[nodiscard]] static T null() + { + // Return a null value. + return ...; + } + }; + } + +You may be wondering why there's a function to produce a null value, but also a +function to check whether a value is null. Why not just compare the value to +the result of `null()`? Because two null values may not be equal. `T` may +have several different null values. Or it may override the comparison +operator, similar to SQL where NULL is not equal to NULL. + +As a third case, your type may be one that _always_ represents a null value. +This is the case for `std::nullptr_t` and `std::nullopt_t`. In that case, you +set `nullness::always_null` to `true` (as well as `has_null` of course), +and you won't need to define any actual conversions. + + +Specialise `string_traits` +------------------------- + +This part is more work. (You can skip it for types that are _always_ null, +but those will be rare.) Specialise the `pqxx::string_traits` template: + + namespace pqxx + { + template<> struct string_traits + { + static T from_string(std::string_view text); + static zview to_buf(char *begin, char *end, T const &value); + static char *into_buf(char *begin, char *end, T const &value); + static std::size_t size_buffer(T const &value) noexcept; + }; + } + +You'll also need to write those member functions, or as many of them as needed +to get your code to build. + + +### `from_string` + +We start off simple: `from_string` parses a string as a value of `T`, and +returns that value. + +The string may not be zero-terminated; it's just the `string_view` from +beginning to end (exclusive). 
In your tests, cover cases where the string +does not end in a zero byte. + +It's perfectly possible that the string isn't actually a `T` value. Mistakes +happen. In that case, throw a `pqxx::conversion_error`. + +(Of course it's also possible that you run into some other error, so it's fine +to throw different exceptions as well. But when it's definitely "this is not +the right format for a `T`," throw `conversion_error`.) + + +### `to_buf` + +In this function, you convert a value of `T` into a string that the postgres +server will understand. + +The caller will provide you with a buffer where you can write the string, if +you need it: from `begin` to `end` exclusive. It's a half-open interval, so +don't access `*end`. + +If the buffer is insufficient for you to do the conversion, throw a +`pqxx::conversion_overrun`. It doesn't have to be exact: you can be a little +pessimistic and demand a bit more space than you need. Just be sure to throw +the exception if there's any risk of overrunning the buffer. + +You don't _have_ to use the buffer for this function though. For example, +`pqxx::string_traits::to_buf` returns a compile-time constant string and +ignores the buffer. + +Even if you do use the buffer, your string does not _have_ to start at the +beginning of the buffer. For example, the integer conversions start by writing +the _least_ significant digit to the _end_ of the buffer, and then writes the +more significant digits before it. It was just more convenient. + +Return a `pqxx::zview`. This is basically a `std::string_view`, but with one +difference: a `zview` guarantees that there will be a valid zero byte right +after the `string_view`. The zero byte is not counted as part of its size, but +it will be there. + +Expressed in code, this rule must hold: + + void invariant(zview z) + { + assert(z[std::size(z)] == 0); + } + +Make sure you write your trailing zero _before_ the `end`. If the trailing +zero doesn't fit in the buffer, then there's just not enough room to perform +the conversion. + +Beware of locales when converting. If you use standard library features like +`sprintf`, they may obey whatever locale is currently set on the system. That +means that a simple integer like 1000000 may come out as "1000000" on your +system, but as "1,000,000" on mine, or as "1.000.000" for somebody else, and on +an Indian system it may be "1,00,000". Values coming from or going to the +database should be in non-localised formats. You can use libpqxx functions for +those conversions: `pqxx::from_string`, `pqxx::to_string`, `pqxx::to_buf`. + + +### `into_buf` + +This is a stricter version of `to_buf`. All the same requirements apply, but +in addition you must write your string into the buffer provided, starting +_exactly_ at `begin`. + +That's why this function returns just a simple pointer: the address right +behind the trailing zero. If the caller wants to use the string, they can +find it at `begin`. If they want to write a different value into the rest of +the buffer, they can start at the location you returned. + + +### `size_buffer` + +Here you estimate how much buffer space you need for converting a `T` to a +string. Be precise if you can, but pessimistic if you must. It's usually +better to waste a few unnecessary bytes than to spend a lot of time computing +the exact buffer space you need. And failing the conversion because you +under-budgeted the buffer is worst of all. + +Include the trailing zero in the buffer size. 
If your `to_buf` takes more
+space than just what's needed to store the result, include that too.
+
+Make `size_buffer` a `constexpr` function if you can. It can allow the caller
+to allocate the buffer on the stack, with a size known at compile time.
+
+
+Optional: Specialise `is_unquoted_safe`
+---------------------------------------
+
+When converting arrays or composite values to strings, libpqxx may need to
+quote values and escape any special characters. This takes time.
+
+Some types though, such as integral or floating-point types, can never have
+any special characters such as quotes, commas, or backslashes in their string
+representations. In such cases, there's no need to quote or escape such values
+in arrays or composite types.
+
+If your type is like that, you can tell libpqxx about this by defining:
+
+    namespace pqxx
+    {
+    template<> inline constexpr bool is_unquoted_safe<T>{true};
+    }
+
+The code that converts this type of field to strings in an array or a composite
+type can then use a simpler, more efficient variant of the code. It's always
+safe to leave this out; it's _just_ an optimisation for when you're completely
+sure that it's safe.
+
+Do not do this if a string representation of your type may contain a comma;
+semicolon; parenthesis; brace; quote; backslash; newline; or any other
+character that might need escaping.
+
+
+Optional: Specialise `param_format`
+-----------------------------------
+
+This one you don't generally need to worry about. Read on if you're writing a
+type which represents raw binary data, or if you're writing a template where
+_some specialisations_ may contain raw binary data.
+
+When you call parameterised statements, or prepared statements with parameters,
+libpqxx needs to pass your parameters on to libpq, the underlying C-level
+PostgreSQL client library.
+
+There are two formats for doing that: _text_ and _binary._ In the first, we
+represent all values as strings, and the server then converts them into its own
+internal binary representation. That's what the string conversions are all
+about, and it's what we do for almost all types of parameters.
+
+But we do it differently when the parameter is a contiguous series of raw bytes
+and the corresponding SQL type is `BYTEA`. There is a text format for those,
+but we bypass it for efficiency. The server can use the binary data in the
+exact same form, without any conversion or extra processing. The binary data
+is also twice as compact during transport.
+
+(People sometimes ask why we can't just treat all types as binary. However the
+general case isn't so clear-cut. The binary formats are not documented, there
+are no guarantees that they will be platform-independent or that they will
+remain stable, and there's no really solid way to detect when we might get the
+format wrong. But also, the conversions aren't necessarily as straightforward
+and efficient as they sound. So, for the general case, libpqxx sticks with the
+text formats. Raw binary data alone stands out as a clear win.)
+
+Long story short, the machinery for passing parameters needs to know: is this
+parameter a binary string, or not? In the normal case it can assume "no," and
+that's what it does. The text format is always a safe choice; we just try to
+use the binary format where it's faster.
+
+The `param_format` function template is what makes the decision. We specialise
+it for types which may be binary strings, and use the default for all other
+types.
+
+"Types which _may_ be binary"?
You might think we know whether a type is a +binary type or not. But there are some complications with generic types. + +Templates like `std::shared_ptr`, `std::optional`, and so on act like +"wrappers" for another type. A `std::optional` is binary if `T` is binary. +Otherwise, it's not. If you're building support for a template of this nature, +you'll probably want to implement `param_format` for it. + +The decision to use binary format is made based on a given object, not +necessarily based on the type in general. Look at `std::variant`. If you have +a `std::variant` type which can hold an `int` or a binary string, is that a +binary parameter? We can't decide without knowing the individual object. + +Containers are another hard case. Should we pass `std::vector` in binary? +Even when `T` is a binary type, we don't currently have any way to pass an +array in binary format, so we always pass it as text. diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/escaping.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/escaping.md new file mode 100644 index 000000000..2ad9fe3db --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/escaping.md @@ -0,0 +1,74 @@ +String escaping {#escaping} +=============== + +Writing queries as strings is easy. But sometimes you need a variable in +there: `"SELECT id FROM user WHERE name = '" + name + "'"`. + +This is dangerous. See the bug? If `name` can contain quotes, you may have +an SQL injection vulnerability there, where users can enter nasty stuff like +"`.'; DROP TABLE user`". Or if you're lucky, it's just a nasty bug that you +discover when `name` happens to be "d'Arcy". + +So, you'll need to _escape_ the `name` before you insert it. This is where +quotes and other problematic characters are marked as "this is just a character +in the string, not the end of the string." There are +[several functions](@ref escaping-functions) in libpqxx to do this for you. + + +SQL injection +------------- + +To understand what SQL injection vulnerabilities are and why they should be +prevented, imagine you use the following SQL statement somewhere in your +program: + + TX.exec( + "SELECT number,amount " + "FROM accounts " + "WHERE allowed_to_see('" + userid + "','" + password + "')"); + +This shows a logged-in user important information on all accounts he is +authorized to view. The userid and password strings are variables entered +by the user himself. + +Now, if the user is actually an attacker who knows (or can guess) the +general shape of this SQL statement, imagine he enters the following +password: + + x') OR ('x' = 'x + +Does that make sense to you? Probably not. But if this is inserted into +the SQL string by the C++ code above, the query becomes: + + SELECT number,amount + FROM accounts + WHERE allowed_to_see('user','x') OR ('x' = 'x') + +Is this what you wanted to happen? Probably not! The neat `allowed_to_see()` +clause is completely circumvented by the "`OR ('x' = 'x')`" clause, which is +always `true`. Therefore, the attacker will get to see all accounts in the +database! 
+ + +Using the esc functions +----------------------- + +Here's how you can fix the problem in the example above: + + TX.exec( + "SELECT number,amount " + "FROM accounts " + "WHERE allowed_to_see('" + TX.esc(userid) + "', " + "'" + TX.esc(password) + "')"); + +Now, the quotes embedded in the attacker's string will be neatly escaped so +they can't "break out" of the quoted SQL string they were meant to go into: + + SELECT number,amount + FROM accounts + WHERE allowed_to_see('user', 'x'') OR (''x'' = ''x') + +If you look carefully, you'll see that thanks to the added escape characters +(a single-quote is escaped in SQL by doubling it) all we get is a very +strange-looking password string--but not a change in the SQL statement. + diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/getting-started.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/getting-started.md new file mode 100644 index 000000000..1b87b881f --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/getting-started.md @@ -0,0 +1,142 @@ +Getting started {#getting-started} +=============== + +The most basic three types in libpqxx are the _connection_, the _transaction_, +and the _result_. + +They fit together as follows: +* You connect to the database by creating a `pqxx::connection` object (see + @ref connections). + +* You create a transaction object (see @ref transactions) operating on that + connection. You'll usually want the `pqxx::work` variety. + + Once you're done you call the transaction's `commit` function to make its + work final. If you don't call this, the work will be rolled back when the + transaction object is destroyed. + +* Until then, use the transaction's `exec`, `query_value`, and `stream` + functions (and variants) to execute SQL statements. You pass the statements + themselves in as simple strings. (See @ref streams for more about data + streaming). + +* Most of the `exec` functions return a `pqxx::result` object, which acts + as a standard container of rows: `pqxx::row`. + + Each row in a result, in turn, acts as a container of fields: `pqxx::field`. + See @ref accessing-results for more about results, rows, and fields. + +* Each field's data is stored internally as a text string, in a format defined + by PostgreSQL. You can convert field or row values using their `as()` and + `to()` member functions. + +* After you've closed the transaction, the connection is free to run a next + transaction. + +Here's a very basic example. It connects to the default database (you'll +need to have one set up), queries it for a very simple result, converts it to +an `int`, and prints it out. It also contains some basic error handling. + + #include + #include + + int main() + { + try + { + // Connect to the database. In practice we may have to pass some + // arguments to say where the database server is, and so on. + // The constructor parses options exactly like libpq's + // PQconnectdb/PQconnect, see: + // https://www.postgresql.org/docs/10/static/libpq-connect.html + pqxx::connection c; + + // Start a transaction. In libpqxx, you always work in one. + pqxx::work w(c); + + // work::exec1() executes a query returning a single row of data. + // We'll just ask the database to return the number 1 to us. + pqxx::row r = w.exec1("SELECT 1"); + + // Commit your transaction. If an exception occurred before this + // point, execution will have left the block, and the transaction will + // have been destroyed along the way. 
In that case, the failed + // transaction would implicitly abort instead of getting to this point. + w.commit(); + + // Look at the first and only field in the row, parse it as an integer, + // and print it. + // + // "r[0]" returns the first field, which has an "as<...>()" member + // function template to convert its contents from their string format + // to a type of your choice. + std::cout << r[0].as() << std::endl; + } + catch (std::exception const &e) + { + std::cerr << e.what() << std::endl; + return 1; + } + } + +This prints the number 1. Notice that you can keep the result object around +after you've closed the transaction or even the connection. There are +situations where you can't do it, but generally it's fine. If you're +interested: you can install your own callbacks for receiving error messages +from the database, and in that case you'll have to keep the connection object +alive. But otherwise, it's nice to be able to "fire and forget" your +connection and deal with the data. + +You can also convert an entire row to a series of C++-side types in one go, +using the @c as member function on the row: + + pqxx::connection c; + pqxx::work w(c); + pqxx::row r = w.exec1("SELECT 1, 2, 'Hello'"); + auto [one, two, hello] = r.as(); + std::cout << (one + two) << ' ' << std::strlen(hello) << std::endl; + +Here's a slightly more complicated example. It takes an argument from the +command line and retrieves a string with that value. The interesting part is +that it uses the escaping-and-quoting function `quote` to embed this +string value in SQL safely. It also reads the result field's value as a +plain C-style string using its `c_str` function. + + #include + #include + #include + + int main(int argc, char *argv[]) + { + try + { + if (!argv[1]) throw std::runtime_error("Give me a string!"); + + pqxx::connection c; + pqxx::work w(c); + + // work::exec() returns a full result set, which can consist of any + // number of rows. + pqxx::result r = w.exec("SELECT " + w.quote(argv[1])); + + // End our transaction here. We can still use the result afterwards. + w.commit(); + + // Print the first field of the first row. Read it as a C string, + // just like std::string::c_str() does. + std::cout << r[0][0].c_str() << std::endl; + } + catch (std::exception const &e) + { + std::cerr << e.what() << std::endl; + return 1; + } + } + +You can find more about converting field values to native types, or +converting values to strings for use with libpqxx, under +@ref stringconversion. More about getting to the rows and fields of a +result is under @ref accessing-results. + +If you want to handle exceptions thrown by libpqxx in more detail, for +example to print the SQL contents of a query that failed, see @ref exception. diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/mainpage.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/mainpage.md new file mode 100644 index 000000000..5d4b8f9b2 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/mainpage.md @@ -0,0 +1,28 @@ +libpqxx {#mainpage} +======= + +@version 7.7.3 +@author Jeroen T. Vermeulen +@see http://pqxx.org +@see https://github.com/jtv/libpqxx + +Welcome to libpqxx, the C++ API to the PostgreSQL database management system. + +Compiling this package requires PostgreSQL to be installed -- including the +C headers for client development. The library builds on top of PostgreSQL's +standard C API, libpq. The libpq headers are not needed to compile client +programs, however. 
+ +For a quick introduction to installing and using libpqxx, see the README.md +file. The latest information can be found at http://pqxx.org/ + + +Some links that should help you find your bearings: +* @ref getting-started +* @ref thread-safety +* @ref connections +* @ref transactions +* @ref escaping +* @ref performance +* @ref transactor +* @ref datatypes diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/parameters.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/parameters.md new file mode 100644 index 000000000..7ac792025 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/parameters.md @@ -0,0 +1,90 @@ +Statement parameters {#parameters} +==================== + +When you execute a prepared statement (see @ref prepared), or a parameterised +statement (using functions like `pqxx::connection::exec_params`), you may write +special _placeholders_ in the query text. They look like `$1`, `$2`, and so +on. + +If you execute the query and pass parameter values, the call will respectively +substitute the first where it finds `$1`, the second where it finds `$2`, et +cetera. + +Doing this saves you work. If you don't use statement parameters, you'll need +to quote and escape your values (see `connection::quote()` and friends) as you +insert them into your query as literal values. + +Or if you forget to do that, you leave yourself open to horrible +[SQL injection attacks](https://xkcd.com/327/). Trust me, I was born in a town +whose name started with an apostrophe! + +Statement parameters save you this work. With these parameters you can pass +your values as-is, and they will go across the wire to the database in a safe +format. + +In some cases it may even be faster! When a parameter represents binary data +(as in the SQL `BYTEA` type), libpqxx will send it directly as binary, which is +a bit more efficient. If you insert the binary data directly in your query +text, your CPU will have some extra work to do, converting the data into a text +format, escaping it, and adding quotes. + + +Dynamic parameter lists +----------------------- + +In rare cases you may just not know how many parameters you'll pass into your +statement when you call it. + +For these situations, have a look at `params`. It lets you compose your +parameters list on the fly, even add whole ranges of parameters at a time. + +You can pass a `params` into your statement as a normal parameter. It will +fill in all the parameter values it contains into that position of the +statement's overall parameter list. + +So if you call your statement passing a regular parameter `a`, a +`params` containing just a parameter `b`, and another regular parameter `c`, +then your call will pass parameters `a`, `b`, and `c`. Or if the params object +is empty, it will pass just `a` and `c`. If the params object contains `x` and +`y`, your call will pass `a, x, y, c`. + +You can mix static and dynamic parameters freely. Don't go overboard though: +complexity is where bugs happen! + + +Generating placeholders +----------------------- + +If your code gets particularly complex, it may sometimes happen that it becomes +hard to track which parameter value belongs with which placeholder. Did you +intend to pass this numeric value as `$7`, or as `$8`? The answer may depend +on an `if` that happened earlier in a different function. + +(Generally if things get that complex, it's a good idea to look for simpler +solutions. 
But especially when performance matters, sometimes you can't avoid +complexity like that.) + +There's a little helper class called `placeholders`. You can use it as a +counter which produces those placeholder strings, `$1`, `$2`, `$3`, et cetera. +When you start generating a complex statement, you can create both a `params` +and a `placeholders`: + + pqxx::params values; + pqxx::placeholders name; + +Let's say you've got some complex code to generate the conditions for an SQL +"WHERE" clause. You'll generally want to do these things close together in +your, so that you don't accidentally update one part and forget another: + + if (extra_clause) + { + // Extend the query text, using the current placeholder. + query += " AND x = " + name.get(); + // Add the parameter value. + values.append(my_x); + // Move on to the next placeholder value. + name.next(); + } + +Depending on the starting value of `name`, this might add to `query` a fragment +like "` AND x = $3`" or "` AND x = $5`". diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/performance.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/performance.md new file mode 100644 index 000000000..6c403684f --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/performance.md @@ -0,0 +1,24 @@ +Performance features {#performance} +==================== + +If your program's database interaction is not as efficient as it needs to be, +the first place to look is usually the SQL you're executing. But libpqxx +has a few specialized features to help you squeeze more performance out +of how you issue commands and retrieve data: + +* @ref streams. Use these as a faster way to transfer data between your + code and the database. +* `std::string_view` and `pqxx::zview`. In places where traditional C++ worked + with `std::string`, see whether `std::string_view` or `pqxx::zview` will + do. Of course that means that you'll have to look at the data's lifetime + more carefully, but it'll save the computer a lot of copying. +* @ref prepared. These can be executed many times without the server + parsing and planning them anew each time. They also save you having to + escape string parameters. +* `pqxx::pipeline` lets you send queries to the database in batches, and + continue other processing while they are executing. +* `pqxx::connecting` lets you start setting up a database connection, but + without blocking the thread. + +As always of course, don't risk the quality of your code for optimizations +that you don't need! diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/prepared-statement.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/prepared-statement.md new file mode 100644 index 000000000..5193866a6 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/prepared-statement.md @@ -0,0 +1,125 @@ +Prepared statements {#prepared} +=================== + +Prepared statements are SQL queries that you define once and then invoke +as many times as you like, typically with varying parameters. It's basically +a function that you can define ad hoc. + +If you have an SQL statement that you're going to execute many times in +quick succession, it may be more efficient to prepare it once and reuse it. +This saves the database backend the effort of parsing complex SQL and +figuring out an efficient execution plan. Another nice side effect is that +you don't need to worry about escaping parameters. 
Some corporate coding +standards require all SQL parameters to be passed in this way, to reduce the +risk of programmer mistakes leaving room for SQL injections. + + +Preparing a statement +--------------------- + +You create a prepared statement by preparing it on the connection (using the +`pqxx::connection::prepare` functions), passing an identifier and its SQL text. + +The identifier is the name by which the prepared statement will be known; it +should consist of ASCII letters, digits, and underscores only, and start with +an ASCII letter. The name is case-sensitive. + +```cxx + void prepare_my_statement(pqxx::connection &c) + { + c.prepare( + "my_statement", + "SELECT * FROM Employee WHERE name = 'Xavier'"); + } +``` + +Once you've done this, you'll be able to call `my_statement` from any +transaction you execute on the same connection. For this, use the +`pqxx::transaction_base::exec_prepared` functions. + +```cxx + pqxx::result execute_my_statement(pqxx::transaction_base &t) + { + return t.exec_prepared("my_statement"); + } +``` + + +Parameters +---------- + +Did I mention that prepared statements can have parameters? The query text +can contain `$1`, `$2` etc. as placeholders for parameter values that you +will provide when you invoke the prepared satement. + +See @ref parameters for more about this. And here's a simple example of +preparing a statement and invoking it with parameters: + +```cxx + void prepare_find(pqxx::connection &c) + { + // Prepare a statement called "find" that looks for employees with a + // given name (parameter 1) whose salary exceeds a given number + // (parameter 2). + c.prepare( + "find", + "SELECT * FROM Employee WHERE name = $1 AND salary > $2"); + } +``` + +This example looks up the prepared statement "find," passes `name` and +`min_salary` as parameters, and invokes the statement with those values: + +```cxx + pqxx::result execute_find( + pqxx::transaction_base &t, std::string name, int min_salary) + { + return t.exec_prepared("find", name, min_salary); + } +``` + + +A special prepared statement +---------------------------- + +There is one special case: the _nameless_ prepared statement. You may prepare +a statement without a name, i.e. whose name is an empty string. The unnamed +statement can be redefined at any time, without un-preparing it first. + + +Performance note +---------------- + +Don't assume that using prepared statements will speed up your application. +There are cases where prepared statements are actually slower than plain SQL. + +The reason is that the backend can often produce a better execution plan when +it knows the statement's actual parameter values. + +For example, say you've got a web application and you're querying for users +with status "inactive" who have email addresses in a given domain name X. If +X is a very popular provider, the best way for the database engine to plan the +query may be to list the inactive users first and then filter for the email +addresses you're looking for. But in other cases, it may be much faster to +find matching email addresses first and then see which of their owners are +"inactive." A prepared statement must be planned to fit either case, but a +direct query will be optimised based on table statistics, partial indexes, etc. + + +Zero bytes +---------- + +@warning Beware of "nul" bytes! + +Any string you pass as a parameter will end at the _first char with value +zero._ If you pass a string that contains a zero byte, the last byte in the +value will be the one just before the zero. 
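+
+For example (just a sketch -- the `notes` table and its `body` column here are
+made up for illustration):
+
+```cxx
+  void insert_note(pqxx::transaction_base &tx)
+  {
+    // Five bytes, with a zero byte in the middle.
+    std::string tricky("ab\0cd", 5);
+    // Insert it as a statement parameter (we expect no rows back).
+    tx.exec_params0("INSERT INTO notes (body) VALUES ($1)", tricky);
+    // The value that reaches the server is just "ab": the parameter ends at
+    // the first zero byte, so "cd" is silently lost.
+  }
+```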
+ +So, if you need a zero byte in a string, consider that it's really a _binary +string,_ which is not the same thing as a text string. SQL represents binary +data as the `BYTEA` type, or in binary large objects ("blobs"). + +In libpqxx, you represent binary data as a range of `std::byte`. They must be +contiguous in memory, so that libpqxx can pass pointers to the underlying C +library. So you might use `std::basic_string`, or +`std::basic_string_view`, or `std::vector`. diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/streams.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/streams.md new file mode 100644 index 000000000..3df4d6126 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/streams.md @@ -0,0 +1,107 @@ +Streams {#streams} +======= + +Most of the time it's fine to retrieve data from the database using `SELECT` +queries, and store data using `INSERT`. But for those cases where efficiency +matters, there are two classes to help you do this better: `stream_from` and +`stream_to`. They're less flexible than SQL queries, and there's the risk of +losing your connection while you're in mid-stream, but you get some speed and +memory efficiencies in return. + +Both stream classes do data conversion for you: `stream_from` receives values +from the database in PostgreSQL's text format, and converts them to the C++ +types you specify. Likewise, `stream_to` converts C++ values you provide to +PostgreSQL's text format for transfer. (On its end, the database of course +converts values to and from their SQL types.) + + +Null values +----------- + +So how do you deal with nulls? It depends on the C++ type you're using. Some +types may have a built-in null value. For instance, if you have a +`char const *` value and you convert it to an SQL string, then converting a +`nullptr` will produce a NULL SQL value. + +But what do you do about C++ types which don't have a built-in null value, such +as `int`? The trick is to wrap it in `std::optional`. The difference between +`int` and `std::optional` is that the former always has an `int` value, +and the latter doesn't have to. + +Actually it's not just `std::optional`. You can do the same thing with +`std::unique_ptr` or `std::shared_ptr`. A smart pointer is less efficient than +`std::optional` in most situations because they allocate their value on the +heap, but sometimes that's what you want in order to save moving or copying +large values around. + +This part is not generic though. It won't work with just any smart-pointer +type, just the ones which are explicitly supported: `shared_ptr` and +`unique_ptr`. If you really need to, you can build support for additional +wrappers and smart pointers by copying the implementation patterns from the +existing smart-pointer support. + + +stream\_from +------------ + +Use `stream_from` to read data directly from the database. It's faster than +the transaction's `exec` functions if the result contains enough rows. But +also, you won't need to keep your full result set in memory. That can really +matter with larger data sets. + +And, you can start processing your data right after the first row of data comes +in from the server. With `exec()` you need to wait to receive all data, and +then you begin processing. With `stream_from` you can be processing data on +the client side while the server is still sending you the rest. + +You don't actually need to create a `stream_from` object yourself, though you +can. 
Two shorthand functions, @ref pqxx::transaction_base::stream +and @ref pqxx::transaction_base::for_each, can create the streams for you with +a minimum of overhead. + +Not all kinds of queries will work in a stream. Internally the streams make +use of PostgreSQL's `COPY` command, so see the PostgreSQL documentation for +`COPY` for the exact limitations. Basic `SELECT` and `UPDATE ... RETURNING` +queries should just work. + +As you read a row, the stream converts its fields to a tuple type containing +the value types you ask for: + + auto stream pqxx::stream_from::query( + tx, "SELECT name, points FROM score"); + std::tuple row; + while (stream >> row) + process(row); + stream.complete(); + +As the stream reads each row, it converts that row's data into your tuple, +goes through your loop body, and then promptly forgets that row's data. This +means you can easily process more data than will fit in memory. + + +stream\_to +---------- + +Use `stream_to` to write data directly to a database table. This saves you +having to perform an `INSERT` for every row, and so it can be significantly +faster if you want to insert more than just one or two rows at a time. + +As with `stream_from`, you can specify the table and the columns, and not much +else. You insert tuple-like objects of your choice: + + pqxx::stream_to stream{ + tx, + "score", + std::vector{"name", "points"}}; + for (auto const &entry: scores) + stream << entry; + stream.complete(); + +Each row is processed as you provide it, and not retained in memory after that. + +The call to `complete()` is more important here than it is for `stream_from`. +It's a lot like a "commit" or "abort" at the end of a transaction. If you omit +it, it will be done automatically during the stream's destructor. But since +destructors can't throw exceptions, any failures at that stage won't be visible +in your code. So, always call `complete()` on a `stream_to` to close it off +properly! diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/thread-safety.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/thread-safety.md new file mode 100644 index 000000000..07c7f9984 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/amd64/share/doc/libpqxx/thread-safety.md @@ -0,0 +1,29 @@ +Thread safety {#thread-safety} +============= + +This library does not contain any locking code to protect objects against +simultaneous modification in multi-threaded programs. Therefore it is up +to you, the user of the library, to ensure that your threaded client +programs perform no conflicting operations concurrently. + +Most of the time this isn't hard. Result sets are immutable, so you can +share them between threads without problem. The main rule is: + +@li Treat a connection, together with any and all objects related to it, as +a "world" of its own. You should generally make sure that the same "world" +is never accessed by another thread while you're doing anything non-const +in there. + +That means: don't issue a query on a transaction while you're also opening +a subtransaction, don't access a cursor while you may also be committing, +and so on. + +In particular, cursors are tricky. It's easy to perform a non-const +operation without noticing. So, if you're going to share cursors or +cursor-related objects between threads, lock very conservatively! + +Use `pqxx::describe_thread_safety` to find out at runtime what level of +thread safety is implemented in your build and version of libpqxx. 
It +returns a `pqxx::thread_safety_model` describing what you can and cannot rely +on. A command-line utility `tools/pqxxthreadsafety` prints out the same +information. diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/array b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/array new file mode 100644 index 000000000..689f5b27b --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/array @@ -0,0 +1,6 @@ +/** Handling of SQL arrays. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/array.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/array.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/array.hxx new file mode 100644 index 000000000..8440a244f --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/array.hxx @@ -0,0 +1,103 @@ +/* Handling of SQL arrays. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/field instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ARRAY +#define PQXX_H_ARRAY + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include + +#include "pqxx/internal/encoding_group.hxx" +#include "pqxx/internal/encodings.hxx" + + +namespace pqxx +{ +/// Low-level array parser. +/** Use this to read an array field retrieved from the database. + * + * This parser will only work reliably if your client encoding is UTF-8, ASCII, + * or a single-byte encoding which is a superset of ASCII (such as Latin-1). + * + * Also, the parser only supports array element types which use either a comma + * or a semicolon ("," or ";") as the separator between array elements. All + * built-in types use comma, except for one which uses semicolon, but some + * custom types may not work. + * + * The input is a C-style string containing the textual representation of an + * array, as returned by the database. The parser reads this representation + * on the fly. The string must remain in memory until parsing is done. + * + * Parse the array by making calls to @ref get_next until it returns a + * @ref juncture of "done". The @ref juncture tells you what the parser found + * in that step: did the array "nest" to a deeper level, or "un-nest" back up? + */ +class PQXX_LIBEXPORT array_parser +{ +public: + /// What's the latest thing found in the array? + enum class juncture + { + /// Starting a new row. + row_start, + /// Ending the current row. + row_end, + /// Found a NULL value. + null_value, + /// Found a string value. + string_value, + /// Parsing has completed. + done, + }; + + // TODO: constexpr noexcept. Breaks ABI. + /// Constructor. You don't need this; use @ref field::as_array instead. + /** The parser only remains valid while the data underlying the @ref result + * remains valid. Once all `result` objects referring to that data have been + * destroyed, the parser will no longer refer to valid memory. + */ + explicit array_parser( + std::string_view input, + internal::encoding_group = internal::encoding_group::MONOBYTE); + + /// Parse the next step in the array. + /** Returns what it found. If the juncture is @ref juncture::string_value, + * the string will contain the value. 
Otherwise, it will be empty. + * + * Call this until the @ref array_parser::juncture it returns is + * @ref juncture::done. + */ + std::pair get_next(); + +private: + std::string_view m_input; + internal::glyph_scanner_func *const m_scan; + + /// Current parsing position in the input. + std::string::size_type m_pos = 0u; + + std::string::size_type scan_single_quoted_string() const; + std::string parse_single_quoted_string(std::string::size_type end) const; + std::string::size_type scan_double_quoted_string() const; + std::string parse_double_quoted_string(std::string::size_type end) const; + std::string::size_type scan_unquoted_string() const; + std::string parse_unquoted_string(std::string::size_type end) const; + + std::string::size_type scan_glyph(std::string::size_type pos) const; + std::string::size_type + scan_glyph(std::string::size_type pos, std::string::size_type end) const; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/binarystring b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/binarystring new file mode 100644 index 000000000..77551d9f7 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/binarystring @@ -0,0 +1,6 @@ +/** BYTEA (binary string) conversions. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/binarystring.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/binarystring.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/binarystring.hxx new file mode 100644 index 000000000..47c82a035 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/binarystring.hxx @@ -0,0 +1,236 @@ +/* Deprecated representation for raw, binary data. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/binarystring instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_BINARYSTRING +#define PQXX_H_BINARYSTRING + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include + +#include "pqxx/result.hxx" +#include "pqxx/strconv.hxx" + +namespace pqxx +{ +class binarystring; +template<> struct string_traits; + + +/// Binary data corresponding to PostgreSQL's "BYTEA" binary-string type. +/** @ingroup escaping-functions + * @deprecated Use @c std::basic_string and + * @c std::basic_string_view for binary data. In C++20 or better, + * any @c contiguous_range of @c std::byte will do. + * + * This class represents a binary string as stored in a field of type @c bytea. + * + * Internally a binarystring is zero-terminated, but it may also contain null + * bytes, they're just like any other byte value. So don't assume that it's + * safe to treat the contents as a C-style string. + * + * The binarystring retains its value even if the result it was obtained from + * is destroyed, but it cannot be copied or assigned. + * + * \relatesalso transaction_base::quote_raw + * + * To include a @c binarystring value in an SQL query, escape and quote it + * using the transaction's @c quote_raw function. + * + * @warning This class is implemented as a reference-counting smart pointer. 
+ * Copying, swapping, and destroying binarystring objects that refer to the + * same underlying data block is not thread-safe. If you wish to pass + * binarystrings around between threads, make sure that each of these + * operations is protected against concurrency with similar operations on the + * same object, or other objects pointing to the same data block. + */ +class PQXX_LIBEXPORT binarystring +{ +public: + using char_type = unsigned char; + using value_type = std::char_traits::char_type; + using size_type = std::size_t; + using difference_type = long; + using const_reference = value_type const &; + using const_pointer = value_type const *; + using const_iterator = const_pointer; + using const_reverse_iterator = std::reverse_iterator; + + [[deprecated("Use std::byte for binary data.")]] binarystring( + binarystring const &) = default; + + /// Read and unescape bytea field. + /** The field will be zero-terminated, even if the original bytea field + * isn't. + * @param F the field to read; must be a bytea field + */ + [[deprecated("Use std::byte for binary data.")]] explicit binarystring( + field const &); + + /// Copy binary data from std::string_view on binary data. + /** This is inefficient in that it copies the data to a buffer allocated on + * the heap. + */ + [[deprecated("Use std::byte for binary data.")]] explicit binarystring( + std::string_view); + + /// Copy binary data of given length straight out of memory. + [[deprecated("Use std::byte for binary data.")]] binarystring( + void const *, std::size_t); + + /// Efficiently wrap a buffer of binary data in a @c binarystring. + [[deprecated("Use std::byte for binary data.")]] binarystring( + std::shared_ptr ptr, size_type size) : + m_buf{std::move(ptr)}, m_size{size} + {} + + /// Size of converted string in bytes. + [[nodiscard]] size_type size() const noexcept { return m_size; } + /// Size of converted string in bytes. + [[nodiscard]] size_type length() const noexcept { return size(); } + [[nodiscard]] bool empty() const noexcept { return size() == 0; } + + [[nodiscard]] const_iterator begin() const noexcept { return data(); } + [[nodiscard]] const_iterator cbegin() const noexcept { return begin(); } + [[nodiscard]] const_iterator end() const noexcept { return data() + m_size; } + [[nodiscard]] const_iterator cend() const noexcept { return end(); } + + [[nodiscard]] const_reference front() const noexcept { return *begin(); } + [[nodiscard]] const_reference back() const noexcept + { + return *(data() + m_size - 1); + } + + [[nodiscard]] const_reverse_iterator rbegin() const + { + return const_reverse_iterator{end()}; + } + [[nodiscard]] const_reverse_iterator crbegin() const { return rbegin(); } + [[nodiscard]] const_reverse_iterator rend() const + { + return const_reverse_iterator{begin()}; + } + [[nodiscard]] const_reverse_iterator crend() const { return rend(); } + + /// Unescaped field contents. + [[nodiscard]] value_type const *data() const noexcept { return m_buf.get(); } + + [[nodiscard]] const_reference operator[](size_type i) const noexcept + { + return data()[i]; + } + + [[nodiscard]] PQXX_PURE bool operator==(binarystring const &) const noexcept; + [[nodiscard]] bool operator!=(binarystring const &rhs) const noexcept + { + return not operator==(rhs); + } + + binarystring &operator=(binarystring const &); + + /// Index contained string, checking for valid index. + const_reference at(size_type) const; + + /// Swap contents with other binarystring. 
+ void swap(binarystring &); + + /// Raw character buffer (no terminating zero is added). + /** @warning No terminating zero is added! If the binary data did not end in + * a null character, you will not find one here. + */ + [[nodiscard]] char const *get() const noexcept + { + return reinterpret_cast(m_buf.get()); + } + + /// Read contents as a std::string_view. + [[nodiscard]] std::string_view view() const noexcept + { + return std::string_view(get(), size()); + } + + /// Read as regular C++ string (may include null characters). + /** This creates and returns a new string object. Don't call this + * repeatedly; retrieve your string once and keep it in a local variable. + * Also, do not expect to be able to compare the string's address to that of + * an earlier invocation. + */ + [[nodiscard]] std::string str() const; + + /// Access data as a pointer to @c std::byte. + [[nodiscard]] std::byte const *bytes() const + { + return reinterpret_cast(get()); + } + + /// Read data as a @c std::basic_string_view. + [[nodiscard]] std::basic_string_view bytes_view() const + { + return std::basic_string_view{bytes(), size()}; + } + +private: + std::shared_ptr m_buf; + size_type m_size{0}; +}; + + +template<> struct nullness : no_null +{}; + + +/// String conversion traits for @c binarystring. +/** Defines the conversions between a @c binarystring and its PostgreSQL + * textual format, for communication with the database. + * + * These conversions rely on the "hex" format which was introduced in + * PostgreSQL 9.0. Both your libpq and the server must be recent enough to + * speak this format. + */ +template<> struct string_traits +{ + static std::size_t size_buffer(binarystring const &value) noexcept + { + return internal::size_esc_bin(std::size(value)); + } + + static zview to_buf(char *begin, char *end, binarystring const &value) + { + return generic_to_buf(begin, end, value); + } + + static char *into_buf(char *begin, char *end, binarystring const &value) + { + auto const budget{size_buffer(value)}; + if (internal::cmp_less(end - begin, budget)) + throw conversion_overrun{ + "Not enough buffer space to escape binary data."}; + std::string_view text{value.view()}; + internal::esc_bin(binary_cast(text), begin); + return begin + budget; + } + + static binarystring from_string(std::string_view text) + { + auto const size{pqxx::internal::size_unesc_bin(std::size(text))}; + std::shared_ptr buf{ + new unsigned char[size], [](unsigned char const *x) { delete[] x; }}; + pqxx::internal::unesc_bin(text, reinterpret_cast(buf.get())); +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return binarystring{std::move(buf), size}; +#include "pqxx/internal/ignore-deprecated-post.hxx" + } +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/blob b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/blob new file mode 100644 index 000000000..3fd0afac9 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/blob @@ -0,0 +1,6 @@ +/** Binary Large Objects interface. + */ +// Actual definitions in .hxx file so editors and such recognize file type. 
+#include "pqxx/internal/header-pre.hxx" +#include "pqxx/blob.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/blob.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/blob.hxx new file mode 100644 index 000000000..6d77be724 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/blob.hxx @@ -0,0 +1,351 @@ +/* Binary Large Objects interface. + * + * Read or write large objects, stored in their own storage on the server. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/largeobject instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_BLOB +#define PQXX_H_BLOB + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include + +#if defined(PQXX_HAVE_PATH) +# include +#endif + +#if defined(PQXX_HAVE_RANGES) && __has_include() +# include +#endif + +#if defined(PQXX_HAVE_SPAN) && __has_include() +# include +#endif + +#include "pqxx/dbtransaction.hxx" + + +namespace pqxx +{ +/** Binary large object. + * + * This is how you store data that may be too large for the `BYTEA` type. + * Access operations are similar to those for a file: you can read, write, + * query or set the current reading/writing position, and so on. + * + * These large objects live in their own storage on the server, indexed by an + * integer object identifier ("oid"). + * + * Two `blob` objects may refer to the same actual large object in the + * database at the same time. Each will have its own reading/writing position, + * but writes to the one will of course affect what the other sees. + */ +class PQXX_LIBEXPORT blob +{ +public: + /// Create a new, empty large object. + /** You may optionally specify an oid for the new blob. If you do, then + * the new object will have that oid -- or creation will fail if there + * already is an object with that oid. + */ + [[nodiscard]] static oid create(dbtransaction &, oid = 0); + + /// Delete a large object, or fail if it does not exist. + static void remove(dbtransaction &, oid); + + /// Open blob for reading. Any attempt to write to it will fail. + [[nodiscard]] static blob open_r(dbtransaction &, oid); + // Open blob for writing. Any attempt to read from it will fail. + [[nodiscard]] static blob open_w(dbtransaction &, oid); + // Open blob for reading and/or writing. + [[nodiscard]] static blob open_rw(dbtransaction &, oid); + + /// You can default-construct a blob, but it won't do anything useful. + /** Most operations on a default-constructed blob will throw @ref + * usage_error. + */ + blob() = default; + + /// You can move a blob, but not copy it. The original becomes unusable. + blob(blob &&); + /// You can move a blob, but not copy it. The original becomes unusable. + blob &operator=(blob &&); + + blob(blob const &) = delete; + blob &operator=(blob const &) = delete; + ~blob(); + + /// Maximum number of bytes that can be read or written at a time. + /** The underlying protocol only supports reads and writes up to 2 GB + * exclusive. + * + * If you need to read or write more data to or from a binary large object, + * you'll have to break it up into chunks. + */ + static constexpr std::size_t chunk_limit = 0x7fffffff; + + /// Read up to `size` bytes of the object into `buf`. 
+ /** Uses a buffer that you provide, resizing it as needed. If it suits you, + * this lets you allocate the buffer once and then re-use it multiple times. + * + * Resizes `buf` as needed. + * + * @warning The underlying protocol only supports reads up to 2GB at a time. + * If you need to read more, try making repeated calls to @ref append_to_buf. + */ + std::size_t read(std::basic_string &buf, std::size_t size); + +#if defined(PQXX_HAVE_SPAN) + /// Read up to `std::size(buf)` bytes from the object. + /** Retrieves bytes from the blob, at the current position, until `buf` is + * full or there are no more bytes to read, whichever comes first. + * + * Returns the filled portion of `buf`. This may be empty. + */ + template + std::span read(std::span buf) + { + return buf.subspan(0, raw_read(std::data(buf), std::size(buf))); + } +#endif // PQXX_HAVE_SPAN + +#if defined(PQXX_HAVE_CONCEPTS) && defined(PQXX_HAVE_SPAN) + /// Read up to `std::size(buf)` bytes from the object. + /** Retrieves bytes from the blob, at the current position, until `buf` is + * full or there are no more bytes to read, whichever comes first. + * + * Returns the filled portion of `buf`. This may be empty. + */ + template std::span read(DATA &buf) + { + return {std::data(buf), raw_read(std::data(buf), std::size(buf))}; + } +#else // PQXX_HAVE_CONCEPTS && PQXX_HAVE_SPAN + /// Read up to `std::size(buf)` bytes from the object. + /** @deprecated As libpqxx moves to C++20 as its baseline language version, + * this will take and return `std::span`. + * + * Retrieves bytes from the blob, at the current position, until `buf` is + * full (i.e. its current size is reached), or there are no more bytes to + * read, whichever comes first. + * + * This function will not change either the size or the capacity of `buf`, + * only its contents. + * + * Returns the filled portion of `buf`. This may be empty. + */ + template + std::basic_string_view read(std::vector &buf) + { + return {std::data(buf), raw_read(std::data(buf), std::size(buf))}; + } +#endif // PQXX_HAVE_CONCEPTS && PQXX_HAVE_SPAN + +#if defined(PQXX_HAVE_CONCEPTS) + /// Write `data` to large object, at the current position. + /** If the writing position is at the end of the object, this will append + * `data` to the object's contents and move the writing position so that + * it's still at the end. + * + * If the writing position was not at the end, writing will overwrite the + * prior data, but it will not remove data that follows the part where you + * wrote your new data. + * + * @warning This is a big difference from writing to a file. You can + * overwrite some data in a large object, but this does not truncate the + * data that was already there. For example, if the object contained binary + * data "abc", and you write "12" at the starting position, the object will + * contain "12c". + * + * @warning The underlying protocol only supports writes up to 2 GB at a + * time. If you need to write more, try making repeated calls to + * @ref append_from_buf. + */ + template void write(DATA const &data) + { + raw_write(std::data(data), std::size(data)); + } +#else + /// Write `data` large object, at the current position. + /** If the writing position is at the end of the object, this will append + * `data` to the object's contents and move the writing position so that + * it's still at the end. + * + * If the writing position was not at the end, writing will overwrite the + * prior data, but it will not remove data that follows the part where you + * wrote your new data. 
+ * + * @warning This is a big difference from writing to a file. You can + * overwrite some data in a large object, but this does not truncate the + * data that was already there. For example, if the object contained binary + * data "abc", and you write "12" at the starting position, the object will + * contain "12c". + * + * @warning The underlying protocol only supports writes up to 2 GB at a + * time. If you need to write more, try making repeated calls to + * @ref append_from_buf. + */ + template void write(DATA const &data) + { + raw_write(std::data(data), std::size(data)); + } +#endif + + /// Resize large object to `size` bytes. + /** If the blob is more than `size` bytes long, this removes the end so as + * to make the blob the desired length. + * + * If the blob is less than `size` bytes long, it adds enough zero bytes to + * make it the desired length. + */ + void resize(std::int64_t size); + + /// Return the current reading/writing position in the large object. + [[nodiscard]] std::int64_t tell() const; + + /// Set the current reading/writing position to an absolute offset. + /** Returns the new file offset. */ + std::int64_t seek_abs(std::int64_t offset = 0); + /// Move the current reading/writing position forwards by an offset. + /** To move backwards, pass a negative offset. + * + * Returns the new file offset. + */ + std::int64_t seek_rel(std::int64_t offset = 0); + /// Set the current position to an offset relative to the end of the blob. + /** You'll probably want an offset of zero or less. + * + * Returns the new file offset. + */ + std::int64_t seek_end(std::int64_t offset = 0); + + /// Create a binary large object containing given `data`. + /** You may optionally specify an oid for the new object. If you do, and an + * object with that oid already exists, creation will fail. + */ + static oid from_buf( + dbtransaction &tx, std::basic_string_view data, oid id = 0); + + /// Append `data` to binary large object. + /** The underlying protocol only supports appending blocks up to 2 GB. + */ + static void append_from_buf( + dbtransaction &tx, std::basic_string_view data, oid id); + + /// Read client-side file and store it server-side as a binary large object. + [[nodiscard]] static oid from_file(dbtransaction &, char const path[]); + +#if defined(PQXX_HAVE_PATH) && !defined(_WIN32) + /// Read client-side file and store it server-side as a binary large object. + /** This overload is not available on Windows, where `std::filesystem::path` + * converts to a `wchar_t` string rather than a `char` string. + */ + [[nodiscard]] static oid + from_file(dbtransaction &tx, std::filesystem::path const &path) + { + return from_file(tx, path.c_str()); + } +#endif + + /// Read client-side file and store it server-side as a binary large object. + /** In this version, you specify the binary large object's oid. If that oid + * is already in use, the operation will fail. + */ + static oid from_file(dbtransaction &, char const path[], oid); + +#if defined(PQXX_HAVE_PATH) && !defined(_WIN32) + /// Read client-side file and store it server-side as a binary large object. + /** In this version, you specify the binary large object's oid. If that oid + * is already in use, the operation will fail. + * + * This overload is not available on Windows, where `std::filesystem::path` + * converts to a `wchar_t` string rather than a `char` string. 
+ */ + static oid + from_file(dbtransaction &tx, std::filesystem::path const &path, oid id) + { + return from_file(tx, path.c_str(), id); + } +#endif + + /// Convenience function: Read up to `max_size` bytes from blob with `id`. + /** You could easily do this yourself using the @ref open_r and @ref read + * functions, but it can save you a bit of code to do it this way. + */ + static void to_buf( + dbtransaction &, oid, std::basic_string &, + std::size_t max_size); + + /// Read part of the binary large object with `id`, and append it to `buf`. + /** Use this to break up a large read from one binary large object into one + * massive buffer. Just keep calling this function until it returns zero. + * + * The `offset` is how far into the large object your desired chunk is, and + * `append_max` says how much to try and read in one go. + */ + static std::size_t append_to_buf( + dbtransaction &tx, oid id, std::int64_t offset, + std::basic_string &buf, std::size_t append_max); + + /// Write a binary large object's contents to a client-side file. + static void to_file(dbtransaction &, oid, char const path[]); + +#if defined(PQXX_HAVE_PATH) && !defined(_WIN32) + /// Write a binary large object's contents to a client-side file. + /** This overload is not available on Windows, where `std::filesystem::path` + * converts to a `wchar_t` string rather than a `char` string. + */ + static void + to_file(dbtransaction &tx, oid id, std::filesystem::path const &path) + { + to_file(tx, id, path.c_str()); + } +#endif + + /// Close this blob. + /** This does not delete the blob from the database; it only terminates your + * local object for accessing the blob. + * + * Resets the blob to a useless state similar to one that was + * default-constructed. + * + * The destructor will do this for you automatically. Still, there is a + * reason to `close()` objects explicitly where possible: if an error should + * occur while closing, `close()` can throw an exception. A destructor + * cannot. + */ + void close(); + +private: + PQXX_PRIVATE blob(connection &conn, int fd) noexcept : + m_conn{&conn}, m_fd{fd} + {} + static PQXX_PRIVATE blob open_internal(dbtransaction &, oid, int); + static PQXX_PRIVATE pqxx::internal::pq::PGconn * + raw_conn(pqxx::connection *) noexcept; + static PQXX_PRIVATE pqxx::internal::pq::PGconn * + raw_conn(pqxx::dbtransaction const &) noexcept; + static PQXX_PRIVATE std::string errmsg(connection const *); + static PQXX_PRIVATE std::string errmsg(dbtransaction const &tx) + { + return errmsg(&tx.conn()); + } + PQXX_PRIVATE std::string errmsg() const { return errmsg(m_conn); } + PQXX_PRIVATE std::int64_t seek(std::int64_t offset, int whence); + std::size_t raw_read(std::byte buf[], std::size_t size); + void raw_write(std::byte const buf[], std::size_t size); + + connection *m_conn = nullptr; + int m_fd = -1; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/composite b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/composite new file mode 100644 index 000000000..2bfa7ade9 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/composite @@ -0,0 +1,6 @@ +/** Handling of SQL "composite types." + */ +// Actual definitions in .hxx file so editors and such recognize file type. 
+#include "pqxx/internal/header-pre.hxx" +#include "pqxx/composite.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/composite.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/composite.hxx new file mode 100644 index 000000000..439b133a8 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/composite.hxx @@ -0,0 +1,149 @@ +#ifndef PQXX_H_COMPOSITE +#define PQXX_H_COMPOSITE + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/internal/array-composite.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/util.hxx" + +namespace pqxx +{ +/// Parse a string representation of a value of a composite type. +/** @warning This code is still experimental. Use with care. + * + * You may use this as a helper while implementing your own @ref string_traits + * for a composite type. + * + * This function interprets `text` as the string representation of a value of + * some composite type, and sets each of `fields` to the respective values of + * its fields. The field types must be copy-assignable. + * + * The number of fields must match the number of fields in the composite type, + * and there must not be any other text in the input. The function is meant to + * handle any value string that the backend can produce, but not necessarily + * every valid alternative spelling. + * + * Fields in composite types can be null. When this happens, the C++ type of + * the corresponding field reference must be of a type that can handle nulls. + * If you are working with a type that does not have an inherent null value, + * such as e.g. `int`, consider using `std::optional`. + */ +template +inline void parse_composite( + pqxx::internal::encoding_group enc, std::string_view text, T &...fields) +{ + static_assert(sizeof...(fields) > 0); + + auto const scan{pqxx::internal::get_glyph_scanner(enc)}; + auto const data{std::data(text)}; + auto const size{std::size(text)}; + if (size == 0) + throw conversion_error{"Cannot parse composite value from empty string."}; + + std::size_t here{0}, next{scan(data, size, here)}; + if (next != 1 or data[here] != '(') + throw conversion_error{ + internal::concat("Invalid composite value string: ", text)}; + + here = next; + + constexpr auto num_fields{sizeof...(fields)}; + std::size_t index{0}; + (pqxx::internal::parse_composite_field( + index, text, here, fields, scan, num_fields - 1), + ...); + if (here != std::size(text)) + throw conversion_error{internal::concat( + "Composite value did not end at the closing parenthesis: '", text, + "'.")}; + if (text[here - 1] != ')') + throw conversion_error{internal::concat( + "Composive value did not end in parenthesis: '", text, "'")}; +} + + +/// Parse a string representation of a value of a composite type. +/** @warning This version only works for UTF-8 and single-byte encodings. + * + * For proper encoding support, use the composite-type support in the + * `field` class. + */ +template +inline void parse_composite(std::string_view text, T &...fields) +{ + parse_composite(pqxx::internal::encoding_group::MONOBYTE, text, fields...); +} +} // namespace pqxx + + +namespace pqxx::internal +{ +constexpr char empty_composite_str[]{"()"}; +} // namespace pqxx::internal + + +namespace pqxx +{ +/// Estimate the buffer size needed to represent a value of a composite type. +/** Returns a conservative estimate. 
+ */ +template +[[nodiscard]] inline std::size_t +composite_size_buffer(T const &...fields) noexcept +{ + constexpr auto num{sizeof...(fields)}; + + // Size for a multi-field composite includes room for... + // + opening parenthesis + // + field budgets + // + separating comma per field + // - comma after final field + // + closing parenthesis + // + terminating zero + + if constexpr (num == 0) + return std::size(pqxx::internal::empty_composite_str); + else + return 1 + (pqxx::internal::size_composite_field_buffer(fields) + ...) + + num + 1; +} + + +/// Render a series of values as a single composite SQL value. +/** @warning This code is still experimental. Use with care. + * + * You may use this as a helper while implementing your own `string_traits` + * for a composite type. + */ +template +inline char *composite_into_buf(char *begin, char *end, T const &...fields) +{ + if (std::size_t(end - begin) < composite_size_buffer(fields...)) + throw conversion_error{ + "Buffer space may not be enough to represent composite value."}; + + constexpr auto num_fields{sizeof...(fields)}; + if constexpr (num_fields == 0) + { + constexpr char empty[]{"()"}; + std::memcpy(begin, empty, std::size(empty)); + return begin + std::size(empty); + } + + char *pos{begin}; + *pos++ = '('; + + (pqxx::internal::write_composite_field(pos, end, fields), ...); + + // If we've got multiple fields, "backspace" that last comma. + if constexpr (num_fields > 1) + --pos; + *pos++ = ')'; + *pos++ = '\0'; + return pos; +} +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/config-public-compiler.h b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/config-public-compiler.h new file mode 100644 index 000000000..3668a10f8 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/config-public-compiler.h @@ -0,0 +1,81 @@ +/* include/pqxx/config.h.in. Generated from configure.ac by autoheader. */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_DLFCN_H */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_INTTYPES_H */ +/* Define to 1 if you have the `pq' library (-lpq). */ +/* #undef HAVE_LIBPQ */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_MEMORY_H */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_STDINT_H */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_STDLIB_H */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_STRINGS_H */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_STRING_H */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_STAT_H */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_SYS_TYPES_H */ +/* Define to 1 if you have the header file. */ +/* #undef HAVE_UNISTD_H */ +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +/* #undef LT_OBJDIR */ +/* Name of package */ +/* #undef PACKAGE */ +/* Define to the address where bug reports for this package should be sent. */ +/* #undef PACKAGE_BUGREPORT */ +/* Define to the full name of this package. */ +/* #undef PACKAGE_NAME */ +/* Define to the full name and version of this package. */ +/* #undef PACKAGE_STRING */ +/* Define to the one symbol short name of this package. */ +/* #undef PACKAGE_TARNAME */ +/* Define to the home page for this package. */ +/* #undef PACKAGE_URL */ +/* Define to the version of this package. */ +/* #undef PACKAGE_VERSION */ +/* Define if supports floating-point conversion. 
*/ +#define PQXX_HAVE_CHARCONV_FLOAT +/* Define if supports integer conversion. */ +#define PQXX_HAVE_CHARCONV_INT +/* Define if compiler has C++20 std::cmp_greater etc. */ +/* #undef PQXX_HAVE_CMP */ +/* Define if compiler supports Concepts and header. */ +/* #undef PQXX_HAVE_CONCEPTS */ +/* Define if compiler supports __cxa_demangle */ +#define PQXX_HAVE_CXA_DEMANGLE +/* Define if g++ supports pure attribute */ +#define PQXX_HAVE_GCC_PURE +/* Define if g++ supports visibility attribute. */ +#define PQXX_HAVE_GCC_VISIBILITY +/* Define if likely & unlikely work. */ +/* #undef PQXX_HAVE_LIKELY */ +/* Define if operator[] can take multiple arguments. */ +/* #undef PQXX_HAVE_MULTIDIMENSIONAL_SUBSCRIPT */ +/* Define if compiler has usable std::filesystem::path. */ +#define PQXX_HAVE_PATH +/* Define if poll() is available. */ +#define PQXX_HAVE_POLL +/* Define if libpq has PQencryptPasswordConn (since pg 10). */ +#define PQXX_HAVE_PQENCRYPTPASSWORDCONN +/* Define if libpq has pipeline mode (since pg 14). */ +#define PQXX_HAVE_PQ_PIPELINE +/* Define if std::this_thread::sleep_for works. */ +#define PQXX_HAVE_SLEEP_FOR +/* Define if compiler has std::span. */ +/* #undef PQXX_HAVE_SPAN */ +/* Define if strerror_r() is available. */ +#define PQXX_HAVE_STRERROR_R +/* Define if strerror_s() is available. */ +/* #undef PQXX_HAVE_STRERROR_S */ +/* Define if thread_local is fully supported. */ +#define PQXX_HAVE_THREAD_LOCAL +/* Define if std::chrono has year_month_day etc. */ +/* #undef PQXX_HAVE_YEAR_MONTH_DAY */ +/* Define to 1 if you have the ANSI C header files. */ +/* #undef STDC_HEADERS */ +/* Version number of package */ +/* #undef VERSION */ diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/connection b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/connection new file mode 100644 index 000000000..82ff43aa5 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/connection @@ -0,0 +1,8 @@ +/** pqxx::connection class. + * + * pqxx::connection encapsulates a connection to a database. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/connection.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/connection.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/connection.hxx new file mode 100644 index 000000000..92454bb47 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/connection.hxx @@ -0,0 +1,1261 @@ +/* Definition of the connection class. + * + * pqxx::connection encapsulates a connection to a database. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/connection instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_CONNECTION +#define PQXX_H_CONNECTION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Double-check in order to suppress an overzealous Visual C++ warning (#418). 
+#if defined(PQXX_HAVE_CONCEPTS) && __has_include() +# include +#endif + +#include "pqxx/errorhandler.hxx" +#include "pqxx/except.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/params.hxx" +#include "pqxx/separated_list.hxx" +#include "pqxx/strconv.hxx" +#include "pqxx/types.hxx" +#include "pqxx/util.hxx" +#include "pqxx/zview.hxx" + + +/** + * @addtogroup connections + * + * Use of the libpqxx library starts here. + * + * Everything that can be done with a database through libpqxx must go through + * a @ref pqxx::connection object. It connects to a database when you create + * it, and it terminates that communication during destruction. + * + * Many things come together in this class. Handling of error and warning + * messages, for example, is defined by @ref pqxx::errorhandler objects in the + * context of a connection. Prepared statements are also defined here. + * + * When you connect to a database, you pass a connection string containing any + * parameters and options, such as the server address and the database name. + * + * These are identical to the ones in libpq, the C language binding upon which + * libpqxx itself is built: + * + * https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING + * + * There are also environment variables you can set to provide defaults, again + * as defined by libpq: + * + * https://www.postgresql.org/docs/current/libpq-envars.html + * + * You can also create a database connection _asynchronously_ using an + * intermediate @ref pqxx::connecting object. + */ + +namespace pqxx::internal +{ +class sql_cursor; + +#if defined(PQXX_HAVE_CONCEPTS) +/// Concept: T is a range of pairs of zero-terminated strings. +template +concept ZKey_ZValues = std::ranges::input_range and requires(T t) +{ + {std::cbegin(t)}; + { + std::get<0>(*std::cbegin(t)) + } -> ZString; + { + std::get<1>(*std::cbegin(t)) + } -> ZString; +} and std::tuple_size_v::value_type> +== 2; +#endif // PQXX_HAVE_CONCEPTS +} // namespace pqxx::internal + + +namespace pqxx::internal::gate +{ +class connection_dbtransaction; +class connection_errorhandler; +class connection_largeobject; +class connection_notification_receiver; +class connection_pipeline; +class connection_sql_cursor; +class connection_stream_from; +class connection_stream_to; +class connection_transaction; +class const_connection_largeobject; +} // namespace pqxx::internal::gate + + +namespace pqxx +{ +/// Representation of a PostgreSQL table path. +/** A "table path" consists of a table name, optionally prefixed by a schema + * name, which in turn is optionally prefixed by a database name. + * + * A minimal example of a table path would be `{mytable}`. But a table path + * may also take the forms `{myschema,mytable}` or + * `{mydb,myschema,mytable}`. + */ +using table_path = std::initializer_list; + + +/// Encrypt a password. @deprecated Use connection::encrypt_password instead. +[[nodiscard, + deprecated("Use connection::encrypt_password instead.")]] std::string + PQXX_LIBEXPORT + encrypt_password(char const user[], char const password[]); + +/// Encrypt password. @deprecated Use connection::encrypt_password instead. +[[nodiscard, + deprecated("Use connection::encrypt_password instead.")]] inline std::string +encrypt_password(zview user, zview password) +{ +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return encrypt_password(user.c_str(), password.c_str()); +#include "pqxx/internal/ignore-deprecated-post.hxx" +} + + +/// Error verbosity levels. 
+enum class error_verbosity : int +{ + // These values must match those in libpq's PGVerbosity enum. + terse = 0, + normal = 1, + verbose = 2 +}; + + +/// Connection to a database. +/** This is the first class to look at when you wish to work with a database + * through libpqxx. The connection opens during construction, and closes upon + * destruction. + * + * When creating a connection, you can pass a connection URI or a postgres + * connection string, to specify the database server's address, a login + * username, and so on. If you don't, the connection will try to obtain them + * from certain environment variables. If those are not set either, the + * default is to try and connect to the local system's port 5432. + * + * Find more about connection strings here: + * + * https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING + * + * The variables are documented here: + * + * https://www.postgresql.org/docs/current/libpq-envars.html + * + * To query or manipulate the database once connected, use one of the + * transaction classes (see pqxx/transaction_base.hxx) and perhaps also the + * transactor framework (see pqxx/transactor.hxx). + * + * When a connection breaks, you will typically get a @ref broken_connection + * exception. This can happen at almost any point. + * + * @warning On Unix-like systems, including GNU and BSD systems, your program + * may receive the SIGPIPE signal when the connection to the backend breaks. By + * default this signal will abort your program. Use "signal(SIGPIPE, SIG_IGN)" + * if you want your program to continue running after a connection fails. + */ +class PQXX_LIBEXPORT connection +{ +public: + connection() : connection{""} {} + + /// Connect to a database, using `options` string. + explicit connection(char const options[]) + { + check_version(); + init(options); + } + + /// Connect to a database, using `options` string. + explicit connection(zview options) : connection{options.c_str()} + { + // (Delegates to other constructor which calls check_version for us.) + } + + /// Move constructor. + /** Moving a connection is not allowed if it has an open transaction, or has + * error handlers or notification receivers registered on it. In those + * situations, other objects may hold references to the old object which + * would become invalid and might produce hard-to-diagnose bugs. + */ + connection(connection &&rhs); + +#if defined(PQXX_HAVE_CONCEPTS) + /// Connect to a database, passing options as a range of key/value pairs. + /** @warning Experimental. Requires C++20 "concepts" support. Define + * `PQXX_HAVE_CONCEPTS` to enable it. + * + * There's no need to escape the parameter values. + * + * See the PostgreSQL libpq documentation for the full list of possible + * options: + * + * https://postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS + * + * The options can be anything that can be iterated as a series of pairs of + * zero-terminated strings: `std::pair`, or + * `std::tuple`, or + * `std::map`, and so on. + */ + template + inline connection(MAPPING const ¶ms); +#endif // PQXX_HAVE_CONCEPTS + + ~connection() + { + try + { + close(); + } + catch (std::exception const &) + {} + } + + /// Move assignment. + /** Neither connection can have an open transaction, registered error + * handlers, or registered notification receivers. + */ + connection &operator=(connection &&rhs); + + connection(connection const &) = delete; + connection &operator=(connection const &) = delete; + + /// Is this connection open at the moment? 
+ /** @warning This function is **not** needed in most code. Resist the + * temptation to check it after opening a connection. The `connection` + * constructor will throw a @ref broken_connection exception if can't connect + * to the database. + */ + [[nodiscard]] bool PQXX_PURE is_open() const noexcept; + + /// Invoke notice processor function. The message should end in newline. + void process_notice(char const[]) noexcept; + /// Invoke notice processor function. Newline at end is recommended. + /** The zview variant, with a message ending in newline, is the most + * efficient way to call process_notice. + */ + void process_notice(zview) noexcept; + + /// Enable tracing to a given output stream, or nullptr to disable. + void trace(std::FILE *) noexcept; + + /** + * @name Connection properties + * + * These are probably not of great interest, since most are derived from + * information supplied by the client program itself, but they are included + * for completeness. + * + * The connection needs to be currently active for these to work. + */ + //@{ + /// Name of database we're connected to, if any. + [[nodiscard]] char const *dbname() const; + + /// Database user ID we're connected under, if any. + [[nodiscard]] char const *username() const; + + /// Address of server, or nullptr if none specified (i.e. default or local) + [[nodiscard]] char const *hostname() const; + + /// Server port number we're connected to. + [[nodiscard]] char const *port() const; + + /// Process ID for backend process, or 0 if inactive. + [[nodiscard]] int PQXX_PURE backendpid() const &noexcept; + + /// Socket currently used for connection, or -1 for none. Use with care! + /** Query the current socket number. This is intended for event loops based + * on functions such as select() or poll(), where you're waiting for any of + * multiple file descriptors to become ready for communication. + * + * Please try to stay away from this function. It is really only meant for + * event loops that need to wait on more than one file descriptor. If all + * you need is to block until a notification arrives, for instance, use + * await_notification(). If you want to issue queries and retrieve results + * in nonblocking fashion, check out the pipeline class. + */ + [[nodiscard]] int PQXX_PURE sock() const &noexcept; + + /// What version of the PostgreSQL protocol is this connection using? + /** The answer can be 0 (when there is no connection); 3 for protocol 3.0; or + * possibly higher values as newer protocol versions come into use. + */ + [[nodiscard]] int PQXX_PURE protocol_version() const noexcept; + + /// What version of the PostgreSQL server are we connected to? + /** The result is a bit complicated: each of the major, medium, and minor + * release numbers is written as a two-digit decimal number, and the three + * are then concatenated. Thus server version 9.4.2 will be returned as the + * decimal number 90402. If there is no connection to the server, this + * returns zero. + * + * @warning When writing version numbers in your code, don't add zero at the + * beginning! Numbers beginning with zero are interpreted as octal (base-8) + * in C++. Thus, 070402 is not the same as 70402, and 080000 is not a number + * at all because there is no digit "8" in octal notation. Use strictly + * decimal notation when it comes to these version numbers. 
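+ *
+ * A rough worked example of decoding the number (illustrative only, not part
+ * of the original libpqxx documentation; `cx` is an open connection):
+ *
+ * ```cxx
+ * int const full{cx.server_version()};        // e.g. 90402 for version 9.4.2
+ * int const major_part{full / 10000};         // 9
+ * int const medium_part{(full / 100) % 100};  // 4
+ * int const minor_part{full % 100};           // 2
+ * ```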
+ */ + [[nodiscard]] int PQXX_PURE server_version() const noexcept; + //@} + + /// @name Text encoding + /** + * Each connection is governed by a "client encoding," which dictates how + * strings and other text is represented in bytes. The database server will + * send text data to you in this encoding, and you should use it for the + * queries and data which you send to the server. + * + * Search the PostgreSQL documentation for "character set encodings" to find + * out more about the available encodings, how to extend them, and how to use + * them. Not all server-side encodings are compatible with all client-side + * encodings or vice versa. + * + * Encoding names are case-insensitive, so e.g. "UTF8" is equivalent to + * "utf8". + * + * You can change the client encoding, but this may not work when the + * connection is in a special state, such as when streaming a table. It's + * not clear what happens if you change the encoding during a transaction, + * and then abort the transaction. + */ + //@{ + /// Get client-side character encoding, by name. + [[nodiscard]] std::string get_client_encoding() const; + + /// Set client-side character encoding, by name. + /** + * @param encoding Name of the character set encoding to use. + */ + void set_client_encoding(zview encoding) & + { + set_client_encoding(encoding.c_str()); + } + + /// Set client-side character encoding, by name. + /** + * @param encoding Name of the character set encoding to use. + */ + void set_client_encoding(char const encoding[]) &; + + /// Get the connection's encoding, as a PostgreSQL-defined code. + [[nodiscard]] int PQXX_PRIVATE encoding_id() const; + + //@} + + /// Set session variable, using SQL's `SET` command. + /** @deprecated To set a session variable, use @ref set_session_var. To set + * a transaction-local variable, execute an SQL `SET` command. + * + * @warning When setting a string value, you must escape and quote it first. + * Use the @ref quote() function to do that. + * + * @warning This executes an SQL query, so do not get or set variables while + * a table stream or pipeline is active on the same connection. + * + * @param var Variable to set. + * @param value New value for Var. This can be any SQL expression. If it's + * a string, be sure that it's properly escaped and quoted. + */ + [[deprecated("To set session variables, use set_session_var.")]] void + set_variable(std::string_view var, std::string_view value) &; + + /// Set one of the session variables to a new value. + /** This executes SQL, so do not do it while a pipeline or stream is active + * on the connection. + * + * The value you set here will last for the rest of the connection's + * duration, or until you set a new value. + * + * If you set the value while in a @ref dbtransaction (i.e. any transaction + * that is not a @ref nontransaction), then rolling back the transaction will + * undo the change. + * + * All applies to setting _session_ variables. You can also set the same + * variables as _local_ variables, in which case they will always revert to + * their previous value when the transaction ends (or when you overwrite them + * of course). To set a local variable, simply execute an SQL statement + * along the lines of "`SET LOCAL var = 'value'`" inside your transaction. + * + * @param var The variable to set. + * @param value The new value for the variable. + * @throw @ref variable_set_to_null if the value is null; this is not + * allowed. 
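+ *
+ * A minimal usage sketch (illustrative only, not part of the original libpqxx
+ * documentation; `cx` is an open connection and the value is made up):
+ *
+ * ```cxx
+ * cx.set_session_var("application_name", std::string{"inventory-sync"});
+ * std::string const name{cx.get_var("application_name")};  // "inventory-sync"
+ * ```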
+ */ + template + void set_session_var(std::string_view var, TYPE const &value) & + { + if constexpr (nullness::has_null) + { + if (nullness::is_null(value)) + throw variable_set_to_null{ + internal::concat("Attempted to set variable ", var, " to null.")}; + } + exec(internal::concat("SET ", quote_name(var), "=", quote(value))); + } + + /// Read session variable, using SQL's `SHOW` command. + /** @warning This executes an SQL query, so do not get or set variables while + * a table stream or pipeline is active on the same connection. + */ + [[deprecated("Use get_var instead.")]] std::string + get_variable(std::string_view); + + /// Read currently applicable value of a variable. + /** This function executes an SQL statement, so it won't work while a + * @ref pipeline or query stream is active on the connection. + * + * @return a blank `std::optional` if the variable's value is null, or its + * string value otherwise. + */ + std::string get_var(std::string_view var); + + /// Read currently applicable value of a variable. + /** This function executes an SQL statement, so it won't work while a + * @ref pipeline or query stream is active on the connection. + * + * If there is any possibility that the variable is null, ensure that `TYPE` + * can represent null values. + */ + template TYPE get_var_as(std::string_view var) + { + return from_string(get_var(var)); + } + + /** + * @name Notifications and Receivers + */ + //@{ + /// Check for pending notifications and take appropriate action. + /** This does not block. To wait for incoming notifications, either call + * await_notification() (it calls this function); or wait for incoming data + * on the connection's socket (i.e. wait to read), and then call this + * function repeatedly until it returns zero. After that, there are no more + * pending notifications so you may want to wait again. + * + * If any notifications are pending when you call this function, it + * processes them by finding any receivers that match the notification string + * and invoking those. If no receivers match, there is nothing to invoke but + * we do consider the notification processed. + * + * If any of the client-registered receivers throws an exception, the + * function will report it using the connection's errorhandlers. It does not + * re-throw the exceptions. + * + * @return Number of notifications processed. + */ + int get_notifs(); + + /// Wait for a notification to come in. + /** There are other events that will also terminate the wait, such as the + * backend failing. It will also wake up periodically. + * + * If a notification comes in, the call will process it, along with any other + * notifications that may have been pending. + * + * To wait for notifications into your own event loop instead, wait until + * there is incoming data on the connection's socket to be read, then call + * @ref get_notifs() repeatedly until it returns zero. + * + * @return Number of notifications processed. + */ + int await_notification(); + + /// Wait for a notification to come in, or for given timeout to pass. + /** There are other events that will also terminate the wait, such as the + * backend failing, or timeout expiring. + * + * If a notification comes in, the call will process it, along with any other + * notifications that may have been pending. + * + * To wait for notifications into your own event loop instead, wait until + * there is incoming data on the connection's socket to be read, then call + * @ref get_notifs repeatedly until it returns zero. 
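+ *
+ * A rough sketch (illustrative only, not part of the original libpqxx
+ * documentation; assumes a notification receiver is already registered on
+ * connection `cx`):
+ *
+ * ```cxx
+ * // Wait up to five seconds; any notifications that arrive are dispatched
+ * // to the registered receivers before the call returns.
+ * int const handled{cx.await_notification(5, 0)};
+ * ```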
+ *
+ * @return Number of notifications processed
+ */
+ int await_notification(std::time_t seconds, long microseconds);
+ //@}
+
+ /**
+ * @name Password encryption
+ *
+ * Use this when setting a new password for the user if password encryption
+ * is enabled. Inputs are the SQL name for the user for whom you wish to
+ * encrypt a password; the plaintext password; and the hash algorithm.
+ *
+ * The algorithm must be one of "md5", "scram-sha-256" (introduced in
+ * PostgreSQL 10), or `nullptr`. If the pointer is null, this will query
+ * the `password_encryption` setting from the server, and use the default
+ * algorithm as defined there.
+ *
+ * @return encrypted version of the password, suitable for encrypted
+ * PostgreSQL authentication.
+ *
+ * Thus you can change a user's password with:
+ * ```cxx
+ * void setpw(transaction_base &t, string const &user, string const &pw)
+ * {
+ * t.exec0("ALTER USER " + user + " "
+ * "PASSWORD '" + t.conn().encrypt_password(user,pw) + "'");
+ * }
+ * ```
+ *
+ * When building this against a libpq older than version 10, this will use
+ * an older function which only supports md5. In that case, requesting a
+ * different algorithm than md5 will result in a @ref feature_not_supported
+ * exception.
+ */
+ //@{
+ /// Encrypt a password for a given user.
+ [[nodiscard]] std::string
+ encrypt_password(zview user, zview password, zview algorithm)
+ {
+ return encrypt_password(user.c_str(), password.c_str(), algorithm.c_str());
+ }
+ /// Encrypt a password for a given user.
+ [[nodiscard]] std::string encrypt_password(
+ char const user[], char const password[], char const *algorithm = nullptr);
+ //@}
+
+ /**
+ * @name Prepared statements
+ *
+ * PostgreSQL supports prepared SQL statements, i.e. statements that you can
+ * register under a name you choose, optimized once by the backend, and
+ * executed any number of times under the given name.
+ *
+ * Prepared statement definitions are not sensitive to transaction
+ * boundaries. A statement defined inside a transaction will remain defined
+ * outside that transaction, even if the transaction itself is subsequently
+ * aborted. Once a statement has been prepared, it will only go away if you
+ * close the connection or explicitly "unprepare" the statement.
+ *
+ * Use the `pqxx::transaction_base::exec_prepared` functions to execute a
+ * prepared statement. See @ref prepared for a full discussion.
+ *
+ * @warning Using prepared statements can save time, but if your statement
+ * takes parameters, it may also make your application significantly slower!
+ * The reason is that the server works out a plan for executing the query
+ * when you prepare it. At that time, of course it does not know the values
+ * for the parameters that you will pass. If you execute a query without
+ * preparing it, then the server works out the plan on the spot, with full
+ * knowledge of the parameter values.
+ *
+ * A statement's definition can refer to its parameters as `$1`, `$2`, etc.
+ * The first parameter you pass to the call provides a value for `$1`, and
+ * so on.
+ *
+ * Here's an example of how to use prepared statements.
+ *
+ * ```cxx
+ * using namespace pqxx;
+ * void foo(connection &c)
+ * {
+ * c.prepare("findtable", "select * from pg_tables where name=$1");
+ * work tx{c};
+ * result r = tx.exec_prepared("findtable", "mytable");
+ * if (std::empty(r)) throw runtime_error{"mytable not found!"};
+ * }
+ * ```
+ */
+ //@{
+
+ /// Define a prepared statement.
+ /** + * @param name unique name for the new prepared statement. + * @param definition SQL statement to prepare. + */ + void prepare(zview name, zview definition) & + { + prepare(name.c_str(), definition.c_str()); + } + + /** + * @param name unique name for the new prepared statement. + * @param definition SQL statement to prepare. + */ + void prepare(char const name[], char const definition[]) &; + + /// Define a nameless prepared statement. + /** + * This can be useful if you merely want to pass large binary parameters to a + * statement without otherwise wishing to prepare it. If you use this + * feature, always keep the definition and the use close together to avoid + * the nameless statement being redefined unexpectedly by code somewhere + * else. + */ + void prepare(char const definition[]) &; + void prepare(zview definition) & { return prepare(definition.c_str()); } + + /// Drop prepared statement. + void unprepare(std::string_view name); + + //@} + + // C++20: constexpr. Breaks ABI. + /// Suffix unique number to name to make it unique within session context. + /** Used internally to generate identifiers for SQL objects (such as cursors + * and nested transactions) based on a given human-readable base name. + */ + [[nodiscard]] std::string adorn_name(std::string_view); + + /** + * @defgroup escaping-functions String-escaping functions + */ + //@{ + + /// Escape string for use as SQL string literal on this connection. + /** @warning This accepts a length, and it does not require a terminating + * zero byte. But if there is a zero byte, escaping stops there even if + * it's not at the end of the string! + */ + [[deprecated("Use std::string_view or pqxx:zview.")]] std::string + esc(char const text[], std::size_t maxlen) const + { + return esc(std::string_view{text, maxlen}); + } + + /// Escape string for use as SQL string literal on this connection. + [[nodiscard]] std::string esc(char const text[]) const + { + return esc(std::string_view{text}); + } + +#if defined(PQXX_HAVE_SPAN) + /// Escape string for use as SQL string literal, into `buffer`. + /** Use this variant when you want to re-use the same buffer across multiple + * calls. If that's not the case, or convenience and simplicity are more + * important, use the single-argument variant. + * + * For every byte in `text`, there must be at least 2 bytes of space in + * `buffer`; plus there must be one byte of space for a trailing zero. + * Throws @ref range_error if this space is not available. + * + * Returns a reference to the escaped string, which is actually stored in + * `buffer`. + */ + [[nodiscard]] std::string_view + esc(std::string_view text, std::span buffer) + { + auto const size{std::size(text)}, space{std::size(buffer)}; + auto const needed{2 * size + 1}; + if (space < needed) + throw range_error{internal::concat( + "Not enough room to escape string of ", size, " byte(s): need ", + needed, " bytes of buffer space, but buffer size is ", space, ".")}; + auto const data{buffer.data()}; + return {data, esc_to_buf(text, data)}; + } +#endif + + /// Escape string for use as SQL string literal on this connection. + /** @warning This is meant for text strings only. It cannot contain bytes + * whose value is zero ("nul bytes"). + */ + [[nodiscard]] std::string esc(std::string_view text) const; + +#if defined(PQXX_HAVE_CONCEPTS) + /// Escape binary string for use as SQL string literal on this connection. + /** This is identical to `esc_raw(data)`. 
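+ *
+ * A rough sketch (illustrative only, not part of the original libpqxx
+ * documentation; requires a PQXX_HAVE_CONCEPTS build and an open
+ * connection `cx`):
+ *
+ * ```cxx
+ * std::vector<std::byte> const payload{std::byte{0x01}, std::byte{0xff}};
+ * std::string const escaped{cx.esc(payload)};  // same as cx.esc_raw(payload)
+ * ```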
*/ + template [[nodiscard]] std::string esc(DATA const &data) const + { + return esc_raw(data); + } +#endif + +#if defined(PQXX_HAVE_CONCEPTS) && defined(PQXX_HAVE_SPAN) + /// Escape binary string for use as SQL string literal, into `buffer`. + /** Use this variant when you want to re-use the same buffer across multiple + * calls. If that's not the case, or convenience and simplicity are more + * important, use the single-argument variant. + * + * For every byte in `data`, there must be at least two bytes of space in + * `buffer`; plus there must be two bytes of space for a header and one for + * a trailing zero. Throws @ref range_error if this space is not available. + * + * Returns a reference to the escaped string, which is actually stored in + * `buffer`. + */ + template + [[nodiscard]] zview esc(DATA const &data, std::span buffer) const + { + auto const size{std::size(data)}, space{std::size(buffer)}; + auto const needed{internal::size_esc_bin(std::size(data))}; + if (space < needed) + throw range_error{internal::concat( + "Not enough room to escape binary string of ", size, " byte(s): need ", + needed, " bytes of buffer space, but buffer size is ", space, ".")}; + + std::basic_string_view view{std::data(data), std::size(data)}; + auto const out{std::data(buffer)}; + // Actually, in the modern format, we know beforehand exactly how many + // bytes we're going to fill. Just leave out the trailing zero. + internal::esc_bin(view, out); + return zview{out, needed - 1}; + } +#endif + + /// Escape binary string for use as SQL string literal on this connection. + [[deprecated("Use std::byte for binary data.")]] std::string + esc_raw(unsigned char const bin[], std::size_t len) const; + + /// Escape binary string for use as SQL string literal on this connection. + /** You can also just use @ref esc with a binary string. */ + [[nodiscard]] std::string esc_raw(std::basic_string_view) const; + +#if defined(PQXX_HAVE_SPAN) + /// Escape binary string for use as SQL string literal, into `buffer`. + /** You can also just use @ref esc with a binary string. */ + [[nodiscard]] std::string + esc_raw(std::basic_string_view, std::span buffer) const; +#endif + +#if defined(PQXX_HAVE_CONCEPTS) + /// Escape binary string for use as SQL string literal on this connection. + /** You can also just use @ref esc with a binary string. */ + template + [[nodiscard]] std::string esc_raw(DATA const &data) const + { + return esc_raw( + std::basic_string_view{std::data(data), std::size(data)}); + } +#endif + +#if defined(PQXX_HAVE_CONCEPTS) && defined(PQXX_HAVE_SPAN) + /// Escape binary string for use as SQL string literal, into `buffer`. + template + [[nodiscard]] zview esc_raw(DATA const &data, std::span buffer) const + { + return this->esc(binary_cast(data), buffer); + } +#endif + + /// Unescape binary data, e.g. from a table field or notification payload. + /** Takes a binary string as escaped by PostgreSQL, and returns a restored + * copy of the original binary data. + */ + [[nodiscard, deprecated("Use unesc_bin() instead.")]] std::string + unesc_raw(zview text) const + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return unesc_raw(text.c_str()); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Unescape binary data, e.g. from a table field or notification payload. + /** Takes a binary string as escaped by PostgreSQL, and returns a restored + * copy of the original binary data. 
+ */ + [[nodiscard, deprecated("Use unesc_bin() instead.")]] std::string + unesc_raw(char const text[]) const; + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Unescape binary data, e.g. from a table field or notification payload. + /** Takes a binary string as escaped by PostgreSQL, and returns a restored + * copy of the original binary data. + * + * (The data must be encoded in PostgreSQL's "hex" format. The legacy + * "bytea" escape format, used prior to PostgreSQL 9.0, is no longer + * supported.) + */ + [[nodiscard]] std::basic_string + unesc_bin(std::string_view text) const + { + std::basic_string buf; + buf.resize(pqxx::internal::size_unesc_bin(std::size(text))); + pqxx::internal::unesc_bin(text, buf.data()); + return buf; + } + + /// Escape and quote a string of binary data. + [[deprecated("Use quote(std::basic_string_view).")]] std::string + quote_raw(unsigned char const bin[], std::size_t len) const; + + /// Escape and quote a string of binary data. + std::string quote_raw(std::basic_string_view) const; + +#if defined(PQXX_HAVE_CONCEPTS) + /// Escape and quote a string of binary data. + /** You can also just use @ref quote with binary data. */ + template + [[nodiscard]] std::string quote_raw(DATA const &data) const + { + return quote_raw( + std::basic_string_view{std::data(data), std::size(data)}); + } +#endif + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Escape and quote an SQL identifier for use in a query. + [[nodiscard]] std::string quote_name(std::string_view identifier) const; + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Escape and quote a table name. + /** When passing just a table name, this is just another name for + * @ref quote_name. + */ + [[nodiscard]] std::string quote_table(std::string_view name) const; + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Escape and quote a table path. + /** A table path consists of a table name, optionally prefixed by a schema + * name; and if both are given, they are in turn optionally prefixed by a + * database name. + * + * Each portion of the path (database name, schema name, table name) will be + * quoted separately, and they will be joined together by dots. So for + * example, `myschema.mytable` will become `"myschema"."mytable"`. + */ + [[nodiscard]] std::string quote_table(table_path) const; + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Quote and comma-separate a series of column names. + /** Use this to save a bit of work in cases where you repeatedly need to pass + * the same list of column names, e.g. with @ref stream_to and @ref + * stream_from. Some functions that need to quote the columns list + * internally, will have a "raw" alternative which let you do the quoting + * yourself. It's a bit of extra work, but it can in rare cases let you + * eliminate some duplicate work in quoting them repeatedly. + */ + template + inline std::string quote_columns(STRINGS const &columns) const; + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Represent object as SQL string, including quoting & escaping. + /** + * Recognises nulls and represents them as SQL nulls. They get no quotes. + */ + template + [[nodiscard]] inline std::string quote(T const &t) const; + + [[deprecated("Use std::byte for binary data.")]] std::string + quote(binarystring const &) const; + + // TODO: Make "into buffer" variant to eliminate a string allocation. 
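+ // A rough sketch of the quoting helpers above (illustrative only, not part
+ // of the original libpqxx documentation; `cx` is an open connection and the
+ // table and value are made up):
+ //
+ //   std::string const sql{
+ //     "SELECT note FROM " + cx.quote_name("my table")            // -> "my table"
+ //     + " WHERE author = " + cx.quote(std::string{"O'Hara"})};   // -> 'O''Hara'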
+ /// Escape and quote binary data for use as a BYTEA value in SQL statement. + [[nodiscard]] std::string + quote(std::basic_string_view bytes) const; + + // TODO: Make "into buffer" variant to eliminate a string allocation. + /// Escape string for literal LIKE match. + /** Use this when part of an SQL "LIKE" pattern should match only as a + * literal string, not as a pattern, even if it contains "%" or "_" + * characters that would normally act as wildcards. + * + * The string does not get string-escaped or quoted. You do that later. + * + * For instance, let's say you have a string `name` entered by the user, + * and you're searching a `file` column for items that match `name` + * followed by a dot and three letters. Even if `name` contains wildcard + * characters "%" or "_", you only want those to match literally, so "_" + * only matches "_" and "%" only matches a single "%". + * + * You do that by "like-escaping" `name`, appending the wildcard pattern + * `".___"`, and finally, escaping and quoting the result for inclusion in + * your query: + * + * ```cxx + * tx.exec( + * "SELECT file FROM item WHERE file LIKE " + + * tx.quote(tx.esc_like(name) + ".___")); + * ``` + * + * The SQL "LIKE" operator also lets you choose your own escape character. + * This is supported, but must be a single-byte character. + */ + [[nodiscard]] std::string + esc_like(std::string_view text, char escape_char = '\\') const; + //@} + + /// Attempt to cancel the ongoing query, if any. + /** You can use this from another thread, and/or while a query is executing + * in a pipeline, but it's up to you to ensure that you're not canceling the + * wrong query. This may involve locking. + */ + void cancel_query(); + +#if defined(_WIN32) || __has_include() + /// Set socket to blocking (true) or nonblocking (false). + /** @warning Do not use this unless you _really_ know what you're doing. + * @warning This function is available on most systems, but not necessarily + * all. + */ + void set_blocking(bool block) &; +#endif // defined(_WIN32) || __has_include() + + /// Set session verbosity. + /** Set the verbosity of error messages to "terse", "normal" (the default), + * or "verbose." + * + * If "terse", returned messages include severity, primary text, and + * position only; this will normally fit on a single line. "normal" produces + * messages that include the above plus any detail, hint, or context fields + * (these might span multiple lines). "verbose" includes all available + * fields. + */ + void set_verbosity(error_verbosity verbosity) &noexcept; + + /// Return pointers to the active errorhandlers. + /** The entries are ordered from oldest to newest handler. + * + * You may use this to find errorhandlers that your application wants to + * delete when destroying the connection. Be aware, however, that libpqxx + * may also add errorhandlers of its own, and those will be included in the + * list. If this is a problem for you, derive your errorhandlers from a + * custom base class derived from pqxx::errorhandler. Then use dynamic_cast + * to find which of the error handlers are yours. + * + * The pointers point to the real errorhandlers. The container it returns + * however is a copy of the one internal to the connection, not a reference. + */ + [[nodiscard]] std::vector get_errorhandlers() const; + + /// Return a connection string encapsulating this connection's options. + /** The connection must be currently open for this to work. + * + * Returns a reconstruction of this connection's connection string. 
It may + * not exactly match the connection string you passed in when creating this + * connection. + */ + [[nodiscard]] std::string connection_string() const; + + /// Explicitly close the connection. + /** The destructor will do this for you automatically. Still, there is a + * reason to `close()` objects explicitly where possible: if an error should + * occur while closing, `close()` can throw an exception. A destructor + * cannot. + * + * Closing a connection is idempotent. Closing a connection that's already + * closed does nothing. + */ + void close(); + + /// Seize control of a raw libpq connection. + /** @warning Do not do this. Please. It's for very rare, very specific + * use-cases. The mechanism may change (or break) in unexpected ways in + * future versions. + * + * @param raw_conn a raw libpq `PQconn` pointer. + */ + static connection seize_raw_connection(internal::pq::PGconn *raw_conn) + { + return connection{raw_conn}; + } + + /// Release the raw connection without closing it. + /** @warning Do not do this. It's for very rare, very specific use-cases. + * The mechanism may change (or break) in unexpected ways in future versions. + * + * The `connection` object becomes unusable after this. + */ + internal::pq::PGconn *release_raw_connection() && + { + return std::exchange(m_conn, nullptr); + } + +private: + friend class connecting; + enum connect_mode + { + connect_nonblocking + }; + connection(connect_mode, zview connection_string); + + /// For use by @ref seize_raw_connection. + explicit connection(internal::pq::PGconn *raw_conn) : m_conn{raw_conn} {} + + /// Poll for ongoing connection, try to progress towards completion. + /** Returns a pair of "now please wait to read data from socket" and "now + * please wait to write data to socket." Both will be false when done. + * + * Throws an exception if polling indicates that the connection has failed. + */ + std::pair poll_connect(); + + // Initialise based on connection string. + void init(char const options[]); + // Initialise based on parameter names and values. + void init(char const *params[], char const *values[]); + void complete_init(); + + result make_result( + internal::pq::PGresult *pgr, std::shared_ptr const &query, + std::string_view desc = ""sv); + + void PQXX_PRIVATE set_up_state(); + + int PQXX_PRIVATE PQXX_PURE status() const noexcept; + + /// Escape a string, into a buffer allocated by the caller. + /** The buffer must have room for at least `2*std::size(text) + 1` bytes. + * + * Returns the number of bytes written, including the trailing zero. + */ + std::size_t esc_to_buf(std::string_view text, char *buf) const; + + friend class internal::gate::const_connection_largeobject; + char const *PQXX_PURE err_msg() const noexcept; + + void PQXX_PRIVATE process_notice_raw(char const msg[]) noexcept; + + result exec_prepared(std::string_view statement, internal::c_params const &); + + /// Throw @ref usage_error if this connection is not in a movable state. + void check_movable() const; + /// Throw @ref usage_error if not in a state where it can be move-assigned. 
+ void check_overwritable() const; + + friend class internal::gate::connection_errorhandler; + void PQXX_PRIVATE register_errorhandler(errorhandler *); + void PQXX_PRIVATE unregister_errorhandler(errorhandler *) noexcept; + + friend class internal::gate::connection_transaction; + result exec(std::string_view, std::string_view = ""sv); + result + PQXX_PRIVATE exec(std::shared_ptr, std::string_view = ""sv); + void PQXX_PRIVATE register_transaction(transaction_base *); + void PQXX_PRIVATE unregister_transaction(transaction_base *) noexcept; + + friend class internal::gate::connection_stream_from; + std::pair>, std::size_t> + PQXX_PRIVATE read_copy_line(); + + friend class internal::gate::connection_stream_to; + void PQXX_PRIVATE write_copy_line(std::string_view); + void PQXX_PRIVATE end_copy_write(); + + friend class internal::gate::connection_largeobject; + internal::pq::PGconn *raw_connection() const { return m_conn; } + + friend class internal::gate::connection_notification_receiver; + void add_receiver(notification_receiver *); + void remove_receiver(notification_receiver *) noexcept; + + friend class internal::gate::connection_pipeline; + void PQXX_PRIVATE start_exec(char const query[]); + bool PQXX_PRIVATE consume_input() noexcept; + bool PQXX_PRIVATE is_busy() const noexcept; + internal::pq::PGresult *get_result(); + + friend class internal::gate::connection_dbtransaction; + friend class internal::gate::connection_sql_cursor; + + result exec_params(std::string_view query, internal::c_params const &args); + + /// Connection handle. + internal::pq::PGconn *m_conn = nullptr; + + /// Active transaction on connection, if any. + /** We don't use this for anything, except to check for open transactions + * when we close the connection or start a new transaction. + * + * We also don't allow move construction or move assignment while there's a + * transaction, since moving the connection in that case would leave one or + * more pointers back from the transaction to the connection dangling. + */ + transaction_base const *m_trans = nullptr; + + std::list m_errorhandlers; + + using receiver_list = + std::multimap; + /// Notification receivers. + receiver_list m_receivers; + + /// Unique number to use as suffix for identifiers (see adorn_name()). + int m_unique_id = 0; +}; + + +/// @deprecated Old base class for connection. They are now the same class. +using connection_base = connection; + + +/// An ongoing, non-blocking stepping stone to a connection. +/** Use this when you want to create a connection to the database, but without + * blocking your whole thread. It is only available on systems that have + * the `` header, and Windows. + * + * Connecting in this way is probably not "faster" (it's more complicated and + * has some extra overhead), but in some situations you can use it to make your + * application as a whole faster. It all depends on having other useful work + * to do in the same thread, and being able to wait on a socket. If you have + * other I/O going on at the same time, your event loop can wait for both the + * libpqxx socket and your own sockets, and wake up whenever any of them is + * ready to do work. + * + * Connecting in this way is not properly "asynchronous;" it's merely + * "nonblocking." This means it's not a super-high-performance mechanism like + * you might get with e.g. `io_uring`. In particular, if we need to look up + * the database hostname in DNS, that will happen synchronously. + * + * To use this, create the `connecting` object, passing a connection string. 
+ * Then loop: If @ref wait_to_read returns true, wait for the socket to have + * incoming data on it. If @ref wait_to_write returns true, wait for the + * socket to be ready for writing. Then call @ref process to process any + * incoming or outgoing data. Do all of this until @ref done returns true (or + * there is an exception). Finally, call @ref produce to get the completed + * connection. + * + * For example: + * + * ```cxx + * pqxx::connecting cg{}; + * + * // Loop until we're done connecting. + * while (!cg.done()) + * { + * wait_for_fd(cg.sock(), cg.wait_to_read(), cg.wait_to_write()); + * cg.process(); + * } + * + * pqxx::connection conn = std::move(cg).produce(); + * + * // At this point, conn is a working connection. You can no longer use + * // cg at all. + * ``` + */ +class PQXX_LIBEXPORT connecting +{ +public: + /// Start connecting. + connecting(zview connection_string = ""_zv); + + connecting(connecting const &) = delete; + connecting(connecting &&) = default; + connecting &operator=(connecting const &) = delete; + connecting &operator=(connecting &&) = default; + + /// Get the socket. The socket may change during the connection process. + [[nodiscard]] int sock() const &noexcept { return m_conn.sock(); } + + /// Should we currently wait to be able to _read_ from the socket? + [[nodiscard]] constexpr bool wait_to_read() const &noexcept + { + return m_reading; + } + + /// Should we currently wait to be able to _write_ to the socket? + [[nodiscard]] constexpr bool wait_to_write() const &noexcept + { + return m_writing; + } + + /// Progress towards completion (but don't block). + void process() &; + + /// Is our connection finished? + [[nodiscard]] constexpr bool done() const &noexcept + { + return not m_reading and not m_writing; + } + + /// Produce the completed connection object. + /** Use this only once, after @ref done returned `true`. Once you have + * called this, the `connecting` instance has no more use or meaning. You + * can't call any of its member functions afterwards. + * + * This member function is rvalue-qualified, meaning that you can only call + * it on an rvalue instance of the class. If what you have is not an rvalue, + * turn it into one by wrapping it in `std::move()`. + */ + [[nodiscard]] connection produce() &&; + +private: + connection m_conn; + bool m_reading{false}; + bool m_writing{true}; +}; + + +template inline std::string connection::quote(T const &t) const +{ + if constexpr (nullness::always_null) + { + return "NULL"; + } + else + { + if (is_null(t)) + return "NULL"; + auto const text{to_string(t)}; + + // Okay, there's an easy way to do this and there's a hard way. The easy + // way was "quote, esc(to_string(t)), quote". I'm going with the hard way + // because it's going to save some string manipulation that will probably + // incur some unnecessary memory allocations and deallocations. 
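+ // Worst case, esc_to_buf() writes two output bytes per input byte; add the
+ // two quotes and the terminating zero that esc_to_buf() also writes.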
+ std::string buf{'\''}; + buf.resize(2 + 2 * std::size(text) + 1); + auto const content_bytes{esc_to_buf(text, buf.data() + 1)}; + auto const closing_quote{1 + content_bytes}; + buf[closing_quote] = '\''; + auto const end{closing_quote + 1}; + buf.resize(end); + return buf; + } +} + + +template +inline std::string connection::quote_columns(STRINGS const &columns) const +{ + return separated_list( + ","sv, std::cbegin(columns), std::cend(columns), + [this](auto col) { return this->quote_name(*col); }); +} + + +#if defined(PQXX_HAVE_CONCEPTS) +template +inline connection::connection(MAPPING const ¶ms) +{ + check_version(); + + std::vector keys, values; + if constexpr (std::ranges::sized_range) + { + auto const size{std::ranges::size(params) + 1}; + keys.reserve(size); + values.reserve(size); + } + for (auto const &[key, value] : params) + { + keys.push_back(internal::as_c_string(key)); + values.push_back(internal::as_c_string(value)); + } + keys.push_back(nullptr); + values.push_back(nullptr); + init(std::data(keys), std::data(values)); +} +#endif // PQXX_HAVE_CONCEPTS +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/cursor b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/cursor new file mode 100644 index 000000000..e20b3a4fa --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/cursor @@ -0,0 +1,8 @@ +/** Definition of the iterator/container-style cursor classes. + * + * C++-style wrappers for SQL cursors + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/cursor.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/cursor.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/cursor.hxx new file mode 100644 index 000000000..b392e2407 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/cursor.hxx @@ -0,0 +1,483 @@ +/* Definition of the iterator/container-style cursor classes. + * + * C++-style wrappers for SQL cursors. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/cursor instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_CURSOR +#define PQXX_H_CURSOR + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include + +#include "pqxx/result.hxx" +#include "pqxx/transaction_base.hxx" + + +namespace pqxx +{ +/// Common definitions for cursor types +/** In C++ terms, fetches are always done in pre-increment or pre-decrement + * fashion--i.e. the result does not include the row the cursor is on at the + * beginning of the fetch, and the cursor ends up being positioned on the last + * row in the result. + * + * There are singular positions akin to `end()` at both the beginning and the + * end of the cursor's range of movement, although these fit in so naturally + * with the semantics that one rarely notices them. The cursor begins at the + * first of these, but any fetch in the forward direction will move the cursor + * off this position and onto the first row before returning anything. 
+ */ +class PQXX_LIBEXPORT cursor_base +{ +public: + using size_type = result_size_type; + using difference_type = result_difference_type; + + /// Cursor access-pattern policy + /** Allowing a cursor to move forward only can result in better performance, + * so use this access policy whenever possible. + */ + enum access_policy + { + /// Cursor can move forward only + forward_only, + /// Cursor can move back and forth + random_access + }; + + /// Cursor update policy + /** + * @warning Not all PostgreSQL versions support updatable cursors. + */ + enum update_policy + { + /// Cursor can be used to read data but not to write + read_only, + /// Cursor can be used to update data as well as read it + update + }; + + /// Cursor destruction policy + /** The normal thing to do is to make a cursor object the owner of the SQL + * cursor it represents. There may be cases, however, where a cursor needs + * to persist beyond the end of the current transaction (and thus also beyond + * the lifetime of the cursor object that created it!), where it can be + * "adopted" into a new cursor object. See the basic_cursor documentation + * for an explanation of cursor adoption. + * + * If a cursor is created with "loose" ownership policy, the object + * representing the underlying SQL cursor will not take the latter with it + * when its own lifetime ends, nor will its originating transaction. + * + * @warning Use this feature with care and moderation. Only one cursor + * object should be responsible for any one underlying SQL cursor at any + * given time. + */ + enum ownership_policy + { + /// Destroy SQL cursor when cursor object is closed at end of transaction + owned, + /// Leave SQL cursor in existence after close of object and transaction + loose + }; + + cursor_base() = delete; + cursor_base(cursor_base const &) = delete; + cursor_base &operator=(cursor_base const &) = delete; + + /** + * @name Special movement distances. + */ + //@{ + + // TODO: Make constexpr inline (but breaks ABI). + /// Special value: read until end. + /** @return Maximum value for result::difference_type, so the cursor will + * attempt to read the largest possible result set. + */ + [[nodiscard]] static difference_type all() noexcept; + + /// Special value: read one row only. + /** @return Unsurprisingly, 1. + */ + [[nodiscard]] static constexpr difference_type next() noexcept { return 1; } + + /// Special value: read backwards, one row only. + /** @return Unsurprisingly, -1. + */ + [[nodiscard]] static constexpr difference_type prior() noexcept + { + return -1; + } + + // TODO: Make constexpr inline (but breaks ABI). + /// Special value: read backwards from current position back to origin. + /** @return Minimum value for result::difference_type. + */ + [[nodiscard]] static difference_type backward_all() noexcept; + + //@} + + /// Name of underlying SQL cursor + /** + * @returns Name of SQL cursor, which may differ from original given name. + * @warning Don't use this to access the SQL cursor directly without going + * through the provided wrapper classes! + */ + [[nodiscard]] constexpr std::string const &name() const noexcept + { + return m_name; + } + +protected: + cursor_base(connection &, std::string_view Name, bool embellish_name = true); + + std::string const m_name; +}; +} // namespace pqxx + + +#include + + +namespace pqxx +{ +/// "Stateless cursor" class: easy API for retrieving parts of result sets +/** This is a front-end for SQL cursors, but with a more C++-like API. 
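+ *
+ * A rough usage sketch (illustrative only, not part of the original libpqxx
+ * documentation; the query and names are made up):
+ *
+ * ```cxx
+ * pqxx::work tx{cx};
+ * pqxx::stateless_cursor<pqxx::cursor_base::read_only, pqxx::cursor_base::owned>
+ *   cur{tx, "SELECT generate_series(1, 1000)", "series_cur", false};
+ * pqxx::result const page{cur.retrieve(0, 10)};  // rows 0 through 9
+ * ```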
+ * + * Actually, stateless_cursor feels entirely different from SQL cursors. You + * don't keep track of positions, fetches, and moves; you just say which rows + * you want. See the retrieve() member function. + */ +template +class stateless_cursor +{ +public: + using size_type = result_size_type; + using difference_type = result_difference_type; + + /// Create cursor. + /** + * @param tx The transaction within which you want to create the cursor. + * @param query The SQL query whose results the cursor should traverse. + * @param cname A hint for the cursor's name. The actual SQL cursor's name + * will be based on this (though not necessarily identical). + * @param hold Create a `WITH HOLD` cursor? Such cursors stay alive after + * the transaction has ended, so you can continue to use it. + */ + stateless_cursor( + transaction_base &tx, std::string_view query, std::string_view cname, + bool hold) : + m_cur{tx, query, cname, cursor_base::random_access, up, op, hold} + {} + + /// Adopt an existing scrolling SQL cursor. + /** This lets you define a cursor yourself, and then wrap it in a + * libpqxx-managed `stateless_cursor` object. + * + * @param tx The transaction within which you want to manage the cursor. + * @param adopted_cursor Your cursor's SQL name. + */ + stateless_cursor(transaction_base &tx, std::string_view adopted_cursor) : + m_cur{tx, adopted_cursor, op} + { + // Put cursor in known position + m_cur.move(cursor_base::backward_all()); + } + + /// Close this cursor. + /** The destructor will do this for you automatically. + * + * Closing a cursor is idempotent. Closing a cursor that's already closed + * does nothing. + */ + void close() noexcept { m_cur.close(); } + + /// Number of rows in cursor's result set + /** @note This function is not const; it may need to scroll to find the size + * of the result set. + */ + [[nodiscard]] size_type size() + { + return internal::obtain_stateless_cursor_size(m_cur); + } + + /// Retrieve rows from begin_pos (inclusive) to end_pos (exclusive) + /** Rows are numbered starting from 0 to size()-1. + * + * @param begin_pos First row to retrieve. May be one row beyond the end of + * the result set, to avoid errors for empty result sets. Otherwise, must be + * a valid row number in the result set. + * @param end_pos Row up to which to fetch. Rows are returned ordered from + * begin_pos to end_pos, i.e. in ascending order if begin_pos < end_pos but + * in descending order if begin_pos > end_pos. The end_pos may be + * arbitrarily inside or outside the result set; only existing rows are + * included in the result. + */ + result retrieve(difference_type begin_pos, difference_type end_pos) + { + return internal::stateless_cursor_retrieve( + m_cur, result::difference_type(size()), begin_pos, end_pos); + } + + /// Return this cursor's name. + [[nodiscard]] constexpr std::string const &name() const noexcept + { + return m_cur.name(); + } + +private: + internal::sql_cursor m_cur; +}; + + +class icursor_iterator; +} // namespace pqxx + + +namespace pqxx::internal::gate +{ +class icursor_iterator_icursorstream; +class icursorstream_icursor_iterator; +} // namespace pqxx::internal::gate + + +namespace pqxx +{ +/// Simple read-only cursor represented as a stream of results +/** SQL cursors can be tricky, especially in C++ since the two languages seem + * to have been designed on different planets. An SQL cursor has two singular + * positions akin to `end()` on either side of the underlying result set. 
+ * + * These cultural differences are hidden from view somewhat by libpqxx, which + * tries to make SQL cursors behave more like familiar C++ entities such as + * iterators, sequences, streams, and containers. + * + * Data is fetched from the cursor as a sequence of result objects. Each of + * these will contain the number of rows defined as the stream's stride, except + * of course the last block of data which may contain fewer rows. + * + * This class can create or adopt cursors that live outside any backend + * transaction, which your backend version may not support. + */ +class PQXX_LIBEXPORT icursorstream +{ +public: + using size_type = cursor_base::size_type; + using difference_type = cursor_base::difference_type; + + /// Set up a read-only, forward-only cursor. + /** Roughly equivalent to a C++ Standard Library istream, this cursor type + * supports only two operations: reading a block of rows while moving + * forward, and moving forward without reading any data. + * + * @param context Transaction context in which this cursor will be active. + * @param query SQL query whose results this cursor shall iterate. + * @param basename Suggested name for the SQL cursor; the library will append + * a unique code to ensure its uniqueness. + * @param sstride Number of rows to fetch per read operation; must be a + * positive number. + */ + icursorstream( + transaction_base &context, std::string_view query, + std::string_view basename, difference_type sstride = 1); + + /// Adopt existing SQL cursor. Use with care. + /** Forms a cursor stream around an existing SQL cursor, as returned by e.g. + * a server-side function. The SQL cursor will be cleaned up by the stream's + * destructor as if it had been created by the stream; cleaning it up by hand + * or adopting the same cursor twice is an error. + * + * Passing the name of the cursor as a string is not allowed, both to avoid + * confusion with the other constructor and to discourage unnecessary use of + * adopted cursors. + * + * @warning It is technically possible to adopt a "WITH HOLD" cursor, i.e. a + * cursor that stays alive outside its creating transaction. However, any + * cursor stream (including the underlying SQL cursor, naturally) must be + * destroyed before its transaction context object is destroyed. Therefore + * the only way to use SQL's WITH HOLD feature is to adopt the cursor, but + * defer doing so until after entering the transaction context that will + * eventually destroy it. + * + * @param context Transaction context in which this cursor will be active. + * @param cname Result field containing the name of the SQL cursor to adopt. + * @param sstride Number of rows to fetch per read operation; must be a + * positive number. + * @param op Ownership policy. Determines whether the cursor underlying this + * stream will be destroyed when the stream is closed. + */ + icursorstream( + transaction_base &context, field const &cname, difference_type sstride = 1, + cursor_base::ownership_policy op = cursor_base::owned); + + /// Return `true` if this stream may still return more data. + constexpr operator bool() const &noexcept { return not m_done; } + + /// Read new value into given result object; same as operator `>>`. + /** The result set may continue any number of rows from zero to the chosen + * stride, inclusive. An empty result will only be returned if there are no + * more rows to retrieve. + * + * @param res Write the retrieved data into this result object. 
+ * @return Reference to this very stream, to facilitate "chained" invocations + * ("C.get(r1).get(r2);") + */ + icursorstream &get(result &res) + { + res = fetchblock(); + return *this; + } + /// Read new value into given result object; same as `get(result&)`. + /** The result set may continue any number of rows from zero to the chosen + * stride, inclusive. An empty result will only be returned if there are no + * more rows to retrieve. + * + * @param res Write the retrieved data into this result object. + * @return Reference to this very stream, to facilitate "chained" invocations + * ("C >> r1 >> r2;") + */ + icursorstream &operator>>(result &res) { return get(res); } + + /// Move given number of rows forward without reading data. + /** Ignores any stride that you may have set. It moves by a given number of + * rows, not a number of strides. + * + * @return Reference to this stream itself, to facilitate "chained" + * invocations. + */ + icursorstream &ignore(std::streamsize n = 1) &; + + /// Change stride, i.e. the number of rows to fetch per read operation. + /** + * @param stride Must be a positive number. + */ + void set_stride(difference_type stride) &; + [[nodiscard]] constexpr difference_type stride() const noexcept + { + return m_stride; + } + +private: + result fetchblock(); + + friend class internal::gate::icursorstream_icursor_iterator; + size_type forward(size_type n = 1); + void insert_iterator(icursor_iterator *) noexcept; + void remove_iterator(icursor_iterator *) const noexcept; + + void service_iterators(difference_type); + + internal::sql_cursor m_cur; + + difference_type m_stride; + difference_type m_realpos, m_reqpos; + + mutable icursor_iterator *m_iterators; + + bool m_done; +}; + + +/// Approximate istream_iterator for icursorstream. +/** Intended as an implementation of an input_iterator (as defined by the C++ + * Standard Library), this class supports only two basic operations: reading + * the current element, and moving forward. In addition to the minimal + * guarantees for istream_iterators, this class supports multiple successive + * reads of the same position (the current result set is cached in the + * iterator) even after copying and even after new data have been read from the + * stream. This appears to be a requirement for input_iterators. Comparisons + * are also supported in the general case. + * + * The iterator does not care about its own position, however. Moving an + * iterator forward moves the underlying stream forward and reads the data from + * the new stream position, regardless of the iterator's old position in the + * stream. + * + * The stream's stride defines the granularity for all iterator movement or + * access operations, i.e. "ici += 1" advances the stream by one stride's worth + * of rows, and "*ici++" reads one stride's worth of rows from the stream. + * + * @warning Do not read from the underlying stream or its cursor, move its read + * position, or change its stride, between the time the first icursor_iterator + * on it is created and the time its last icursor_iterator is destroyed. + * + * @warning Manipulating these iterators within the context of a single cursor + * stream is not thread-safe. Creating a new iterator, copying one, + * or destroying one affects the stream as a whole. 
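+ *
+ * A rough usage sketch (illustrative only, not part of the original libpqxx
+ * documentation; `tx` is an open transaction and `process` is a hypothetical
+ * callback):
+ *
+ * ```cxx
+ * pqxx::icursorstream stream{tx, "SELECT * FROM mytable", "mycur", 100};
+ * for (pqxx::icursor_iterator it{stream}, end{}; it != end; ++it)
+ *   process(*it);  // each *it is a pqxx::result of up to 100 rows
+ * ```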
+ */ +class PQXX_LIBEXPORT icursor_iterator +{ +public: + using iterator_category = std::input_iterator_tag; + using value_type = result; + using pointer = result const *; + using reference = result const &; + using istream_type = icursorstream; + using size_type = istream_type::size_type; + using difference_type = istream_type::difference_type; + + icursor_iterator() noexcept; + explicit icursor_iterator(istream_type &) noexcept; + icursor_iterator(icursor_iterator const &) noexcept; + ~icursor_iterator() noexcept; + + result const &operator*() const + { + refresh(); + return m_here; + } + result const *operator->() const + { + refresh(); + return &m_here; + } + icursor_iterator &operator++(); + icursor_iterator operator++(int); + icursor_iterator &operator+=(difference_type); + icursor_iterator &operator=(icursor_iterator const &) noexcept; + + [[nodiscard]] bool operator==(icursor_iterator const &rhs) const; + [[nodiscard]] bool operator!=(icursor_iterator const &rhs) const noexcept + { + return not operator==(rhs); + } + [[nodiscard]] bool operator<(icursor_iterator const &rhs) const; + [[nodiscard]] bool operator>(icursor_iterator const &rhs) const + { + return rhs < *this; + } + [[nodiscard]] bool operator<=(icursor_iterator const &rhs) const + { + return not(*this > rhs); + } + [[nodiscard]] bool operator>=(icursor_iterator const &rhs) const + { + return not(*this < rhs); + } + +private: + void refresh() const; + + friend class internal::gate::icursor_iterator_icursorstream; + difference_type pos() const noexcept { return m_pos; } + void fill(result const &); + + icursorstream *m_stream{nullptr}; + result m_here; + difference_type m_pos; + icursor_iterator *m_prev{nullptr}, *m_next{nullptr}; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/dbtransaction b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/dbtransaction new file mode 100644 index 000000000..fa8d26476 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/dbtransaction @@ -0,0 +1,8 @@ +/** pqxx::dbtransaction abstract base class. + * + * pqxx::dbransaction defines a real transaction on the database. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/dbtransaction.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/dbtransaction.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/dbtransaction.hxx new file mode 100644 index 000000000..d85cb170f --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/dbtransaction.hxx @@ -0,0 +1,70 @@ +/* Definition of the pqxx::dbtransaction abstract base class. + * + * pqxx::dbransaction defines a real transaction on the database. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/dbtransaction instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_DBTRANSACTION +#define PQXX_H_DBTRANSACTION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/transaction_base.hxx" + +namespace pqxx +{ +/// Abstract transaction base class: bracket transactions on the database. 
+/** + * @ingroup transactions + * + * Use a dbtransaction-derived object such as "work" (transaction<>) to enclose + * operations on a database in a single "unit of work." This ensures that the + * whole series of operations either succeeds as a whole or fails completely. + * In no case will it leave half-finished work behind in the database. + * + * Once processing on a transaction has succeeded and any changes should be + * allowed to become permanent in the database, call commit(). If something + * has gone wrong and the changes should be forgotten, call abort() instead. + * If you do neither, an implicit abort() is executed at destruction time. + * + * It is an error to abort a transaction that has already been committed, or to + * commit a transaction that has already been aborted. Aborting an already + * aborted transaction or committing an already committed one is allowed, to + * make error handling easier. Repeated aborts or commits have no effect after + * the first one. + * + * Database transactions are not suitable for guarding long-running processes. + * If your transaction code becomes too long or too complex, consider ways to + * break it up into smaller ones. Unfortunately there is no universal recipe + * for this. + * + * The actual operations for committing/aborting the backend transaction are + * implemented by a derived class. The implementing concrete class must also + * call @ref close from its destructor. + */ +class PQXX_LIBEXPORT PQXX_NOVTABLE dbtransaction : public transaction_base +{ +protected: + /// Begin transaction. + explicit dbtransaction(connection &c) : transaction_base{c} {} + /// Begin transaction. + dbtransaction(connection &c, std::string_view tname) : + transaction_base{c, tname} + {} + /// Begin transaction. + dbtransaction( + connection &c, std::string_view tname, + std::shared_ptr rollback_cmd) : + transaction_base{c, tname, rollback_cmd} + {} +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/errorhandler b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/errorhandler new file mode 100644 index 000000000..ea572ee79 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/errorhandler @@ -0,0 +1,8 @@ +/** pqxx::errorhandler class. + * + * Callbacks for handling errors and warnings. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/errorhandler.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/errorhandler.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/errorhandler.hxx new file mode 100644 index 000000000..2ffb5703c --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/errorhandler.hxx @@ -0,0 +1,92 @@ +/* Definition of the pqxx::errorhandler class. + * + * pqxx::errorhandler handlers errors and warnings in a database session. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/errorhandler instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ERRORHANDLER +#define PQXX_H_ERRORHANDLER + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." 
+#endif + +#include "pqxx/types.hxx" + + +namespace pqxx::internal::gate +{ +class errorhandler_connection; +} + + +namespace pqxx +{ +/** + * @addtogroup errorhandler + */ +//@{ + +/// Base class for error-handler callbacks. +/** To receive errors and warnings from a connection, subclass this with your + * own error-handler functor, and instantiate it for the connection. Destroying + * the handler un-registers it. + * + * A connection can have multiple error handlers at the same time. When the + * database connection emits an error or warning message, it passes the message + * to each error handler, starting with the most recently registered one and + * progressing towards the oldest one. However an error handler may also + * instruct the connection not to pass the message to further handlers by + * returning "false." + * + * @warning Strange things happen when a result object outlives its parent + * connection. If you register an error handler on a connection, then you must + * not access the result after destroying the connection. This applies even if + * you destroy the error handler first! + */ +class PQXX_LIBEXPORT errorhandler +{ +public: + explicit errorhandler(connection &); + virtual ~errorhandler(); + + /// Define in subclass: receive an error or warning message from the + /// database. + /** + * @return Whether the same error message should also be passed to the + * remaining, older errorhandlers. + */ + virtual bool operator()(char const msg[]) noexcept = 0; + + errorhandler() = delete; + errorhandler(errorhandler const &) = delete; + errorhandler &operator=(errorhandler const &) = delete; + +private: + connection *m_home; + + friend class internal::gate::errorhandler_connection; + void unregister() noexcept; +}; + + +/// An error handler that suppresses any previously registered error handlers. +class quiet_errorhandler : public errorhandler +{ +public: + /// Suppress error notices. + quiet_errorhandler(connection &conn) : errorhandler{conn} {} + + /// Revert to previous handling of error notices. + virtual bool operator()(char const[]) noexcept override { return false; } +}; + +//@} +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/except b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/except new file mode 100644 index 000000000..e5dd508bf --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/except @@ -0,0 +1,8 @@ +/** libpqxx exception classes. + * + * pqxx::sql_error, pqxx::broken_connection, pqxx::in_doubt_error, ... + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/except.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/except.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/except.hxx new file mode 100644 index 000000000..24f959437 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/except.hxx @@ -0,0 +1,447 @@ +/* Definition of libpqxx exception classes. + * + * pqxx::sql_error, pqxx::broken_connection, pqxx::in_doubt_error, ... + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/except instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. 
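+ *
+ * A hedged usage sketch, not part of the original header: calling code
+ * typically catches the more specific classes declared below before falling
+ * back to pqxx::sql_error and pqxx::failure.  Here `cx` is an assumed open
+ * connection.
+ *
+ * ```cxx
+ * try
+ * {
+ *   pqxx::work tx{cx};
+ *   tx.exec("UPDATE accounts SET balance = balance - 10 WHERE id = 1");
+ *   tx.commit();
+ * }
+ * catch (pqxx::serialization_failure const &e)
+ * {
+ *   // Isolation-level conflict: the transaction may succeed when retried.
+ * }
+ * catch (pqxx::sql_error const &e)
+ * {
+ *   std::cerr << e.what() << " while executing: " << e.query() << '\n';
+ * }
+ * ```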
+ */
+#ifndef PQXX_H_EXCEPT
+#define PQXX_H_EXCEPT
+
+#if !defined(PQXX_HEADER_PRE)
+# error "Include libpqxx headers as <pqxx/header>, not <pqxx/header.hxx>."
+#endif
+
+#include <stdexcept>
+#include <string>
+
+
+namespace pqxx
+{
+/**
+ * @addtogroup exception Exception classes
+ *
+ * These exception classes follow, roughly, the two-level hierarchy defined by
+ * the PostgreSQL SQLSTATE error codes (see Appendix A of the PostgreSQL
+ * documentation corresponding to your server version).  This is not a complete
+ * mapping though.  There are other differences as well, e.g. the error code
+ * for `statement_completion_unknown` has a separate status in libpqxx as
+ * @ref in_doubt_error, and `too_many_connections` is classified as a
+ * `broken_connection` rather than a subtype of `insufficient_resources`.
+ *
+ * @see http://www.postgresql.org/docs/9.4/interactive/errcodes-appendix.html
+ *
+ * @{
+ */
+
+/// Run-time failure encountered by libpqxx, similar to std::runtime_error.
+struct PQXX_LIBEXPORT failure : std::runtime_error
+{
+  explicit failure(std::string const &);
+};
+
+
+/// Exception class for lost or failed backend connection.
+/**
+ * @warning When this happens on Unix-like systems, you may also get a SIGPIPE
+ * signal.  That signal aborts the program by default, so if you wish to be
+ * able to continue after a connection breaks, be sure to disarm this signal.
+ *
+ * If you're working on a Unix-like system, see the manual page for
+ * `signal` (2) on how to deal with SIGPIPE.  The easiest way to make this
+ * signal harmless is to make your program ignore it:
+ *
+ * ```cxx
+ * #include <csignal>
+ *
+ * int main()
+ * {
+ *   signal(SIGPIPE, SIG_IGN);
+ *   // ...
+ * ```
+ */
+struct PQXX_LIBEXPORT broken_connection : failure
+{
+  broken_connection();
+  explicit broken_connection(std::string const &);
+};
+
+
+/// The caller attempted to set a variable to null, which is not allowed.
+struct PQXX_LIBEXPORT variable_set_to_null : failure
+{
+  variable_set_to_null();
+  explicit variable_set_to_null(std::string const &);
+};
+
+
+/// Exception class for failed queries.
+/** Carries, in addition to a regular error message, a copy of the failed query
+ * and (if available) the SQLSTATE value accompanying the error.
+ */
+class PQXX_LIBEXPORT sql_error : public failure
+{
+  /// Query string.  Empty if unknown.
+  std::string const m_query;
+  /// SQLSTATE string describing the error type, if known; or empty string.
+  std::string const m_sqlstate;
+
+public:
+  explicit sql_error(
+    std::string const &whatarg = "", std::string const &Q = "",
+    char const sqlstate[] = nullptr);
+  virtual ~sql_error() noexcept override;
+
+  /// The query whose execution triggered the exception.
+  [[nodiscard]] PQXX_PURE std::string const &query() const noexcept;
+
+  /// SQLSTATE error code if known, or empty string otherwise.
+  [[nodiscard]] PQXX_PURE std::string const &sqlstate() const noexcept;
+};
+
+
+/// "Help, I don't know whether transaction was committed successfully!"
+/** Exception that might be thrown in rare cases where the connection to the
+ * database is lost while finishing a database transaction, and there's no way
+ * of telling whether it was actually executed by the backend.  In this case
+ * the database is left in an indeterminate (but consistent) state, and only
+ * manual inspection will tell which is the case.
+ */
+struct PQXX_LIBEXPORT in_doubt_error : failure
+{
+  explicit in_doubt_error(std::string const &);
+};
+
+
+/// The backend saw itself forced to roll back the ongoing transaction.
+struct PQXX_LIBEXPORT transaction_rollback : sql_error +{ + explicit transaction_rollback( + std::string const &whatarg, std::string const &q = "", + char const sqlstate[] = nullptr); +}; + + +/// Transaction failed to serialize. Please retry it. +/** Can only happen at transaction isolation levels REPEATABLE READ and + * SERIALIZABLE. + * + * The current transaction cannot be committed without violating the guarantees + * made by its isolation level. This is the effect of a conflict with another + * ongoing transaction. The transaction may still succeed if you try to + * perform it again. + */ +struct PQXX_LIBEXPORT serialization_failure : transaction_rollback +{ + explicit serialization_failure( + std::string const &whatarg, std::string const &q, + char const sqlstate[] = nullptr); +}; + + +/// We can't tell whether our last statement succeeded. +struct PQXX_LIBEXPORT statement_completion_unknown : transaction_rollback +{ + explicit statement_completion_unknown( + std::string const &whatarg, std::string const &q, + char const sqlstate[] = nullptr); +}; + + +/// The ongoing transaction has deadlocked. Retrying it may help. +struct PQXX_LIBEXPORT deadlock_detected : transaction_rollback +{ + explicit deadlock_detected( + std::string const &whatarg, std::string const &q, + char const sqlstate[] = nullptr); +}; + + +/// Internal error in libpqxx library +struct PQXX_LIBEXPORT internal_error : std::logic_error +{ + explicit internal_error(std::string const &); +}; + + +/// Error in usage of libpqxx library, similar to std::logic_error +struct PQXX_LIBEXPORT usage_error : std::logic_error +{ + explicit usage_error(std::string const &); +}; + + +/// Invalid argument passed to libpqxx, similar to std::invalid_argument +struct PQXX_LIBEXPORT argument_error : std::invalid_argument +{ + explicit argument_error(std::string const &); +}; + + +/// Value conversion failed, e.g. when converting "Hello" to int. +struct PQXX_LIBEXPORT conversion_error : std::domain_error +{ + explicit conversion_error(std::string const &); +}; + + +/// Could not convert value to string: not enough buffer space. +struct PQXX_LIBEXPORT conversion_overrun : conversion_error +{ + explicit conversion_overrun(std::string const &); +}; + + +/// Something is out of range, similar to std::out_of_range +struct PQXX_LIBEXPORT range_error : std::out_of_range +{ + explicit range_error(std::string const &); +}; + + +/// Query returned an unexpected number of rows. +struct PQXX_LIBEXPORT unexpected_rows : public range_error +{ + explicit unexpected_rows(std::string const &msg) : range_error{msg} {} +}; + + +/// Database feature not supported in current setup. +struct PQXX_LIBEXPORT feature_not_supported : sql_error +{ + explicit feature_not_supported( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +/// Error in data provided to SQL statement. 
+struct PQXX_LIBEXPORT data_exception : sql_error +{ + explicit data_exception( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT integrity_constraint_violation : sql_error +{ + explicit integrity_constraint_violation( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT restrict_violation : integrity_constraint_violation +{ + explicit restrict_violation( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + integrity_constraint_violation{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT not_null_violation : integrity_constraint_violation +{ + explicit not_null_violation( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + integrity_constraint_violation{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT foreign_key_violation : integrity_constraint_violation +{ + explicit foreign_key_violation( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + integrity_constraint_violation{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT unique_violation : integrity_constraint_violation +{ + explicit unique_violation( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + integrity_constraint_violation{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT check_violation : integrity_constraint_violation +{ + explicit check_violation( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + integrity_constraint_violation{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT invalid_cursor_state : sql_error +{ + explicit invalid_cursor_state( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT invalid_sql_statement_name : sql_error +{ + explicit invalid_sql_statement_name( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT invalid_cursor_name : sql_error +{ + explicit invalid_cursor_name( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT syntax_error : sql_error +{ + /// Approximate position in string where error occurred, or -1 if unknown. 
+ int const error_position; + + explicit syntax_error( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr, int pos = -1) : + sql_error{err, Q, sqlstate}, error_position{pos} + {} +}; + +struct PQXX_LIBEXPORT undefined_column : syntax_error +{ + explicit undefined_column( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + syntax_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT undefined_function : syntax_error +{ + explicit undefined_function( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + syntax_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT undefined_table : syntax_error +{ + explicit undefined_table( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + syntax_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT insufficient_privilege : sql_error +{ + explicit insufficient_privilege( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +/// Resource shortage on the server +struct PQXX_LIBEXPORT insufficient_resources : sql_error +{ + explicit insufficient_resources( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT disk_full : insufficient_resources +{ + explicit disk_full( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + insufficient_resources{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT out_of_memory : insufficient_resources +{ + explicit out_of_memory( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + insufficient_resources{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT too_many_connections : broken_connection +{ + explicit too_many_connections(std::string const &err) : + broken_connection{err} + {} +}; + +/// PL/pgSQL error +/** Exceptions derived from this class are errors from PL/pgSQL procedures. + */ +struct PQXX_LIBEXPORT plpgsql_error : sql_error +{ + explicit plpgsql_error( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + sql_error{err, Q, sqlstate} + {} +}; + +/// Exception raised in PL/pgSQL procedure +struct PQXX_LIBEXPORT plpgsql_raise : plpgsql_error +{ + explicit plpgsql_raise( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + plpgsql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT plpgsql_no_data_found : plpgsql_error +{ + explicit plpgsql_no_data_found( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + plpgsql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT plpgsql_too_many_rows : plpgsql_error +{ + explicit plpgsql_too_many_rows( + std::string const &err, std::string const &Q = "", + char const sqlstate[] = nullptr) : + plpgsql_error{err, Q, sqlstate} + {} +}; + +struct PQXX_LIBEXPORT blob_already_exists : failure +{ + explicit blob_already_exists(std::string const &); +}; + +/** + * @} + */ +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/field b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/field new file mode 100644 index 000000000..37cb69e84 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/field @@ -0,0 +1,8 @@ +/** pqxx::field class. 
+ * + * pqxx::field refers to a field in a query result. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/field.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/field.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/field.hxx new file mode 100644 index 000000000..b8b869fe4 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/field.hxx @@ -0,0 +1,542 @@ +/* Definitions for the pqxx::field class. + * + * pqxx::field refers to a field in a query result. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/field instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_FIELD +#define PQXX_H_FIELD + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include + +#include "pqxx/array.hxx" +#include "pqxx/composite.hxx" +#include "pqxx/result.hxx" +#include "pqxx/strconv.hxx" +#include "pqxx/types.hxx" + +namespace pqxx +{ +/// Reference to a field in a result set. +/** A field represents one entry in a row. It represents an actual value + * in the result set, and can be converted to various types. + */ +class PQXX_LIBEXPORT field +{ +public: + using size_type = field_size_type; + + /// Constructor. Do not call this yourself; libpqxx will do it for you. + /** Create field as reference to a field in a result set. + * @param r Row that this field is part of. + * @param c Column number of this field. + */ + [[deprecated( + "Do not construct fields yourself. Get them from the row.")]] field(row const &r, row_size_type c) noexcept; + + /// Constructor. Do not call this yourself; libpqxx will do it for you. + [[deprecated( + "Do not construct fields yourself. Get them from the " + "row.")]] field() noexcept = default; + + /** + * @name Comparison + */ + //@{ + // TODO: noexcept. Breaks ABI. + /// Byte-by-byte comparison of two fields (all nulls are considered equal) + /** @warning null handling is still open to discussion and change! + * + * Handling of null values differs from that in SQL where a comparison + * involving a null value yields null, so nulls are never considered equal + * to one another or even to themselves. + * + * Null handling also probably differs from the closest equivalent in C++, + * which is the NaN (Not-a-Number) value, a singularity comparable to + * SQL's null. This is because the builtin == operator demands that a == a. + * + * The usefulness of this operator is questionable. No interpretation + * whatsoever is imposed on the data; 0 and 0.0 are considered different, + * as are null vs. the empty string, or even different (but possibly + * equivalent and equally valid) encodings of the same Unicode character + * etc. + */ + [[nodiscard]] PQXX_PURE bool operator==(field const &) const; + + /// Byte-by-byte comparison (all nulls are considered equal) + /** @warning See operator==() for important information about this operator + */ + [[nodiscard]] PQXX_PURE bool operator!=(field const &rhs) const noexcept + { + return not operator==(rhs); + } + //@} + + /** + * @name Column information + */ + //@{ + /// Column name. + [[nodiscard]] PQXX_PURE char const *name() const &; + + /// Column type. 
+ [[nodiscard]] oid PQXX_PURE type() const; + + /// What table did this column come from? + [[nodiscard]] PQXX_PURE oid table() const; + + /// Return row number. The first row is row 0, the second is row 1, etc. + PQXX_PURE constexpr row_size_type num() const noexcept { return col(); } + + /// What column number in its originating table did this column come from? + [[nodiscard]] PQXX_PURE row_size_type table_column() const; + //@} + + /** + * @name Content access + */ + //@{ + /// Read as `string_view`, or an empty one if null. + /** The result only remains usable while the data for the underlying + * @ref result exists. Once all `result` objects referring to that data have + * been destroyed, the `string_view` will no longer point to valid memory. + */ + [[nodiscard]] PQXX_PURE std::string_view view() const & + { + return std::string_view(c_str(), size()); + } + + /// Read as plain C string. + /** Since the field's data is stored internally in the form of a + * zero-terminated C string, this is the fastest way to read it. Use the + * to() or as() functions to convert the string to other types such as + * `int`, or to C++ strings. + * + * Do not use this for BYTEA values, or other binary values. To read those, + * convert the value to your desired type using `to()` or `as()`. For + * example: `f.as>()`. + */ + [[nodiscard]] PQXX_PURE char const *c_str() const &; + + /// Is this field's value null? + [[nodiscard]] PQXX_PURE bool is_null() const noexcept; + + /// Return number of bytes taken up by the field's value. + [[nodiscard]] PQXX_PURE size_type size() const noexcept; + + /// Read value into obj; or if null, leave obj untouched and return `false`. + /** This can be used with optional types (except pointers other than C-style + * strings). + */ + template + auto to(T &obj) const -> typename std::enable_if_t< + (not std::is_pointer::value or std::is_same::value), + bool> + { + if (is_null()) + { + return false; + } + else + { + auto const bytes{c_str()}; + from_string(bytes, obj); + return true; + } + } + + /// Read field as a composite value, write its components into `fields`. + /** @warning This is still experimental. It may change or be replaced. + * + * Returns whether the field was null. If it was, it will not touch the + * values in `fields`. + */ + template bool composite_to(T &...fields) const + { + if (is_null()) + { + return false; + } + else + { + parse_composite(m_home.m_encoding, view(), fields...); + return true; + } + } + + /// Read value into obj; or leave obj untouched and return `false` if null. + template bool operator>>(T &obj) const { return to(obj); } + + /// Read value into obj; or if null, use default value and return `false`. + /** This can be used with `std::optional`, as well as with standard smart + * pointer types, but not with raw pointers. If the conversion from a + * PostgreSQL string representation allocates a pointer (e.g. using `new`), + * then the object's later deallocation should be baked in as well, right + * from the point where the object is created. So if you want a pointer, use + * a smart pointer, not a raw pointer. + * + * There is one exception, of course: C-style strings. Those are just + * pointers to the field's internal text data. 
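+ *
+ * A small sketch, not part of the original header; it assumes a pqxx::row
+ * named `r` with a nullable integer column "retries":
+ *
+ * ```cxx
+ * int retries;
+ * bool const was_set{r["retries"].to(retries, 5)};  // falls back to 5 on null
+ * ```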
+ */ + template + auto to(T &obj, T const &default_value) const -> typename std::enable_if_t< + (not std::is_pointer::value or std::is_same::value), + bool> + { + bool const null{is_null()}; + if (null) + obj = default_value; + else + obj = from_string(this->view()); + return not null; + } + + /// Return value as object of given type, or default value if null. + /** Note that unless the function is instantiated with an explicit template + * argument, the Default value's type also determines the result type. + */ + template T as(T const &default_value) const + { + if (is_null()) + return default_value; + else + return from_string(this->view()); + } + + /// Return value as object of given type, or throw exception if null. + /** Use as `as>()` or `as()` as + * an alternative to `get()`; this is disabled for use with raw pointers + * (other than C-strings) because storage for the value can't safely be + * allocated here + */ + template T as() const + { + if (is_null()) + { + if constexpr (not nullness::has_null) + internal::throw_null_conversion(type_name); + else + return nullness::null(); + } + else + { + return from_string(this->view()); + } + } + + /// Return value wrapped in some optional type (empty for nulls). + /** Use as `get()` as before to obtain previous behavior, or specify + * container type with `get()` + */ + template class O = std::optional> + constexpr O get() const + { + return as>(); + } + + // TODO: constexpr noexcept, once array_parser constructor gets those. + /// Parse the field as an SQL array. + /** Call the parser to retrieve values (and structure) from the array. + * + * Make sure the @ref result object stays alive until parsing is finished. If + * you keep the @ref row of `field` object alive, it will keep the @ref + * result object alive as well. + */ + array_parser as_array() const & + { + return array_parser{c_str(), m_home.m_encoding}; + } + //@} + + +protected: + constexpr result const &home() const noexcept { return m_home; } + constexpr result::size_type idx() const noexcept { return m_row; } + constexpr row_size_type col() const noexcept { return m_col; } + + // TODO: Create gates. + friend class pqxx::result; + friend class pqxx::row; + field( + result const &r, result_size_type row_num, row_size_type col_num) noexcept + : + m_col{col_num}, m_home{r}, m_row{row_num} + {} + + /** + * You'd expect this to be unsigned, but due to the way reverse iterators + * are related to regular iterators, it must be allowed to underflow to -1. + */ + row_size_type m_col; + +private: + result m_home; + result::size_type m_row; +}; + + +template<> inline bool field::to(std::string &obj) const +{ + bool const null{is_null()}; + if (not null) + obj = std::string{view()}; + return not null; +} + + +template<> +inline bool field::to( + std::string &obj, std::string const &default_value) const +{ + bool const null{is_null()}; + if (null) + obj = default_value; + else + obj = std::string{view()}; + return not null; +} + + +/// Specialization: `to(char const *&)`. +/** The buffer has the same lifetime as the data in this result (i.e. of this + * result object, or the last remaining one copied from it etc.), so take care + * not to use it after the last result object referring to this query result is + * destroyed. 
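+ *
+ * An illustrative sketch of the lifetime hazard, not part of the original
+ * header; it assumes a transaction `tx`:
+ *
+ * ```cxx
+ * char const *name{nullptr};
+ * {
+ *   pqxx::result r{tx.exec("SELECT 'alice'")};
+ *   r[0][0].to(name);  // name now points into r's internal storage.
+ * }
+ * // r was the last result object for that data, so name is now dangling.
+ * ```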
+ */ +template<> inline bool field::to(char const *&obj) const +{ + bool const null{is_null()}; + if (not null) + obj = c_str(); + return not null; +} + + +template<> inline bool field::to(std::string_view &obj) const +{ + bool const null{is_null()}; + if (not null) + obj = view(); + return not null; +} + + +template<> +inline bool field::to( + std::string_view &obj, std::string_view const &default_value) const +{ + bool const null{is_null()}; + if (null) + obj = default_value; + else + obj = view(); + return not null; +} + + +template<> inline std::string_view field::as() const +{ + if (is_null()) + PQXX_UNLIKELY + internal::throw_null_conversion(type_name); + return view(); +} + + +template<> +inline std::string_view +field::as(std::string_view const &default_value) const +{ + return is_null() ? default_value : view(); +} + + +template<> inline bool field::to(zview &obj) const +{ + bool const null{is_null()}; + if (not null) + obj = zview{c_str(), size()}; + return not null; +} + + +template<> +inline bool field::to(zview &obj, zview const &default_value) const +{ + bool const null{is_null()}; + if (null) + obj = default_value; + else + obj = zview{c_str(), size()}; + return not null; +} + + +template<> inline zview field::as() const +{ + if (is_null()) + PQXX_UNLIKELY + internal::throw_null_conversion(type_name); + return zview{c_str(), size()}; +} + + +template<> inline zview field::as(zview const &default_value) const +{ + return is_null() ? default_value : zview{c_str(), size()}; +} + + +template> +class field_streambuf : public std::basic_streambuf +{ +public: + using char_type = CHAR; + using traits_type = TRAITS; + using int_type = typename traits_type::int_type; + using pos_type = typename traits_type::pos_type; + using off_type = typename traits_type::off_type; + using openmode = std::ios::openmode; + using seekdir = std::ios::seekdir; + + explicit field_streambuf(field const &f) : m_field{f} { initialize(); } + +protected: + virtual int sync() override { return traits_type::eof(); } + + virtual pos_type seekoff(off_type, seekdir, openmode) override + { + return traits_type::eof(); + } + virtual pos_type seekpos(pos_type, openmode) override + { + return traits_type::eof(); + } + virtual int_type overflow(int_type) override { return traits_type::eof(); } + virtual int_type underflow() override { return traits_type::eof(); } + +private: + field const &m_field; + + int_type initialize() + { + auto g{static_cast(const_cast(m_field.c_str()))}; + this->setg(g, g, g + std::size(m_field)); + return int_type(std::size(m_field)); + } +}; + + +/// Input stream that gets its data from a result field +/** Use this class exactly as you would any other istream to read data from a + * field. All formatting and streaming operations of `std::istream` are + * supported. What you'll typically want to use, however, is the fieldstream + * alias (which defines a @ref basic_fieldstream for `char`). This is similar + * to how e.g. `std::ifstream` relates to `std::basic_ifstream`. + * + * This class has only been tested for the char type (and its default traits). 
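+ *
+ * A brief sketch, not part of the original header; it assumes a pqxx::row
+ * named `r` holding a numeric text field:
+ *
+ * ```cxx
+ * pqxx::fieldstream fs{r["price"]};
+ * double price{};
+ * fs >> price;  // ordinary istream extraction from the field's text
+ * ```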
+ */ +template> +class basic_fieldstream : public std::basic_istream +{ + using super = std::basic_istream; + +public: + using char_type = CHAR; + using traits_type = TRAITS; + using int_type = typename traits_type::int_type; + using pos_type = typename traits_type::pos_type; + using off_type = typename traits_type::off_type; + + basic_fieldstream(field const &f) : super{nullptr}, m_buf{f} + { + super::init(&m_buf); + } + +private: + field_streambuf m_buf; +}; + +using fieldstream = basic_fieldstream; + +/// Write a result field to any type of stream +/** This can be convenient when writing a field to an output stream. More + * importantly, it lets you write a field to e.g. a `stringstream` which you + * can then use to read, format and convert the field in ways that to() does + * not support. + * + * Example: parse a field into a variable of the nonstandard + * "long long" type. + * + * ```cxx + * extern result R; + * long long L; + * stringstream S; + * + * // Write field's string into S + * S << R[0][0]; + * + * // Parse contents of S into L + * S >> L; + * ``` + */ +template +inline std::basic_ostream & +operator<<(std::basic_ostream &s, field const &value) +{ + s.write(value.c_str(), std::streamsize(std::size(value))); + return s; +} + + +/// Convert a field's value to type `T`. +/** Unlike the "regular" `from_string`, this knows how to deal with null + * values. + */ +template inline T from_string(field const &value) +{ + if (value.is_null()) + { + if constexpr (nullness::has_null) + return nullness::null(); + else + internal::throw_null_conversion(type_name); + } + else + { + return from_string(value.view()); + } +} + + +/// Convert a field's value to `nullptr_t`. +/** Yes, you read that right. This conversion does nothing useful. It always + * returns `nullptr`. + * + * Except... what if the field is not null? In that case, this throws + * @ref conversion_error. + */ +template<> +inline std::nullptr_t from_string(field const &value) +{ + if (not value.is_null()) + throw conversion_error{ + "Extracting non-null field into nullptr_t variable."}; + return nullptr; +} + + +/// Convert a field to a string. +template<> PQXX_LIBEXPORT std::string to_string(field const &value); +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/array-composite.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/array-composite.hxx new file mode 100644 index 000000000..d2b6603e5 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/array-composite.hxx @@ -0,0 +1,305 @@ +#if !defined(PQXX_ARRAY_COMPOSITE_HXX) +# define PQXX_ARRAY_COMPOSITE_HXX + +# include + +# include "pqxx/strconv.hxx" + +namespace pqxx::internal +{ +// Find the end of a double-quoted string. +/** `input[pos]` must be the opening double quote. + * + * Returns the offset of the first position after the closing quote. + */ +inline std::size_t scan_double_quoted_string( + char const input[], std::size_t size, std::size_t pos, + pqxx::internal::glyph_scanner_func *scan) +{ + // XXX: find_char<'"', '\\'>(). + auto next{scan(input, size, pos)}; + bool at_quote{false}; + for (pos = next, next = scan(input, size, pos); pos < size; + pos = next, next = scan(input, size, pos)) + { + if (at_quote) + { + if (next - pos == 1 and input[pos] == '"') + { + // We just read a pair of double quotes. Carry on. + at_quote = false; + } + else + { + // We just read one double quote, and now we're at a character that's + // not a second double quote. 
Ergo, that last character was the + // closing double quote and this is the position right after it. + return pos; + } + } + else if (next - pos == 1) + { + switch (input[pos]) + { + case '\\': + // Backslash escape. Skip ahead by one more character. + pos = next; + next = scan(input, size, pos); + break; + + case '"': + // This is either the closing double quote, or the first of a pair of + // double quotes. + at_quote = true; + break; + } + } + else + { + // Multibyte character. Carry on. + } + } + if (not at_quote) + throw argument_error{ + "Missing closing double-quote: " + std::string{input}}; + return pos; +} + + +/// Un-quote and un-escape a double-quoted SQL string. +inline std::string parse_double_quoted_string( + char const input[], std::size_t end, std::size_t pos, + pqxx::internal::glyph_scanner_func *scan) +{ + std::string output; + // Maximum output size is same as the input size, minus the opening and + // closing quotes. Or in the extreme opposite case, the real number could be + // half that. Usually it'll be a pretty close estimate. + output.reserve(std::size_t(end - pos - 2)); + + for (auto here{scan(input, end, pos)}, next{scan(input, end, here)}; + here < end - 1; here = next, next = scan(input, end, here)) + { + // A backslash here is always an escape. So is a double-quote, since we're + // inside the double-quoted string. In either case, we can just ignore the + // escape character and use the next character. This is the one redeeming + // feature of SQL's escaping system. + if ((next - here == 1) and (input[here] == '\\' or input[here] == '"')) + { + // Skip escape. + here = next; + next = scan(input, end, here); + } + output.append(input + here, input + next); + } + return output; +} + + +/// Find the end of an unquoted string in an array or composite-type value. +/** Stops when it gets to the end of the input; or when it sees any of the + * characters in STOP which has not been escaped. + * + * For array values, STOP is a comma, a semicolon, or a closing brace. For + * a value of a composite type, STOP is a comma or a closing parenthesis. + */ +template +inline std::size_t scan_unquoted_string( + char const input[], std::size_t size, std::size_t pos, + pqxx::internal::glyph_scanner_func *scan) +{ + bool at_backslash{false}; + auto next{scan(input, size, pos)}; + while ((pos < size) and + ((next - pos) > 1 or at_backslash or ((input[pos] != STOP) and ...))) + { + pos = next; + next = scan(input, size, pos); + at_backslash = + ((not at_backslash) and ((next - pos) == 1) and (input[pos] == '\\')); + } + return pos; +} + + +/// Parse an unquoted array entry or cfield of a composite-type field. +inline std::string parse_unquoted_string( + char const input[], std::size_t end, std::size_t pos, + pqxx::internal::glyph_scanner_func *scan) +{ + std::string output; + bool at_backslash{false}; + output.reserve(end - pos); + for (auto next{scan(input, end, pos)}; pos < end; + pos = next, next = scan(input, end, pos)) + { + at_backslash = + ((not at_backslash) and ((next - pos) == 1) and (input[pos] == '\\')); + if (not at_backslash) + output.append(input + pos, next - pos); + } + return output; +} + + +/// Parse a field of a composite-type value. +/** `T` is the C++ type of the field we're parsing, and `index` is its + * zero-based number. + * + * Strip off the leading parenthesis or bracket yourself before parsing. + * However, this function will parse the lcosing parenthesis or bracket. + * + * After a successful parse, `pos` will point at `std::end(text)`. 
+ * + * For the purposes of parsing, ranges and arrays count as compositve values, + * so this function supports parsing those. If you specifically need a closing + * parenthesis, check afterwards that `text` did not end in a bracket instead. + * + * @param index Index of the current field, zero-based. It will increment for + * the next field. + * @param input Full input text for the entire composite-type value. + * @param pos Starting position (in `input`) of the field that we're parsing. + * After parsing, this will point at the beginning of the next field if + * there is one, or one position past the last character otherwise. + * @param field Destination for the parsed value. + * @param scan Glyph scanning function for the relevant encoding type. + * @param last_field Number of the last field in the value (zero-based). When + * parsing the last field, this will equal `index`. + */ +template +inline void parse_composite_field( + std::size_t &index, std::string_view input, std::size_t &pos, T &field, + glyph_scanner_func *scan, std::size_t last_field) +{ + assert(index <= last_field); + auto next{scan(std::data(input), std::size(input), pos)}; + if ((next - pos) != 1) + throw conversion_error{"Non-ASCII character in composite-type syntax."}; + + // Expect a field. + switch (input[pos]) + { + case ',': + case ')': + case ']': + // The field is empty, i.e, null. + if constexpr (nullness::has_null) + field = nullness::null(); + else + throw conversion_error{ + "Can't read composite field " + to_string(index) + ": C++ type " + + type_name + " does not support nulls."}; + break; + + case '"': { + auto const stop{scan_double_quoted_string( + std::data(input), std::size(input), pos, scan)}; + auto const text{ + parse_double_quoted_string(std::data(input), stop, pos, scan)}; + field = from_string(text); + pos = stop; + } + break; + + default: { + auto const stop{scan_unquoted_string<',', ')', ']'>( + std::data(input), std::size(input), pos, scan)}; + auto const text{parse_unquoted_string(std::data(input), stop, pos, scan)}; + field = from_string(text); + pos = stop; + } + break; + } + + // Expect a comma or a closing parenthesis. + next = scan(std::data(input), std::size(input), pos); + + if ((next - pos) != 1) + throw conversion_error{ + "Unexpected non-ASCII character after composite field: " + + std::string{input}}; + + if (index < last_field) + { + if (input[pos] != ',') + throw conversion_error{ + "Found '" + std::string{input[pos]} + + "' in composite value where comma was expected: " + std::data(input)}; + } + else + { + if (input[pos] == ',') + throw conversion_error{ + "Composite value contained more fields than the expected " + + to_string(last_field) + ": " + std::data(input)}; + if (input[pos] != ')' and input[pos] != ']') + throw conversion_error{ + "Composite value has unexpected characters where closing parenthesis " + "was expected: " + + std::string{input}}; + if (next != std::size(input)) + throw conversion_error{ + "Composite value has unexpected text after closing parenthesis: " + + std::string{input}}; + } + + pos = next; + ++index; +} + + +/// Conservatively estimate buffer size needed for a composite field. +template +inline std::size_t size_composite_field_buffer(T const &field) +{ + if constexpr (is_unquoted_safe) + { + // Safe to copy, without quotes or escaping. Drop the terminating zero. + return size_buffer(field) - 1; + } + else + { + // + Opening quote. + // + Field budget. + // - Terminating zero. + // + Escaping for each byte in the field's string representation. 
+ // - Escaping for terminating zero. + // + Closing quote. + return 1 + 2 * (size_buffer(field) - 1) + 1; + } +} + + +template +inline void write_composite_field(char *&pos, char *end, T const &field) +{ + if constexpr (is_unquoted_safe) + { + // No need for quoting or escaping. Convert it straight into its final + // place in the buffer, and "backspace" the trailing zero. + pos = string_traits::into_buf(pos, end, field) - 1; + } + else + { + // The field may need escaping, which means we need an intermediate buffer. + // To avoid allocating that at run time, we use the end of the buffer that + // we have. + auto const budget{size_buffer(field)}; + *pos++ = '"'; + + // Now escape buf into its final position. + for (char const c : string_traits::to_buf(end - budget, end, field)) + { + if ((c == '"') or (c == '\\')) + *pos++ = '\\'; + + *pos++ = c; + } + + *pos++ = '"'; + } + + *pos++ = ','; +} +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/callgate.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/callgate.hxx new file mode 100644 index 000000000..42f7703e3 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/callgate.hxx @@ -0,0 +1,70 @@ +#ifndef PQXX_H_CALLGATE +#define PQXX_H_CALLGATE + +/* +Here's what a typical gate class definition looks like: + +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE @gateclass@ : callgate<@host@> +{ + friend class @client@; + + @gateclass@(reference x) : super(x) {} + + // Methods here. Use home() to access the host-class object. +}; +} // namespace pqxx::internal::gate +*/ + +namespace pqxx::internal +{ +/// Base class for call gates. +/** + * A call gate defines a limited, private interface on the host class that + * specified client classes can access. + * + * The metaphor works as follows: the gate stands in front of a "home," which + * is really a class, and only lets specific friends in. + * + * To implement a call gate that gives client C access to host H, + * * derive a gate class from callgate; + * * make the gate class a friend of H; + * * make C a friend of the gate class; and + * * implement "stuff C can do with H" as private members in the gate class. + * + * This special kind of "gated" friendship gives C private access to H, but + * only through an expressly limited interface. The gate class can access its + * host object as home(). + * + * Keep gate classes entirely stateless. They should be ultra-lightweight + * wrappers for their host classes, and be optimized away as much as possible + * by the compiler. Once you start adding state, you're on a slippery slope + * away from the pure, clean, limited interface pattern that gate classes are + * meant to implement. + * + * Ideally, all member functions of the gate class should be one-liners passing + * calls straight on to the host class. It can be useful however to break this + * rule temporarily during inter-class refactoring. + */ +template class PQXX_PRIVATE callgate +{ +protected: + /// This class, to keep constructors easy. + using super = callgate; + /// A reference to the host class. Helps keep constructors easy. + using reference = HOME &; + + callgate(reference x) : m_home(x) {} + + /// The home object. The gate class has full "private" access. 
+ reference home() const noexcept { return m_home; } + +private: + reference m_home; +}; +} // namespace pqxx::internal + +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/concat.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/concat.hxx new file mode 100644 index 000000000..cd28bde7c --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/concat.hxx @@ -0,0 +1,45 @@ +#if !defined(PQXX_CONCAT_HXX) +# define PQXX_CONCAT_HXX + +# include +# include + +# include "pqxx/strconv.hxx" + +namespace pqxx::internal +{ +/// Convert item to a string, write it into [here, end). +template +void render_item(TYPE const &item, char *&here, char *end) +{ + here = string_traits::into_buf(here, end, item) - 1; +} + + +// C++20: Support non-random_access_range ranges. +/// Efficiently combine a bunch of items into one big string. +/** Use this as an optimised version of string concatentation. It takes just + * about any type; it will represent each item as a string according to its + * @ref string_traits. + * + * This is a simpler, more specialised version of @ref separated_list for a + * statically known series of items, possibly of different types. + */ +template +[[nodiscard]] inline std::string concat(TYPE... item) +{ + std::string buf; + // Size to accommodate string representations of all inputs, minus their + // terminating zero bytes. + buf.resize(size_buffer(item...)); + + char *const data{buf.data()}; + char *here = data; + char *end = data + std::size(buf); + (render_item(item, here, end), ...); + + buf.resize(static_cast(here - data)); + return buf; +} +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/conversions.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/conversions.hxx new file mode 100644 index 000000000..1df4fdead --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/conversions.hxx @@ -0,0 +1,1188 @@ +#include +#include +#include +#include +#include +#include + +#if defined(PQXX_HAVE_SPAN) && __has_include() +# include +#endif + +#include +#include +#include + +#include "pqxx/types.hxx" +#include "pqxx/util.hxx" + + +/* Internal helpers for string conversion, and conversion implementations. + * + * Do not include this header directly. The libpqxx headers do it for you. + */ +namespace pqxx::internal +{ +/// Convert a number in [0, 9] to its ASCII digit. +inline constexpr char number_to_digit(int i) noexcept +{ + return static_cast(i + '0'); +} + + +/// Compute numeric value of given textual digit (assuming that it is a digit). +constexpr int digit_to_number(char c) noexcept +{ + return c - '0'; +} + + +/// Summarize buffer overrun. +/** Don't worry about the exact parameter types: the sizes will be reasonably + * small, and nonnegative. + */ +std::string PQXX_LIBEXPORT +state_buffer_overrun(int have_bytes, int need_bytes); + + +template +inline std::string state_buffer_overrun(HAVE have_bytes, NEED need_bytes) +{ + return state_buffer_overrun( + static_cast(have_bytes), static_cast(need_bytes)); +} + + +/// Throw exception for attempt to convert null to given type. +[[noreturn]] PQXX_LIBEXPORT void +throw_null_conversion(std::string const &type); + + +/// Deliberately nonfunctional conversion traits for `char` types. +/** There are no string conversions for `char` and its signed and unsigned + * variants. 
Such a conversion would be dangerously ambiguous: should we treat + * it as text, or as a small integer? It'd be an open invitation for bugs. + * + * But the error message when you get this wrong is very cryptic. So, we + * derive dummy @ref string_traits implementations from this dummy type, and + * ensure that the compiler disallows their use. The compiler error message + * will at least contain a hint of the root of the problem. + */ +template struct disallowed_ambiguous_char_conversion +{ + static char *into_buf(char *, char *, CHAR_TYPE) = delete; + static constexpr zview + to_buf(char *, char *, CHAR_TYPE const &) noexcept = delete; + + static constexpr std::size_t + size_buffer(CHAR_TYPE const &) noexcept = delete; + static CHAR_TYPE from_string(std::string_view) = delete; +}; + + +template PQXX_LIBEXPORT extern std::string to_string_float(T); + + +/// Generic implementation for into_buf, on top of to_buf. +template +inline char *generic_into_buf(char *begin, char *end, T const &value) +{ + zview const text{string_traits::to_buf(begin, end, value)}; + auto const space{end - begin}; + // Include the trailing zero. + auto const len = std::size(text) + 1; + if (internal::cmp_greater(len, space)) + throw conversion_overrun{ + "Not enough buffer space to insert " + type_name + ". " + + state_buffer_overrun(space, len)}; + std::memmove(begin, text.data(), len); + return begin + len; +} + + +/// String traits for builtin integral types (though not bool). +template struct integral_traits +{ + static PQXX_LIBEXPORT T from_string(std::string_view text); + static PQXX_LIBEXPORT zview to_buf(char *begin, char *end, T const &value); + static PQXX_LIBEXPORT char *into_buf(char *begin, char *end, T const &value); + + static constexpr std::size_t size_buffer(T const &) noexcept + { + /** Includes a sign if needed; the number of base-10 digits which the type + * can reliably represent; the one extra base-10 digit which the type can + * only partially represent; and the terminating zero. + */ + return std::is_signed_v + std::numeric_limits::digits10 + 1 + 1; + } +}; + + +/// String traits for builtin floating-point types. +template struct float_traits +{ + static PQXX_LIBEXPORT T from_string(std::string_view text); + static PQXX_LIBEXPORT zview to_buf(char *begin, char *end, T const &value); + static PQXX_LIBEXPORT char *into_buf(char *begin, char *end, T const &value); + + // Return a nonnegative integral value's number of decimal digits. + static constexpr std::size_t digits10(std::size_t value) noexcept + { + if (value < 10) + return 1; + else + return 1 + digits10(value / 10); + } + + static constexpr std::size_t size_buffer(T const &) noexcept + { + using lims = std::numeric_limits; + // See #328 for a detailed discussion on the maximum number of digits. + // + // In a nutshell: for the big cases, the scientific notation is always + // the shortest one, and therefore the one that to_chars will pick. + // + // So... How long can the scientific notation get? 1 (for sign) + 1 (for + // decimal point) + 1 (for 'e') + 1 (for exponent sign) + max_digits10 + + // max number of digits in the exponent + 1 (terminating zero). + // + // What's the max number of digits in the exponent? It's the max number of + // digits out of the most negative exponent and the most positive one. + // + // The longest positive exponent is easy: 1 + ceil(log10(max_exponent10)). + // (The extra 1 is because 10^n takes up 1 + n digits, not n.) 
+ // + // The longest negative exponent is a bit harder: min_exponent10 gives us + // the smallest power of 10 which a normalised version of T can represent. + // But the smallest denormalised power of 10 that T can represent is + // another max_digits10 powers of 10 below that. + // needs a minus sign. + // + // All this stuff messes with my head a bit because it's on the order of + // log10(log10(n)). It's easy to get the number of logs wrong. + auto const max_pos_exp{digits10(lims::max_exponent10)}; + // Really want std::abs(lims::min_exponent10), but MSVC 2017 apparently has + // problems with std::abs. So we use -lims::min_exponent10 instead. + auto const max_neg_exp{ + digits10(lims::max_digits10 - lims::min_exponent10)}; + return 1 + // Sign. + 1 + // Decimal point. + std::numeric_limits::max_digits10 + // Mantissa digits. + 1 + // Exponent "e". + 1 + // Exponent sign. + // Spell this weirdly to stop Windows compilers from reading this as + // a call to their "max" macro when NOMINMAX is not defined. + (std::max)(max_pos_exp, max_neg_exp) + // Exponent digits. + 1; // Terminating zero. + } +}; +} // namespace pqxx::internal + + +namespace pqxx +{ +/// The built-in arithmetic types do not have inherent null values. +template +struct nullness>> : no_null +{}; + + +template<> struct string_traits : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> +struct string_traits + : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> struct string_traits : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> struct string_traits : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> struct string_traits : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> +struct string_traits : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> +struct string_traits : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> +struct string_traits + : internal::integral_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> struct string_traits : internal::float_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> struct string_traits : internal::float_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; +template<> +struct string_traits : internal::float_traits +{}; +template<> inline constexpr bool is_unquoted_safe{true}; + + +template<> struct string_traits +{ + static PQXX_LIBEXPORT bool from_string(std::string_view text); + + static constexpr zview to_buf(char *, char *, bool const &value) noexcept + { + return value ? "true"_zv : "false"_zv; + } + + static char *into_buf(char *begin, char *end, bool const &value) + { + return pqxx::internal::generic_into_buf(begin, end, value); + } + + static constexpr std::size_t size_buffer(bool const &) noexcept { return 6; } +}; + + +/// We don't support conversion to/from `char` types. +/** Why are these disallowed? Because they are ambiguous. It's not inherently + * clear whether we should treat values of these types as text or as small + * integers. Either choice would lead to bugs. + */ +template<> +struct string_traits + : internal::disallowed_ambiguous_char_conversion +{}; + +/// We don't support conversion to/from `char` types. +/** Why are these disallowed? 
Because they are ambiguous. It's not inherently + * clear whether we should treat values of these types as text or as small + * integers. Either choice would lead to bugs. + */ +template<> +struct string_traits + : internal::disallowed_ambiguous_char_conversion +{}; + +/// We don't support conversion to/from `char` types. +/** Why are these disallowed? Because they are ambiguous. It's not inherently + * clear whether we should treat values of these types as text or as small + * integers. Either choice would lead to bugs. + */ +template<> +struct string_traits + : internal::disallowed_ambiguous_char_conversion +{}; + + +template<> inline constexpr bool is_unquoted_safe{true}; + + +template struct nullness> +{ + static constexpr bool has_null = true; + /// Technically, you could have an optional of an always-null type. + static constexpr bool always_null = nullness::always_null; + static constexpr bool is_null(std::optional const &v) noexcept + { + return ((not v.has_value()) or pqxx::is_null(*v)); + } + static constexpr std::optional null() { return {}; } +}; + + +template +inline constexpr format param_format(std::optional const &value) +{ + return param_format(*value); +} + + +template struct string_traits> +{ + static char *into_buf(char *begin, char *end, std::optional const &value) + { + return string_traits::into_buf(begin, end, *value); + } + + static zview to_buf(char *begin, char *end, std::optional const &value) + { + if (value.has_value()) + return string_traits::to_buf(begin, end, *value); + else + return {}; + } + + static std::optional from_string(std::string_view text) + { + return std::optional{ + std::in_place, string_traits::from_string(text)}; + } + + static std::size_t size_buffer(std::optional const &value) noexcept + { + return pqxx::size_buffer(value.value()); + } +}; + + +template +inline constexpr bool is_unquoted_safe>{is_unquoted_safe}; + + +template struct nullness> +{ + static constexpr bool has_null = (nullness::has_null or ...); + static constexpr bool always_null = (nullness::always_null and ...); + static constexpr bool is_null(std::variant const &value) noexcept + { + return std::visit( + [](auto const &i) noexcept { + return nullness>::is_null(i); + }, + value); + } + + // We don't support `null()` for `std::variant`. + /** It would be technically possible to have a `null` in the case where just + * one of the types has a null, but it gets complicated and arbitrary. + */ + static constexpr std::variant null() = delete; +}; + + +template struct string_traits> +{ + static char * + into_buf(char *begin, char *end, std::variant const &value) + { + return std::visit( + [begin, end](auto const &i) { + return string_traits>::into_buf(begin, end, i); + }, + value); + } + static zview to_buf(char *begin, char *end, std::variant const &value) + { + return std::visit( + [begin, end](auto const &i) { + return string_traits>::to_buf(begin, end, i); + }, + value); + } + static std::size_t size_buffer(std::variant const &value) noexcept + { + return std::visit( + [](auto const &i) noexcept { return pqxx::size_buffer(i); }, value); + } + + /** There's no from_string for std::variant. We could have one with a rule + * like "pick the first type which fits the value," but we'd have to look + * into how natural that API feels to users. 
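+ *
+ * Converting in the other direction does work: to_string() and the
+ * parameter-passing machinery go through the string_traits defined above.
+ * A hedged illustrative sketch, not part of the upstream libpqxx sources:
+ *
+ *     std::variant<int, std::string> v{42};
+ *     std::string s{pqxx::to_string(v)};   // "42"
+ *     v = std::string{"forty-two"};
+ *     s = pqxx::to_string(v);              // "forty-two"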
+ */ + static std::variant from_string(std::string_view) = delete; +}; + + +template +inline constexpr format param_format(std::variant const &value) +{ + return std::visit([](auto &v) { return param_format(v); }, value); +} + + +template +inline constexpr bool is_unquoted_safe>{ + (is_unquoted_safe and ...)}; + + +template inline T from_string(std::stringstream const &text) +{ + return from_string(text.str()); +} + + +template<> struct string_traits +{ + static char *into_buf(char *, char *, std::nullptr_t) = delete; + + static constexpr zview + to_buf(char *, char *, std::nullptr_t const &) noexcept + { + return {}; + } + + static constexpr std::size_t size_buffer(std::nullptr_t = nullptr) noexcept + { + return 0; + } + static std::nullptr_t from_string(std::string_view) = delete; +}; + + +template<> struct string_traits +{ + static char *into_buf(char *, char *, std::nullopt_t) = delete; + + static constexpr zview + to_buf(char *, char *, std::nullopt_t const &) noexcept + { + return {}; + } + + static constexpr std::size_t size_buffer(std::nullopt_t) noexcept + { + return 0; + } + static std::nullopt_t from_string(std::string_view) = delete; +}; + + +template<> struct string_traits +{ + static char *into_buf(char *, char *, std::monostate) = delete; + + static constexpr zview + to_buf(char *, char *, std::monostate const &) noexcept + { + return {}; + } + + static constexpr std::size_t size_buffer(std::monostate) noexcept + { + return 0; + } + static std::monostate from_string(std::string_view) = delete; +}; + + +template<> inline constexpr bool is_unquoted_safe{true}; + + +template<> struct nullness +{ + static constexpr bool has_null = true; + static constexpr bool always_null = false; + static constexpr bool is_null(char const *t) noexcept + { + return t == nullptr; + } + static constexpr char const *null() noexcept { return nullptr; } +}; + + +/// String traits for C-style string ("pointer to char const"). +template<> struct string_traits +{ + static char const *from_string(std::string_view text) { return text.data(); } + + static zview to_buf(char *begin, char *end, char const *const &value) + { + return generic_to_buf(begin, end, value); + } + + static char *into_buf(char *begin, char *end, char const *const &value) + { + auto const space{end - begin}; + // Count the trailing zero, even though std::strlen() and friends don't. + auto const len{std::strlen(value) + 1}; + if (space < ptrdiff_t(len)) + throw conversion_overrun{ + "Could not copy string: buffer too small. " + + pqxx::internal::state_buffer_overrun(space, len)}; + std::memmove(begin, value, len); + return begin + len; + } + + static std::size_t size_buffer(char const *const &value) noexcept + { + return std::strlen(value) + 1; + } +}; + + +template<> struct nullness +{ + static constexpr bool has_null = true; + static constexpr bool always_null = false; + static constexpr bool is_null(char const *t) noexcept + { + return t == nullptr; + } + static constexpr char const *null() { return nullptr; } +}; + + +/// String traits for non-const C-style string ("pointer to char"). 
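+// Illustrative aside, not part of the upstream libpqxx headers: the traits in
+// this file are what power pqxx::to_string() and pqxx::from_string().  A
+// minimal, hedged sketch of a round trip, assuming the public <pqxx/strconv>
+// header:
+//
+//     #include <pqxx/strconv>
+//
+//     std::string text{pqxx::to_string(1234)};                 // "1234"
+//     int n{pqxx::from_string<int>(text)};                     // 1234
+//     auto maybe{pqxx::from_string<std::optional<int>>("7")};  // contains 7
+//
+// Conversions for char, signed char and unsigned char are deliberately
+// deleted (disallowed_ambiguous_char_conversion above), so for example
+// pqxx::from_string<char>("x") fails to compile rather than guessing whether
+// you meant text or a small integer.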
+template<> struct string_traits +{ + static char *into_buf(char *begin, char *end, char *const &value) + { + return string_traits::into_buf(begin, end, value); + } + static zview to_buf(char *begin, char *end, char *const &value) + { + return string_traits::to_buf(begin, end, value); + } + static std::size_t size_buffer(char *const &value) noexcept + { + return string_traits::size_buffer(value); + } + + /// Don't allow conversion to this type since it breaks const-safety. + static char *from_string(std::string_view) = delete; +}; + + +template struct nullness : no_null +{}; + + +/// String traits for C-style string constant ("array of char"). +/** @warning This assumes that every array-of-char is a C-style string literal. + * So, it must include a trailing zero. and it must have static duration. + */ +template struct string_traits +{ + static constexpr zview + to_buf(char *, char *, char const (&value)[N]) noexcept + { + return zview{value, N - 1}; + } + + static char *into_buf(char *begin, char *end, char const (&value)[N]) + { + if (internal::cmp_less(end - begin, size_buffer(value))) + throw conversion_overrun{ + "Could not convert char[] to string: too long for buffer."}; + std::memcpy(begin, value, N); + return begin + N; + } + static constexpr std::size_t size_buffer(char const (&)[N]) noexcept + { + return N; + } + + /// Don't allow conversion to this type. + static void from_string(std::string_view) = delete; +}; + + +template<> struct nullness : no_null +{}; + + +template<> struct string_traits +{ + static std::string from_string(std::string_view text) + { + return std::string{text}; + } + + static char *into_buf(char *begin, char *end, std::string const &value) + { + if (internal::cmp_greater_equal(std::size(value), end - begin)) + throw conversion_overrun{ + "Could not convert string to string: too long for buffer."}; + // Include the trailing zero. + value.copy(begin, std::size(value)); + begin[std::size(value)] = '\0'; + return begin + std::size(value) + 1; + } + + static zview to_buf(char *begin, char *end, std::string const &value) + { + return generic_to_buf(begin, end, value); + } + + static std::size_t size_buffer(std::string const &value) noexcept + { + return std::size(value) + 1; + } +}; + + +/// There's no real null for `std::string_view`. +/** I'm not sure how clear-cut this is: a `string_view` may have a null + * data pointer, which is analogous to a null `char` pointer. + */ +template<> struct nullness : no_null +{}; + + +/// String traits for `string_view`. +template<> struct string_traits +{ + static constexpr std::size_t + size_buffer(std::string_view const &value) noexcept + { + return std::size(value) + 1; + } + + static char *into_buf(char *begin, char *end, std::string_view const &value) + { + if (internal::cmp_greater_equal(std::size(value), end - begin)) + throw conversion_overrun{ + "Could not store string_view: too long for buffer."}; + value.copy(begin, std::size(value)); + begin[std::size(value)] = '\0'; + return begin + std::size(value) + 1; + } + + /// Don't convert to this type; it has nowhere to store its contents. + static std::string_view from_string(std::string_view) = delete; +}; + + +template<> struct nullness : no_null +{}; + + +/// String traits for `zview`. 
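+// Illustrative aside, not part of the upstream libpqxx headers: pqxx::zview
+// is a std::string_view which additionally guarantees a terminating zero just
+// past the viewed text, so it can be handed to C APIs without copying.  A
+// hedged sketch, assuming the public <pqxx/zview> header:
+//
+//     using pqxx::operator""_zv;
+//     constexpr auto greeting{"hello"_zv};      // zview over a string literal
+//     std::printf("%s\n", greeting.c_str());    // safe: NUL-terminated
+//
+// The traits that follow rely on that guarantee: into_buf() copies the viewed
+// text and then writes the terminating zero into the caller's buffer.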
+template<> struct string_traits +{ + static constexpr std::size_t + size_buffer(std::string_view const &value) noexcept + { + return std::size(value) + 1; + } + + static char *into_buf(char *begin, char *end, zview const &value) + { + auto const size{std::size(value)}; + if (internal::cmp_less_equal(end - begin, std::size(value))) + throw conversion_overrun{"Not enough buffer space to store this zview."}; + value.copy(begin, size); + begin[size] = '\0'; + return begin + size + 1; + } + + static std::string_view to_buf(char *begin, char *end, zview const &value) + { + return {into_buf(begin, end, value), std::size(value)}; + } + + /// Don't convert to this type; it has nowhere to store its contents. + static zview from_string(std::string_view) = delete; +}; + + +template<> struct nullness : no_null +{}; + + +template<> struct string_traits +{ + static std::size_t size_buffer(std::stringstream const &) = delete; + + static std::stringstream from_string(std::string_view text) + { + std::stringstream stream; + stream.write(text.data(), std::streamsize(std::size(text))); + return stream; + } + + static char *into_buf(char *, char *, std::stringstream const &) = delete; + static std::string_view + to_buf(char *, char *, std::stringstream const &) = delete; +}; + + +template<> struct nullness +{ + static constexpr bool has_null = true; + static constexpr bool always_null = true; + static constexpr bool is_null(std::nullptr_t const &) noexcept + { + return true; + } + static constexpr std::nullptr_t null() noexcept { return nullptr; } +}; + + +template<> struct nullness +{ + static constexpr bool has_null = true; + static constexpr bool always_null = true; + static constexpr bool is_null(std::nullopt_t const &) noexcept + { + return true; + } + static constexpr std::nullopt_t null() noexcept { return std::nullopt; } +}; + + +template<> struct nullness +{ + static constexpr bool has_null = true; + static constexpr bool always_null = true; + static constexpr bool is_null(std::monostate const &) noexcept + { + return true; + } + static constexpr std::monostate null() noexcept { return {}; } +}; + + +template struct nullness> +{ + static constexpr bool has_null = true; + static constexpr bool always_null = false; + static constexpr bool is_null(std::unique_ptr const &t) noexcept + { + return not t or pqxx::is_null(*t); + } + static constexpr std::unique_ptr null() { return {}; } +}; + + +template +struct string_traits> +{ + static std::unique_ptr from_string(std::string_view text) + { + return std::make_unique(string_traits::from_string(text)); + } + + static char * + into_buf(char *begin, char *end, std::unique_ptr const &value) + { + return string_traits::into_buf(begin, end, *value); + } + + static zview + to_buf(char *begin, char *end, std::unique_ptr const &value) + { + if (value) + return string_traits::to_buf(begin, end, *value); + else + return {}; + } + + static std::size_t + size_buffer(std::unique_ptr const &value) noexcept + { + return pqxx::size_buffer(*value.get()); + } +}; + + +template +inline format param_format(std::unique_ptr const &value) +{ + return param_format(*value); +} + + +template +inline constexpr bool is_unquoted_safe>{ + is_unquoted_safe}; + + +template struct nullness> +{ + static constexpr bool has_null = true; + static constexpr bool always_null = false; + static constexpr bool is_null(std::shared_ptr const &t) noexcept + { + return not t or pqxx::is_null(*t); + } + static constexpr std::shared_ptr null() { return {}; } +}; + + +template struct string_traits> +{ + 
static std::shared_ptr from_string(std::string_view text) + { + return std::make_shared(string_traits::from_string(text)); + } + + static zview to_buf(char *begin, char *end, std::shared_ptr const &value) + { + return string_traits::to_buf(begin, end, *value); + } + static char * + into_buf(char *begin, char *end, std::shared_ptr const &value) + { + return string_traits::into_buf(begin, end, *value); + } + static std::size_t size_buffer(std::shared_ptr const &value) noexcept + { + return pqxx::size_buffer(*value); + } +}; + + +template format param_format(std::shared_ptr const &value) +{ + return param_format(*value); +} + + +template +inline constexpr bool is_unquoted_safe>{ + is_unquoted_safe}; + + +template<> +struct nullness> + : no_null> +{}; + + +#if defined(PQXX_HAVE_CONCEPTS) +template struct nullness : no_null +{}; + + +template inline constexpr format param_format(DATA const &) +{ + return format::binary; +} + + +template struct string_traits +{ + static std::size_t size_buffer(DATA const &value) noexcept + { + return internal::size_esc_bin(std::size(value)); + } + + static zview to_buf(char *begin, char *end, DATA const &value) + { + return generic_to_buf(begin, end, value); + } + + static char *into_buf(char *begin, char *end, DATA const &value) + { + auto const budget{size_buffer(value)}; + if (internal::cmp_less(end - begin, budget)) + throw conversion_overrun{ + "Not enough buffer space to escape binary data."}; + internal::esc_bin(value, begin); + return begin + budget; + } + + static DATA from_string(std::string_view text) + { + auto const size{pqxx::internal::size_unesc_bin(std::size(text))}; + std::basic_string buf; + buf.resize(size); + pqxx::internal::unesc_bin(text, reinterpret_cast(buf.data())); + return buf; + } +}; +#endif // PQXX_HAVE_CONCEPTS + + +template<> struct string_traits> +{ + static std::size_t + size_buffer(std::basic_string const &value) noexcept + { + return internal::size_esc_bin(std::size(value)); + } + + static zview + to_buf(char *begin, char *end, std::basic_string const &value) + { + return generic_to_buf(begin, end, value); + } + + static char * + into_buf(char *begin, char *end, std::basic_string const &value) + { + auto const budget{size_buffer(value)}; + if (internal::cmp_less(end - begin, budget)) + throw conversion_overrun{ + "Not enough buffer space to escape binary data."}; + internal::esc_bin(value, begin); + return begin + budget; + } + + static std::basic_string from_string(std::string_view text) + { + auto const size{pqxx::internal::size_unesc_bin(std::size(text))}; + std::basic_string buf; + buf.resize(size); + pqxx::internal::unesc_bin(text, reinterpret_cast(buf.data())); + return buf; + } +}; + + +template<> +inline constexpr format param_format(std::basic_string const &) +{ + return format::binary; +} + + +template<> +struct nullness> + : no_null> +{}; + + +template<> struct string_traits> +{ + static std::size_t + size_buffer(std::basic_string_view const &value) noexcept + { + return internal::size_esc_bin(std::size(value)); + } + + static zview to_buf( + char *begin, char *end, std::basic_string_view const &value) + { + return generic_to_buf(begin, end, value); + } + + static char *into_buf( + char *begin, char *end, std::basic_string_view const &value) + { + auto const budget{size_buffer(value)}; + if (internal::cmp_less(end - begin, budget)) + throw conversion_overrun{ + "Not enough buffer space to escape binary data."}; + internal::esc_bin(value, begin); + return begin + budget; + } + + // There's no from_string, because 
there's nobody to hold the data. +}; + +template<> +inline constexpr format param_format(std::basic_string_view const &) +{ + return format::binary; +} +} // namespace pqxx + + +namespace pqxx::internal +{ +/// String traits for SQL arrays. +template struct array_string_traits +{ +private: + using elt_type = strip_t>; + using elt_traits = string_traits; + static constexpr zview s_null{"NULL"}; + +public: + static zview to_buf(char *begin, char *end, Container const &value) + { + return generic_to_buf(begin, end, value); + } + + static char *into_buf(char *begin, char *end, Container const &value) + { + std::size_t const budget{size_buffer(value)}; + if (internal::cmp_less(end - begin, budget)) + throw conversion_overrun{ + "Not enough buffer space to convert array to string."}; + + char *here = begin; + *here++ = '{'; + + bool nonempty{false}; + for (auto const &elt : value) + { + if (is_null(elt)) + { + s_null.copy(here, std::size(s_null)); + here += std::size(s_null); + } + else if constexpr (is_sql_array) + { + // Render nested array in-place. Then erase the trailing zero. + here = elt_traits::into_buf(here, end, elt) - 1; + } + else if constexpr (is_unquoted_safe) + { + // No need to quote or escape. Just convert the value straight into + // its place in the array, and "backspace" the trailing zero. + here = elt_traits::into_buf(here, end, elt) - 1; + } + else + { + *here++ = '"'; + + // Use the tail end of the destination buffer as an intermediate + // buffer. + auto const elt_budget{pqxx::size_buffer(elt)}; + for (char const c : elt_traits::to_buf(end - elt_budget, end, elt)) + { + if (c == '\\' or c == '"') + *here++ = '\\'; + *here++ = c; + } + *here++ = '"'; + } + *here++ = array_separator; + nonempty = true; + } + + // Erase that last comma, if present. + if (nonempty) + here--; + + *here++ = '}'; + *here++ = '\0'; + + return here; + } + + static std::size_t size_buffer(Container const &value) noexcept + { + if constexpr (is_unquoted_safe) + return 3 + std::accumulate( + std::begin(value), std::end(value), std::size_t{}, + [](std::size_t acc, elt_type const &elt) { + return acc + + (pqxx::is_null(elt) ? + std::size(s_null) : + elt_traits::size_buffer(elt)) - + 1; + }); + else + return 3 + std::accumulate( + std::begin(value), std::end(value), std::size_t{}, + [](std::size_t acc, elt_type const &elt) { + // Opening and closing quotes, plus worst-case escaping, + // but don't count the trailing zeroes. + std::size_t const elt_size{ + pqxx::is_null(elt) ? std::size(s_null) : + elt_traits::size_buffer(elt) - 1}; + return acc + 2 * elt_size + 2; + }); + } + + // We don't yet support parsing of array types using from_string. Doing so + // would require a reference to the connection. +}; +} // namespace pqxx::internal + + +namespace pqxx +{ +template +struct nullness> : no_null> +{}; + + +template +struct string_traits> + : internal::array_string_traits> +{}; + + +/// We don't know how to pass array params in binary format, so pass as text. +template +inline constexpr format param_format(std::vector const &) +{ + return format::text; +} + + +/// A `std::vector` is a binary string. Other vectors are not. +template +inline constexpr format param_format(std::vector const &) +{ + return format::binary; +} + + +template inline constexpr bool is_sql_array>{true}; + + +template +struct nullness> : no_null> +{}; + + +template +struct string_traits> + : internal::array_string_traits> +{}; + + +/// We don't know how to pass array params in binary format, so pass as text. 
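+// Illustrative aside, not part of the upstream libpqxx headers: what these
+// param_format overloads mean when binding query parameters.  A hedged
+// sketch, assuming a working pqxx::work named `tx`:
+//
+//     std::vector<int> ids{1, 2, 3};
+//     // Sent as the text array literal "{1,2,3}" (array_string_traits above).
+//     tx.exec_params("SELECT * FROM item WHERE id = ANY($1)", ids);
+//
+// A std::vector<std::byte> or std::basic_string<std::byte>, by contrast, is
+// treated as a single bytea value and travels in binary format, per the
+// overloads returning format::binary.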
+template +inline constexpr format param_format(std::array const &) +{ + return format::text; +} + + +/// An array of `std::byte` is a binary string. +template +inline constexpr format param_format(std::array const &) +{ + return format::binary; +} + + +template +inline constexpr bool is_sql_array>{true}; +} // namespace pqxx + + +namespace pqxx +{ +template inline std::string to_string(T const &value) +{ + if (is_null(value)) + throw conversion_error{ + "Attempt to convert null " + type_name + " to a string."}; + + std::string buf; + // We can't just reserve() space; modifying the terminating zero leads to + // undefined behaviour. + buf.resize(size_buffer(value)); + auto const data{buf.data()}; + auto const end{ + string_traits::into_buf(data, data + std::size(buf), value)}; + buf.resize(static_cast(end - data - 1)); + return buf; +} + + +template<> inline std::string to_string(float const &value) +{ + return internal::to_string_float(value); +} +template<> inline std::string to_string(double const &value) +{ + return internal::to_string_float(value); +} +template<> inline std::string to_string(long double const &value) +{ + return internal::to_string_float(value); +} +template<> inline std::string to_string(std::stringstream const &value) +{ + return value.str(); +} + + +template inline void into_string(T const &value, std::string &out) +{ + if (is_null(value)) + throw conversion_error{ + "Attempt to convert null " + type_name + " to a string."}; + + // We can't just reserve() data; modifying the terminating zero leads to + // undefined behaviour. + out.resize(size_buffer(value) + 1); + auto const data{out.data()}; + auto const end{ + string_traits::into_buf(data, data + std::size(out), value)}; + out.resize(static_cast(end - data - 1)); +} +} // namespace pqxx diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/encoding_group.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/encoding_group.hxx new file mode 100644 index 000000000..e17736e5b --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/encoding_group.hxx @@ -0,0 +1,60 @@ +/** Enum type for supporting encodings in libpqxx + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ENCODING_GROUP +#define PQXX_H_ENCODING_GROUP + +#include + +namespace pqxx::internal +{ +// Types of encodings supported by PostgreSQL, see +// https://www.postgresql.org/docs/current/static/multibyte.html#CHARSET-TABLE +enum class encoding_group +{ + // Handles all single-byte fixed-width encodings + MONOBYTE, + + // Multibyte encodings. + // Many of these can embed ASCII-like bytes inside multibyte characters, + // notably Big5, SJIS, SHIFT_JIS_2004, GP18030, GBK, JOHAB, UHC. + BIG5, + EUC_CN, + // TODO: Merge EUC_JP and EUC_JIS_2004? + EUC_JP, + EUC_JIS_2004, + EUC_KR, + EUC_TW, + GB18030, + GBK, + JOHAB, + MULE_INTERNAL, + // TODO: Merge SJIS and SHIFT_JIS_2004? + SJIS, + SHIFT_JIS_2004, + UHC, + UTF8, +}; + + +// TODO:: Can we just use string_view now? +/// Function type: "find the end of the current glyph." +/** This type of function takes a text buffer, and a location in that buffer, + * and returns the location one byte past the end of the current glyph. + * + * The start offset marks the beginning of the current glyph. It must fall + * within the buffer. 
+ * + * There are multiple different glyph scanner implementations, for different + * kinds of encodings. + */ +using glyph_scanner_func = + std::size_t(char const buffer[], std::size_t buffer_len, std::size_t start); +} // namespace pqxx::internal + +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/encodings.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/encodings.hxx new file mode 100644 index 000000000..ba7fecc70 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/encodings.hxx @@ -0,0 +1,90 @@ +/** Internal string encodings support for libpqxx + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ENCODINGS +#define PQXX_H_ENCODINGS + +#include "pqxx/internal/encoding_group.hxx" + +#include +#include + + +namespace pqxx::internal +{ +char const *name_encoding(int encoding_id); + +/// Convert libpq encoding enum or encoding name to its libpqxx group. +encoding_group enc_group(int /* libpq encoding ID */); +encoding_group enc_group(std::string_view); + + +/// Look up the glyph scanner function for a given encoding group. +/** To identify the glyph boundaries in a buffer, call this to obtain the + * scanner function appropriate for the buffer's encoding. Then, repeatedly + * call the scanner function to find the glyphs. + */ +PQXX_LIBEXPORT glyph_scanner_func *get_glyph_scanner(encoding_group); + + +// TODO: For ASCII search, treat UTF8/EUC_*/MULE_INTERNAL as MONOBYTE. + +/// Find any of the ASCII characters `NEEDLE` in `haystack`. +/** Scans through `haystack` until it finds a single-byte character that + * matches any value in `NEEDLE`. + * + * If it finds one, returns its offset. If not, returns the end of the + * haystack. + */ +template +inline std::size_t find_char( + glyph_scanner_func *scanner, std::string_view haystack, + std::size_t here = 0u) +{ + auto const sz{std::size(haystack)}; + auto const data{std::data(haystack)}; + while (here < sz) + { + auto next{scanner(data, sz, here)}; + // (For some reason gcc had a problem with a right-fold here. But clang + // was fine.) + if ((... or (data[here] == NEEDLE))) + { + // Also check against a multibyte character starting with a bytes which + // just happens to match one of the ASCII bytes we're looking for. It'd + // be cleaner to check that first, but either works. So, let's apply the + // most selective filter first and skip this check in almost all cases. + if (next == here + 1) + return here; + } + + // Nope, no hit. Move on. + here = next; + } + return sz; +} + + +/// Iterate over the glyphs in a buffer. +/** Scans the glyphs in the buffer, and for each, passes its begin and its + * one-past-end pointers to `callback`. 
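+ *
+ * A hedged illustrative sketch, not part of the upstream sources, counting
+ * the glyphs in a UTF-8 buffer:
+ *
+ *     char const text[]{"text in the client encoding"};
+ *     std::size_t glyphs{0};
+ *     pqxx::internal::for_glyphs(
+ *       pqxx::internal::encoding_group::UTF8,
+ *       [&glyphs](char const *, char const *) { ++glyphs; },
+ *       text, std::size(text) - 1);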
+ */ +template +inline void for_glyphs( + encoding_group enc, CALLABLE callback, char const buffer[], + std::size_t buffer_len, std::size_t start = 0) +{ + auto const scan{get_glyph_scanner(enc)}; + for (std::size_t here = start, next; here < buffer_len; here = next) + { + next = scan(buffer, buffer_len, here); + callback(buffer + here, buffer + next); + } +} +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-errorhandler.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-errorhandler.hxx new file mode 100644 index 000000000..ffc12a6cf --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-errorhandler.hxx @@ -0,0 +1,26 @@ +#include + +namespace pqxx +{ +class connection; +class errorhandler; +} // namespace pqxx + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_errorhandler : callgate +{ + friend class pqxx::errorhandler; + + connection_errorhandler(reference x) : super(x) {} + + void register_errorhandler(errorhandler *h) + { + home().register_errorhandler(h); + } + void unregister_errorhandler(errorhandler *h) + { + home().unregister_errorhandler(h); + } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-largeobject.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-largeobject.hxx new file mode 100644 index 000000000..49feaf9e6 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-largeobject.hxx @@ -0,0 +1,35 @@ +#include + +#include +#include + +namespace pqxx +{ +class blob; +class largeobject; +} // namespace pqxx + + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_largeobject : callgate +{ + friend class pqxx::blob; + friend class pqxx::largeobject; + + connection_largeobject(reference x) : super(x) {} + + pq::PGconn *raw_connection() const { return home().raw_connection(); } +}; + + +class PQXX_PRIVATE const_connection_largeobject : callgate +{ + friend class pqxx::blob; + friend class pqxx::largeobject; + + const_connection_largeobject(reference x) : super(x) {} + + std::string error_message() const { return home().err_msg(); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-notification_receiver.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-notification_receiver.hxx new file mode 100644 index 000000000..0bcb2db17 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-notification_receiver.hxx @@ -0,0 +1,29 @@ +#include + +#include "pqxx/connection.hxx" + + +namespace pqxx +{ +class notification_receiver; +} + + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_notification_receiver : callgate +{ + friend class pqxx::notification_receiver; + + connection_notification_receiver(reference x) : super(x) {} + + void add_receiver(notification_receiver *receiver) + { + home().add_receiver(receiver); + } + void remove_receiver(notification_receiver *receiver) noexcept + { + home().remove_receiver(receiver); + } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-pipeline.hxx 
b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-pipeline.hxx new file mode 100644 index 000000000..c6ae6e17a --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-pipeline.hxx @@ -0,0 +1,23 @@ +#include "pqxx/internal/libpq-forward.hxx" +#include + +#include "pqxx/pipeline.hxx" + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_pipeline : callgate +{ + friend class pqxx::pipeline; + + connection_pipeline(reference x) : super(x) {} + + void start_exec(char const query[]) { home().start_exec(query); } + pqxx::internal::pq::PGresult *get_result() { return home().get_result(); } + void cancel_query() { home().cancel_query(); } + + bool consume_input() noexcept { return home().consume_input(); } + bool is_busy() const noexcept { return home().is_busy(); } + + int encoding_id() { return home().encoding_id(); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-sql_cursor.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-sql_cursor.hxx new file mode 100644 index 000000000..51a889844 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-sql_cursor.hxx @@ -0,0 +1,19 @@ +#include + +namespace pqxx::internal +{ +class sql_cursor; +} + + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_sql_cursor : callgate +{ + friend class pqxx::internal::sql_cursor; + + connection_sql_cursor(reference x) : super(x) {} + + result exec(char const query[]) { return home().exec(query); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-stream_from.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-stream_from.hxx new file mode 100644 index 000000000..8961e7146 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-stream_from.hxx @@ -0,0 +1,15 @@ +#include + +#include "pqxx/connection.hxx" + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_stream_from : callgate +{ + friend class pqxx::stream_from; + + connection_stream_from(reference x) : super{x} {} + + auto read_copy_line() { return home().read_copy_line(); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-stream_to.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-stream_to.hxx new file mode 100644 index 000000000..a6974fb21 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-stream_to.hxx @@ -0,0 +1,17 @@ +#include + +#include "pqxx/stream_to.hxx" + + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_stream_to : callgate +{ + friend class pqxx::stream_to; + + connection_stream_to(reference x) : super(x) {} + + void write_copy_line(std::string_view line) { home().write_copy_line(line); } + void end_copy_write() { home().end_copy_write(); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-transaction.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-transaction.hxx new file mode 100644 index 000000000..74d659253 --- /dev/null +++ 
b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/connection-transaction.hxx @@ -0,0 +1,44 @@ +#include + +namespace pqxx +{ +class connection; +} + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE connection_transaction : callgate +{ + friend class pqxx::transaction_base; + + connection_transaction(reference x) : super(x) {} + + template result exec(STRING query, std::string_view desc) + { + return home().exec(query, desc); + } + + void register_transaction(transaction_base *t) + { + home().register_transaction(t); + } + void unregister_transaction(transaction_base *t) noexcept + { + home().unregister_transaction(t); + } + + auto read_copy_line() { return home().read_copy_line(); } + void write_copy_line(std::string_view line) { home().write_copy_line(line); } + void end_copy_write() { home().end_copy_write(); } + + result exec_prepared(zview statement, internal::c_params const &args) + { + return home().exec_prepared(statement, args); + } + + result exec_params(zview query, internal::c_params const &args) + { + return home().exec_params(query, args); + } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/errorhandler-connection.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/errorhandler-connection.hxx new file mode 100644 index 000000000..5560cedec --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/errorhandler-connection.hxx @@ -0,0 +1,13 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE errorhandler_connection : callgate +{ + friend class pqxx::connection; + + errorhandler_connection(reference x) : super(x) {} + + void unregister() noexcept { home().unregister(); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/icursor_iterator-icursorstream.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/icursor_iterator-icursorstream.hxx new file mode 100644 index 000000000..296d22145 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/icursor_iterator-icursorstream.hxx @@ -0,0 +1,24 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE icursor_iterator_icursorstream : callgate +{ + friend class pqxx::icursorstream; + + icursor_iterator_icursorstream(reference x) : super(x) {} + + icursor_iterator::difference_type pos() const noexcept + { + return home().pos(); + } + + icursor_iterator *get_prev() { return home().m_prev; } + void set_prev(icursor_iterator *i) { home().m_prev = i; } + + icursor_iterator *get_next() { return home().m_next; } + void set_next(icursor_iterator *i) { home().m_next = i; } + + void fill(result const &r) { home().fill(r); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/icursorstream-icursor_iterator.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/icursorstream-icursor_iterator.hxx new file mode 100644 index 000000000..56056d5ef --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/icursorstream-icursor_iterator.hxx @@ -0,0 +1,32 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE icursorstream_icursor_iterator : callgate +{ + friend class pqxx::icursor_iterator; + + icursorstream_icursor_iterator(reference x) : super(x) {} + + void 
insert_iterator(icursor_iterator *i) noexcept + { + home().insert_iterator(i); + } + + void remove_iterator(icursor_iterator *i) const noexcept + { + home().remove_iterator(i); + } + + icursorstream::size_type forward() { return home().forward(); } + icursorstream::size_type forward(icursorstream::size_type n) + { + return home().forward(n); + } + + void service_iterators(icursorstream::difference_type p) + { + home().service_iterators(p); + } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/result-connection.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/result-connection.hxx new file mode 100644 index 000000000..daa0808c0 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/result-connection.hxx @@ -0,0 +1,14 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE result_connection : callgate +{ + friend class pqxx::connection; + + result_connection(reference x) : super(x) {} + + operator bool() const { return bool(home()); } + bool operator!() const { return not home(); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/result-creation.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/result-creation.hxx new file mode 100644 index 000000000..3d9205f2c --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/result-creation.hxx @@ -0,0 +1,24 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE result_creation : callgate +{ + friend class pqxx::connection; + friend class pqxx::pipeline; + + result_creation(reference x) : super(x) {} + + static result create( + internal::pq::PGresult *rhs, std::shared_ptr const &query, + encoding_group enc) + { + return result(rhs, query, enc); + } + + void check_status(std::string_view desc = ""sv) const + { + return home().check_status(desc); + } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/result-pipeline.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/result-pipeline.hxx new file mode 100644 index 000000000..3ebe436d2 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/result-pipeline.hxx @@ -0,0 +1,16 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE result_pipeline : callgate +{ + friend class pqxx::pipeline; + + result_pipeline(reference x) : super(x) {} + + std::shared_ptr query_ptr() const + { + return home().query_ptr(); + } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/result-sql_cursor.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/result-sql_cursor.hxx new file mode 100644 index 000000000..78b450739 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/result-sql_cursor.hxx @@ -0,0 +1,13 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE result_sql_cursor : callgate +{ + friend class pqxx::internal::sql_cursor; + + result_sql_cursor(reference x) : super(x) {} + + char const *cmd_status() const noexcept { return home().cmd_status(); } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/transaction-sql_cursor.hxx 
b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/transaction-sql_cursor.hxx new file mode 100644 index 000000000..4ed78dc93 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/transaction-sql_cursor.hxx @@ -0,0 +1,10 @@ +#include + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE transaction_sql_cursor : callgate +{ + friend class pqxx::internal::sql_cursor; + transaction_sql_cursor(reference x) : super(x) {} +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/transaction-transaction_focus.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/transaction-transaction_focus.hxx new file mode 100644 index 000000000..ca7939a99 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/gates/transaction-transaction_focus.hxx @@ -0,0 +1,30 @@ +#include + +#include "pqxx/transaction_base.hxx" + +namespace pqxx::internal::gate +{ +class PQXX_PRIVATE transaction_transaction_focus : callgate +{ + friend class pqxx::transaction_focus; + + transaction_transaction_focus(reference x) : super(x) {} + + void register_focus(transaction_focus *focus) + { + home().register_focus(focus); + } + void unregister_focus(transaction_focus *focus) noexcept + { + home().unregister_focus(focus); + } + void register_pending_error(zview error) + { + home().register_pending_error(error); + } + void register_pending_error(std::string &&error) + { + home().register_pending_error(std::move(error)); + } +}; +} // namespace pqxx::internal::gate diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/header-post.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/header-post.hxx new file mode 100644 index 000000000..ff6bf8986 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/header-post.hxx @@ -0,0 +1,22 @@ +/* Compiler deficiency workarounds for compiling libpqxx headers. + * + * To be included at the end of each libpqxx header, in order to restore the + * client program's settings. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +// NO GUARDS HERE! This code should be executed every time! + +#if defined(_MSC_VER) +# pragma warning(pop) // Restore compiler's warning state +#endif + +#if !defined(PQXX_HEADER_PRE) +# error "Include pqxx/internal/header-post.hxx AFTER its 'pre' counterpart." +#endif + +#undef PQXX_HEADER_PRE diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/header-pre.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/header-pre.hxx new file mode 100644 index 000000000..abc1a398d --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/header-pre.hxx @@ -0,0 +1,169 @@ +/* Compiler settings for compiling libpqxx headers, and workarounds for all. + * + * Include this before including any other libpqxx headers from within libpqxx. + * And to balance it out, also include header-post.hxx at the end of the batch + * of headers. + * + * The public libpqxx headers (e.g. ``) include this already; + * there's no need to do this from within an application. + * + * Include this file at the highest aggregation level possible to avoid nesting + * and to keep things simple. 
+ * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ + +// NO GUARD HERE! This part should be included every time this file is. +#if defined(_MSC_VER) + +// Save compiler's warning state, and set warning level 4 for maximum +// sensitivity to warnings. +# pragma warning(push, 4) + +// Visual C++ generates some entirely unreasonable warnings. Disable them. +// Copy constructor could not be generated. +# pragma warning(disable : 4511) +// Assignment operator could not be generated. +# pragma warning(disable : 4512) +// Can't expose outside classes without exporting them. Except the MSVC docs +// say please ignore the warning if it's a standard library class. +# pragma warning(disable : 4251) +// Can't derive library classes from outside classes without exporting them. +// Except the MSVC docs say please ignore the warning if the parent class is +// in the standard library. +# pragma warning(disable : 4275) +// Can't inherit from non-exported class. +# pragma warning(disable : 4275) + +#endif // _MSC_VER + + +#if defined(PQXX_HEADER_PRE) +# error "Avoid nesting #include of pqxx/internal/header-pre.hxx." +#endif + +#define PQXX_HEADER_PRE + + +// Workarounds & definitions that need to be included even in library's headers +#include "pqxx/config-public-compiler.h" + +// Enable ISO-646 alternative operaotr representations: "and" instead of "&&" +// etc. on older compilers. C++20 removes this header. +#if __has_include() +# include +#endif + + +#if defined(PQXX_HAVE_GCC_PURE) +/// Declare function "pure": no side effects, only reads globals and its args. +# define PQXX_PURE __attribute__((pure)) +#else +# define PQXX_PURE /* pure */ +#endif + + +#if defined(__GNUC__) +/// Tell the compiler to optimise a function for size, not speed. +# define PQXX_COLD __attribute__((cold)) +#else +# define PQXX_COLD /* cold */ +#endif + + +// Workarounds for Windows +#ifdef _WIN32 + +/* For now, export DLL symbols if _DLL is defined. This is done automatically + * by the compiler when linking to the dynamic version of the runtime library, + * according to "gzh" + */ +# if defined(PQXX_SHARED) && !defined(PQXX_LIBEXPORT) +# define PQXX_LIBEXPORT __declspec(dllimport) +# endif // PQXX_SHARED && !PQXX_LIBEXPORT + + +// Workarounds for Microsoft Visual C++ +# ifdef _MSC_VER + +// Suppress vtables on abstract classes. +# define PQXX_NOVTABLE __declspec(novtable) + +// Automatically link with the appropriate libpq (static or dynamic, debug or +// release). The default is to use the release DLL. Define PQXX_PQ_STATIC to +// link to a static version of libpq, and _DEBUG to link to a debug version. +// The two may be combined. +# if defined(PQXX_AUTOLINK) +# if defined(PQXX_PQ_STATIC) +# ifdef _DEBUG +# pragma comment(lib, "libpqd") +# else +# pragma comment(lib, "libpq") +# endif +# else +# ifdef _DEBUG +# pragma comment(lib, "libpqddll") +# else +# pragma comment(lib, "libpqdll") +# endif +# endif +# endif + +// If we're not compiling libpqxx itself, automatically link with the +// appropriate libpqxx library. To link with the libpqxx DLL, define +// PQXX_SHARED; the default is to link with the static library. A static link +// is the recommended practice. +// +// The preprocessor macro PQXX_INTERNAL is used to detect whether we +// are compiling the libpqxx library itself. 
When you compile the library +// yourself using your own project file, make sure to include this macro. +# if defined(PQXX_AUTOLINK) && !defined(PQXX_INTERNAL) +# ifdef PQXX_SHARED +# ifdef _DEBUG +# pragma comment(lib, "libpqxxD") +# else +# pragma comment(lib, "libpqxx") +# endif +# else // !PQXX_SHARED +# ifdef _DEBUG +# pragma comment(lib, "libpqxx_staticD") +# else +# pragma comment(lib, "libpqxx_static") +# endif +# endif +# endif + +# endif // _MSC_VER + +#elif defined(PQXX_HAVE_GCC_VISIBILITY) // !_WIN32 + +# define PQXX_LIBEXPORT __attribute__((visibility("default"))) +# define PQXX_PRIVATE __attribute__((visibility("hidden"))) + +#endif // PQXX_HAVE_GCC_VISIBILITY + + +#ifndef PQXX_LIBEXPORT +# define PQXX_LIBEXPORT /* libexport */ +#endif + +#ifndef PQXX_PRIVATE +# define PQXX_PRIVATE /* private */ +#endif + +#ifndef PQXX_NOVTABLE +# define PQXX_NOVTABLE /* novtable */ +#endif + +// C++20: Assume support. +#if defined(PQXX_HAVE_LIKELY) +# define PQXX_LIKELY [[likely]] +# define PQXX_UNLIKELY [[unlikely]] +#else +# define PQXX_LIKELY /* [[likely]] */ +# define PQXX_UNLIKELY /* [[unlikely]] */ +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/ignore-deprecated-post.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/ignore-deprecated-post.hxx new file mode 100644 index 000000000..cebcf0594 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/ignore-deprecated-post.hxx @@ -0,0 +1,15 @@ +/// End a code block started by "ignore-deprecated-pre.hxx". + +#if !defined(PQXX_IGNORING_DEPRECATED) +# error "Ended an 'ignore-deprecated' block while none was active." +#endif + +#if defined(__GNUC__) +# pragma GCC diagnostic pop +#endif // __GNUC__ + +#ifdef _MSC_VER +# pragma warning(pop) +#endif + +#undef PQXX_IGNORING_DEPRECATED diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/ignore-deprecated-pre.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/ignore-deprecated-pre.hxx new file mode 100644 index 000000000..8ac57afaa --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/ignore-deprecated-pre.hxx @@ -0,0 +1,28 @@ +/** Start a block of deprecated code which may call other deprecated code. + * + * Most compilers will emit warnings when deprecated code is invoked from + * non-deprecated code. But some compilers (notably gcc) will always emit the + * warning even when the calling code is also deprecated. + * + * This header starts a block where those warnings are suppressed. It can be + * included inside a code block. + * + * Always match the #include with a closing #include of + * "ignore-deprecated-post.hxx". To avoid mistakes, keep the enclosed area as + * small as possible. + */ +#if defined(PQXX_IGNORING_DEPRECATED) +# error "Started an 'ignore-deprecated' block inside another." 
+#endif + +#define PQXX_IGNORING_DEPRECATED + +#if defined(__GNUC__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#endif // __GNUC__ + +#ifdef _MSC_VER +# pragma warning(push) +# pragma warning(disable : 4996) +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/libpq-forward.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/libpq-forward.hxx new file mode 100644 index 000000000..9e74f79ec --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/libpq-forward.hxx @@ -0,0 +1,31 @@ +/** Minimal forward declarations of libpq types needed in libpqxx headers. + * + * DO NOT INCLUDE THIS FILE when building client programs. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +extern "C" +{ + struct pg_conn; + struct pg_result; + struct pgNotify; +} + +/// Forward declarations of libpq types as needed in libpqxx headers. +namespace pqxx::internal::pq +{ +using PGconn = pg_conn; +using PGresult = pg_result; +using PGnotify = pgNotify; +using PQnoticeProcessor = void (*)(void *, char const *); +} // namespace pqxx::internal::pq + +namespace pqxx +{ +/// PostgreSQL database row identifier. +using oid = unsigned int; +} // namespace pqxx diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/result_iter.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/result_iter.hxx new file mode 100644 index 000000000..1fa1f7d8a --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/result_iter.hxx @@ -0,0 +1,124 @@ +/** Result loops. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_RESULT_ITER +#define PQXX_H_RESULT_ITER + +#include + +#include "pqxx/strconv.hxx" + +namespace pqxx +{ +class result; +} // namespace pqxx + + +namespace pqxx::internal +{ +// C++20: Replace with generator? +/// Iterator for looped unpacking of a result. +template class result_iter +{ +public: + using value_type = std::tuple; + + /// Construct an "end" iterator. + result_iter() = default; + + explicit result_iter(result const &home) : + m_home{&home}, m_size{std::size(home)} + { + if (not std::empty(home)) + read(); + } + result_iter(result_iter const &) = default; + + result_iter &operator++() + { + m_index++; + if (m_index >= m_size) + m_home = nullptr; + else + read(); + return *this; + } + + /// Comparison only works for comparing to end(). 
+ bool operator==(result_iter const &rhs) const + { + return m_home == rhs.m_home; + } + bool operator!=(result_iter const &rhs) const { return not(*this == rhs); } + + value_type const &operator*() const { return m_value; } + +private: + void read() { (*m_home)[m_index].convert(m_value); } + + result const *m_home{nullptr}; + result::size_type m_index{0}; + result::size_type m_size; + value_type m_value; +}; + + +template class result_iteration +{ +public: + using iterator = result_iter; + explicit result_iteration(result const &home) : m_home{home} + { + constexpr auto tup_size{sizeof...(TYPE)}; + if (home.columns() != tup_size) + throw usage_error{internal::concat( + "Tried to extract ", to_string(tup_size), + " field(s) from a result with ", to_string(home.columns()), + " column(s).")}; + } + iterator begin() const + { + if (std::size(m_home) == 0) + return end(); + else + return iterator{m_home}; + } + iterator end() const { return {}; } + +private: + pqxx::result const &m_home; +}; +} // namespace pqxx::internal + + +template inline auto pqxx::result::iter() const +{ + return pqxx::internal::result_iteration{*this}; +} + + +template +inline void pqxx::result::for_each(CALLABLE &&func) const +{ + using args_tuple = internal::args_t; + constexpr auto sz{std::tuple_size_v}; + static_assert( + sz > 0, + "Callback for for_each must take parameters, one for each column in the " + "result."); + + auto const cols{this->columns()}; + if (sz != cols) + throw usage_error{internal::concat( + "Callback to for_each takes ", sz, "parameter", (sz == 1) ? "" : "s", + ", but result set has ", cols, "field", (cols == 1) ? "" : "s", ".")}; + + using pass_tuple = pqxx::internal::strip_types_t; + for (auto const r : *this) std::apply(func, r.as_tuple()); +} +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/result_iterator.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/result_iterator.hxx new file mode 100644 index 000000000..3f27a1d3f --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/result_iterator.hxx @@ -0,0 +1,389 @@ +/* Definitions for the pqxx::result class and support classes. + * + * pqxx::result represents the set of result rows from a database query. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/result instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_RESULT_ITERATOR +#define PQXX_H_RESULT_ITERATOR + +#include "pqxx/row.hxx" + + +/* Result iterator. + * + * Don't include this header from your own application; it is included for you + * by other libpqxx headers. + */ + +namespace pqxx +{ +/// Iterator for rows in a result. Use as result::const_iterator. +/** A result, once obtained, cannot be modified. Therefore there is no + * plain iterator type for result. However its const_iterator type can be + * used to inspect its rows without changing them. + */ +class PQXX_LIBEXPORT const_result_iterator : public row +{ +public: + using iterator_category = std::random_access_iterator_tag; + using value_type = row const; + using pointer = row const *; + using reference = row; + using size_type = result_size_type; + using difference_type = result_difference_type; + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + /// Create an iterator, but in an unusable state. 
+ const_result_iterator() noexcept = default; + /// Copy an iterator. + const_result_iterator(const_result_iterator const &) noexcept = default; + /// Move an iterator. + const_result_iterator(const_result_iterator &&) noexcept = default; + + /// Begin iterating a @ref row. + const_result_iterator(row const &t) noexcept : row{t} {} +#include "pqxx/internal/ignore-deprecated-post.hxx" + + /** + * @name Dereferencing operators + * + * An iterator "points to" its own row, which is also itself. This makes it + * easy to address a @ref result as a two-dimensional container, without + * going through the intermediate step of dereferencing the iterator. It + * makes the interface similar to C pointer/array semantics. + * + * IIRC Alex Stepanov, the inventor of the STL, once remarked that having + * this as standard behaviour for pointers would be useful in some + * algorithms. So even if this makes me look foolish, I would seem to be in + * distinguished company. + */ + //@{ + /// Dereference the iterator. + [[nodiscard]] pointer operator->() const { return this; } + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + /// Dereference the iterator. + [[nodiscard]] reference operator*() const { return *this; } +#include "pqxx/internal/ignore-deprecated-post.hxx" + //@} + + /** + * @name Field access + */ + //@{ + using row::back; + using row::front; + using row::operator[]; + using row::at; + using row::rownumber; + //@} + + /** + * @name Manipulations + */ + //@{ + const_result_iterator &operator=(const_result_iterator const &rhs) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + row::operator=(rhs); +#include "pqxx/internal/ignore-deprecated-post.hxx" + return *this; + } + + const_result_iterator &operator=(const_result_iterator &&rhs) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + row::operator=(std::move(rhs)); +#include "pqxx/internal/ignore-deprecated-post.hxx" + return *this; + } + + const_result_iterator operator++(int); + const_result_iterator &operator++() + { + ++m_index; + return *this; + } + const_result_iterator operator--(int); + const_result_iterator &operator--() + { + --m_index; + return *this; + } + + const_result_iterator &operator+=(difference_type i) + { + m_index += i; + return *this; + } + const_result_iterator &operator-=(difference_type i) + { + m_index -= i; + return *this; + } + + /// Interchange two iterators in an exception-safe manner. 
+ void swap(const_result_iterator &other) noexcept + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + row::swap(other); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + //@} + + /** + * @name Comparisons + */ + //@{ + [[nodiscard]] bool operator==(const_result_iterator const &i) const + { + return m_index == i.m_index; + } + [[nodiscard]] bool operator!=(const_result_iterator const &i) const + { + return m_index != i.m_index; + } + [[nodiscard]] bool operator<(const_result_iterator const &i) const + { + return m_index < i.m_index; + } + [[nodiscard]] bool operator<=(const_result_iterator const &i) const + { + return m_index <= i.m_index; + } + [[nodiscard]] bool operator>(const_result_iterator const &i) const + { + return m_index > i.m_index; + } + [[nodiscard]] bool operator>=(const_result_iterator const &i) const + { + return m_index >= i.m_index; + } + //@} + + /** + * @name Arithmetic operators + */ + //@{ + [[nodiscard]] inline const_result_iterator operator+(difference_type) const; + friend const_result_iterator + operator+(difference_type, const_result_iterator const &); + [[nodiscard]] inline const_result_iterator operator-(difference_type) const; + [[nodiscard]] inline difference_type + operator-(const_result_iterator const &) const; + //@} + +private: + friend class pqxx::result; + const_result_iterator(pqxx::result const *r, result_size_type i) noexcept : + row{*r, i, r->columns()} + {} +}; + + +/// Reverse iterator for result. Use as result::const_reverse_iterator. +class PQXX_LIBEXPORT const_reverse_result_iterator + : private const_result_iterator +{ +public: + using super = const_result_iterator; + using iterator_type = const_result_iterator; + using iterator_type::difference_type; + using iterator_type::iterator_category; + using iterator_type::pointer; + using value_type = iterator_type::value_type; + using reference = iterator_type::reference; + + /// Create an iterator, but in an unusable state. + const_reverse_result_iterator() = default; + /// Copy an iterator. + const_reverse_result_iterator(const_reverse_result_iterator const &rhs) = + default; + /// Copy a reverse iterator from a regular iterator. + explicit const_reverse_result_iterator(const_result_iterator const &rhs) : + const_result_iterator{rhs} + { + super::operator--(); + } + + /// Move a regular iterator into a reverse iterator. + explicit const_reverse_result_iterator(const_result_iterator const &&rhs) : + const_result_iterator{std::move(rhs)} + { + super::operator--(); + } + + /// Return the underlying "regular" iterator (as per standard library). + [[nodiscard]] PQXX_PURE const_result_iterator base() const noexcept; + + /** + * @name Dereferencing operators + */ + //@{ + /// Dereference iterator. + using const_result_iterator::operator->; + /// Dereference iterator. 
+ using const_result_iterator::operator*; + //@} + + /** + * @name Field access + */ + //@{ + using const_result_iterator::back; + using const_result_iterator::front; + using const_result_iterator::operator[]; + using const_result_iterator::at; + using const_result_iterator::rownumber; + //@} + + /** + * @name Manipulations + */ + //@{ + const_reverse_result_iterator & + operator=(const_reverse_result_iterator const &r) + { + iterator_type::operator=(r); + return *this; + } + const_reverse_result_iterator &operator=(const_reverse_result_iterator &&r) + { + iterator_type::operator=(std::move(r)); + return *this; + } + const_reverse_result_iterator &operator++() + { + iterator_type::operator--(); + return *this; + } + const_reverse_result_iterator operator++(int); + const_reverse_result_iterator &operator--() + { + iterator_type::operator++(); + return *this; + } + const_reverse_result_iterator operator--(int); + const_reverse_result_iterator &operator+=(difference_type i) + { + iterator_type::operator-=(i); + return *this; + } + const_reverse_result_iterator &operator-=(difference_type i) + { + iterator_type::operator+=(i); + return *this; + } + + void swap(const_reverse_result_iterator &other) noexcept + { + const_result_iterator::swap(other); + } + //@} + + /** + * @name Arithmetic operators + */ + //@{ + [[nodiscard]] const_reverse_result_iterator + operator+(difference_type i) const + { + return const_reverse_result_iterator(base() - i); + } + [[nodiscard]] const_reverse_result_iterator operator-(difference_type i) + { + return const_reverse_result_iterator(base() + i); + } + [[nodiscard]] difference_type + operator-(const_reverse_result_iterator const &rhs) const + { + return rhs.const_result_iterator::operator-(*this); + } + //@} + + /** + * @name Comparisons + */ + //@{ + [[nodiscard]] bool + operator==(const_reverse_result_iterator const &rhs) const noexcept + { + return iterator_type::operator==(rhs); + } + [[nodiscard]] bool + operator!=(const_reverse_result_iterator const &rhs) const noexcept + { + return not operator==(rhs); + } + + [[nodiscard]] bool operator<(const_reverse_result_iterator const &rhs) const + { + return iterator_type::operator>(rhs); + } + [[nodiscard]] bool operator<=(const_reverse_result_iterator const &rhs) const + { + return iterator_type::operator>=(rhs); + } + [[nodiscard]] bool operator>(const_reverse_result_iterator const &rhs) const + { + return iterator_type::operator<(rhs); + } + [[nodiscard]] bool operator>=(const_reverse_result_iterator const &rhs) const + { + return iterator_type::operator<=(rhs); + } + //@} +}; + + +inline const_result_iterator +const_result_iterator::operator+(result::difference_type o) const +{ + return {&m_result, size_type(result::difference_type(m_index) + o)}; +} + +inline const_result_iterator +operator+(result::difference_type o, const_result_iterator const &i) +{ + return i + o; +} + +inline const_result_iterator +const_result_iterator::operator-(result::difference_type o) const +{ + return {&m_result, result_size_type(result::difference_type(m_index) - o)}; +} + +inline result::difference_type +const_result_iterator::operator-(const const_result_iterator &i) const +{ + return result::difference_type(num() - i.num()); +} + +inline const_result_iterator result::end() const noexcept +{ + return {this, size()}; +} + + +inline const_result_iterator result::cend() const noexcept +{ + return end(); +} + + +inline const_reverse_result_iterator +operator+(result::difference_type n, const_reverse_result_iterator const &i) +{ + 
return const_reverse_result_iterator{i.base() - n}; +} + +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/sql_cursor.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/sql_cursor.hxx new file mode 100644 index 000000000..a26d06306 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/sql_cursor.hxx @@ -0,0 +1,118 @@ +/** Internal wrapper for SQL cursors. Supports higher-level cursor classes. + * + * DO NOT INCLUDE THIS FILE DIRECTLY. Other headers include it for you. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_SQL_CURSOR +#define PQXX_H_SQL_CURSOR + +namespace pqxx::internal +{ +/// Cursor with SQL positioning semantics. +/** Thin wrapper around an SQL cursor, with SQL's ideas of positioning. + * + * SQL cursors have pre-increment/pre-decrement semantics, with on either end + * of the result set a special position that does not repesent a row. This + * class models SQL cursors for the purpose of implementing more C++-like + * semantics on top. + * + * Positions of actual rows are numbered starting at 1. Position 0 exists but + * does not refer to a row. There is a similar non-row position at the end of + * the result set. + * + * Don't use this at home. You deserve better. Use the stateles_cursor + * instead. + */ +class PQXX_LIBEXPORT sql_cursor : public cursor_base +{ +public: + sql_cursor( + transaction_base &t, std::string_view query, std::string_view cname, + cursor_base::access_policy ap, cursor_base::update_policy up, + cursor_base::ownership_policy op, bool hold); + + sql_cursor( + transaction_base &t, std::string_view cname, + cursor_base::ownership_policy op); + + ~sql_cursor() noexcept { close(); } + + result fetch(difference_type rows, difference_type &displacement); + result fetch(difference_type rows) + { + difference_type d = 0; + return fetch(rows, d); + } + difference_type move(difference_type rows, difference_type &displacement); + difference_type move(difference_type rows) + { + difference_type d = 0; + return move(rows, d); + } + + /// Current position, or -1 for unknown + /** + * The starting position, just before the first row, counts as position zero. + * + * Position may be unknown if (and only if) this cursor was adopted, and has + * never hit its starting position (position zero). + */ + difference_type pos() const noexcept { return m_pos; } + + /// End position, or -1 for unknown + /** + * Returns the final position, just after the last row in the result set. The + * starting position, just before the first row, counts as position zero. + * + * End position is unknown until it is encountered during use. + */ + difference_type endpos() const noexcept { return m_endpos; } + + /// Return zero-row result for this cursor. + result const &empty_result() const noexcept { return m_empty_result; } + + void close() noexcept; + +private: + difference_type adjust(difference_type hoped, difference_type actual); + static std::string stridestring(difference_type); + /// Initialize cached empty result. Call only at beginning or end! + void init_empty_result(transaction_base &); + + /// Connection in which this cursor lives. 
+ connection &m_home; + + /// Zero-row result from this cursor (or plain empty one if cursor is + /// adopted) + result m_empty_result; + + result m_cached_current_row; + + /// Is this cursor adopted (as opposed to created by this cursor object)? + bool m_adopted; + + /// Will this cursor object destroy its SQL cursor when it dies? + cursor_base::ownership_policy m_ownership; + + /// At starting position (-1), somewhere in the middle (0), or past end (1) + int m_at_end; + + /// Position, or -1 for unknown + difference_type m_pos; + + /// End position, or -1 for unknown + difference_type m_endpos = -1; +}; + + +PQXX_LIBEXPORT result_size_type obtain_stateless_cursor_size(sql_cursor &); +PQXX_LIBEXPORT result stateless_cursor_retrieve( + sql_cursor &, result::difference_type size, + result::difference_type begin_pos, result::difference_type end_pos); +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/statement_parameters.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/statement_parameters.hxx new file mode 100644 index 000000000..b078bf6e0 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/statement_parameters.hxx @@ -0,0 +1,131 @@ +/** Common implementation for statement parameter lists. + * + * These are used for both prepared statements and parameterized statements. + * + * DO NOT INCLUDE THIS FILE DIRECTLY. Other headers include it for you. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_STATEMENT_PARAMETER +#define PQXX_H_STATEMENT_PARAMETER + +#include +#include +#include +#include +#include + +#include "pqxx/binarystring.hxx" +#include "pqxx/strconv.hxx" +#include "pqxx/util.hxx" + + +namespace pqxx::internal +{ +template +constexpr inline auto const iterator_identity{ + [](decltype(*std::declval()) x) { return x; }}; + + +/// Marker type: pass a dynamically-determined number of statement parameters. +/** @deprecated Use @ref params instead. + * + * Normally when invoking a prepared or parameterised statement, the number + * of parameters is known at compile time. For instance, + * `t.exec_prepared("foo", 1, "x");` executes statement `foo` with two + * parameters, an `int` and a C string. + * + * But sometimes you may want to pass a number of parameters known only at run + * time. In those cases, a @ref dynamic_params encodes a dynamically + * determined number of parameters. You can mix these with regular, static + * parameter lists, and you can re-use them for multiple statement invocations. + * + * A dynamic_params object does not store copies of its parameters, so make + * sure they remain accessible until you've executed the statement. + * + * The ACCESSOR is an optional callable (such as a lambda). If you pass an + * accessor `a`, then each parameter `p` goes into your statement as `a(p)`. + */ +template)> +class dynamic_params +{ +public: + /// Wrap a sequence of pointers or iterators. + constexpr dynamic_params(IT begin, IT end) : + m_begin(begin), m_end(end), m_accessor(iterator_identity) + {} + + /// Wrap a sequence of pointers or iterators. + /** This version takes an accessor callable. If you pass an accessor `acc`, + * then any parameter `p` will go into the statement's parameter list as + * `acc(p)`. 
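+ *
+ * A rough, editorial sketch (the container and accessor names are
+ * hypothetical, not taken from the upstream documentation):
+ *
+ *     std::vector<std::pair<int, std::string>> rows;
+ *     auto get_name = [](auto const &p) { return p.second; };
+ *     pqxx::internal::dynamic_params name_params{
+ *       std::begin(rows), std::end(rows), get_name};
+ *
+ *     // Each element r is then passed to the statement as get_name(r).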
+ */ + constexpr dynamic_params(IT begin, IT end, ACCESSOR &acc) : + m_begin(begin), m_end(end), m_accessor(acc) + {} + + /// Wrap a container. + template + explicit constexpr dynamic_params(C &container) : + dynamic_params(std::begin(container), std::end(container)) + {} + + /// Wrap a container. + /** This version takes an accessor callable. If you pass an accessor `acc`, + * then any parameter `p` will go into the statement's parameter list as + * `acc(p)`. + */ + template + explicit constexpr dynamic_params(C &container, ACCESSOR &acc) : + dynamic_params(std::begin(container), std::end(container), acc) + {} + + constexpr IT begin() const noexcept { return m_begin; } + constexpr IT end() const noexcept { return m_end; } + + constexpr auto access(decltype(*std::declval()) value) const + -> decltype(std::declval()(value)) + { + return m_accessor(value); + } + +private: + IT const m_begin, m_end; + ACCESSOR m_accessor = iterator_identity; +}; + + +/// Internal type: encode statement parameters. +/** Compiles arguments for prepared statements and parameterised queries into + * a format that can be passed into libpq. + * + * Objects of this type are meant to be short-lived: a `c_params` lives and + * dies entirely within the call to execute. So, for example, if you pass in a + * non-null pointer as a parameter, @ref params may simply use that pointer as + * a parameter value, without arranging longer-term storage for the data to + * which it points. All values referenced by parameters must remain "live" + * until the parameterised or prepared statement has been executed. + */ +struct PQXX_LIBEXPORT c_params +{ + c_params() = default; + /// Copying these objects is pointless and expensive. Don't do it. + c_params(c_params const &) = delete; + c_params(c_params &&) = default; + + /// Pre-allocate storage for `n` parameters. + void reserve(std::size_t n) &; + + /// As used by libpq: pointers to parameter values. + std::vector values; + /// As used by libpq: lengths of non-null arguments, in bytes. + std::vector lengths; + /// As used by libpq: effectively boolean "is this a binary parameter?" + std::vector formats; +}; +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/stream_iterator.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/stream_iterator.hxx new file mode 100644 index 000000000..f240dcfa7 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/stream_iterator.hxx @@ -0,0 +1,105 @@ +/** Stream iterators. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_STREAM_ITERATOR +#define PQXX_H_STREAM_ITERATOR + +#include + +namespace pqxx +{ +class stream_from; +} + + +namespace pqxx::internal +{ +// C++20: Replace with generator? +/// Input iterator for stream_from. +/** Just barely enough to support range-based "for" loops. Don't assume that + * any of the usual behaviour works beyond that. + */ +template class stream_input_iterator +{ +public: + using value_type = std::tuple; + + /// Construct an "end" iterator. 
+ stream_input_iterator() = default; + + explicit stream_input_iterator(stream_from &home) : m_home(&home) + { + advance(); + } + stream_input_iterator(stream_input_iterator const &) = default; + + stream_input_iterator &operator++() + { + advance(); + return *this; + } + + value_type const &operator*() const { return m_value; } + + /// Comparison only works for comparing to end(). + bool operator==(stream_input_iterator const &rhs) const + { + return m_home == rhs.m_home; + } + /// Comparison only works for comparing to end(). + bool operator!=(stream_input_iterator const &rhs) const + { + return not(*this == rhs); + } + +private: + void advance() + { + if (m_home == nullptr) + throw usage_error{"Moving stream_from iterator beyond end()."}; + if (not((*m_home) >> m_value)) + m_home = nullptr; + } + + stream_from *m_home{nullptr}; + value_type m_value; +}; + + +// C++20: Replace with generator? +/// Iteration over a @ref stream_from. +template class stream_input_iteration +{ +public: + using iterator = stream_input_iterator; + explicit stream_input_iteration(stream_from &home) : m_home{home} {} + iterator begin() const { return iterator{m_home}; } + iterator end() const { return {}; } + +private: + stream_from &m_home; +}; + + +// C++20: Replace with generator? +/// Iteration over a @ref stream_from, deleting it once done. +template class owning_stream_input_iteration +{ +public: + using iterator = stream_input_iterator; + explicit owning_stream_input_iteration(std::unique_ptr &&home) : + m_home{std::move(home)} + {} + iterator begin() const { return iterator{*m_home.get()}; } + iterator end() const { return {}; } + +private: + std::unique_ptr m_home; +}; +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/wait.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/wait.hxx new file mode 100644 index 000000000..7a82e6553 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/internal/wait.hxx @@ -0,0 +1,18 @@ +#if !defined(PQXX_WAIT_HXX) +# define PQXX_WAIT_HXX + +namespace pqxx::internal +{ +/// Wait. +/** This is normally `std::this_thread::sleep_for()`. But MinGW's `thread` + * header doesn't work, so we must be careful about including it. + */ +void PQXX_LIBEXPORT wait_for(unsigned int microseconds); + + +/// Wait for a socket to be ready for reading/writing, or timeout. +PQXX_LIBEXPORT void wait_fd( + int fd, bool for_read, bool for_write, unsigned seconds = 1, + unsigned microseconds = 0); +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/isolation b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/isolation new file mode 100644 index 000000000..1b801329b --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/isolation @@ -0,0 +1,8 @@ +/** Transaction isolation levels. + * + * Policies and traits describing SQL transaction isolation levels + */ +// Actual definitions in .hxx file so editors and such recognize file type. 
+#include "pqxx/internal/header-pre.hxx" +#include "pqxx/isolation.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/isolation.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/isolation.hxx new file mode 100644 index 000000000..0698c6ab4 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/isolation.hxx @@ -0,0 +1,75 @@ +/* Definitions for transaction isolation levels, and such. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/isolation instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ISOLATION +#define PQXX_H_ISOLATION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/util.hxx" + +namespace pqxx +{ +/// Should a transaction be read-only, or read-write? +/** No, this is not an isolation level. So it really doesn't belong here. + * But it's not really worth a separate header. + */ +enum class write_policy +{ + read_only, + read_write +}; + + +/// Transaction isolation levels. +/** These are as defined in the SQL standard. But there are a few notes + * specific to PostgreSQL. + * + * First, postgres does not support "read uncommitted." The lowest level you + * can get is "read committed," which is better. PostgreSQL is built on the + * MVCC paradigm, which guarantees "read committed" isolation without any + * additional performance overhead, so there was no point in providing the + * lower level. + * + * Second, "repeatable read" also makes more isolation guarantees than the + * standard requires. According to the standard, this level prevents "dirty + * reads" and "nonrepeatable reads," but not "phantom reads." In postgres, + * it actually prevents all three. + * + * Third, "serializable" is only properly supported starting at postgres 9.1. + * If you request "serializable" isolation on an older backend, you will get + * the same isolation as in "repeatable read." It's better than the + * "repeatable read" defined in the SQL standard, but not a complete + * implementation of the standard's "serializable" isolation level. + * + * In general, a lower isolation level will allow more surprising interactions + * between ongoing transactions, but improve performance. A higher level + * gives you more protection from subtle concurrency bugs, but sometimes it + * may not be possible to complete your transaction without avoiding paradoxes + * in the data. In that case a transaction may fail, and the application will + * have to re-do the whole thing based on the latest state of the database. + * (If you want to retry your code in that situation, have a look at the + * transactor framework.) + * + * Study the levels and design your application with the right level in mind. + */ +enum isolation_level +{ + // PostgreSQL only has the better isolation levels. + // read_uncommitted, + + read_committed, + repeatable_read, + serializable, +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/largeobject b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/largeobject new file mode 100644 index 000000000..1f2f94790 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/largeobject @@ -0,0 +1,8 @@ +/** Large Objects interface. 
+ * + * Supports direct access to large objects, as well as through I/O streams + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/largeobject.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/largeobject.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/largeobject.hxx new file mode 100644 index 000000000..ebafc51d8 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/largeobject.hxx @@ -0,0 +1,735 @@ +/* Large Objects interface. Deprecated; use blob instead. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/largeobject instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_LARGEOBJECT +#define PQXX_H_LARGEOBJECT + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include + +#include "pqxx/dbtransaction.hxx" + + +namespace pqxx +{ +/// Identity of a large object. +/** @deprecated Use the @ref blob class instead. + * + * Encapsulates the identity of a large object. + * + * A largeobject must be accessed only from within a backend transaction, but + * the object's identity remains valid as long as the object exists. + */ +class PQXX_LIBEXPORT largeobject +{ +public: + using size_type = large_object_size_type; + + /// Refer to a nonexistent large object (similar to what a null pointer + /// does). + [[deprecated("Use blob instead.")]] largeobject() noexcept = default; + + /// Create new large object. + /** @param t Backend transaction in which the object is to be created. + */ + [[deprecated("Use blob instead.")]] explicit largeobject(dbtransaction &t); + + /// Wrap object with given oid. + /** Convert combination of a transaction and object identifier into a + * large object identity. Does not affect the database. + * @param o Object identifier for the given object. + */ + [[deprecated("Use blob instead.")]] explicit largeobject(oid o) noexcept : + m_id{o} + {} + + /// Import large object from a local file. + /** Creates a large object containing the data found in the given file. + * @param t Backend transaction in which the large object is to be created. + * @param file A filename on the client program's filesystem. + */ + [[deprecated("Use blob instead.")]] largeobject( + dbtransaction &t, std::string_view file); + + /// Take identity of an opened large object. + /** Copy identity of already opened large object. Note that this may be done + * as an implicit conversion. + * @param o Already opened large object to copy identity from. + */ + [[deprecated("Use blob instead.")]] largeobject( + largeobjectaccess const &o) noexcept; + + /// Object identifier. + /** The number returned by this function identifies the large object in the + * database we're connected to (or oid_none is returned if we refer to the + * null object). + */ + [[nodiscard]] oid id() const noexcept { return m_id; } + + /** + * @name Identity comparisons + * + * These operators compare the object identifiers of large objects. This has + * nothing to do with the objects' actual contents; use them only for keeping + * track of containers of references to large objects and such. 
+ */ + //@{ + /// Compare object identities + /** @warning Only valid between large objects in the same database. */ + [[nodiscard]] bool operator==(largeobject const &other) const + { + return m_id == other.m_id; + } + /// Compare object identities + /** @warning Only valid between large objects in the same database. */ + [[nodiscard]] bool operator!=(largeobject const &other) const + { + return m_id != other.m_id; + } + /// Compare object identities + /** @warning Only valid between large objects in the same database. */ + [[nodiscard]] bool operator<=(largeobject const &other) const + { + return m_id <= other.m_id; + } + /// Compare object identities + /** @warning Only valid between large objects in the same database. */ + [[nodiscard]] bool operator>=(largeobject const &other) const + { + return m_id >= other.m_id; + } + /// Compare object identities + /** @warning Only valid between large objects in the same database. */ + [[nodiscard]] bool operator<(largeobject const &other) const + { + return m_id < other.m_id; + } + /// Compare object identities + /** @warning Only valid between large objects in the same database. */ + [[nodiscard]] bool operator>(largeobject const &other) const + { + return m_id > other.m_id; + } + //@} + + /// Export large object's contents to a local file + /** Writes the data stored in the large object to the given file. + * @param t Transaction in which the object is to be accessed + * @param file A filename on the client's filesystem + */ + void to_file(dbtransaction &t, std::string_view file) const; + + /// Delete large object from database + /** Unlike its low-level equivalent cunlink, this will throw an exception if + * deletion fails. + * @param t Transaction in which the object is to be deleted + */ + void remove(dbtransaction &t) const; + +protected: + PQXX_PURE static internal::pq::PGconn * + raw_connection(dbtransaction const &T); + + PQXX_PRIVATE std::string reason(connection const &, int err) const; + +private: + oid m_id = oid_none; +}; + + +/// Accessor for large object's contents. +/** @deprecated Use the `blob` class instead. + */ +class PQXX_LIBEXPORT largeobjectaccess : private largeobject +{ +public: + using largeobject::size_type; + using off_type = size_type; + using pos_type = size_type; + + /// Open mode: `in`, `out` (can be combined using "bitwise or"). + /** According to the C++ standard, these should be in `std::ios_base`. We + * take them from derived class `std::ios` instead, which is easier on the + * eyes. + * + * Historical note: taking it from std::ios was originally a workaround for a + * problem with gcc 2.95. + */ + using openmode = std::ios::openmode; + + /// Default open mode: in, out, binary. + static constexpr auto default_mode{ + std::ios::in | std::ios::out | std::ios::binary}; + + /// Seek direction: `beg`, `cur`, `end`. + using seekdir = std::ios::seekdir; + + /// Create new large object and open it. + /** + * @param t Backend transaction in which the object is to be created. + * @param mode Access mode, defaults to ios_base::in | ios_base::out | + * ios_base::binary. + */ + [[deprecated("Use blob instead.")]] explicit largeobjectaccess( + dbtransaction &t, openmode mode = default_mode); + + /// Open large object with given oid. + /** Convert combination of a transaction and object identifier into a + * large object identity. Does not affect the database. + * @param t Transaction in which the object is to be accessed. + * @param o Object identifier for the given object. 
+ * @param mode Access mode, defaults to ios_base::in | ios_base::out | + * ios_base::binary. + */ + [[deprecated("Use blob instead.")]] largeobjectaccess( + dbtransaction &t, oid o, openmode mode = default_mode); + + /// Open given large object. + /** Open a large object with the given identity for reading and/or writing. + * @param t Transaction in which the object is to be accessed. + * @param o Identity for the large object to be accessed. + * @param mode Access mode, defaults to ios_base::in | ios_base::out | + * ios_base::binary. + */ + [[deprecated("Use blob instead.")]] largeobjectaccess( + dbtransaction &t, largeobject o, openmode mode = default_mode); + + /// Import large object from a local file and open it. + /** Creates a large object containing the data found in the given file. + * @param t Backend transaction in which the large object is to be created. + * @param file A filename on the client program's filesystem. + * @param mode Access mode, defaults to ios_base::in | ios_base::out. + */ + [[deprecated("Use blob instead.")]] largeobjectaccess( + dbtransaction &t, std::string_view file, openmode mode = default_mode); + + ~largeobjectaccess() noexcept { close(); } + + /// Object identifier. + /** The number returned by this function uniquely identifies the large object + * in the context of the database we're connected to. + */ + using largeobject::id; + + /// Export large object's contents to a local file. + /** Writes the data stored in the large object to the given file. + * @param file A filename on the client's filesystem. + */ + void to_file(std::string_view file) const + { + largeobject::to_file(m_trans, file); + } + + using largeobject::to_file; + + /** + * @name High-level access to object contents. + */ + //@{ + /// Write data to large object. + /** @warning The size of a write is currently limited to 2GB. + * + * @param buf Data to write. + * @param len Number of bytes from Buf to write. + */ + void write(char const buf[], std::size_t len); + + /// Write string to large object. + /** If not all bytes could be written, an exception is thrown. + * @param buf Data to write; no terminating zero is written. + */ + void write(std::string_view buf) { write(std::data(buf), std::size(buf)); } + + /// Read data from large object. + /** Throws an exception if an error occurs while reading. + * @param buf Location to store the read data in. + * @param len Number of bytes to try and read. + * @return Number of bytes read, which may be less than the number requested + * if the end of the large object is reached. + */ + size_type read(char buf[], std::size_t len); + + /// Seek in large object's data stream. + /** Throws an exception if an error occurs. + * @return The new position in the large object + */ + size_type seek(size_type dest, seekdir dir); + + /// Report current position in large object's data stream. + /** Throws an exception if an error occurs. + * @return The current position in the large object. + */ + [[nodiscard]] size_type tell() const; + //@} + + /** + * @name Low-level access to object contents. + * + * These functions provide a more "C-like" access interface, returning + * special values instead of throwing exceptions on error. These functions + * are generally best avoided in favour of the high-level access functions, + * which behave more like C++ functions should. + * + * Due to libpq's underlying API, some operations are limited to "int" + * sizes, typically 2 GB, even though a large object can grow much larger. 
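+ *
+ * Editorial sketch of the error-checking style these calls expect (the
+ * buffer size, variable names, and handle_error helper are hypothetical):
+ *
+ *     char buf[4096];
+ *     auto const got = lo_access.cread(buf, sizeof(buf));
+ *     if (got < 0)
+ *       handle_error(errno);  // no exception is thrown; errno holds the cause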
+ */ + //@{ + /// Seek in large object's data stream. + /** Does not throw exception in case of error; inspect return value and + * `errno` instead. + * @param dest Offset to go to. + * @param dir Origin to which dest is relative: ios_base::beg (from beginning + * of the object), ios_base::cur (from current access position), or + * ios_base;:end (from end of object). + * @return New position in large object, or -1 if an error occurred. + */ + pos_type cseek(off_type dest, seekdir dir) noexcept; + + /// Write to large object's data stream. + /** Does not throw exception in case of error; inspect return value and + * `errno` instead. + * @param buf Data to write. + * @param len Number of bytes to write. + * @return Number of bytes actually written, or -1 if an error occurred. + */ + off_type cwrite(char const buf[], std::size_t len) noexcept; + + /// Read from large object's data stream. + /** Does not throw exception in case of error; inspect return value and + * `errno` instead. + * @param buf Area where incoming bytes should be stored. + * @param len Number of bytes to read. + * @return Number of bytes actually read, or -1 if an error occurred.. + */ + off_type cread(char buf[], std::size_t len) noexcept; + + /// Report current position in large object's data stream. + /** Does not throw exception in case of error; inspect return value and + * `errno` instead. + * @return Current position in large object, of -1 if an error occurred. + */ + [[nodiscard]] pos_type ctell() const noexcept; + //@} + + /** + * @name Error/warning output + */ + //@{ + /// Issue message to transaction's notice processor. + void process_notice(zview) noexcept; + //@} + + using largeobject::remove; + + using largeobject::operator==; + using largeobject::operator!=; + using largeobject::operator<; + using largeobject::operator<=; + using largeobject::operator>; + using largeobject::operator>=; + + largeobjectaccess() = delete; + largeobjectaccess(largeobjectaccess const &) = delete; + largeobjectaccess operator=(largeobjectaccess const &) = delete; + +private: + PQXX_PRIVATE std::string reason(int err) const; + internal::pq::PGconn *raw_connection() const + { + return largeobject::raw_connection(m_trans); + } + + PQXX_PRIVATE void open(openmode mode); + void close() noexcept; + + dbtransaction &m_trans; + int m_fd = -1; +}; + + +/// Streambuf to use large objects in standard I/O streams. +/** @deprecated Access large objects directly using the @ref blob class. + * + * The standard streambuf classes provide uniform access to data storage such + * as files or string buffers, so they can be accessed using standard input or + * output streams. This streambuf implementation provided similar access to + * large objects, so they could be read and written using the same stream + * classes. + * + * This functionality was considered too fragile and complex, so it has been + * replaced with a single, much simpler class. + */ +template> +class largeobject_streambuf : public std::basic_streambuf +{ + using size_type = largeobject::size_type; + +public: + using char_type = CHAR; + using traits_type = TRAITS; + using int_type = typename traits_type::int_type; + using pos_type = typename traits_type::pos_type; + using off_type = typename traits_type::off_type; + using openmode = largeobjectaccess::openmode; + using seekdir = largeobjectaccess::seekdir; + + /// Default open mode: in, out, binary. 
+ static constexpr auto default_mode{ + std::ios::in | std::ios::out | std::ios::binary}; + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + [[deprecated("Use blob instead.")]] largeobject_streambuf( + dbtransaction &t, largeobject o, openmode mode = default_mode, + size_type buf_size = 512) : + m_bufsize{buf_size}, m_obj{t, o, mode}, m_g{nullptr}, m_p{nullptr} + { + initialize(mode); + } +#include "pqxx/internal/ignore-deprecated-post.hxx" + + [[deprecated("Use blob instead.")]] largeobject_streambuf( + dbtransaction &t, oid o, openmode mode = default_mode, + size_type buf_size = 512) : + m_bufsize{buf_size}, m_obj{t, o, mode}, m_g{nullptr}, m_p{nullptr} + { + initialize(mode); + } + + virtual ~largeobject_streambuf() noexcept + { + delete[] m_p; + delete[] m_g; + } + + /// For use by large object stream classes. + void process_notice(zview const &s) { m_obj.process_notice(s); } + +protected: + virtual int sync() override + { + // setg() sets eback, gptr, egptr. + this->setg(this->eback(), this->eback(), this->egptr()); + return overflow(eof()); + } + + virtual pos_type seekoff(off_type offset, seekdir dir, openmode) override + { + return adjust_eof(m_obj.cseek(largeobjectaccess::off_type(offset), dir)); + } + + virtual pos_type seekpos(pos_type pos, openmode) override + { + largeobjectaccess::pos_type const newpos{ + m_obj.cseek(largeobjectaccess::off_type(pos), std::ios::beg)}; + return adjust_eof(newpos); + } + + virtual int_type overflow(int_type ch) override + { + auto *const pp{this->pptr()}; + if (pp == nullptr) + return eof(); + auto *const pb{this->pbase()}; + int_type res{0}; + + if (pp > pb) + { + auto const write_sz{pp - pb}; + auto const written_sz{ + m_obj.cwrite(pb, static_cast(pp - pb))}; + if (internal::cmp_less_equal(written_sz, 0)) + throw internal_error{ + "pqxx::largeobject: write failed " + "(is transaction still valid on write or flush?), " + "libpq reports error"}; + else if (write_sz != written_sz) + throw internal_error{ + "pqxx::largeobject: write failed " + "(is transaction still valid on write or flush?), " + + std::to_string(written_sz) + "/" + std::to_string(write_sz) + + " bytes written"}; + auto const out{adjust_eof(written_sz)}; + + if constexpr (std::is_arithmetic_v) + res = check_cast(out, "largeobject position"sv); + else + res = int_type(out); + } + this->setp(m_p, m_p + m_bufsize); + + // Write that one more character, if it's there. + if (ch != eof()) + { + *this->pptr() = static_cast(ch); + this->pbump(1); + } + return res; + } + + virtual int_type overflow() { return overflow(eof()); } + + virtual int_type underflow() override + { + if (this->gptr() == nullptr) + return eof(); + auto *const eb{this->eback()}; + auto const res{adjust_eof( + m_obj.cread(this->eback(), static_cast(m_bufsize)))}; + this->setg( + eb, eb, eb + (res == eof() ? 0 : static_cast(res))); + return (res == eof() or res == 0) ? eof() : traits_type::to_int_type(*eb); + } + +private: + /// Shortcut for traits_type::eof(). + static int_type eof() { return traits_type::eof(); } + + /// Helper: change error position of -1 to EOF (probably a no-op). + template static std::streampos adjust_eof(INTYPE pos) + { + bool const at_eof{pos == -1}; + if constexpr (std::is_arithmetic_v) + { + return check_cast( + (at_eof ? eof() : pos), "large object seek"sv); + } + else + { + return std::streampos(at_eof ? 
eof() : pos); + } + } + + void initialize(openmode mode) + { + if ((mode & std::ios::in) != 0) + { + m_g = new char_type[unsigned(m_bufsize)]; + this->setg(m_g, m_g, m_g); + } + if ((mode & std::ios::out) != 0) + { + m_p = new char_type[unsigned(m_bufsize)]; + this->setp(m_p, m_p + m_bufsize); + } + } + + size_type const m_bufsize; + largeobjectaccess m_obj; + + /// Get & put buffers. + char_type *m_g, *m_p; +}; + + +/// Input stream that gets its data from a large object. +/** @deprecated Access large objects directly using the @ref blob class. + * + * This class worked like any other istream, but to read data from a large + * object. It supported all formatting and streaming operations of + * `std::istream`. + * + * This functionality was considered too fragile and complex, so it has been + * replaced with a single, much simpler class. + */ +template> +class basic_ilostream : public std::basic_istream +{ + using super = std::basic_istream; + +public: + using char_type = CHAR; + using traits_type = TRAITS; + using int_type = typename traits_type::int_type; + using pos_type = typename traits_type::pos_type; + using off_type = typename traits_type::off_type; + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + /// Create a basic_ilostream. + /** + * @param t Transaction in which this stream is to exist. + * @param o Large object to access. + * @param buf_size Size of buffer to use internally (optional). + */ + [[deprecated("Use blob instead.")]] basic_ilostream( + dbtransaction &t, largeobject o, largeobject::size_type buf_size = 512) : + super{nullptr}, + m_buf{t, o, std::ios::in | std::ios::binary, buf_size} + { + super::init(&m_buf); + } +#include "pqxx/internal/ignore-deprecated-post.hxx" + + /// Create a basic_ilostream. + /** + * @param t Transaction in which this stream is to exist. + * @param o Identifier of a large object to access. + * @param buf_size Size of buffer to use internally (optional). + */ + [[deprecated("Use blob instead.")]] basic_ilostream( + dbtransaction &t, oid o, largeobject::size_type buf_size = 512) : + super{nullptr}, + m_buf{t, o, std::ios::in | std::ios::binary, buf_size} + { + super::init(&m_buf); + } + +private: + largeobject_streambuf m_buf; +}; + +using ilostream = basic_ilostream; + + +/// Output stream that writes data back to a large object. +/** @deprecated Access large objects directly using the @ref blob class. + * + * This worked like any other ostream, but to write data to a large object. + * It supported all formatting and streaming operations of `std::ostream`. + * + * This functionality was considered too fragile and complex, so it has been + * replaced with a single, much simpler class. + */ +template> +class basic_olostream : public std::basic_ostream +{ + using super = std::basic_ostream; + +public: + using char_type = CHAR; + using traits_type = TRAITS; + using int_type = typename traits_type::int_type; + using pos_type = typename traits_type::pos_type; + using off_type = typename traits_type::off_type; + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + /// Create a basic_olostream. + /** + * @param t transaction in which this stream is to exist. + * @param o a large object to access. + * @param buf_size size of buffer to use internally (optional). 
+ */ + [[deprecated("Use blob instead.")]] basic_olostream( + dbtransaction &t, largeobject o, largeobject::size_type buf_size = 512) : + super{nullptr}, + m_buf{t, o, std::ios::out | std::ios::binary, buf_size} + { + super::init(&m_buf); + } +#include "pqxx/internal/ignore-deprecated-post.hxx" + + /// Create a basic_olostream. + /** + * @param t transaction in which this stream is to exist. + * @param o a large object to access. + * @param buf_size size of buffer to use internally (optional). + */ + [[deprecated("Use blob instead.")]] basic_olostream( + dbtransaction &t, oid o, largeobject::size_type buf_size = 512) : + super{nullptr}, + m_buf{t, o, std::ios::out | std::ios::binary, buf_size} + { + super::init(&m_buf); + } + + ~basic_olostream() + { + try + { + m_buf.pubsync(); + m_buf.pubsync(); + } + catch (std::exception const &e) + { + m_buf.process_notice(e.what()); + } + } + +private: + largeobject_streambuf m_buf; +}; + +using olostream = basic_olostream; + + +/// Stream that reads and writes a large object. +/** @deprecated Access large objects directly using the @ref blob class. + * + * This worked like a std::iostream, but to read data from, or write data to, a + * large object. It supported all formatting and streaming operations of + * `std::iostream`. + * + * This functionality was considered too fragile and complex, so it has been + * replaced with a single, much simpler class. + */ +template> +class basic_lostream : public std::basic_iostream +{ + using super = std::basic_iostream; + +public: + using char_type = CHAR; + using traits_type = TRAITS; + using int_type = typename traits_type::int_type; + using pos_type = typename traits_type::pos_type; + using off_type = typename traits_type::off_type; + + /// Create a basic_lostream. + /** + * @param t Transaction in which this stream is to exist. + * @param o Large object to access. + * @param buf_size Size of buffer to use internally (optional). + */ + [[deprecated("Use blob instead.")]] basic_lostream( + dbtransaction &t, largeobject o, largeobject::size_type buf_size = 512) : + super{nullptr}, + m_buf{ + t, o, std::ios::in | std::ios::out | std::ios::binary, buf_size} + { + super::init(&m_buf); + } + + /// Create a basic_lostream. + /** + * @param t Transaction in which this stream is to exist. + * @param o Large object to access. + * @param buf_size Size of buffer to use internally (optional). + */ + [[deprecated("Use blob instead.")]] basic_lostream( + dbtransaction &t, oid o, largeobject::size_type buf_size = 512) : + super{nullptr}, + m_buf{ + t, o, std::ios::in | std::ios::out | std::ios::binary, buf_size} + { + super::init(&m_buf); + } + + ~basic_lostream() + { + try + { + m_buf.pubsync(); + m_buf.pubsync(); + } + catch (std::exception const &e) + { + m_buf.process_notice(e.what()); + } + } + +private: + largeobject_streambuf m_buf; +}; + +using lostream = basic_lostream; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/nontransaction b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/nontransaction new file mode 100644 index 000000000..bb5b79724 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/nontransaction @@ -0,0 +1,8 @@ +/** pqxx::nontransaction class. + * + * pqxx::nontransaction provides nontransactional database access. + */ +// Actual definitions in .hxx file so editors and such recognize file type. 
+#include "pqxx/internal/header-pre.hxx" +#include "pqxx/nontransaction.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/nontransaction.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/nontransaction.hxx new file mode 100644 index 000000000..c50715594 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/nontransaction.hxx @@ -0,0 +1,76 @@ +/* Definition of the pqxx::nontransaction class. + * + * pqxx::nontransaction provides nontransactional database access + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/nontransaction instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_NONTRANSACTION +#define PQXX_H_NONTRANSACTION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/connection.hxx" +#include "pqxx/result.hxx" +#include "pqxx/transaction.hxx" + +namespace pqxx +{ +using namespace std::literals; + +/// Simple "transaction" class offering no transactional integrity. +/** + * @ingroup transactions + * + * nontransaction, like transaction or any other transaction_base-derived + * class, provides access to a database through a connection. Unlike its + * siblings, however, nontransaction does not maintain any kind of + * transactional integrity. This may be useful eg. for read-only access to the + * database that does not require a consistent, atomic view on its data; or for + * operations that are not allowed within a backend transaction, such as + * creating tables. + * + * For queries that update the database, however, a real transaction is likely + * to be faster unless the transaction consists of only a single record update. + * + * Also, you can keep a nontransaction open for as long as you like. Actual + * back-end transactions are limited in lifespan, and will sometimes fail just + * because they took too long to execute or were left idle for too long. This + * will not happen with a nontransaction (although the connection may still + * time out, e.g. when the network is unavailable for a very long time). + * + * Any query executed in a nontransaction is committed immediately, and neither + * commit() nor abort() has any effect. + * + * Database features that require a backend transaction, such as cursors or + * large objects, will not work in a nontransaction. + */ +class PQXX_LIBEXPORT nontransaction final : public transaction_base +{ +public: + /// Constructor. + /** Create a "dummy" transaction. + * @param c Connection in which this "transaction" will operate. + * @param tname Optional tname for the transaction, beginning with a letter + * and containing only letters and digits. 
+ */ + nontransaction(connection &c, std::string_view tname = ""sv) : + transaction_base{c, tname, std::shared_ptr{}} + { + register_transaction(); + } + + virtual ~nontransaction() override { close(); } + +private: + virtual void do_commit() override {} +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/notification b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/notification new file mode 100644 index 000000000..a0bd1c73e --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/notification @@ -0,0 +1,8 @@ +/** pqxx::notification_receiver functor interface. + * + * pqxx::notification_receiver handles incoming notifications. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/notification.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/notification.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/notification.hxx new file mode 100644 index 000000000..b59b8567a --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/notification.hxx @@ -0,0 +1,94 @@ +/* Definition of the pqxx::notification_receiver functor interface. + * + * pqxx::notification_receiver handles incoming notifications. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/notification instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_NOTIFICATION +#define PQXX_H_NOTIFICATION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include + +#include "pqxx/types.hxx" + + +namespace pqxx +{ +/// "Observer" base class for notifications. +/** @addtogroup notification Notifications and Receivers + * + * To listen on a notification issued using the NOTIFY command, derive your own + * class from notification_receiver and define its function-call operator to + * perform whatever action you wish to take when the given notification + * arrives. Then create an object of that class and pass it to your connection. + * DO NOT use raw SQL to listen for notifications, or your attempts to listen + * won't be resumed when a connection fails--and you'll have no way to notice. + * + * Notifications never arrive inside a transaction, not even in a + * nontransaction. Therefore, you are free to open a transaction of your own + * inside your receiver's function invocation operator. + * + * Notifications you are listening for may arrive anywhere within libpqxx code, + * but be aware that **PostgreSQL defers notifications occurring inside + * transactions.** (This was done for excellent reasons; just think about what + * happens if the transaction where you happen to handle an incoming + * notification is later rolled back for other reasons). So if you're keeping + * a transaction open, don't expect any of your receivers on the same + * connection to be notified. + * + * (For very similar reasons, outgoing notifications are also not sent until + * the transaction that sends them commits.) + * + * Multiple receivers on the same connection may listen on a notification of + * the same name. An incoming notification is processed by invoking all + * receivers (zero or more) of the same name. 
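+ *
+ * A minimal illustrative sketch (the class and channel names are
+ * hypothetical):
+ *
+ *     class my_receiver final : public pqxx::notification_receiver
+ *     {
+ *     public:
+ *       my_receiver(pqxx::connection &cx) :
+ *         pqxx::notification_receiver{cx, "jobs"} {}
+ *
+ *       void operator()(std::string const &payload, int backend_pid) override
+ *       {
+ *         // React to "NOTIFY jobs, 'payload'" here.
+ *         last_payload = payload;
+ *         last_pid = backend_pid;
+ *       }
+ *
+ *       std::string last_payload;
+ *       int last_pid = 0;
+ *     };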
+ */ +class PQXX_LIBEXPORT PQXX_NOVTABLE notification_receiver +{ +public: + /// Register the receiver with a connection. + /** + * @param c Connnection to operate on. + * @param channel Name of the notification to listen for. + */ + notification_receiver(connection &c, std::string_view channel); + /// Register the receiver with a connection. + notification_receiver(notification_receiver const &) = delete; + /// Register the receiver with a connection. + notification_receiver &operator=(notification_receiver const &) = delete; + /// Deregister the receiver. + virtual ~notification_receiver(); + + /// The channel that this receiver listens on. + [[nodiscard]] std::string const &channel() const & { return m_channel; } + + // TODO: Change API to take payload as zview instead of string ref. + /// Overridable: action to invoke when notification arrives. + /** + * @param payload An optional string that may have been passed to the NOTIFY + * command. + * @param backend_pid Process ID of the database backend process that served + * our connection when the notification arrived. The actual process ID + * behind the connection may have changed by the time this method is called. + */ + virtual void operator()(std::string const &payload, int backend_pid) = 0; + +protected: + connection &conn() const noexcept { return m_conn; } + +private: + connection &m_conn; + std::string m_channel; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/params b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/params new file mode 100644 index 000000000..4098782aa --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/params @@ -0,0 +1,8 @@ +/** Helper classes for passing statement parameters. + * + * Use these for prepared statements and parameterised statements. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/params.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/params.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/params.hxx new file mode 100644 index 000000000..2d29cdfed --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/params.hxx @@ -0,0 +1,383 @@ +/* Helpers for prepared statements and parameterised statements. + * + * See the connection class for more about such statements. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_PARAMS +#define PQXX_H_PARAMS + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include + +#include "pqxx/internal/concat.hxx" +#include "pqxx/internal/statement_parameters.hxx" +#include "pqxx/types.hxx" + + +/// @deprecated The new @ref params class replaces all of this. +namespace pqxx::prepare +{ +/// Pass a number of statement parameters only known at runtime. +/** @deprecated Use @ref params instead. + * + * When you call any of the `exec_params` functions, the number of arguments + * is normally known at compile time. This helper function supports the case + * where it is not. + * + * Use this function to pass a variable number of parameters, based on a + * sequence ranging from `begin` to `end` exclusively. 
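A sketch of the receiver pattern that the `notification_receiver` interface above is designed for: derive a class, implement the function-call operator, and register an instance on a connection. The channel name `jobs` is arbitrary, and `connection::await_notification()` is assumed from the wider libpqxx connection API; neither appears in this header.

```cxx
#include <iostream>
#include <pqxx/pqxx>

// React to "NOTIFY jobs, '...'" issued by some other session.
class job_listener final : public pqxx::notification_receiver
{
public:
  explicit job_listener(pqxx::connection &cx) :
    pqxx::notification_receiver{cx, "jobs"}
  {}

  void operator()(std::string const &payload, int backend_pid) override
  {
    std::cout << "notification from backend " << backend_pid << ": " << payload
              << '\n';
  }
};

int main()
{
  pqxx::connection cx;        // Assumes a reachable database.
  job_listener listener{cx};  // Registers on construction, deregisters on destruction.

  // Block until at least one notification has arrived and been dispatched
  // (assumed connection API, not part of this header).
  cx.await_notification();
}
```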
+ * + * The technique combines with the regular static parameters. You can use it + * to insert dynamic parameter lists in any place, or places, among the call's + * parameters. You can even insert multiple dynamic sequences. + * + * @param begin A pointer or iterator for iterating parameters. + * @param end A pointer or iterator for iterating parameters. + * @return An object representing the parameters. + */ +template +[[deprecated("Use the params class instead.")]] constexpr inline auto +make_dynamic_params(IT begin, IT end) +{ + return pqxx::internal::dynamic_params(begin, end); +} + + +/// Pass a number of statement parameters only known at runtime. +/** @deprecated Use @ref params instead. + * + * When you call any of the `exec_params` functions, the number of arguments + * is normally known at compile time. This helper function supports the case + * where it is not. + * + * Use this function to pass a variable number of parameters, based on a + * container of parameter values. + * + * The technique combines with the regular static parameters. You can use it + * to insert dynamic parameter lists in any place, or places, among the call's + * parameters. You can even insert multiple dynamic containers. + * + * @param container A container of parameter values. + * @return An object representing the parameters. + */ +template +[[deprecated("Use the params class instead.")]] constexpr inline auto +make_dynamic_params(C const &container) +{ + using IT = typename C::const_iterator; +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return pqxx::internal::dynamic_params{container}; +#include "pqxx/internal/ignore-deprecated-post.hxx" +} + + +/// Pass a number of statement parameters only known at runtime. +/** @deprecated Use @ref params instead. + * + * When you call any of the `exec_params` functions, the number of arguments + * is normally known at compile time. This helper function supports the case + * where it is not. + * + * Use this function to pass a variable number of parameters, based on a + * container of parameter values. + * + * The technique combines with the regular static parameters. You can use it + * to insert dynamic parameter lists in any place, or places, among the call's + * parameters. You can even insert multiple dynamic containers. + * + * @param container A container of parameter values. + * @param accessor For each parameter `p`, pass `accessor(p)`. + * @return An object representing the parameters. + */ +template +[[deprecated("Use the params class instead.")]] constexpr inline auto +make_dynamic_params(C &container, ACCESSOR accessor) +{ + using IT = decltype(std::begin(container)); +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return pqxx::internal::dynamic_params{container, accessor}; +#include "pqxx/internal/ignore-deprecated-post.hxx" +} +} // namespace pqxx::prepare + + +namespace pqxx +{ +/// Generate parameter placeholders for use in an SQL statement. +/** When you want to pass parameters to a prepared statement or a parameterised + * statement, you insert placeholders into the SQL. During invocation, the + * database replaces those with the respective parameter values you passed. + * + * The placeholders look like `$1` (for the first parameter value), `$2` (for + * the second), and so on. You can just write those directly in your + * statement. But for those rare cases where it becomes difficult to track + * which number a placeholder should have, you can use a `placeholders` object + * to count and generate them in order. 
+ */ +template class placeholders +{ +public: + /// Maximum number of parameters we support. + static inline constexpr unsigned int max_params{ + (std::numeric_limits::max)()}; + + placeholders() + { + static constexpr auto initial{"$1\0"sv}; + initial.copy(std::data(m_buf), std::size(initial)); + } + + /// Read an ephemeral version of the current placeholder text. + /** @warning Changing the current placeholder number will overwrite this. + * Use the view immediately, or lose it. + */ + constexpr zview view() const &noexcept + { + return zview{std::data(m_buf), m_len}; + } + + /// Read the current placeholder text, as a `std::string`. + /** This will be slightly slower than converting to a `zview`. With most + * C++ implementations however, until you get into ridiculous numbers of + * parameters, the string will benefit from the Short String Optimization, or + * SSO. + */ + std::string get() const { return std::string(std::data(m_buf), m_len); } + + /// Move on to the next parameter. + void next() & + { + if (m_current >= max_params) + throw range_error{pqxx::internal::concat( + "Too many parameters in one statement: limit is ", max_params, ".")}; + ++m_current; + if (m_current % 10 == 0) + { + // Carry the 1. Don't get too clever for this relatively rare + // case, just rewrite the entire number. Leave the $ in place + // though. + char *const data{std::data(m_buf)}; + char *const end{string_traits::into_buf( + data + 1, data + std::size(m_buf), m_current)}; + // (Subtract because we don't include the trailing zero.) + m_len = check_cast(end - data, "placeholders counter") - 1; + } + else + { + PQXX_LIKELY + // Shortcut for the common case: just increment that last digit. + ++m_buf[m_len - 1]; + } + } + + /// Return the current placeholder number. The initial placeholder is 1. + COUNTER count() const noexcept { return m_current; } + +private: + /// Current placeholder number. Starts at 1. + COUNTER m_current = 1; + + /// Length of the current placeholder string, not including trailing zero. + COUNTER m_len = 2; + + /// Text buffer where we render the placeholders, with a trailing zero. + /** We keep reusing this for every subsequent placeholder, just because we + * don't like string allocations. + * + * Maximum length is the maximum base-10 digits that COUNTER can fully + * represent, plus 1 more for the extra digit that it can only partially + * fill up, plus room for the dollar sign and the trailing zero. + */ + std::array::digits10 + 3> m_buf; +}; + + +/// Build a parameter list for a parameterised or prepared statement. +/** When calling a parameterised statement or a prepared statement, you can + * pass parameters into the statement directly in the invocation, as + * additional arguments to `exec_prepared` or `exec_params`. But in + * complex cases, sometimes that's just not convenient. + * + * In those situations, you can create a `params` and append your parameters + * into that, one by one. Then you pass the `params` to `exec_prepared` or + * `exec_params`. + * + * Combinations also work: if you have a `params` containing a string + * parameter, and you call `exec_params` with an `int` argument followed by + * your `params`, you'll be passing the `int` as the first parameter and + * the string as the second. You can even insert a `params` in a `params`, + * or pass two `params` objects to a statement. + */ +class PQXX_LIBEXPORT params +{ +public: + params() = default; + + /// Pre-populate a `params` with `args`. Feel free to add more later. 
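A short sketch of the `placeholders` counter above, used to build statement text with `$1`, `$2`, ... markers when the number of parameters is only known at run time. The counter type is spelled out explicitly because the template's default argument is not visible in this excerpt; the table and column names are arbitrary.

```cxx
#include <cstddef>
#include <string>
#include <vector>
#include <pqxx/pqxx>

// Build e.g. "INSERT INTO t (a, b, c) VALUES ($1, $2, $3)" for any column list.
std::string make_insert(std::vector<std::string> const &columns)
{
  std::string sql{"INSERT INTO t ("};
  std::string values{") VALUES ("};
  pqxx::placeholders<unsigned int> name;   // Starts out as "$1".
  for (std::size_t i = 0; i < std::size(columns); ++i)
  {
    if (i > 0) { sql += ", "; values += ", "; }
    sql += columns[i];
    values += name.get();                  // Current placeholder text, e.g. "$2".
    name.next();                           // Advance to the next number.
  }
  return sql + values + ")";
}
```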
+ template constexpr params(Args &&...args) + { + reserve(sizeof...(args)); + append_pack(std::forward(args)...); + } + + /// Pre-allocate room for at least `n` parameters. + /** This is not needed, but it may improve efficiency. + * + * Reserve space if you're going to add parameters individually, and you've + * got some idea of how many there are going to be. It may save some + * memory re-allocations. + */ + void reserve(std::size_t n) &; + + // C++20: constexpr. + /// Get the number of parameters currently in this `params`. + [[nodiscard]] auto size() const noexcept { return m_params.size(); } + + // C++20: Use the vector's ssize() directly and go noexcept+constexpr. + /// Get the number of parameters (signed). + /** Unlike `size()`, this is not yet `noexcept`. That's because C++17's + * `std::vector` does not have a `ssize()` member function. These member + * functions are `noexcept`, but `std::size()` and `std::ssize()` are + * not. + */ + [[nodiscard]] auto ssize() const { return pqxx::internal::ssize(m_params); } + + /// Append a null value. + void append() &; + + /// Append a non-null zview parameter. + /** The underlying data must stay valid for as long as the `params` + * remains active. + */ + void append(zview) &; + + /// Append a non-null string parameter. + /** Copies the underlying data into internal storage. For best efficiency, + * use the @ref zview variant if you can, or `std::move()` + */ + void append(std::string const &) &; + + /// Append a non-null string parameter. + void append(std::string &&) &; + + /// Append a non-null binary parameter. + /** The underlying data must stay valid for as long as the `params` + * remains active. + */ + void append(std::basic_string_view) &; + + /// Append a non-null binary parameter. + /** Copies the underlying data into internal storage. For best efficiency, + * use the `std::basic_string_view` variant if you can, or + * `std::move()`. + */ + void append(std::basic_string const &) &; + +#if defined(PQXX_HAVE_CONCEPTS) + /// Append a non-null binary parameter. + /** The `data` object must stay in place and unchanged, for as long as the + * `params` remains active. + */ + template void append(DATA const &data) & + { + append( + std::basic_string_view{std::data(data), std::size(data)}); + } +#endif // PQXX_HAVE_CONCEPTS + + /// Append a non-null binary parameter. + void append(std::basic_string &&) &; + + /// @deprecated Append binarystring parameter. + /** The binarystring must stay valid for as long as the `params` remains + * active. + */ + void append(binarystring const &value) &; + + /// Append all parameters from value. + template + void append(pqxx::internal::dynamic_params const &value) & + { + for (auto ¶m : value) append(value.access(param)); + } + + void append(params const &value) &; + + void append(params &&value) &; + + /// Append a non-null parameter, converting it to its string + /// representation. + template void append(TYPE const &value) & + { + // TODO: Pool storage for multiple string conversions in one buffer? + if constexpr (nullness>::always_null) + { + ignore_unused(value); + m_params.emplace_back(); + } + else if (is_null(value)) + { + m_params.emplace_back(); + } + else + { + m_params.emplace_back(entry{to_string(value)}); + } + } + + /// Append all elements of `range` as parameters. 
+ template void append_multi(RANGE const &range) & + { +#if defined(PQXX_HAVE_CONCEPTS) + if constexpr (std::ranges::sized_range) + reserve(std::size(*this) + std::size(range)); +#endif + for (auto &value : range) append(value); + } + + /// For internal use: Generate a `params` object for use in calls. + /** The params object encapsulates the pointers which we will need to pass + * to libpq when calling a parameterised or prepared statement. + * + * The pointers in the params will refer to storage owned by either the + * params object, or the caller. This is not a problem because a + * `c_params` object is guaranteed to live only while the call is going on. + * As soon as we climb back out of that call tree, we're done with that + * data. + */ + pqxx::internal::c_params make_c_params() const; + +private: + /// Recursively append a pack of params. + template + void append_pack(Arg &&arg, More &&...args) + { + this->append(std::forward(arg)); + // Recurse for remaining args. + append_pack(std::forward(args)...); + } + + /// Terminating case: append an empty parameter pack. It's not hard BTW. + constexpr void append_pack() noexcept {} + + // The way we store a parameter depends on whether it's binary or text + // (most types are text), and whether we're responsible for storing the + // contents. + using entry = std::variant< + std::nullptr_t, zview, std::string, std::basic_string_view, + std::basic_string>; + std::vector m_params; + + static constexpr std::string_view s_overflow{ + "Statement parameter length overflow."sv}; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/pipeline b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/pipeline new file mode 100644 index 000000000..bf828843a --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/pipeline @@ -0,0 +1,8 @@ +/** pqxx::pipeline class. + * + * Throughput-optimized query interface. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/pipeline.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/pipeline.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/pipeline.hxx new file mode 100644 index 000000000..049dcdd58 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/pipeline.hxx @@ -0,0 +1,237 @@ +/* Definition of the pqxx::pipeline class. + * + * Throughput-optimized mechanism for executing queries. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/pipeline instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_PIPELINE +#define PQXX_H_PIPELINE + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include + +#include "pqxx/transaction_base.hxx" + + +namespace pqxx +{ +// TODO: libpq 14 introduced a similar "pipeline mode." Can we use that? + +/// Processes several queries in FIFO manner, optimized for high throughput. +/** Use a pipeline if you want to keep doing useful work while your queries are + * executing. Result retrieval is decoupled from execution request; queries + * "go in at the front" and results "come out the back." 
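A usage sketch for the `params` class completed above: parameters are appended one at a time (including an SQL NULL) and then handed to a parameterised statement. The `employee` table and the `pqxx::work` / `exec_params0()` calls come from the wider libpqxx transaction API and are assumptions, not part of this header.

```cxx
#include <string>
#include <pqxx/pqxx>

int main()
{
  pqxx::connection cx;            // Assumes a database with an "employee" table.
  pqxx::work tx{cx};

  pqxx::params p;
  p.reserve(3);
  p.append(std::string{"Ada"});   // $1: text, copied into the params object.
  p.append(100000);               // $2: converted to its string representation.
  p.append();                     // $3: SQL NULL.

  tx.exec_params0(
    "INSERT INTO employee (name, salary, note) VALUES ($1, $2, $3)", p);
  tx.commit();
}
```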
+ * + * Actually, you can retrieve the results in any order if you want, but it may + * lead to surprising "time travel" effects if any of the queries fails. In + * particular, syntax errors in the queries can confuse things and show up too + * early in the stream of results. + * + * Generally, if any of the queries fails, it will throw an exception at the + * point where you request its result. But it may happen earlier, especially + * if you request results out of chronological order. + * + * @warning While a pipeline is active, you cannot execute queries, open + * streams, etc. on the same transaction. A transaction can have at most one + * object of a type derived from @ref pqxx::transaction_focus active on it at a + * time. + */ +class PQXX_LIBEXPORT pipeline : public transaction_focus +{ +public: + /// Identifying numbers for queries. + using query_id = long; + + pipeline(pipeline const &) = delete; + pipeline &operator=(pipeline const &) = delete; + + /// Start a pipeline. + explicit pipeline(transaction_base &t) : transaction_focus{t, s_classname} + { + init(); + } + /// Start a pipeline. Assign it a name, for more helpful error messages. + pipeline(transaction_base &t, std::string_view tname) : + transaction_focus{t, s_classname, tname} + { + init(); + } + + /// Close the pipeline. + ~pipeline() noexcept; + + /// Add query to the pipeline. + /** Queries accumulate in the pipeline, which sends them to the backend in a + * batch separated by semicolons. The queries you insert must not use this + * trick themselves, or the pipeline will get hopelessly confused! + * + * @return Identifier for this query, unique only within this pipeline. + */ + query_id insert(std::string_view) &; + + /// Wait for all ongoing or pending operations to complete, and detach. + /** Detaches from the transaction when done. + * + * This does not produce the queries' results, so it may not report any + * errors which may have occurred in their execution. To be sure that your + * statements succeeded, call @ref retrieve until the pipeline is empty. + */ + void complete(); + + /// Forget all ongoing or pending operations and retrieved results. + /** Queries already sent to the backend may still be completed, depending + * on implementation and timing. + * + * Any error state (unless caused by an internal error) will also be cleared. + * This is mostly useful in a nontransaction, since a backend transaction is + * aborted automatically when an error occurs. + * + * Detaches from the transaction when done. + */ + void flush(); + + /// Cancel ongoing query, if any. + /** May cancel any or all of the queries that have been inserted at this + * point whose results have not yet been retrieved. If the pipeline lives in + * a backend transaction, that transaction may be left in a nonfunctional + * state in which it can only be aborted. + * + * Therefore, either use this function in a nontransaction, or abort the + * transaction after calling it. + */ + void cancel(); + + /// Is result for given query available? + [[nodiscard]] bool is_finished(query_id) const; + + /// Retrieve result for given query. + /** If the query failed for whatever reason, this will throw an exception. + * The function will block if the query has not finished yet. + * @warning If results are retrieved out-of-order, i.e. in a different order + * than the one in which their queries were inserted, errors may "propagate" + * to subsequent queries. 
+ */ + result retrieve(query_id qid) + { + return retrieve(m_queries.find(qid)).second; + } + + /// Retrieve oldest unretrieved result (possibly wait for one). + /** @return The query's identifier and its result set. */ + std::pair retrieve(); + + [[nodiscard]] bool empty() const noexcept { return std::empty(m_queries); } + + /// Set maximum number of queries to retain before issuing them to the + /// backend. + /** The pipeline will perform better if multiple queries are issued at once, + * but retaining queries until the results are needed (as opposed to issuing + * them to the backend immediately) may negate any performance benefits the + * pipeline can offer. + * + * Recommended practice is to set this value no higher than the number of + * queries you intend to insert at a time. + * @param retain_max A nonnegative "retention capacity;" passing zero will + * cause queries to be issued immediately + * @return Old retention capacity + */ + int retain(int retain_max = 2) &; + + + /// Resume retained query emission. Harmless when not needed. + void resume() &; + +private: + struct PQXX_PRIVATE Query + { + explicit Query(std::string_view q) : + query{std::make_shared(q)} + {} + + std::shared_ptr query; + result res; + }; + + using QueryMap = std::map; + + void init(); + void attach(); + void detach(); + + /// Upper bound to query id's. + static constexpr query_id qid_limit() noexcept + { + // Parenthesise this to work around an eternal Visual C++ problem: + // Without the extra parentheses, unless NOMINMAX is defined, the + // preprocessor will mistake this "max" for its annoying built-in macro + // of the same name. + return (std::numeric_limits::max)(); + } + + /// Create new query_id. + PQXX_PRIVATE query_id generate_id(); + + bool have_pending() const noexcept + { + return m_issuedrange.second != m_issuedrange.first; + } + + PQXX_PRIVATE void issue(); + + /// The given query failed; never issue anything beyond that. + void set_error_at(query_id qid) noexcept + { + PQXX_UNLIKELY + if (qid < m_error) + m_error = qid; + } + + /// Throw pqxx::internal_error. + [[noreturn]] PQXX_PRIVATE void internal_error(std::string const &err); + + PQXX_PRIVATE bool obtain_result(bool expect_none = false); + + PQXX_PRIVATE void obtain_dummy(); + PQXX_PRIVATE void get_further_available_results(); + PQXX_PRIVATE void check_end_results(); + + /// Receive any results that happen to be available; it's not urgent. + PQXX_PRIVATE void receive_if_available(); + + /// Receive results, up to stop if possible. + PQXX_PRIVATE void receive(pipeline::QueryMap::const_iterator stop); + std::pair retrieve(pipeline::QueryMap::iterator); + + QueryMap m_queries; + std::pair m_issuedrange; + int m_retain = 0; + int m_num_waiting = 0; + query_id m_q_id = 0; + + /// Is there a "dummy query" pending? + bool m_dummy_pending = false; + + /// Point at which an error occurred; no results beyond it will be available + query_id m_error = qid_limit(); + + /// Encoding. + /** We store this in the object to avoid the risk of exceptions at awkward + * moments. + */ + internal::encoding_group m_encoding; + + static constexpr std::string_view s_classname{"pipeline"}; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/pqxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/pqxx new file mode 100644 index 000000000..17a8eaa9c --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/pqxx @@ -0,0 +1,28 @@ +/// Convenience header: include all libpqxx definitions. 
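A sketch of the pipeline workflow described above: several queries go in up front, and their results are retrieved by query id afterwards. It assumes a reachable database; `pqxx::work` and `field::as<>()` come from other libpqxx headers.

```cxx
#include <iostream>
#include <pqxx/pqxx>

int main()
{
  pqxx::connection cx;
  pqxx::work tx{cx};

  {
    pqxx::pipeline pipe{tx, "demo"};

    // Queries go in at the front...
    auto const q1 = pipe.insert("SELECT 1");
    auto const q2 = pipe.insert("SELECT 2 + 2");

    // ...and results come out the back, looked up by query id.
    std::cout << pipe.retrieve(q1)[0][0].as<int>() << '\n';   // 1
    std::cout << pipe.retrieve(q2)[0][0].as<int>() << '\n';   // 4

    pipe.complete();   // Wait for anything still pending and detach.
  }
  tx.commit();
}
```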
+#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/array.hxx" +#include "pqxx/binarystring.hxx" +#include "pqxx/blob.hxx" +#include "pqxx/connection.hxx" +#include "pqxx/cursor.hxx" +#include "pqxx/errorhandler.hxx" +#include "pqxx/except.hxx" +#include "pqxx/largeobject.hxx" +#include "pqxx/nontransaction.hxx" +#include "pqxx/notification.hxx" +#include "pqxx/params.hxx" +#include "pqxx/pipeline.hxx" +#include "pqxx/prepared_statement.hxx" +#include "pqxx/result.hxx" +#include "pqxx/internal/result_iterator.hxx" +#include "pqxx/internal/result_iter.hxx" +#include "pqxx/robusttransaction.hxx" +#include "pqxx/row.hxx" +#include "pqxx/stream_from.hxx" +#include "pqxx/stream_to.hxx" +#include "pqxx/subtransaction.hxx" +#include "pqxx/transaction.hxx" +#include "pqxx/transactor.hxx" + +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/prepared_statement b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/prepared_statement new file mode 100644 index 000000000..674be7090 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/prepared_statement @@ -0,0 +1,3 @@ +/// @deprecated Include @c instead. + +#include "params.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/prepared_statement.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/prepared_statement.hxx new file mode 100644 index 000000000..674be7090 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/prepared_statement.hxx @@ -0,0 +1,3 @@ +/// @deprecated Include @c instead. + +#include "params.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/range b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/range new file mode 100644 index 000000000..11985eca4 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/range @@ -0,0 +1,6 @@ +/** Client-side support for SQL range types. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/range.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/range.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/range.hxx new file mode 100644 index 000000000..dc480e4b7 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/range.hxx @@ -0,0 +1,515 @@ +#ifndef PQXX_H_RANGE +#define PQXX_H_RANGE + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include + +#include "pqxx/internal/array-composite.hxx" +#include "pqxx/internal/concat.hxx" + +namespace pqxx +{ +/// An _unlimited_ boundary value to a @ref pqxx::range. +/** Use this as a lower or upper bound for a range if the range should extend + * to infinity on that side. + * + * An unlimited boundary is always inclusive of "infinity" values, if the + * range's value type supports them. + */ +struct no_bound +{ + template constexpr bool extends_down_to(TYPE const &) const + { + return true; + } + template constexpr bool extends_up_to(TYPE const &) const + { + return true; + } +}; + + +/// An _inclusive_ boundary value to a @ref pqxx::range. +/** Use this as a lower or upper bound for a range if the range should include + * the value. 
+ */ +template class inclusive_bound +{ +public: + inclusive_bound() = delete; + explicit inclusive_bound(TYPE const &value) : m_value{value} + { + if (is_null(value)) + throw argument_error{"Got null value as an inclusive range bound."}; + } + + [[nodiscard]] constexpr TYPE const &get() const &noexcept { return m_value; } + + // TODO: constexpr and/or noexcept if underlying operator supports it. + /// Would this bound, as a lower bound, include value? + [[nodiscard]] bool extends_down_to(TYPE const &value) const + { + return not(value < m_value); + } + + // TODO: constexpr and/or noexcept if underlying operator supports it. + /// Would this bound, as an upper bound, include value? + [[nodiscard]] bool extends_up_to(TYPE const &value) const + { + return not(m_value < value); + } + +private: + TYPE m_value; +}; + + +/// An _exclusive_ boundary value to a @ref pqxx::range. +/** Use this as a lower or upper bound for a range if the range should _not_ + * include the value. + */ +template class exclusive_bound +{ +public: + exclusive_bound() = delete; + explicit exclusive_bound(TYPE const &value) : m_value{value} + { + if (is_null(value)) + throw argument_error{"Got null value as an exclusive range bound."}; + } + + [[nodiscard]] constexpr TYPE const &get() const &noexcept { return m_value; } + + // TODO: constexpr and/or noexcept if underlying operator supports it. + /// Would this bound, as a lower bound, include value? + [[nodiscard]] bool extends_down_to(TYPE const &value) const + { + return m_value < value; + } + + // TODO: constexpr and/or noexcept if underlying operator supports it. + /// Would this bound, as an upper bound, include value? + [[nodiscard]] bool extends_up_to(TYPE const &value) const + { + return value < m_value; + } + +private: + TYPE m_value; +}; + + +/// A range boundary value. +/** A range bound is either no bound at all; or an inclusive bound; or an + * exclusive bound. Pass one of the three to the constructor. + */ +template class range_bound +{ +public: + range_bound() = delete; + // TODO: constexpr and/or noexcept if underlying constructor supports it. + range_bound(no_bound) : m_bound{} {} + // TODO: constexpr and/or noexcept if underlying constructor supports it. + range_bound(inclusive_bound const &bound) : m_bound{bound} {} + // TODO: constexpr and/or noexcept if underlying constructor supports it. + range_bound(exclusive_bound const &bound) : m_bound{bound} {} + // TODO: constexpr and/or noexcept if underlying constructor supports it. + range_bound(range_bound const &) = default; + // TODO: constexpr and/or noexcept if underlying constructor supports it. + range_bound(range_bound &&) = default; + + // TODO: constexpr and/or noexcept if underlying operators support it. + bool operator==(range_bound const &rhs) const + { + if (this->is_limited()) + return ( + rhs.is_limited() and (this->is_inclusive() == rhs.is_inclusive()) and + (*this->value() == *rhs.value())); + else + return not rhs.is_limited(); + } + + // TODO: constexpr and/or noexcept if underlying operator supports it. + bool operator!=(range_bound const &rhs) const { return not(*this == rhs); } + range_bound &operator=(range_bound const &) = default; + range_bound &operator=(range_bound &&) = default; + + /// Is this a finite bound? + constexpr bool is_limited() const noexcept + { + return not std::holds_alternative(m_bound); + } + + /// Is this boundary an inclusive one? 
+ constexpr bool is_inclusive() const noexcept + { + return std::holds_alternative>(m_bound); + } + + /// Is this boundary an exclusive one? + constexpr bool is_exclusive() const noexcept + { + return std::holds_alternative>(m_bound); + } + + // TODO: constexpr/noexcept if underlying function supports it. + /// Would this bound, as a lower bound, include `value`? + bool extends_down_to(TYPE const &value) const + { + return std::visit( + [&value](auto const &bound) { return bound.extends_down_to(value); }, + m_bound); + } + + // TODO: constexpr/noexcept if underlying function supports it. + /// Would this bound, as an upper bound, include `value`? + bool extends_up_to(TYPE const &value) const + { + return std::visit( + [&value](auto const &bound) { return bound.extends_up_to(value); }, + m_bound); + } + + /// Return bound value, or `nullptr` if it's not limited. + [[nodiscard]] constexpr TYPE const *value() const &noexcept + { + return std::visit( + [](auto const &bound) noexcept { + using bound_t = std::decay_t; + if constexpr (std::is_same_v) + return static_cast(nullptr); + else + return &bound.get(); + }, + m_bound); + } + +private: + std::variant, exclusive_bound> m_bound; +}; + + +// C++20: Concepts for comparisons, construction, etc. +/// A C++ equivalent to PostgreSQL's range types. +/** You can use this as a client-side representation of a "range" in SQL. + * + * PostgreSQL defines several range types, differing in the data type over + * which they range. You can also define your own range types. + * + * Usually you'll want the server to deal with ranges. But on occasions where + * you need to work with them client-side, you may want to use @ref + * pqxx::range. (In cases where all you do is pass them along to the server + * though, it's not worth the complexity. In that case you might as well treat + * ranges as just strings.) + * + * For documentation on PostgreSQL's range types, see: + * https://www.postgresql.org/docs/current/rangetypes.html + * + * The value type must be copyable and default-constructible, and support the + * less-than (`<`) and equals (`==`) comparisons. Value initialisation must + * produce a consistent value. + */ +template class range +{ +public: + /// Create a range. + /** For each of the two bounds, pass a @ref no_bound, @ref inclusive_bound, + * or + * @ref exclusive_bound. + */ + range(range_bound lower, range_bound upper) : + m_lower{lower}, m_upper{upper} + { + if ( + lower.is_limited() and upper.is_limited() and + (*upper.value() < *lower.value())) + throw range_error{internal::concat( + "Range's lower bound (", *lower.value(), + ") is greater than its upper bound (", *upper.value(), ").")}; + } + + // TODO: constexpr and/or noexcept if underlying constructor supports it. + /// Create an empty range. + /** SQL has a separate literal to denote an empty range, but any range which + * encompasses no values is an empty range. + */ + range() : + m_lower{exclusive_bound{TYPE{}}}, + m_upper{exclusive_bound{TYPE{}}} + {} + + // TODO: constexpr and/or noexcept if underlying operators support it. + bool operator==(range const &rhs) const + { + return (this->lower_bound() == rhs.lower_bound() and + this->upper_bound() == rhs.upper_bound()) or + (this->empty() and rhs.empty()); + } + + // TODO: constexpr and/or noexcept if underlying operator supports it. 
+ bool operator!=(range const &rhs) const { return !(*this == rhs); } + + range(range const &) = default; + range(range &&) = default; + range &operator=(range const &) = default; + range &operator=(range &&) = default; + + // TODO: constexpr and/or noexcept if underlying operator supports it. + /// Is this range clearly empty? + /** An empty range encompasses no values. + * + * It is possible to "fool" this. For example, if your range is of an + * integer type and has exclusive bounds of 0 and 1, it encompasses no values + * but its `empty()` will return false. The PostgreSQL implementation, by + * contrast, will notice that it is empty. Similar things can happen for + * floating-point types, but with more subtleties and edge cases. + */ + bool empty() const + { + return (m_lower.is_exclusive() or m_upper.is_exclusive()) and + m_lower.is_limited() and m_upper.is_limited() and + not(*m_lower.value() < *m_upper.value()); + } + + // TODO: constexpr and/or noexcept if underlying functions support it. + /// Does this range encompass `value`? + bool contains(TYPE value) const + { + return m_lower.extends_down_to(value) and m_upper.extends_up_to(value); + } + + // TODO: constexpr and/or noexcept if underlying operators support it. + /// Does this range encompass all of `other`? + /** This function is not particularly smart. It does not know, for example, + * that integer ranges `[0,9]` and `[0,10)` contain the same values. + */ + bool contains(range const &other) const + { + return (*this & other) == other; + } + + [[nodiscard]] constexpr range_bound const & + lower_bound() const &noexcept + { + return m_lower; + } + [[nodiscard]] constexpr range_bound const & + upper_bound() const &noexcept + { + return m_upper; + } + + // TODO: constexpr and/or noexcept if underlying operators support it. + /// Intersection of two ranges. + /** Returns a range describing those values which are in both ranges. + */ + range operator&(range const &other) const + { + range_bound lower{no_bound{}}; + if (not this->lower_bound().is_limited()) + lower = other.lower_bound(); + else if (not other.lower_bound().is_limited()) + lower = this->lower_bound(); + else if (*this->lower_bound().value() < *other.lower_bound().value()) + lower = other.lower_bound(); + else if (*other.lower_bound().value() < *this->lower_bound().value()) + lower = this->lower_bound(); + else if (this->lower_bound().is_exclusive()) + lower = this->lower_bound(); + else + lower = other.lower_bound(); + + range_bound upper{no_bound{}}; + if (not this->upper_bound().is_limited()) + upper = other.upper_bound(); + else if (not other.upper_bound().is_limited()) + upper = this->upper_bound(); + else if (*other.upper_bound().value() < *this->upper_bound().value()) + upper = other.upper_bound(); + else if (*this->upper_bound().value() < *other.upper_bound().value()) + upper = this->upper_bound(); + else if (this->upper_bound().is_exclusive()) + upper = this->upper_bound(); + else + upper = other.upper_bound(); + + if ( + lower.is_limited() and upper.is_limited() and + (*upper.value() < *lower.value())) + return {}; + else + return {lower, upper}; + } + + /// Convert to another base type. 
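A small sketch of the range operations defined above: construction from the three bound types, membership tests, intersection, and the default-constructed empty range. The value type and the bound values are arbitrary examples.

```cxx
#include <cassert>
#include <pqxx/range>

int main()
{
  // [0, 10): includes 0, excludes 10.
  pqxx::range<int> decade{
    pqxx::inclusive_bound<int>{0}, pqxx::exclusive_bound<int>{10}};

  // [5, infinity): no upper bound at all.
  pqxx::range<int> from_five{pqxx::inclusive_bound<int>{5}, pqxx::no_bound{}};

  assert(decade.contains(0));
  assert(not decade.contains(10));

  // Intersection keeps only the values present in both ranges: [5, 10).
  pqxx::range<int> const overlap{decade & from_five};
  assert(overlap.contains(5));
  assert(not overlap.contains(10));

  // A default-constructed range encompasses no values at all.
  assert(pqxx::range<int>{}.empty());
}
```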
+ template operator range() const + { + range_bound lower{no_bound{}}, upper{no_bound{}}; + if (lower_bound().is_inclusive()) + lower = inclusive_bound{*lower_bound().value()}; + else if (lower_bound().is_exclusive()) + lower = exclusive_bound{*lower_bound().value()}; + + if (upper_bound().is_inclusive()) + upper = inclusive_bound{*upper_bound().value()}; + else if (upper_bound().is_exclusive()) + upper = exclusive_bound{*upper_bound().value()}; + + return {lower, upper}; + } + +private: + range_bound m_lower, m_upper; +}; + + +/// String conversions for a @ref range type. +/** Conversion assumes that either your client encoding is UTF-8, or the values + * are pure ASCII. + */ +template struct string_traits> +{ + [[nodiscard]] static inline zview + to_buf(char *begin, char *end, range const &value) + { + return generic_to_buf(begin, end, value); + } + + static inline char * + into_buf(char *begin, char *end, range const &value) + { + if (value.empty()) + { + if ((end - begin) <= internal::ssize(s_empty)) + throw conversion_overrun{s_overrun.c_str()}; + char *here = begin + s_empty.copy(begin, std::size(s_empty)); + *here++ = '\0'; + return here; + } + else + { + if (end - begin < 4) + throw conversion_overrun{s_overrun.c_str()}; + char *here = begin; + *here++ = + (static_cast(value.lower_bound().is_inclusive() ? '[' : '(')); + TYPE const *lower{value.lower_bound().value()}; + // Convert bound (but go back to overwrite that trailing zero). + if (lower != nullptr) + here = string_traits::into_buf(here, end, *lower) - 1; + *here++ = ','; + TYPE const *upper{value.upper_bound().value()}; + // Convert bound (but go back to overwrite that trailing zero). + if (upper != nullptr) + here = string_traits::into_buf(here, end, *upper) - 1; + if ((end - here) < 2) + throw conversion_overrun{s_overrun.c_str()}; + *here++ = + static_cast(value.upper_bound().is_inclusive() ? ']' : ')'); + *here++ = '\0'; + return here; + } + } + + [[nodiscard]] static inline range from_string(std::string_view text) + { + if (std::size(text) < 3) + throw pqxx::conversion_error{err_bad_input(text)}; + bool left_inc{false}; + switch (text[0]) + { + case '[': left_inc = true; break; + + case '(': break; + + case 'e': + case 'E': + if ( + (std::size(text) != std::size(s_empty)) or + (text[1] != 'm' and text[1] != 'M') or + (text[2] != 'p' and text[2] != 'P') or + (text[3] != 't' and text[3] != 'T') or + (text[4] != 'y' and text[4] != 'Y')) + throw pqxx::conversion_error{err_bad_input(text)}; + return {}; + break; + + default: throw pqxx::conversion_error{err_bad_input(text)}; + } + + auto scan{internal::get_glyph_scanner(internal::encoding_group::UTF8)}; + // The field parser uses this to track which field it's parsing, and + // when not to expect a field separator. + std::size_t index{0}; + // The last field we expect to see. + static constexpr std::size_t last{1}; + // Current parsing position. We skip the opening parenthesis or bracket. + std::size_t pos{1}; + // The string may leave out either bound to indicate that it's unlimited. + std::optional lower, upper; + // We reuse the same field parser we use for composite values and arrays. + internal::parse_composite_field(index, text, pos, lower, scan, last); + internal::parse_composite_field(index, text, pos, upper, scan, last); + + // We need one more character: the closing parenthesis or bracket. 
+ if (pos != std::size(text)) + throw pqxx::conversion_error{err_bad_input(text)}; + char const closing{text[pos - 1]}; + if (closing != ')' and closing != ']') + throw pqxx::conversion_error{err_bad_input(text)}; + bool const right_inc{closing == ']'}; + + range_bound lower_bound{no_bound{}}, upper_bound{no_bound{}}; + if (lower) + { + if (left_inc) + lower_bound = inclusive_bound{*lower}; + else + lower_bound = exclusive_bound{*lower}; + } + if (upper) + { + if (right_inc) + upper_bound = inclusive_bound{*upper}; + else + upper_bound = exclusive_bound{*upper}; + } + + return {lower_bound, upper_bound}; + } + + [[nodiscard]] static inline constexpr std::size_t + size_buffer(range const &value) noexcept + { + TYPE const *lower{value.lower_bound().value()}, + *upper{value.upper_bound().value()}; + std::size_t const lsz{ + lower == nullptr ? 0 : string_traits::size_buffer(*lower) - 1}, + usz{upper == nullptr ? 0 : string_traits::size_buffer(*upper) - 1}; + + if (value.empty()) + return std::size(s_empty) + 1; + else + return 1 + lsz + 1 + usz + 2; + } + +private: + static constexpr zview s_empty{"empty"_zv}; + static constexpr auto s_overrun{"Not enough space in buffer for range."_zv}; + + /// Compose error message for invalid range input. + static std::string err_bad_input(std::string_view text) + { + return internal::concat("Invalid range input: '", text, "'"); + } +}; + + +/// A range type does not have an innate null value. +template struct nullness> : no_null> +{}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/result b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/result new file mode 100644 index 000000000..523394b72 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/result @@ -0,0 +1,16 @@ +/** pqxx::result class and support classes. + * + * pqxx::result represents the set of result rows from a database query. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/result.hxx" + +// Now include some types which depend on result, but which the user will +// expect to see defined after including this header. +#include "pqxx/internal/result_iterator.hxx" +#include "pqxx/field.hxx" +#include "pqxx/internal/result_iter.hxx" + +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/result.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/result.hxx new file mode 100644 index 000000000..6c41cc096 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/result.hxx @@ -0,0 +1,335 @@ +/* Definitions for the pqxx::result class and support classes. + * + * pqxx::result represents the set of result rows from a database query. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/result instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_RESULT +#define PQXX_H_RESULT + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include +#include + +#include "pqxx/except.hxx" +#include "pqxx/types.hxx" +#include "pqxx/util.hxx" +#include "pqxx/zview.hxx" + +#include "pqxx/internal/encodings.hxx" + + +namespace pqxx::internal +{ +// TODO: Make noexcept (but breaks ABI). 
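A sketch of text conversion for the range type, relying on the `string_traits` specialisation above. It assumes the generic `pqxx::to_string()` / `pqxx::from_string()` helpers from libpqxx's string-conversion machinery, which are not part of this header.

```cxx
#include <iostream>
#include <pqxx/pqxx>
#include <pqxx/range>

int main()
{
  // Parse a PostgreSQL range literal into a pqxx::range<int>.
  auto const r = pqxx::from_string<pqxx::range<int>>("[0,10)");
  std::cout << std::boolalpha << r.contains(9) << '\n';   // true

  // ...and render it back to text through the same traits.
  std::cout << pqxx::to_string(r) << '\n';                // [0,10)
}
```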
+PQXX_LIBEXPORT void clear_result(pq::PGresult const *); +} // namespace pqxx::internal + + +namespace pqxx::internal::gate +{ +class result_connection; +class result_creation; +class result_pipeline; +class result_row; +class result_sql_cursor; +} // namespace pqxx::internal::gate + + +namespace pqxx +{ +/// Result set containing data returned by a query or command. +/** This behaves as a container (as defined by the C++ standard library) and + * provides random access const iterators to iterate over its rows. You can + * also access a row by indexing a `result R` by the row's zero-based + * number: + * + * + * for (result::size_type i=0; i < std::size(R); ++i) Process(R[i]); + * + * + * Result sets in libpqxx are lightweight, reference-counted wrapper objects + * which are relatively small and cheap to copy. Think of a result object as + * a "smart pointer" to an underlying result set. + * + * @warning The result set that a result object points to is not thread-safe. + * If you copy a result object, it still refers to the same underlying result + * set. So never copy, destroy, query, or otherwise access a result while + * another thread may be copying, destroying, querying, or otherwise accessing + * the same result set--even if it is doing so through a different result + * object! + */ +class PQXX_LIBEXPORT result +{ +public: + using size_type = result_size_type; + using difference_type = result_difference_type; + using reference = row; + using const_iterator = const_result_iterator; + using pointer = const_iterator; + using iterator = const_iterator; + using const_reverse_iterator = const_reverse_result_iterator; + using reverse_iterator = const_reverse_iterator; + + result() noexcept : + m_data{make_data_pointer()}, + m_query{}, + m_encoding{internal::encoding_group::MONOBYTE} + {} + + result(result const &rhs) noexcept = default; + result(result &&rhs) noexcept = default; + + /// Assign one result to another. + /** Copying results is cheap: it copies only smart pointers, but the actual + * data stays in the same place. + */ + result &operator=(result const &rhs) noexcept = default; + + /// Assign one result to another, invaliding the old one. + result &operator=(result &&rhs) noexcept = default; + + /** + * @name Comparisons + * + * You can compare results for equality. Beware: this is a very strict, + * dumb comparison. The smallest difference between two results (such as a + * string "Foo" versus a string "foo") will make them unequal. + */ + //@{ + /// Compare two results for equality. + [[nodiscard]] bool operator==(result const &) const noexcept; + /// Compare two results for inequality. + [[nodiscard]] bool operator!=(result const &rhs) const noexcept + { + return not operator==(rhs); + } + //@} + + /// Iterate rows, reading them directly into a tuple of "TYPE...". + /** Converts the fields to values of the given respective types. + * + * Use this only with a ranged "for" loop. The iteration produces + * std::tuple which you can "unpack" to a series of `auto` + * variables. 
+ */ + template auto iter() const; + + [[nodiscard]] const_reverse_iterator rbegin() const; + [[nodiscard]] const_reverse_iterator crbegin() const; + [[nodiscard]] const_reverse_iterator rend() const; + [[nodiscard]] const_reverse_iterator crend() const; + + [[nodiscard]] const_iterator begin() const noexcept; + [[nodiscard]] const_iterator cbegin() const noexcept; + [[nodiscard]] inline const_iterator end() const noexcept; + [[nodiscard]] inline const_iterator cend() const noexcept; + + [[nodiscard]] reference front() const noexcept; + [[nodiscard]] reference back() const noexcept; + + [[nodiscard]] PQXX_PURE size_type size() const noexcept; + [[nodiscard]] PQXX_PURE bool empty() const noexcept; + [[nodiscard]] size_type capacity() const noexcept { return size(); } + + /// Exchange two `result` values in an exception-safe manner. + /** If the swap fails, the two values will be exactly as they were before. + * + * The swap is not necessarily thread-safe. + */ + void swap(result &) noexcept; + + /// Index a row by number. + /** This returns a @ref row object. Generally you should not keep the row + * around as a variable, but if you do, make sure that your variable is a + * `row`, not a `row&`. + */ + [[nodiscard]] row operator[](size_type i) const noexcept; + +#if defined(PQXX_HAVE_MULTIDIMENSIONAL_SUBSCRIPT) + // TODO: If C++23 will let us, also accept string for the column. + [[nodiscard]] field + operator[](size_type row_num, row_size_type col_num) const noexcept; +#endif + + /// Index a row by number, but check that the row number is valid. + row at(size_type) const; + + /// Index a field by row number and column number. + field at(size_type, row_size_type) const; + + /// Let go of the result's data. + /** Use this if you need to deallocate the result data earlier than you can + * destroy the `result` object itself. + * + * Multiple `result` objects can refer to the same set of underlying data. + * The underlying data will be deallocated once all `result` objects that + * refer to it are cleared or destroyed. + */ + void clear() noexcept + { + m_data.reset(); + m_query = nullptr; + } + + /** + * @name Column information + */ + //@{ + /// Number of columns in result. + [[nodiscard]] PQXX_PURE row_size_type columns() const noexcept; + + /// Number of given column (throws exception if it doesn't exist). + [[nodiscard]] row_size_type column_number(zview name) const; + + /// Name of column with this number (throws exception if it doesn't exist) + [[nodiscard]] char const *column_name(row_size_type number) const &; + + /// Return column's type, as an OID from the system catalogue. + [[nodiscard]] oid column_type(row_size_type col_num) const; + + /// Return column's type, as an OID from the system catalogue. + [[nodiscard]] oid column_type(zview col_name) const + { + return column_type(column_number(col_name)); + } + + /// What table did this column come from? + [[nodiscard]] oid column_table(row_size_type col_num) const; + + /// What table did this column come from? + [[nodiscard]] oid column_table(zview col_name) const + { + return column_table(column_number(col_name)); + } + + /// What column in its table did this column come from? + [[nodiscard]] row_size_type table_column(row_size_type col_num) const; + + /// What column in its table did this column come from? 
+ [[nodiscard]] row_size_type table_column(zview col_name) const + { + return table_column(column_number(col_name)); + } + //@} + + /// Query that produced this result, if available (empty string otherwise) + [[nodiscard]] PQXX_PURE std::string const &query() const &noexcept; + + /// If command was an `INSERT` of 1 row, return oid of the inserted row. + /** @return Identifier of inserted row if exactly one row was inserted, or + * @ref oid_none otherwise. + */ + [[nodiscard]] PQXX_PURE oid inserted_oid() const; + + /// If command was `INSERT`, `UPDATE`, or `DELETE`: number of affected rows. + /** @return Number of affected rows if last command was `INSERT`, `UPDATE`, + * or `DELETE`; zero for all other commands. + */ + [[nodiscard]] PQXX_PURE size_type affected_rows() const; + + // C++20: Concept like std::invocable, but without specifying param types. + /// Run `func` on each row, passing the row's fields as parameters. + /** Goes through the rows from first to last. You provide a callable `func`. + * + * For each row in the `result`, `for_each` will call `func`. It converts + * the row's fields to the types of `func`'s parameters, and pass them to + * `func`. + * + * (Therefore `func` must have a _single_ signature. It can't be a generic + * lambda, or an object of a class with multiple overloaded function call + * operators. Otherwise, `for_each` will have no way to detect a parameter + * list without ambiguity.) + * + * If any of your parameter types is `std::string_view`, it refers to the + * underlying storage of this `result`. + * + * If any of your parameter types is a reference type, its argument will + * refer to a temporary value which only lives for the duration of that + * single invocation to `func`. If the reference is an lvalue reference, it + * must be `const`. + * + * For example, this queries employee names and salaries from the database + * and prints how much each would like to earn instead: + * ```cxx + * tx.exec("SELECT name, salary FROM employee").for_each( + * [](std::string_view name, float salary){ + * std::cout << name << " would like " << salary * 2 << ".\n"; + * }) + * ``` + * + * If `func` throws an exception, processing stops at that point and + * propagates the exception. + * + * @throws usage_error if `func`'s number of parameters does not match the + * number of columns in this result. + */ + template inline void for_each(CALLABLE &&func) const; + +private: + using data_pointer = std::shared_ptr; + + /// Underlying libpq result set. + data_pointer m_data; + + /// Factory for data_pointer. + static data_pointer + make_data_pointer(internal::pq::PGresult const *res = nullptr) noexcept + { + return {res, internal::clear_result}; + } + + friend class pqxx::internal::gate::result_pipeline; + PQXX_PURE std::shared_ptr query_ptr() const noexcept + { + return m_query; + } + + /// Query string. + std::shared_ptr m_query; + + internal::encoding_group m_encoding; + + static std::string const s_empty_string; + + friend class pqxx::field; + // TODO: noexcept. Breaks ABI. + PQXX_PURE char const *get_value(size_type row, row_size_type col) const; + // TODO: noexcept. Breaks ABI. 
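A sketch of the three ways of consuming a `result` documented above: direct indexing, typed iteration with `iter()`, and `for_each()`. It assumes a database with an `employee` table; `pqxx::work`, `exec()`, and `field::c_str()` come from other libpqxx headers.

```cxx
#include <iostream>
#include <string_view>
#include <pqxx/pqxx>

int main()
{
  pqxx::connection cx;   // Assumes a database with an "employee" table.
  pqxx::work tx{cx};
  pqxx::result r{tx.exec("SELECT name, salary FROM employee")};

  // Index rows and fields directly...
  if (not r.empty())
    std::cout << r[0][0].c_str() << '\n';

  // ...iterate with on-the-fly conversion to a tuple of the given types...
  for (auto [name, salary] : r.iter<std::string_view, float>())
    std::cout << name << " earns " << salary << '\n';

  // ...or let for_each() deduce the conversions from the callable's parameters.
  r.for_each([](std::string_view name, float salary)
             { std::cout << name << ": " << salary << '\n'; });
}
```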
+ PQXX_PURE bool get_is_null(size_type row, row_size_type col) const; + PQXX_PURE + field_size_type get_length(size_type, row_size_type) const noexcept; + + friend class pqxx::internal::gate::result_creation; + result( + internal::pq::PGresult *rhs, std::shared_ptr query, + internal::encoding_group enc); + + PQXX_PRIVATE void check_status(std::string_view desc = ""sv) const; + + friend class pqxx::internal::gate::result_connection; + friend class pqxx::internal::gate::result_row; + bool operator!() const noexcept { return m_data.get() == nullptr; } + operator bool() const noexcept { return m_data.get() != nullptr; } + + [[noreturn]] PQXX_PRIVATE void + throw_sql_error(std::string const &Err, std::string const &Query) const; + PQXX_PRIVATE PQXX_PURE int errorposition() const; + PQXX_PRIVATE std::string status_error() const; + + friend class pqxx::internal::gate::result_sql_cursor; + PQXX_PURE char const *cmd_status() const noexcept; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/robusttransaction b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/robusttransaction new file mode 100644 index 000000000..04b71d7cc --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/robusttransaction @@ -0,0 +1,8 @@ +/** pqxx::robusttransaction class. + * + * pqxx::robusttransaction is a slower but safer transaction class. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/robusttransaction.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/robusttransaction.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/robusttransaction.hxx new file mode 100644 index 000000000..faf6dbf5e --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/robusttransaction.hxx @@ -0,0 +1,120 @@ +/* Definition of the pqxx::robusttransaction class. + * + * pqxx::robusttransaction is a slower but safer transaction class. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/robusttransaction instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ROBUSTTRANSACTION +#define PQXX_H_ROBUSTTRANSACTION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/dbtransaction.hxx" + +namespace pqxx::internal +{ +/// Helper base class for the @ref robusttransaction class template. +class PQXX_LIBEXPORT PQXX_NOVTABLE basic_robusttransaction + : public dbtransaction +{ +public: + virtual ~basic_robusttransaction() override = 0; + +protected: + basic_robusttransaction( + connection &c, zview begin_command, std::string_view tname); + basic_robusttransaction(connection &c, zview begin_command); + +private: + using IDType = unsigned long; + + std::string m_conn_string; + std::string m_xid; + int m_backendpid = -1; + + void init(zview begin_command); + + // @warning This function will become `final`. + virtual void do_commit() override; +}; +} // namespace pqxx::internal + + +namespace pqxx +{ +/** + * @ingroup transactions + * + * @{ + */ + +/// Slightly slower, better-fortified version of transaction. +/** Requires PostgreSQL 10 or better. 
+ * + * robusttransaction is similar to transaction, but spends more time and effort + * to deal with the hopefully rare case that the connection to the backend is + * lost just while it's trying to commit. In such cases, the client does not + * know whether the backend (on the other side of the broken connection) + * managed to commit the transaction. + * + * When this happens, robusttransaction tries to reconnect to the database and + * figure out what happened. + * + * This service level was made optional since you may not want to pay the + * overhead where it is not necessary. Certainly the use of this class makes + * no sense for local connections, or for transactions that read the database + * but never modify it, or for noncritical database manipulations. + * + * Besides being slower, it's also more complex. Which means that in practice + * a robusttransaction could actually fail more instead of less often than a + * normal transaction. What robusttransaction tries to achieve is to give you + * certainty, not just be more successful per se. + */ +template +class robusttransaction final : public internal::basic_robusttransaction +{ +public: + /** Create robusttransaction of given name. + * @param c Connection inside which this robusttransaction should live. + * @param tname optional human-readable name for this transaction. + */ + robusttransaction(connection &c, std::string_view tname) : + internal::basic_robusttransaction{ + c, pqxx::internal::begin_cmd, + tname} + {} + + /** Create robusttransaction of given name. + * @param c Connection inside which this robusttransaction should live. + * @param tname optional human-readable name for this transaction. + */ + robusttransaction(connection &c, std::string &&tname) : + internal::basic_robusttransaction{ + c, pqxx::internal::begin_cmd, + std::move(tname)} + {} + + /** Create robusttransaction of given name. + * @param c Connection inside which this robusttransaction should live. + */ + explicit robusttransaction(connection &c) : + internal::basic_robusttransaction{ + c, pqxx::internal::begin_cmd} + {} + + virtual ~robusttransaction() noexcept override { close(); } +}; + +/** + * @} + */ +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/row b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/row new file mode 100644 index 000000000..62a950ac8 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/row @@ -0,0 +1,11 @@ +/** pqxx::row class. + * + * pqxx::row refers to a row in a result. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/result.hxx" +#include "pqxx/row.hxx" + +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/row.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/row.hxx new file mode 100644 index 000000000..5be5132e3 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/row.hxx @@ -0,0 +1,561 @@ +/* Definitions for the pqxx::result class and support classes. + * + * pqxx::result represents the set of result rows from a database query. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/result instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. 
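For context on the `robusttransaction` class defined above, a short hedged sketch of typical use follows. The `account` table and the column names are hypothetical; the point is only that the class is a drop-in replacement for `transaction` when you want commit verification after a lost connection (PostgreSQL 10 or later).

```cxx
#include <pqxx/pqxx>

void record_payment(pqxx::connection &conn, int account, double amount)
{
  // Costs a little more per commit, but if the connection drops mid-commit,
  // it reconnects and verifies whether the commit actually landed.
  pqxx::robusttransaction<> tx{conn, "payment"};
  tx.exec_params(
    "UPDATE account SET balance = balance + $1 WHERE id = $2", amount, account);
  tx.commit();
}
```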
+ */ +#ifndef PQXX_H_ROW +#define PQXX_H_ROW + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/except.hxx" +#include "pqxx/field.hxx" +#include "pqxx/result.hxx" + +#include "pqxx/internal/concat.hxx" + +namespace pqxx::internal +{ +template class result_iter; +} // namespace pqxx::internal + + +namespace pqxx +{ +/// Reference to one row in a result. +/** A row represents one row (also called a row) in a query result set. + * It also acts as a container mapping column numbers or names to field + * values (see below): + * + * ```cxx + * cout << row["date"].c_str() << ": " << row["name"].c_str() << endl; + * ``` + * + * The row itself acts like a (non-modifyable) container, complete with its + * own const_iterator and const_reverse_iterator. + */ +class PQXX_LIBEXPORT row +{ +public: + using size_type = row_size_type; + using difference_type = row_difference_type; + using const_iterator = const_row_iterator; + using iterator = const_iterator; + using reference = field; + using pointer = const_row_iterator; + using const_reverse_iterator = const_reverse_row_iterator; + using reverse_iterator = const_reverse_iterator; + + row() noexcept = default; + row(row &&) noexcept = default; + row(row const &) noexcept = default; + row &operator=(row const &) noexcept = default; + row &operator=(row &&) noexcept = default; + + /** + * @name Comparison + */ + //@{ + [[nodiscard]] PQXX_PURE bool operator==(row const &) const noexcept; + [[nodiscard]] bool operator!=(row const &rhs) const noexcept + { + return not operator==(rhs); + } + //@} + + [[nodiscard]] const_iterator begin() const noexcept; + [[nodiscard]] const_iterator cbegin() const noexcept; + [[nodiscard]] const_iterator end() const noexcept; + [[nodiscard]] const_iterator cend() const noexcept; + + /** + * @name Field access + */ + //@{ + [[nodiscard]] reference front() const noexcept; + [[nodiscard]] reference back() const noexcept; + + // TODO: noexcept. Breaks ABI. + [[nodiscard]] const_reverse_row_iterator rbegin() const; + // TODO: noexcept. Breaks ABI. + [[nodiscard]] const_reverse_row_iterator crbegin() const; + // TODO: noexcept. Breaks ABI. + [[nodiscard]] const_reverse_row_iterator rend() const; + // TODO: noexcept. Breaks ABI. + [[nodiscard]] const_reverse_row_iterator crend() const; + + [[nodiscard]] reference operator[](size_type) const noexcept; + /** Address field by name. + * @warning This is much slower than indexing by number, or iterating. + */ + [[nodiscard]] reference operator[](zview col_name) const; + + reference at(size_type) const; + /** Address field by name. + * @warning This is much slower than indexing by number, or iterating. + */ + reference at(zview col_name) const; + + [[nodiscard]] constexpr size_type size() const noexcept + { + return m_end - m_begin; + } + + [[deprecated("Swap iterators, not rows.")]] void swap(row &) noexcept; + + /// Row number, assuming this is a real row and not end()/rend(). + [[nodiscard]] constexpr result::size_type rownumber() const noexcept + { + return m_index; + } + + /** + * @name Column information + */ + //@{ + /// Number of given column (throws exception if it doesn't exist). + [[nodiscard]] size_type column_number(zview col_name) const; + + /// Return a column's type. + [[nodiscard]] oid column_type(size_type) const; + + /// Return a column's type. + [[nodiscard]] oid column_type(zview col_name) const + { + return column_type(column_number(col_name)); + } + + /// What table did this column come from? 
+ [[nodiscard]] oid column_table(size_type col_num) const; + + /// What table did this column come from? + [[nodiscard]] oid column_table(zview col_name) const + { + return column_table(column_number(col_name)); + } + + /// What column number in its table did this result column come from? + /** A meaningful answer can be given only if the column in question comes + * directly from a column in a table. If the column is computed in any + * other way, a logic_error will be thrown. + * + * @param col_num a zero-based column number in this result set + * @return a zero-based column number in originating table + */ + [[nodiscard]] size_type table_column(size_type) const; + + /// What column number in its table did this result column come from? + [[nodiscard]] size_type table_column(zview col_name) const + { + return table_column(column_number(col_name)); + } + //@} + + [[nodiscard]] constexpr result::size_type num() const noexcept + { + return rownumber(); + } + + /** Produce a slice of this row, containing the given range of columns. + * + * @deprecated I haven't heard of anyone caring about row slicing at all in + * at least the last 15 years. Yet it adds complexity, so unless anyone + * files a bug explaining why they really need this feature, I'm going to + * remove it. Even if they do, the feature may need an update. + * + * The slice runs from the range's starting column to the range's end + * column, exclusive. It looks just like a normal result row, except + * slices can be empty. + */ + [[deprecated("Row slicing is going away. File a bug if you need it.")]] row + slice(size_type sbegin, size_type send) const; + + /// Is this a row without fields? Can only happen to a slice. + [[nodiscard, deprecated("Row slicing is going away.")]] PQXX_PURE bool + empty() const noexcept; + + /// Extract entire row's values into a tuple. + /** Converts to the types of the tuple's respective fields. + */ + template void to(Tuple &t) const + { + check_size(std::tuple_size_v); + convert(t); + } + + template std::tuple as() const + { + check_size(sizeof...(TYPE)); + using seq = std::make_index_sequence; + return get_tuple>(seq{}); + } + +protected: + friend class const_row_iterator; + friend class result; + row(result const &r, result_size_type index, size_type cols) noexcept; + + /// Throw @ref usage_error if row size is not `expected`. + void check_size(size_type expected) const + { + if (size() != expected) + throw usage_error{internal::concat( + "Tried to extract ", expected, " field(s) from a row of ", size(), + ".")}; + } + + /// Convert to a given tuple of values, don't check sizes. + /** We need this for cases where we have a full tuple of field types, but + * not a parameter pack. + */ + template TUPLE as_tuple() const + { + using seq = std::make_index_sequence>; + return get_tuple(seq{}); + } + + template friend class pqxx::internal::result_iter; + /// Convert entire row to tuple fields, without checking row size. + template void convert(Tuple &t) const + { + extract_fields(t, std::make_index_sequence>{}); + } + + friend class field; + + /// Result set of which this is one row. + result m_result; + + /// Row number. + /** + * You'd expect this to be unsigned, but due to the way reverse iterators + * are related to regular iterators, it must be allowed to underflow to -1. + */ + result::size_type m_index = 0; + + // TODO: Remove m_begin and (if possible) m_end when we remove slice(). + /// First column in slice. This row ignores lower-numbered columns. 
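To illustrate the field-access and tuple-conversion members of `row` documented above (`operator[]`, `at()`, `to()`, `as()`), here is a small sketch. The `employee` table and its columns are assumptions, not part of the header.

```cxx
#include <iostream>
#include <string>
#include <pqxx/pqxx>

int main()
{
  pqxx::connection conn;
  pqxx::work tx{conn};

  for (pqxx::row const &r : tx.exec("SELECT id, name FROM employee"))
  {
    // Index fields by column name (slower) or by number, then convert.
    std::cout << r["id"].as<long>() << ": " << r[1].c_str() << '\n';

    // Or convert the whole row into a tuple in one go.
    auto [id, name] = r.as<long, std::string>();
    std::cout << id << ": " << name << '\n';
  }
}
```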
+ size_type m_begin = 0; + /// End column in slice. This row only sees lower-numbered columns. + size_type m_end = 0; + +private: + template + void extract_fields(Tuple &t, std::index_sequence) const + { + (extract_value(t), ...); + } + + template + void extract_value(Tuple &t) const; + + /// Convert row's values as a new tuple. + template + auto get_tuple(std::index_sequence) const + { + return std::make_tuple(get_field()...); + } + + /// Extract and convert a field. + template auto get_field() const + { + return (*this)[index].as>(); + } +}; + + +/// Iterator for fields in a row. Use as row::const_iterator. +class PQXX_LIBEXPORT const_row_iterator : public field +{ +public: + using iterator_category = std::random_access_iterator_tag; + using value_type = field const; + using pointer = field const *; + using size_type = row_size_type; + using difference_type = row_difference_type; + using reference = field; + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + const_row_iterator() = default; +#include "pqxx/internal/ignore-deprecated-post.hxx" + const_row_iterator(row const &t, row_size_type c) noexcept : + field{t.m_result, t.m_index, c} + {} + const_row_iterator(field const &F) noexcept : field{F} {} + const_row_iterator(const_row_iterator const &) noexcept = default; + const_row_iterator(const_row_iterator &&) noexcept = default; + + /** + * @name Dereferencing operators + */ + //@{ + [[nodiscard]] constexpr pointer operator->() const noexcept { return this; } + [[nodiscard]] reference operator*() const noexcept { return {*this}; } + //@} + + /** + * @name Manipulations + */ + //@{ + const_row_iterator &operator=(const_row_iterator const &) noexcept = default; + const_row_iterator &operator=(const_row_iterator &&) noexcept = default; + + // TODO: noexcept. Breaks ABI. + const_row_iterator operator++(int); + const_row_iterator &operator++() noexcept + { + ++m_col; + return *this; + } + // TODO: noexcept. Breaks ABI. 
+ const_row_iterator operator--(int); + const_row_iterator &operator--() noexcept + { + --m_col; + return *this; + } + + const_row_iterator &operator+=(difference_type i) noexcept + { + m_col = size_type(difference_type(m_col) + i); + return *this; + } + const_row_iterator &operator-=(difference_type i) noexcept + { + m_col = size_type(difference_type(m_col) - i); + return *this; + } + //@} + + /** + * @name Comparisons + */ + //@{ + [[nodiscard]] constexpr bool + operator==(const_row_iterator const &i) const noexcept + { + return col() == i.col(); + } + [[nodiscard]] constexpr bool + operator!=(const_row_iterator const &i) const noexcept + { + return col() != i.col(); + } + [[nodiscard]] constexpr bool + operator<(const_row_iterator const &i) const noexcept + { + return col() < i.col(); + } + [[nodiscard]] constexpr bool + operator<=(const_row_iterator const &i) const noexcept + { + return col() <= i.col(); + } + [[nodiscard]] constexpr bool + operator>(const_row_iterator const &i) const noexcept + { + return col() > i.col(); + } + [[nodiscard]] constexpr bool + operator>=(const_row_iterator const &i) const noexcept + { + return col() >= i.col(); + } + //@} + + /** + * @name Arithmetic operators + */ + //@{ + [[nodiscard]] inline const_row_iterator + operator+(difference_type) const noexcept; + + friend const_row_iterator + operator+(difference_type, const_row_iterator const &) noexcept; + + [[nodiscard]] inline const_row_iterator + operator-(difference_type) const noexcept; + [[nodiscard]] inline difference_type + operator-(const_row_iterator const &) const noexcept; + //@} +}; + + +/// Reverse iterator for a row. Use as row::const_reverse_iterator. +class PQXX_LIBEXPORT const_reverse_row_iterator : private const_row_iterator +{ +public: + using super = const_row_iterator; + using iterator_type = const_row_iterator; + using iterator_type::difference_type; + using iterator_type::iterator_category; + using iterator_type::pointer; + using value_type = iterator_type::value_type; + using reference = iterator_type::reference; + + const_reverse_row_iterator() noexcept = default; + const_reverse_row_iterator(const_reverse_row_iterator const &) noexcept = + default; + const_reverse_row_iterator(const_reverse_row_iterator &&) noexcept = default; + + explicit const_reverse_row_iterator(super const &rhs) noexcept : + const_row_iterator{rhs} + { + super::operator--(); + } + + [[nodiscard]] PQXX_PURE iterator_type base() const noexcept; + + /** + * @name Dereferencing operators + */ + //@{ + using iterator_type::operator->; + using iterator_type::operator*; + //@} + + /** + * @name Manipulations + */ + //@{ + const_reverse_row_iterator & + operator=(const_reverse_row_iterator const &r) noexcept + { + iterator_type::operator=(r); + return *this; + } + const_reverse_row_iterator operator++() noexcept + { + iterator_type::operator--(); + return *this; + } + // TODO: noexcept. Breaks ABI. + const_reverse_row_iterator operator++(int); + const_reverse_row_iterator &operator--() noexcept + { + iterator_type::operator++(); + return *this; + } + const_reverse_row_iterator operator--(int); + // TODO: noexcept. Breaks ABI. 
+ const_reverse_row_iterator &operator+=(difference_type i) noexcept + { + iterator_type::operator-=(i); + return *this; + } + const_reverse_row_iterator &operator-=(difference_type i) noexcept + { + iterator_type::operator+=(i); + return *this; + } + //@} + + /** + * @name Arithmetic operators + */ + //@{ + [[nodiscard]] const_reverse_row_iterator + operator+(difference_type i) const noexcept + { + return const_reverse_row_iterator{base() - i}; + } + [[nodiscard]] const_reverse_row_iterator + operator-(difference_type i) noexcept + { + return const_reverse_row_iterator{base() + i}; + } + [[nodiscard]] difference_type + operator-(const_reverse_row_iterator const &rhs) const noexcept + { + return rhs.const_row_iterator::operator-(*this); + } + //@} + + /** + * @name Comparisons + */ + //@{ + [[nodiscard]] bool + operator==(const_reverse_row_iterator const &rhs) const noexcept + { + return iterator_type::operator==(rhs); + } + [[nodiscard]] bool + operator!=(const_reverse_row_iterator const &rhs) const noexcept + { + return !operator==(rhs); + } + + [[nodiscard]] constexpr bool + operator<(const_reverse_row_iterator const &rhs) const noexcept + { + return iterator_type::operator>(rhs); + } + [[nodiscard]] constexpr bool + operator<=(const_reverse_row_iterator const &rhs) const noexcept + { + return iterator_type::operator>=(rhs); + } + [[nodiscard]] constexpr bool + operator>(const_reverse_row_iterator const &rhs) const noexcept + { + return iterator_type::operator<(rhs); + } + [[nodiscard]] constexpr bool + operator>=(const_reverse_row_iterator const &rhs) const noexcept + { + return iterator_type::operator<=(rhs); + } + //@} +}; + + +const_row_iterator +const_row_iterator::operator+(difference_type o) const noexcept +{ + // TODO:: More direct route to home().columns()? + return { + row{home(), idx(), home().columns()}, + size_type(difference_type(col()) + o)}; +} + +inline const_row_iterator operator+( + const_row_iterator::difference_type o, const_row_iterator const &i) noexcept +{ + return i + o; +} + +inline const_row_iterator +const_row_iterator::operator-(difference_type o) const noexcept +{ + // TODO:: More direct route to home().columns()? + return { + row{home(), idx(), home().columns()}, + size_type(difference_type(col()) - o)}; +} + +inline const_row_iterator::difference_type +const_row_iterator::operator-(const_row_iterator const &i) const noexcept +{ + return difference_type(num() - i.num()); +} + + +template +inline void row::extract_value(Tuple &t) const +{ + using field_type = strip_t(t))>; + field const f{m_result, m_index, index}; + std::get(t) = from_string(f); +} +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/separated_list b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/separated_list new file mode 100644 index 000000000..1bdf51c6a --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/separated_list @@ -0,0 +1,6 @@ +/** Helper similar to Python's @c str.join(). + */ +// Actual definitions in .hxx file so editors and such recognize file type. 
+#include "pqxx/internal/header-pre.hxx" +#include "pqxx/separated_list.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/separated_list.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/separated_list.hxx new file mode 100644 index 000000000..d4230ea08 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/separated_list.hxx @@ -0,0 +1,142 @@ +/* Helper similar to Python's `str.join()`. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/separated_list instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_SEPARATED_LIST +#define PQXX_H_SEPARATED_LIST + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include + +#include "pqxx/strconv.hxx" + +// C++20: Simplify using std::ranges::range. +// C++20: Optimise buffer allocation using random_access_range/iterator. +namespace pqxx +{ +/** + * @defgroup utility Utility functions + */ +//@{ + +/// Represent sequence of values as a string, joined by a given separator. +/** + * Use this to turn e.g. the numbers 1, 2, and 3 into a string "1, 2, 3". + * + * @param sep separator string (to be placed between items) + * @param begin beginning of items sequence + * @param end end of items sequence + * @param access functor defining how to dereference sequence elements + */ +template +[[nodiscard]] inline std::string +separated_list(std::string_view sep, ITER begin, ITER end, ACCESS access) +{ + if (end == begin) + return {}; + auto next{begin}; + ++next; + if (next == end) + return to_string(access(begin)); + + // From here on, we've got at least 2 elements -- meaning that we need sep. + using elt_type = strip_t; + using traits = string_traits; + + std::size_t budget{0}; + for (ITER cnt{begin}; cnt != end; ++cnt) + budget += traits::size_buffer(access(cnt)); + budget += + static_cast(std::distance(begin, end)) * std::size(sep); + + std::string result; + result.resize(budget); + + char *const data{result.data()}; + char *here{data}; + char *stop{data + budget}; + here = traits::into_buf(here, stop, access(begin)) - 1; + for (++begin; begin != end; ++begin) + { + here += sep.copy(here, std::size(sep)); + here = traits::into_buf(here, stop, access(begin)) - 1; + } + result.resize(static_cast(here - data)); + return result; +} + + +/// Render sequence as a string, using given separator between items. +template +[[nodiscard]] inline std::string +separated_list(std::string_view sep, ITER begin, ITER end) +{ + return separated_list(sep, begin, end, [](ITER i) { return *i; }); +} + + +/// Render items in a container as a string, using given separator. +template +[[nodiscard]] inline auto +separated_list(std::string_view sep, CONTAINER const &c) + /* + Always std::string; necessary because SFINAE doesn't work with the + contents of function bodies, so the check for iterability has to be in + the signature. + */ + -> typename std::enable_if< + (not std::is_void::value and + not std::is_void::value), + std::string>::type +{ + return separated_list(sep, std::begin(c), std::end(c)); +} + + +/// Render items in a tuple as a string, using given separator. 
+template< + typename TUPLE, std::size_t INDEX = 0, typename ACCESS, + typename std::enable_if< + (INDEX == std::tuple_size::value - 1), int>::type = 0> +[[nodiscard]] inline std::string separated_list( + std::string_view /* sep */, TUPLE const &t, ACCESS const &access) +{ + return to_string(access(&std::get(t))); +} + +template< + typename TUPLE, std::size_t INDEX = 0, typename ACCESS, + typename std::enable_if< + (INDEX < std::tuple_size::value - 1), int>::type = 0> +[[nodiscard]] inline std::string +separated_list(std::string_view sep, TUPLE const &t, ACCESS const &access) +{ + std::string out{to_string(access(&std::get(t)))}; + out.append(sep); + out.append(separated_list(sep, t, access)); + return out; +} + +template< + typename TUPLE, std::size_t INDEX = 0, + typename std::enable_if< + (INDEX <= std::tuple_size::value), int>::type = 0> +[[nodiscard]] inline std::string +separated_list(std::string_view sep, TUPLE const &t) +{ + // TODO: Optimise allocation. + return separated_list(sep, t, [](TUPLE const &tup) { return *tup; }); +} +//@} +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/strconv b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/strconv new file mode 100644 index 000000000..aa2c40ed5 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/strconv @@ -0,0 +1,6 @@ +/** String conversion definitions. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/strconv.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/strconv.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/strconv.hxx new file mode 100644 index 000000000..863711228 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/strconv.hxx @@ -0,0 +1,468 @@ +/* String conversion definitions. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/stringconv instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_STRCONV +#define PQXX_H_STRCONV + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include +#include +#include +#include + +#if __has_include() +# include +#endif + +#if defined(PQXX_HAVE_RANGES) && __has_include() +# include +#endif + +#include "pqxx/except.hxx" +#include "pqxx/util.hxx" +#include "pqxx/zview.hxx" + + +namespace pqxx::internal +{ +/// Attempt to demangle @c std::type_info::name() to something human-readable. +PQXX_LIBEXPORT std::string demangle_type_name(char const[]); +} // namespace pqxx::internal + + +namespace pqxx +{ +/** + * @defgroup stringconversion String conversion + * + * The PostgreSQL server accepts and represents data in string form. It has + * its own formats for various data types. The string conversions define how + * various C++ types translate to and from their respective PostgreSQL text + * representations. + * + * Each conversion is defined by a specialisations of @c string_traits. 
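A brief sketch of the `separated_list()` helpers declared above, showing both the container overload and the iterator-plus-accessor overload. The values are arbitrary examples.

```cxx
#include <iostream>
#include <vector>
#include <pqxx/separated_list>

int main()
{
  std::vector<int> ids{1, 2, 3};

  // "1,2,3" -- handy when composing e.g. an IN (...) list of already-safe values.
  std::cout << pqxx::separated_list(",", ids) << '\n';

  // The iterator/accessor overload lets you transform each element on the way out.
  std::cout << pqxx::separated_list(
                 ", ", ids.begin(), ids.end(),
                 [](auto it) { return *it * 10; })
            << '\n';   // "10, 20, 30"
}
```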
It + * gets complicated if you want top performance, but until you do, all you + * really need to care about when converting values between C++ in-memory + * representations such as @c int and the postgres string representations is + * the @c pqxx::to_string and @c pqxx::from_string functions. + * + * If you need to convert a type which is not supported out of the box, you'll + * need to define your own specialisations for these templates, similar to the + * ones defined here and in `pqxx/conversions.hxx`. Any conversion code which + * "sees" your specialisation will now support your conversion. In particular, + * you'll be able to read result fields into a variable of the new type. + * + * There is a macro to help you define conversions for individual enumeration + * types. The conversion will represent enumeration values as numeric strings. + */ +//@{ + +/// A human-readable name for a type, used in error messages and such. +/** Actually this may not always be very user-friendly. It uses + * @c std::type_info::name(). On gcc-like compilers we try to demangle its + * output. Visual Studio produces human-friendly names out of the box. + * + * This variable is not inline. Inlining it gives rise to "memory leak" + * warnings from asan, the address sanitizer, possibly from use of + * @c std::type_info::name. + */ +template +std::string const type_name{internal::demangle_type_name(typeid(TYPE).name())}; + + +/// Traits describing a type's "null value," if any. +/** Some C++ types have a special value or state which correspond directly to + * SQL's NULL. + * + * The @c nullness traits describe whether it exists, and whether a particular + * value is null. + */ +template struct nullness +{ + /// Does this type have a null value? + static bool has_null; + + /// Is this type always null? + static bool always_null; + + /// Is @c value a null? + static bool is_null(TYPE const &value); + + /// Return a null value. + /** Don't use this in generic code to compare a value and see whether it is + * null. Some types may have multiple null values which do not compare as + * equal, or may define a null value which is not equal to anything including + * itself, like in SQL. + */ + [[nodiscard]] static TYPE null(); +}; + + +/// Nullness traits describing a type which does not have a null value. +template struct no_null +{ + /// Does @c TYPE have a "built-in null value"? + /** For example, a pointer can equal @c nullptr, which makes a very natural + * representation of an SQL null value. For such types, the code sometimes + * needs to make special allowances. + * + * for most types, such as @c int or @c std::string, there is no built-in + * null. If you want to represent an SQL null value for such a type, you + * would have to wrap it in something that does have a null value. For + * example, you could use @c std::optional for "either an @c int or a + * null value." + */ + static constexpr bool has_null = false; + + /// Are all values of this type null? + /** There are a few special C++ types which are always null - mainly + * @c std::nullptr_t. + */ + static constexpr bool always_null = false; + + /// Does a given value correspond to an SQL null value? + /** Most C++ types, such as @c int or @c std::string, have no inherent null + * value. But some types such as C-style string pointers do have a natural + * equivalent to an SQL null. + */ + [[nodiscard]] static constexpr bool is_null(TYPE const &) noexcept + { + return false; + } +}; + + +/// Traits class for use in string conversions. 
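To make the string-conversion overview above concrete, here is a minimal sketch of `to_string()`, `from_string()`, and `is_null()`. It assumes that including `<pqxx/strconv>` alone pulls in the built-in conversions (including `std::optional`); if not, including `<pqxx/pqxx>` would cover it.

```cxx
#include <iostream>
#include <optional>
#include <pqxx/strconv>

int main()
{
  // to_string()/from_string() are the workhorses of the conversion layer.
  std::string s{pqxx::to_string(7.5)};    // "7.5"
  int n{pqxx::from_string<int>("42")};    // 42

  // Types without a built-in null can be wrapped to represent SQL NULL.
  std::optional<int> maybe;               // empty == SQL NULL
  std::cout << s << ' ' << n << ' '
            << (pqxx::is_null(maybe) ? "null" : "not null") << '\n';
}
```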
+/** Specialize this template for a type for which you wish to add to_string + * and from_string support. + * + * String conversions are not meant to work for nulls. Check for null before + * converting a value of @c TYPE to a string, or vice versa. + */ +template struct string_traits +{ + /// Return a @c string_view representing value, plus terminating zero. + /** Produces a @c string_view containing the PostgreSQL string representation + * for @c value. + * + * Uses the space from @c begin to @c end as a buffer, if needed. The + * returned string may lie somewhere in that buffer, or it may be a + * compile-time constant, or it may be null if value was a null value. Even + * if the string is stored in the buffer, its @c begin() may or may not be + * the same as @c begin. + * + * The @c string_view is guaranteed to be valid as long as the buffer from + * @c begin to @c end remains accessible and unmodified. + * + * @throws pqxx::conversion_overrun if the provided buffer space may not be + * enough. For maximum performance, this is a conservative estimate. It may + * complain about a buffer which is actually large enough for your value, if + * an exact check gets too expensive. + */ + [[nodiscard]] static inline zview + to_buf(char *begin, char *end, TYPE const &value); + + /// Write value's string representation into buffer at @c begin. + /** Assumes that value is non-null. + * + * Writes value's string representation into the buffer, starting exactly at + * @c begin, and ensuring a trailing zero. Returns the address just beyond + * the trailing zero, so the caller could use it as the @c begin for another + * call to @c into_buf writing a next value. + */ + static inline char *into_buf(char *begin, char *end, TYPE const &value); + + /// Parse a string representation of a @c TYPE value. + /** Throws @c conversion_error if @c value does not meet the expected format + * for a value of this type. + */ + [[nodiscard]] static inline TYPE from_string(std::string_view text); + + // C++20: Can we make these all constexpr? + /// Estimate how much buffer space is needed to represent value. + /** The estimate may be a little pessimistic, if it saves time. + * + * The estimate includes the terminating zero. + */ + [[nodiscard]] static inline std::size_t + size_buffer(TYPE const &value) noexcept; +}; + + +/// Nullness: Enums do not have an inherent null value. +template +struct nullness>> : no_null +{}; +} // namespace pqxx + + +namespace pqxx::internal +{ +/// Helper class for defining enum conversions. +/** The conversion will convert enum values to numeric strings, and vice versa. + * + * To define a string conversion for an enum type, derive a @c string_traits + * specialisation for the enum from this struct. + * + * There's usually an easier way though: the @c PQXX_DECLARE_ENUM_CONVERSION + * macro. Use @c enum_traits manually only if you need to customise your + * traits type in more detail. 
+ */ +template struct enum_traits +{ + using impl_type = std::underlying_type_t; + using impl_traits = string_traits; + + [[nodiscard]] static constexpr zview + to_buf(char *begin, char *end, ENUM const &value) + { + return impl_traits::to_buf(begin, end, to_underlying(value)); + } + + static constexpr char *into_buf(char *begin, char *end, ENUM const &value) + { + return impl_traits::into_buf(begin, end, to_underlying(value)); + } + + [[nodiscard]] static ENUM from_string(std::string_view text) + { + return static_cast(impl_traits::from_string(text)); + } + + [[nodiscard]] static std::size_t size_buffer(ENUM const &value) noexcept + { + return impl_traits::size_buffer(to_underlying(value)); + } + +private: + // C++23: Replace with std::to_underlying. + static constexpr impl_type to_underlying(ENUM const &value) noexcept + { + return static_cast(value); + } +}; +} // namespace pqxx::internal + + +/// Macro: Define a string conversion for an enum type. +/** This specialises the @c pqxx::string_traits template, so use it in the + * @c ::pqxx namespace. + * + * For example: + * + * #include + * #include + * enum X { xa, xb }; + * namespace pqxx { PQXX_DECLARE_ENUM_CONVERSION(x); } + * int main() { std::cout << pqxx::to_string(xa) << std::endl; } + */ +#define PQXX_DECLARE_ENUM_CONVERSION(ENUM) \ + template<> struct string_traits : pqxx::internal::enum_traits \ + {}; \ + template<> inline std::string const type_name { #ENUM } + + +namespace pqxx +{ +/// Parse a value in postgres' text format as a TYPE. +/** If the form of the value found in the string does not match the expected + * type, e.g. if a decimal point is found when converting to an integer type, + * the conversion fails. Overflows (e.g. converting "9999999999" to a 16-bit + * C++ type) are also treated as errors. If in some cases this behaviour + * should be inappropriate, convert to something bigger such as @c long @c int + * first and then truncate the resulting value. + * + * Only the simplest possible conversions are supported. Fancy features like + * hexadecimal or octal, spurious signs, or exponent notation won't work. + * Whitespace is not stripped away. Only the kinds of strings that come out of + * PostgreSQL and out of to_string() can be converted. + */ +template +[[nodiscard]] inline TYPE from_string(std::string_view text) +{ + return string_traits::from_string(text); +} + + +/// "Convert" a std::string_view to a std::string_view. +/** Just returns its input. + * + * @warning Of course the result is only valid for as long as the original + * string remains valid! Never access the string referenced by the return + * value after the original has been destroyed. + */ +template<> +[[nodiscard]] inline std::string_view from_string(std::string_view text) +{ + return text; +} + + +/// Attempt to convert postgres-generated string to given built-in object. +/** This is like the single-argument form of the function, except instead of + * returning the value, it sets @c value. + * + * You may find this more convenient in that it infers the type you want from + * the argument you pass. But there are disadvantages: it requires an + * assignment operator, and it may be less efficient. + */ +template inline void from_string(std::string_view text, T &value) +{ + value = from_string(text); +} + + +/// Convert a value to a readable string that PostgreSQL will understand. +/** The conversion does no special formatting, and ignores any locale settings. 
+ * The resulting string will be human-readable and in a format suitable for use + * in SQL queries. It won't have niceties such as "thousands separators" + * though. + */ +template inline std::string to_string(TYPE const &value); + + +/// Convert multiple values to strings inside a single buffer. +/** There must be enough room for all values, or this will throw + * @c conversion_overrun. You can obtain a conservative estimate of the buffer + * space required by calling @c size_buffer() on the values. + * + * The @c std::string_view results may point into the buffer, so don't assume + * that they will remain valid after you destruct or move the buffer. + */ +template +[[nodiscard]] inline std::vector +to_buf(char *here, char const *end, TYPE... value) +{ + return {[&here, end](auto v) { + auto begin = here; + here = string_traits::into_buf(begin, end, v); + // Exclude the trailing zero out of the string_view. + auto len{static_cast(here - begin) - 1}; + return std::string_view{begin, len}; + }(value)...}; +} + +/// Convert a value to a readable string that PostgreSQL will understand. +/** This variant of to_string can sometimes save a bit of time in loops, by + * re-using a std::string for multiple conversions. + */ +template +inline void into_string(TYPE const &value, std::string &out); + + +/// Is @c value null? +template +[[nodiscard]] inline constexpr bool is_null(TYPE const &value) noexcept +{ + return nullness>::is_null(value); +} + + +/// Estimate how much buffer space is needed to represent values as a string. +/** The estimate may be a little pessimistic, if it saves time. It also + * includes room for a terminating zero after each value. + */ +template +[[nodiscard]] inline std::size_t size_buffer(TYPE const &...value) noexcept +{ + return (string_traits>::size_buffer(value) + ...); +} + + +/// Does this type translate to an SQL array? +/** Specialisations may override this to be true for container types. + * + * This may not always be a black-and-white choice. For instance, a + * @c std::string is a container, but normally it translates to an SQL string, + * not an SQL array. + */ +template inline constexpr bool is_sql_array{false}; + + +/// Can we use this type in arrays and composite types without quoting them? +/** Define this as @c true only if values of @c TYPE can never contain any + * special characters that might need escaping or confuse the parsing of array + * or composite * types, such as commas, quotes, parentheses, braces, newlines, + * and so on. + * + * When converting a value of such a type to a string in an array or a field in + * a composite type, we do not need to add quotes, nor escape any special + * characters. + * + * This is just an optimisation, so it defaults to @c false to err on the side + * of slow correctness. + */ +template inline constexpr bool is_unquoted_safe{false}; + + +/// Element separator between SQL array elements of this type. +template inline constexpr char array_separator{','}; + + +/// What's the preferred format for passing non-null parameters of this type? +/** This affects how we pass parameters of @c TYPE when calling parameterised + * statements or prepared statements. + * + * Generally we pass parameters in text format, but binary strings are the + * exception. We also pass nulls in binary format, so this function need not + * handle null values. + */ +template inline constexpr format param_format(TYPE const &) +{ + return format::text; +} + + +/// Implement @c string_traits::to_buf by calling @c into_buf. 
+/** When you specialise @c string_traits for a new type, most of the time its + * @c to_buf implementation has no special optimisation tricks and just writes + * its text into the buffer it receives from the caller, starting at the + * beginning. + * + * In that common situation, you can implement @c to_buf as just a call to + * @c generic_to_buf. It will call @c into_buf and return the right result for + * @c to_buf. + */ +template +inline zview generic_to_buf(char *begin, char *end, TYPE const &value) +{ + using traits = string_traits; + // The trailing zero does not count towards the zview's size, so subtract 1 + // from the result we get from into_buf(). + if (is_null(value)) + return {}; + else + return {begin, traits::into_buf(begin, end, value) - begin - 1}; +} + + +#if defined(PQXX_HAVE_CONCEPTS) +/// Concept: Binary string, akin to @c std::string for binary data. +/** Any type that satisfies this concept can represent an SQL BYTEA value. + * + * A @c binary has a @c begin(), @c end(), @c size(), and @data(). Each byte + * is a @c std::byte, and they must all be laid out contiguously in memory so + * we can reference them by a pointer. + */ +template +concept binary = std::ranges::contiguous_range and + std::is_same_v>, std::byte>; +#endif +//@} +} // namespace pqxx + + +#include "pqxx/internal/conversions.hxx" +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/stream_from b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/stream_from new file mode 100644 index 000000000..972762443 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/stream_from @@ -0,0 +1,8 @@ +/** pqxx::stream_from class. + * + * pqxx::stream_from enables optimized batch reads from a database table. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/stream_from.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/stream_from.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/stream_from.hxx new file mode 100644 index 000000000..ff4a93d2e --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/stream_from.hxx @@ -0,0 +1,361 @@ +/* Definition of the pqxx::stream_from class. + * + * pqxx::stream_from enables optimized batch reads from a database table. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/stream_from instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_STREAM_FROM +#define PQXX_H_STREAM_FROM + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include + +#include "pqxx/connection.hxx" +#include "pqxx/except.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/internal/encoding_group.hxx" +#include "pqxx/internal/stream_iterator.hxx" +#include "pqxx/separated_list.hxx" +#include "pqxx/transaction_focus.hxx" + + +namespace pqxx +{ +class transaction_base; + + +/// Pass this to a `stream_from` constructor to stream table contents. +/** @deprecated Use @ref stream_from::table() instead. + */ +constexpr from_table_t from_table; +/// Pass this to a `stream_from` constructor to stream query results. +/** @deprecated Use stream_from::query() instead. 
+ */ +constexpr from_query_t from_query; + + +/// Stream data from the database. +/** For larger data sets, retrieving data this way is likely to be faster than + * executing a query and then iterating and converting the rows fields. You + * will also be able to start processing before all of the data has come in. + * + * There are also downsides. Not all kinds of query will work in a stream. + * But straightforward `SELECT` and `UPDATE ... RETURNING` queries should work. + * This function makes use of @ref pqxx::stream_from, which in turn uses + * PostgreSQL's `COPY` command, so see the documentation for those to get the + * full details. + * + * There are other downsides. If there stream encounters an error, it may + * leave the entire connection in an unusable state, so you'll have to give the + * whole thing up. Finally, opening a stream puts the connection in a special + * state, so you won't be able to do many other things with the connection or + * the transaction while the stream is open. + * + * There are two ways of starting a stream: you stream either all rows in a + * table (using one of the factories, `table()` or `raw_table()`), or the + * results of a query (using the `query()` factory). + * + * Usually you'll want the `stream` convenience wrapper in + * @ref transaction_base, * so you don't need to deal with this class directly. + * + * @warning While a stream is active, you cannot execute queries, open a + * pipeline, etc. on the same transaction. A transaction can have at most one + * object of a type derived from @ref pqxx::transaction_focus active on it at a + * time. + */ +class PQXX_LIBEXPORT stream_from : transaction_focus +{ +public: + using raw_line = + std::pair>, std::size_t>; + + /// Factory: Execute query, and stream the results. + /** The query can be a SELECT query or a VALUES query; or it can be an + * UPDATE, INSERT, or DELETE with a RETURNING clause. + * + * The query is executed as part of a COPY statement, so there are additional + * restrictions on what kind of query you can use here. See the PostgreSQL + * documentation for the COPY command: + * + * https://www.postgresql.org/docs/current/sql-copy.html + */ + static stream_from query(transaction_base &tx, std::string_view q) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return {tx, from_query, q}; +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /** + * @name Streaming data from tables + * + * You can use `stream_from` to read a table's contents. This is a quick + * and easy way to read a table, but it comes with limitations. It cannot + * stream from a view, only from a table. It does not support conditions. + * And there are no guarantees about ordering. If you need any of those + * things, consider streaming from a query instead. + */ + //@{ + + /// Factory: Stream data from a pre-quoted table and columns. + /** Use this factory if you need to create multiple streams using the same + * table path and/or columns list, and you want to save a bit of work on + * composing the internal SQL statement for starting the stream. It lets you + * compose the string representations for the table path and the columns + * list, so you can compute these once and then re-use them later. + * + * @param tx The transaction within which the stream will operate. + * @param path Name or path for the table upon which the stream will + * operate. If any part of the table path may contain special + * characters or be case-sensitive, quote the path using + * pqxx::connection::quote_table(). 
+ * @param columns Columns which the stream will read. They should be + * comma-separated and, if needed, quoted. You can produce the string + * using pqxx::connection::quote_columns(). If you omit this argument, + * the stream will read all columns in the table, in schema order. + */ + static stream_from raw_table( + transaction_base &tx, std::string_view path, + std::string_view columns = ""sv); + + /// Factory: Stream data from a given table. + /** This is the convenient way to stream from a table. + */ + static stream_from table( + transaction_base &tx, table_path path, + std::initializer_list columns = {}); + //@} + + /// Execute query, and stream over the results. + /** @deprecated Use factory function @ref query instead. + */ + [[deprecated("Use query() factory instead.")]] stream_from( + transaction_base &, from_query_t, std::string_view query); + + /// Stream all rows in table, all columns. + /** @deprecated Use factories @ref table or @ref raw_table instead. + */ + [[deprecated("Use table() or raw_table() factory instead.")]] stream_from( + transaction_base &, from_table_t, std::string_view table); + + /// Stream given columns from all rows in table. + /** @deprecated Use factories @ref table or @ref raw_table instead. + */ + template + [[deprecated("Use table() or raw_table() factory instead.")]] stream_from( + transaction_base &, from_table_t, std::string_view table, + Iter columns_begin, Iter columns_end); + + /// Stream given columns from all rows in table. + /** @deprecated Use factory function @ref query instead. + */ + template + [[deprecated("Use table() or raw_table() factory instead.")]] stream_from( + transaction_base &tx, from_table_t, std::string_view table, + Columns const &columns); + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + /// @deprecated Use factories @ref table or @ref raw_table instead. + [[deprecated("Use the from_table_t overload instead.")]] stream_from( + transaction_base &tx, std::string_view table) : + stream_from{tx, from_table, table} + {} +#include "pqxx/internal/ignore-deprecated-post.hxx" + + /// @deprecated Use factories @ref table or @ref raw_table instead. + template + [[deprecated("Use the from_table_t overload instead.")]] stream_from( + transaction_base &tx, std::string_view table, Columns const &columns) : + stream_from{tx, from_table, table, columns} + {} + + /// @deprecated Use factories @ref table or @ref raw_table instead. + template + [[deprecated("Use the from_table_t overload instead.")]] stream_from( + transaction_base &, std::string_view table, Iter columns_begin, + Iter columns_end); + + ~stream_from() noexcept; + + /// May this stream still produce more data? + [[nodiscard]] constexpr operator bool() const noexcept + { + return not m_finished; + } + /// Has this stream produced all the data it is going to produce? + [[nodiscard]] constexpr bool operator!() const noexcept + { + return m_finished; + } + + /// Finish this stream. Call this before continuing to use the connection. + /** Consumes all remaining lines, and closes the stream. + * + * This may take a while if you're abandoning the stream before it's done, so + * skip it in error scenarios where you're not planning to use the connection + * again afterwards. + */ + void complete(); + + /// Read one row into a tuple. + /** Converts the row's fields into the fields making up the tuple. + * + * For a column which can contain nulls, be sure to give the corresponding + * tuple field a type which can be null. 
For example, to read a field as + * `int` when it may contain nulls, read it as `std::optional`. + * Using `std::shared_ptr` or `std::unique_ptr` will also work. + */ + template stream_from &operator>>(Tuple &); + + /// Doing this with a `std::variant` is going to be horrifically borked. + template + stream_from &operator>>(std::variant &) = delete; + + /// Iterate over this stream. Supports range-based "for" loops. + /** Produces an input iterator over the stream. + * + * Do not call this yourself. Use it like "for (auto data : stream.iter())". + */ + template [[nodiscard]] auto iter() & + { + return pqxx::internal::stream_input_iteration{*this}; + } + + /// Read a row. Return fields as views, valid until you read the next row. + /** Returns `nullptr` when there are no more rows to read. Do not attempt + * to read any further rows after that. + * + * Do not access the vector, or the storage referenced by the views, after + * closing or completing the stream, or after attempting to read a next row. + * + * A @ref pqxx::zview is like a `std::string_view`, but with the added + * guarantee that if its data pointer is non-null, the string is followed by + * a terminating zero (which falls just outside the view itself). + * + * If any of the views' data pointer is null, that means that the + * corresponding SQL field is null. + * + * @warning The return type may change in the future, to support C++20 + * coroutine-based usage. + */ + std::vector const *read_row() &; + + /// Read a raw line of text from the COPY command. + /** @warning Do not use this unless you really know what you're doing. */ + raw_line get_raw_line(); + +private: + // TODO: Clean up this signature once we cull the deprecated constructors. + /// @deprecated + stream_from( + transaction_base &tx, std::string_view table, std::string_view columns, + from_table_t); + + // TODO: Clean up this signature once we cull the deprecated constructors. + /// @deprecated + stream_from( + transaction_base &, std::string_view unquoted_table, + std::string_view columns, from_table_t, int); + + template + void extract_fields(Tuple &t, std::index_sequence) const + { + (extract_value(t), ...); + } + + pqxx::internal::glyph_scanner_func *m_glyph_scanner; + + /// Current row's fields' text, combined into one reusable string. + std::string m_row; + + /// The current row's fields. + std::vector m_fields; + + bool m_finished = false; + + void close(); + + template + void extract_value(Tuple &) const; + + /// Read a line of COPY data, write `m_row` and `m_fields`. 
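As a usage sketch for the `stream_from` API documented above: create the stream with the `query()` factory, read rows into a tuple with `operator>>`, and always `complete()` the stream before using the transaction again. The `employee` table is an assumption for illustration.

```cxx
#include <iostream>
#include <string>
#include <tuple>
#include <pqxx/pqxx>

int main()
{
  pqxx::connection conn;
  pqxx::work tx{conn};

  // Stream query results row by row instead of materialising a full result set.
  auto stream{pqxx::stream_from::query(tx, "SELECT id, name FROM employee")};
  std::tuple<int, std::string> row;
  while (stream >> row)
    std::cout << std::get<0>(row) << ": " << std::get<1>(row) << '\n';
  stream.complete();   // close the stream before doing anything else on tx

  tx.commit();
}
```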
+ void parse_line(); +}; + + +template +inline stream_from::stream_from( + transaction_base &tx, from_table_t, std::string_view table_name, + Columns const &columns) : + stream_from{ + tx, from_table, table_name, std::begin(columns), std::end(columns)} +{} + + +template +inline stream_from::stream_from( + transaction_base &tx, from_table_t, std::string_view table, + Iter columns_begin, Iter columns_end) : + stream_from{ + tx, table, separated_list(",", columns_begin, columns_end), + from_table, 1} +{} + + +template inline stream_from &stream_from::operator>>(Tuple &t) +{ + if (m_finished) + return *this; + static constexpr auto tup_size{std::tuple_size_v}; + m_fields.reserve(tup_size); + parse_line(); + if (m_finished) + return *this; + + if (std::size(m_fields) != tup_size) + throw usage_error{internal::concat( + "Tried to extract ", tup_size, " field(s) from a stream of ", + std::size(m_fields), ".")}; + + extract_fields(t, std::make_index_sequence{}); + return *this; +} + + +template +inline void stream_from::extract_value(Tuple &t) const +{ + using field_type = strip_t(t))>; + using nullity = nullness; + assert(index < std::size(m_fields)); + if constexpr (nullity::always_null) + { + if (std::data(m_fields[index]) != nullptr) + throw conversion_error{"Streaming non-null value into null field."}; + } + else if (std::data(m_fields[index]) == nullptr) + { + if constexpr (nullity::has_null) + std::get(t) = nullity::null(); + else + internal::throw_null_conversion(type_name); + } + else + { + // Don't ever try to convert a non-null value to nullptr_t! + std::get(t) = from_string(m_fields[index]); + } +} +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/stream_to b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/stream_to new file mode 100644 index 000000000..8760cf1f4 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/stream_to @@ -0,0 +1,8 @@ +/** pqxx::stream_to class. + * + * pqxx::stream_to enables optimized batch updates to a database table. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/stream_to.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/stream_to.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/stream_to.hxx new file mode 100644 index 000000000..2a49d8f85 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/stream_to.hxx @@ -0,0 +1,455 @@ +/* Definition of the pqxx::stream_to class. + * + * pqxx::stream_to enables optimized batch updates to a database table. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/stream_to.hxx instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_STREAM_TO +#define PQXX_H_STREAM_TO + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/separated_list.hxx" +#include "pqxx/transaction_base.hxx" + + +namespace pqxx +{ +/// Efficiently write data directly to a database table. +/** If you wish to insert rows of data into a table, you can compose INSERT + * statements and execute them. But it's slow and tedious, and you need to + * worry about quoting and escaping the data. 
+ * + * If you're just inserting a single row, it probably won't matter much. You + * can use prepared or parameterised statements to take care of the escaping + * for you. But if you're inserting large numbers of rows you will want + * something better. + * + * Inserting rows one by one using INSERT statements involves a lot of + * pointless overhead, especially when you are working with a remote database + * server over the network. You may end up sending each row over the network + * as a separate query, and waiting for a reply. Do it "in bulk" using + * `stream_to`, and you may find that it goes many times faster. Sometimes + * you gain orders of magnitude in speed. + * + * Here's how it works: you create a `stream_to` stream to start writing to + * your table. You will probably want to specify the columns. Then, you + * feed your data into the stream one row at a time. And finally, you call the + * stream's @ref complete function to tell it to finalise the operation, wait + * for completion, and check for errors. + * + * (You _must_ complete the stream before committing or aborting the + * transaction. The connection is in a special state while the stream is + * active, where it can't process commands, and can't commit or abort a + * transaction.) + * + * So how do you feed a row of data into the stream? There's several ways, but + * the preferred one is to call its @ref write_values. Pass the field values + * as arguments. Doesn't matter what type they are, as long as libpqxx knows + * how to convert them to PostgreSQL's text format: `int`, `std::string` or + * `std:string_view`, `float` and `double`, `bool`... lots of basic types + * are supported. If some of the values are null, feel free to use + * `std::optional`, `std::shared_ptr`, or `std::unique_ptr`. + * + * The arguments' types don't even have to match the fields' SQL types. If you + * want to insert an `int` into a `DECIMAL` column, that's your choice -- it + * will produce a `DECIMAL` value which happens to be integral. Insert a + * `float` into a `VARCHAR` column? That's fine, you'll get a string whose + * contents happen to read like a number. And so on. You can even insert + * different types of value in the same column on different rows. If you have + * a code path where a particular field is always null, just insert `nullptr`. + * + * There is another way to insert rows: the `<<` ("shift-left") operator. + * It's not as fast and it doesn't support variable arguments: each row must be + * either a `std::tuple` or something iterable, such as a `std::vector`, or + * anything else with a `begin()` and `end()`. + * + * @warning While a stream is active, you cannot execute queries, open a + * pipeline, etc. on the same transaction. A transaction can have at most one + * object of a type derived from @ref pqxx::transaction_focus active on it at a + * time. + */ +class PQXX_LIBEXPORT stream_to : transaction_focus +{ +public: + /// Stream data to a pre-quoted table and columns. + /** This factory can be useful when it's not convenient to provide the + * columns list in the form of a `std::initializer_list`, or when the list + * of columns is simply not known at compile time. + * + * Also use this if you need to create multiple streams using the same table + * path and/or columns list, and you want to save a bit of work on composing + * the internal SQL statement for starting the stream. 
It lets you compose + * the string representations for the table path and the columns list, so you + * can compute these once and then re-use them later. + * + * @param tx The transaction within which the stream will operate. + * @param path Name or path for the table upon which the stream will + * operate. If any part of the table path may contain special + * characters or be case-sensitive, quote the path using + * pqxx::connection::quote_table(). + * @param columns Columns to which the stream will write. They should be + * comma-separated and, if needed, quoted. You can produce the string + * using pqxx::connection::quote_columns(). If you omit this argument, + * the stream will write all columns in the table, in schema order. + */ + static stream_to raw_table( + transaction_base &tx, std::string_view path, std::string_view columns = "") + { + return {tx, path, columns}; + } + + /// Create a `stream_to` writing to a named table and columns. + /** Use this to stream data to a table, where the list of columns is known at + * compile time. + * + * @param tx The transaction within which the stream will operate. + * @param path A @ref table_path designating the target table. + * @param columns Optionally, the columns to which the stream should write. + * If you do not pass this, the stream will write to all columns in the + * table, in schema order. + */ + static stream_to table( + transaction_base &tx, table_path path, + std::initializer_list columns = {}) + { + auto const &conn{tx.conn()}; + return raw_table(tx, conn.quote_table(path), conn.quote_columns(columns)); + } + +#if defined(PQXX_HAVE_CONCEPTS) + /// Create a `stream_to` writing to a named table and columns. + /** Use this version to stream data to a table, when the list of columns is + * not known at compile time. + * + * @param tx The transaction within which the stream will operate. + * @param path A @ref table_path designating the target table. + * @param columns The columns to which the stream should write. + */ + template + static stream_to + table(transaction_base &tx, table_path path, COLUMNS const &columns) + { + auto const &conn{tx.conn()}; + return stream_to::raw_table( + tx, conn.quote_table(path), tx.conn().quote_columns(columns)); + } + + /// Create a `stream_to` writing to a named table and columns. + /** Use this version to stream data to a table, when the list of columns is + * not known at compile time. + * + * @param tx The transaction within which the stream will operate. + * @param path A @ref table_path designating the target table. + * @param columns The columns to which the stream should write. + */ + template + static stream_to + table(transaction_base &tx, std::string_view path, COLUMNS const &columns) + { + return stream_to::raw_table(tx, path, tx.conn().quote_columns(columns)); + } +#endif // PQXX_HAVE_CONCEPTS + + /// Create a stream, without specifying columns. + /** @deprecated Use @ref table or @ref raw_table as a factory. + * + * Fields will be inserted in whatever order the columns have in the + * database. + * + * You'll probably want to specify the columns, so that the mapping between + * your data fields and the table is explicit in your code, and not hidden + * in an "implicit contract" between your code and your schema. + */ + [[deprecated("Use table() or raw_table() factory.")]] stream_to( + transaction_base &tx, std::string_view table_name) : + stream_to{tx, table_name, ""sv} + {} + + /// Create a stream, specifying column names as a container of strings. 
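A hedged sketch of the pre-quoted `raw_table()` variant, for when you want to compose the table path and column list once and reuse them. The table names are made up, and it assumes `connection::quote_table()` accepts a plain name as the parameter description above suggests.

```cxx
#include <string>
#include <pqxx/pqxx>

void load_two_years(pqxx::connection &conn)
{
  pqxx::work tx{conn};
  // Compose the column list once; raw_table() takes it verbatim.
  std::string const cols{"name, points"};

  auto first{pqxx::stream_to::raw_table(
    tx, conn.quote_table("scores_2023"), cols)};
  first.write_values("alice", 10);
  first.complete();  // finish this stream before starting the next one

  auto second{pqxx::stream_to::raw_table(
    tx, conn.quote_table("scores_2024"), cols)};
  second.write_values("alice", 12);
  second.complete();

  tx.commit();
}
```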
+ /** @deprecated Use @ref table or @ref raw_table as a factory. + */ + template + [[deprecated("Use table() or raw_table() factory.")]] stream_to( + transaction_base &, std::string_view table_name, Columns const &columns); + + /// Create a stream, specifying column names as a sequence of strings. + /** @deprecated Use @ref table or @ref raw_table as a factory. + */ + template + [[deprecated("Use table() or raw_table() factory.")]] stream_to( + transaction_base &, std::string_view table_name, Iter columns_begin, + Iter columns_end); + + ~stream_to() noexcept; + + /// Does this stream still need to @ref complete()? + [[nodiscard]] constexpr operator bool() const noexcept + { + return not m_finished; + } + /// Has this stream been through its concluding @c complete()? + [[nodiscard]] constexpr bool operator!() const noexcept + { + return m_finished; + } + + /// Complete the operation, and check for errors. + /** Always call this to close the stream in an orderly fashion, even after + * an error. (In the case of an error, abort the transaction afterwards.) + * + * The only circumstance where it's safe to skip this is after an error, if + * you're discarding the entire connection. + */ + void complete(); + + /// Insert a row of data. + /** Returns a reference to the stream, so you can chain the calls. + * + * The @c row can be a tuple, or any type that can be iterated. Each + * item becomes a field in the row, in the same order as the columns you + * specified when creating the stream. + * + * If you don't already happen to have your fields in the form of a tuple or + * container, prefer @c write_values. It's faster and more convenient. + */ + template stream_to &operator<<(Row const &row) + { + write_row(row); + return *this; + } + + /// Stream a `stream_from` straight into a `stream_to`. + /** This can be useful when copying between different databases. If the + * source and the destination are on the same database, you'll get better + * performance doing it all in a regular query. + */ + stream_to &operator<<(stream_from &); + + /// Insert a row of data, given in the form of a @c std::tuple or container. + /** The @c row can be a tuple, or any type that can be iterated. Each + * item becomes a field in the row, in the same order as the columns you + * specified when creating the stream. + * + * The preferred way to insert a row is @c write_values. + */ + template void write_row(Row const &row) + { + fill_buffer(row); + write_buffer(); + } + + /// Insert values as a row. + /** This is the recommended way of inserting data. Pass your field values, + * of any convertible type. + */ + template void write_values(Ts const &...fields) + { + fill_buffer(fields...); + write_buffer(); + } + +private: + /// Stream a pre-quoted table name and columns list. + stream_to( + transaction_base &tx, std::string_view path, std::string_view columns); + + bool m_finished = false; + + /// Reusable buffer for a row. Saves doing an allocation for each row. + std::string m_buffer; + + /// Reusable buffer for converting/escaping a field. + std::string m_field_buf; + + /// Glyph scanner, for parsing the client encoding. + internal::glyph_scanner_func *m_scanner; + + /// Write a row of raw text-format data into the destination table. + void write_raw_line(std::string_view); + + /// Write a row of data from @c m_buffer into the destination table. + /** Resets the buffer for the next row. + */ + void write_buffer(); + + /// COPY encoding for a null field, plus subsequent separator. 
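A short sketch of the row-insertion variants described above (`operator<<` and `write_row()`), again assuming a made-up `scores` table. As the members below show, each row ends up as one tab-separated COPY text line, with `\N` standing in for a null field.

```cxx
#include <string>
#include <tuple>
#include <vector>
#include <pqxx/pqxx>

void insert_rows(pqxx::transaction_base &tx)
{
  auto out{pqxx::stream_to::table(tx, {"scores"}, {"name", "points"})};

  // A row can be a tuple...
  out << std::make_tuple("carol", 7);

  // ...or anything iterable; write_row() is the named equivalent of <<.
  std::vector<std::string> row{"dave", "12"};
  out.write_row(row);

  // write_values() remains the preferred, fastest form.
  out.write_values("erin", 3);

  out.complete();
}
```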
+ static constexpr std::string_view null_field{"\\N\t"}; + + /// Estimate buffer space needed for a field which is always null. + template + static std::enable_if_t::always_null, std::size_t> + estimate_buffer(T const &) + { + return std::size(null_field); + } + + /// Estimate buffer space needed for field f. + /** The estimate is not very precise. We don't actually know how much space + * we'll need once the escaping comes in. + */ + template + static std::enable_if_t::always_null, std::size_t> + estimate_buffer(T const &field) + { + return is_null(field) ? std::size(null_field) : size_buffer(field); + } + + /// Append escaped version of @c data to @c m_buffer, plus a tab. + void escape_field_to_buffer(std::string_view data); + + /// Append string representation for @c f to @c m_buffer. + /** This is for the general case, where the field may contain a value. + * + * Also appends a tab. The tab is meant to be a separator, not a terminator, + * so if you write any fields at all, you'll end up with one tab too many + * at the end of the buffer. + */ + template + std::enable_if_t::always_null> + append_to_buffer(Field const &f) + { + // We append each field, terminated by a tab. That will leave us with + // one tab too many, assuming we write any fields at all; we remove that + // at the end. + if (is_null(f)) + { + // Easy. Append null and tab in one go. + m_buffer.append(null_field); + } + else + { + // Convert f into m_buffer. + + using traits = string_traits; + auto const budget{estimate_buffer(f)}; + auto const offset{std::size(m_buffer)}; + + if constexpr (std::is_arithmetic_v) + { + // Specially optimised for "safe" types, which never need any + // escaping. Convert straight into m_buffer. + + // The budget we get from size_buffer() includes room for the trailing + // zero, which we must remove. But we're also inserting tabs between + // fields, so we re-purpose the extra byte for that. + auto const total{offset + budget}; + m_buffer.resize(total); + auto const data{m_buffer.data()}; + char *const end{traits::into_buf(data + offset, data + total, f)}; + *(end - 1) = '\t'; + // Shrink to fit. Keep the tab though. + m_buffer.resize(static_cast(end - data)); + } + else if constexpr ( + std::is_same_v or + std::is_same_v or + std::is_same_v) + { + // This string may need escaping. + m_field_buf.resize(budget); + escape_field_to_buffer(f); + } + else + { + // This field needs to be converted to a string, and after that, + // escaped as well. + m_field_buf.resize(budget); + auto const data{m_field_buf.data()}; + escape_field_to_buffer( + traits::to_buf(data, data + std::size(m_field_buf), f)); + } + } + } + + /// Append string representation for a null field to @c m_buffer. + /** This special case is for types which are always null. + * + * Also appends a tab. The tab is meant to be a separator, not a terminator, + * so if you write any fields at all, you'll end up with one tab too many + * at the end of the buffer. + */ + template + std::enable_if_t::always_null> + append_to_buffer(Field const &) + { + m_buffer.append(null_field); + } + + /// Write raw COPY line into @c m_buffer, based on a container of fields. + template + std::enable_if_t> + fill_buffer(Container const &c) + { + // To avoid unnecessary allocations and deallocations, we run through c + // twice: once to determine how much buffer space we may need, and once to + // actually write it into the buffer. 
+ std::size_t budget{0}; + for (auto const &f : c) budget += estimate_buffer(f); + m_buffer.reserve(budget); + for (auto const &f : c) append_to_buffer(f); + } + + /// Estimate how many buffer bytes we need to write tuple. + template + static std::size_t + budget_tuple(Tuple const &t, std::index_sequence) + { + return (estimate_buffer(std::get(t)) + ...); + } + + /// Write tuple of fields to @c m_buffer. + template + void append_tuple(Tuple const &t, std::index_sequence) + { + (append_to_buffer(std::get(t)), ...); + } + + /// Write raw COPY line into @c m_buffer, based on a tuple of fields. + template void fill_buffer(std::tuple const &t) + { + using indexes = std::make_index_sequence; + + m_buffer.reserve(budget_tuple(t, indexes{})); + append_tuple(t, indexes{}); + } + + /// Write raw COPY line into @c m_buffer, based on varargs fields. + template void fill_buffer(const Ts &...fields) + { + (..., append_to_buffer(fields)); + } + + constexpr static std::string_view s_classname{"stream_to"}; +}; + + +template +inline stream_to::stream_to( + transaction_base &tx, std::string_view table_name, Columns const &columns) : + stream_to{tx, table_name, std::begin(columns), std::end(columns)} +{} + + +template +inline stream_to::stream_to( + transaction_base &tx, std::string_view table_name, Iter columns_begin, + Iter columns_end) : + stream_to{ + tx, + tx.quote_name( + table_name, + separated_list(",", columns_begin, columns_end, [&tx](auto col) { + return tx.quote_name(*col); + }))} +{} +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/subtransaction b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/subtransaction new file mode 100644 index 000000000..e0d154903 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/subtransaction @@ -0,0 +1,8 @@ +/** pqxx::subtransaction class. + * + * pqxx::subtransaction is a nested transaction, i.e. one inside a transaction. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/subtransaction.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/subtransaction.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/subtransaction.hxx new file mode 100644 index 000000000..e66b7a7a8 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/subtransaction.hxx @@ -0,0 +1,96 @@ +/* Definition of the pqxx::subtransaction class. + * + * pqxx::subtransaction is a nested transaction, i.e. one within a transaction. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/subtransaction instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_SUBTRANSACTION +#define PQXX_H_SUBTRANSACTION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/dbtransaction.hxx" + +namespace pqxx +{ +/** + * @ingroup transactions + */ +/// "Transaction" nested within another transaction +/** A subtransaction can be executed inside a backend transaction, or inside + * another subtransaction. This can be useful when, for example, statements in + * a transaction may harmlessly fail and you don't want them to abort the + * entire transaction. 
Here's an example of how a temporary table may be + * dropped before re-creating it, without failing if the table did not exist: + * + * ```cxx + * void do_job(connection &C) + * { + * string const temptable = "fleetingtable"; + * + * work W(C, "do_job"); + * do_firstpart(W); + * + * // Attempt to delete our temporary table if it already existed. + * try + * { + * subtransaction S(W, "droptemp"); + * S.exec0("DROP TABLE " + temptable); + * S.commit(); + * } + * catch (undefined_table const &) + * { + * // Table did not exist. Which is what we were hoping to achieve anyway. + * // Carry on without regrets. + * } + * + * // S may have gone into a failed state and been destroyed, but the + * // upper-level transaction W is still fine. We can continue to use it. + * W.exec0("CREATE TEMP TABLE " + temptable + "(bar integer, splat + * varchar)"); + * + * do_lastpart(W); + * } + * ``` + * + * (This is just an example. If you really wanted to do drop a table without + * an error if it doesn't exist, you'd use DROP TABLE IF EXISTS.) + * + * There are no isolation levels inside a transaction. They are not needed + * because all actions within the same backend transaction are always performed + * sequentially anyway. + * + * @warning While the subtransaction is "live," you cannot execute queries or + * open streams etc. on its parent transaction. A transaction can have at most + * one object of a type derived from @ref pqxx::transaction_focus active on it + * at a time. + */ +class PQXX_LIBEXPORT subtransaction : public transaction_focus, + public dbtransaction +{ +public: + /// Nest a subtransaction nested in another transaction. + explicit subtransaction(dbtransaction &t, std::string_view tname = ""sv); + + /// Nest a subtransaction in another subtransaction. + explicit subtransaction(subtransaction &t, std::string_view name = ""sv); + + virtual ~subtransaction() noexcept override; + +private: + std::string quoted_name() const + { + return quote_name(transaction_focus::name()); + } + virtual void do_commit() override; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/time b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/time new file mode 100644 index 000000000..85df05744 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/time @@ -0,0 +1,6 @@ +/** Date/time string conversions. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/time.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/time.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/time.hxx new file mode 100644 index 000000000..effed05e0 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/time.hxx @@ -0,0 +1,88 @@ +/** Support for date/time values. + * + * At the moment this supports dates, but not times. + */ +#ifndef PQXX_H_TIME +#define PQXX_H_TIME + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include + +#include "pqxx/internal/concat.hxx" +#include "pqxx/strconv.hxx" + + +#if defined(PQXX_HAVE_YEAR_MONTH_DAY) + +namespace pqxx +{ +using namespace std::literals; + +template<> +struct nullness + : no_null +{}; + + +/// String representation for a Gregorian date in ISO-8601 format. +/** @warning Experimental. There may still be design problems, particularly + * when it comes to BC years. 
+ * + * PostgreSQL supports a choice of date formats, but libpqxx does not. The + * other formats in turn support a choice of "month before day" versus "day + * before month," meaning that it's not necessarily known which format a given + * date is supposed to be. So I repeat: ISO-8601-style format only! + * + * Invalid dates will not convert. This includes February 29 on non-leap + * years, which is why it matters that `year_month_day` represents a + * _Gregorian_ date. + * + * The range of years is limited. At the time of writing, PostgreSQL 14 + * supports years from 4713 BC to 294276 AD inclusive, and C++20 supports + * a range of 32767 BC to 32767 AD inclusive. So in practice, years must fall + * between 4713 BC and 32767 AD, inclusive. + * + * @warning Support for BC (or BCE) years is still experimental. I still need + * confirmation on this issue: it looks as if C++ years are astronomical years, + * which means they have a Year Zero. Regular BC/AD years do not have a year + * zero, so the year 1 AD follows directly after 1 BC. + * + * So, what to our calendars (and to PostgreSQL) is the year "0001 BC" seems to + * count as year "0" in a `std::chrono::year_month_day`. The year 0001 AD is + * still equal to 1 as you'd expect, and all AD years work normally, but all + * years before then are shifted by one. For instance, the year 543 BC would + * be -542 in C++. + */ +template<> struct PQXX_LIBEXPORT string_traits +{ + [[nodiscard]] static zview + to_buf(char *begin, char *end, std::chrono::year_month_day const &value) + { + return generic_to_buf(begin, end, value); + } + + static char * + into_buf(char *begin, char *end, std::chrono::year_month_day const &value); + + [[nodiscard]] static std::chrono::year_month_day + from_string(std::string_view text); + + [[nodiscard]] static std::size_t + size_buffer(std::chrono::year_month_day const &) noexcept + { + static_assert(int{(std::chrono::year::min)()} >= -99999); + static_assert(int{(std::chrono::year::max)()} <= 99999); + return 5 + 1 + 2 + 1 + 2 + std::size(s_bc) + 1; + } + +private: + /// The "BC" suffix for years before 1 AD. + static constexpr std::string_view s_bc{" BC"sv}; +}; +} // namespace pqxx +#endif // PQXX_HAVE_YEAR_MONTH_DAY +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transaction b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transaction new file mode 100644 index 000000000..a7ae39d43 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transaction @@ -0,0 +1,8 @@ +/** pqxx::transaction class. + * + * pqxx::transaction represents a standard database transaction. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/transaction.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transaction.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transaction.hxx new file mode 100644 index 000000000..e90917e38 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transaction.hxx @@ -0,0 +1,108 @@ +/* Definition of the pqxx::transaction class. + * pqxx::transaction represents a standard database transaction. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/transaction instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. 
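To make the date support in pqxx/time.hxx above concrete, here is a hedged sketch of converting `std::chrono::year_month_day` values. It assumes a C++20 toolchain with `PQXX_HAVE_YEAR_MONTH_DAY` defined, and the `people` table is made up; the year constants illustrate the astronomical-year mapping described in the warning.

```cxx
#include <chrono>
#include <pqxx/pqxx>

// Astronomical years include a year zero, so (per the note above)
// 1 BC is year 0, 2 BC is year -1, and 543 BC is year -542.
constexpr std::chrono::year y_543_bc{-542};
constexpr std::chrono::year y_1_ad{1};

void insert_birthday(pqxx::work &tx, std::chrono::year_month_day born)
{
  // year_month_day converts like any other supported parameter type.
  tx.exec_params0(
    "INSERT INTO people(name, born) VALUES ($1, $2)", "ada", born);

  // Plain string conversion also works, in ISO-8601 form.
  auto const text{pqxx::to_string(born)};
  auto const back{pqxx::from_string<std::chrono::year_month_day>(text)};
  (void) back;
}
```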
If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_TRANSACTION +#define PQXX_H_TRANSACTION + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/dbtransaction.hxx" + +namespace pqxx::internal +{ +/// Helper base class for the @ref transaction class template. +class PQXX_LIBEXPORT basic_transaction : public dbtransaction +{ +protected: + basic_transaction( + connection &c, zview begin_command, std::string_view tname); + basic_transaction(connection &c, zview begin_command, std::string &&tname); + basic_transaction(connection &c, zview begin_command); + + virtual ~basic_transaction() noexcept override = 0; + +private: + virtual void do_commit() override; +}; +} // namespace pqxx::internal + + +namespace pqxx +{ +/** + * @ingroup transactions + */ +//@{ + +/// Standard back-end transaction, templatised on isolation level. +/** This is the type you'll normally want to use to represent a transaction on + * the database. + * + * Usage example: double all wages. + * + * ```cxx + * extern connection C; + * work T(C); + * try + * { + * T.exec0("UPDATE employees SET wage=wage*2"); + * T.commit(); // NOTE: do this inside try block + * } + * catch (exception const &e) + * { + * cerr << e.what() << endl; + * T.abort(); // Usually not needed; same happens when T's life ends. + * } + * ``` + */ +template< + isolation_level ISOLATION = isolation_level::read_committed, + write_policy READWRITE = write_policy::read_write> +class transaction final : public internal::basic_transaction +{ +public: + /// Begin a transaction. + /** + * @param c Connection for this transaction to operate on. + * @param tname Optional name for transaction. Must begin with a letter and + * may contain letters and digits only. + */ + transaction(connection &c, std::string_view tname) : + internal::basic_transaction{ + c, internal::begin_cmd, tname} + {} + + /// Begin a transaction. + /** + * @param c Connection for this transaction to operate on. + * may contain letters and digits only. + */ + explicit transaction(connection &c) : + internal::basic_transaction{ + c, internal::begin_cmd} + {} + + virtual ~transaction() noexcept override { close(); } +}; + + +/// The default transaction type. +using work = transaction<>; + +/// Read-only transaction. +using read_transaction = + transaction; + +//@} +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transaction_base b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transaction_base new file mode 100644 index 000000000..c39219aac --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transaction_base @@ -0,0 +1,9 @@ +/** Base for the transaction classes. + * + * pqxx::transaction_base defines the interface for any abstract class that + * represents a database transaction. + */ +// Actual definitions in .hxx file so editors and such recognize file type. 
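A short sketch of picking a transaction type from the template defined above: `work` and `read_transaction` are the aliases just introduced, and the serializable variant shows the template parameters spelled out. The `jobs` table is an assumption.

```cxx
#include <pqxx/pqxx>

void demo_transaction_types(pqxx::connection &conn)
{
  // Read-only, read-committed transaction.
  pqxx::read_transaction ro{conn};
  auto const tables{ro.query_value<long>("SELECT count(*) FROM pg_class")};
  ro.commit();

  // Serializable, read-write transaction with an explicit name.
  pqxx::transaction<pqxx::isolation_level::serializable> strict{conn, "audit"};
  strict.exec0("UPDATE jobs SET done = true WHERE id = 1");  // table assumed
  strict.commit();

  (void) tables;
}
```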
+#include "pqxx/internal/header-pre.hxx" +#include "pqxx/transaction_base.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transaction_base.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transaction_base.hxx new file mode 100644 index 000000000..4363cc56a --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transaction_base.hxx @@ -0,0 +1,810 @@ +/* Common code and definitions for the transaction classes. + * + * pqxx::transaction_base defines the interface for any abstract class that + * represents a database transaction. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/transaction_base instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_TRANSACTION_BASE +#define PQXX_H_TRANSACTION_BASE + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include + +/* End-user programs need not include this file, unless they define their own + * transaction classes. This is not something the typical program should want + * to do. + * + * However, reading this file is worthwhile because it defines the public + * interface for the available transaction classes such as transaction and + * nontransaction. + */ + +#include "pqxx/connection.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/internal/encoding_group.hxx" +#include "pqxx/isolation.hxx" +#include "pqxx/result.hxx" +#include "pqxx/row.hxx" +#include "pqxx/stream_from.hxx" +#include "pqxx/util.hxx" + +namespace pqxx::internal::gate +{ +class transaction_subtransaction; +class transaction_sql_cursor; +class transaction_stream_to; +class transaction_transaction_focus; +} // namespace pqxx::internal::gate + + +namespace pqxx +{ +using namespace std::literals; + + +class transaction_focus; + + +/** + * @defgroup transactions Transaction classes + * + * All database access goes through instances of these classes. + * However, not all implementations of this interface need to provide full + * transactional integrity. + * + * Several implementations of this interface are shipped with libpqxx, + * including the plain transaction class, the entirely unprotected + * nontransaction, and the more cautious robusttransaction. + */ + +/// Interface definition (and common code) for "transaction" classes. +/** + * @ingroup transactions + * + * Abstract base class for all transaction types. + */ +class PQXX_LIBEXPORT PQXX_NOVTABLE transaction_base +{ +public: + transaction_base() = delete; + transaction_base(transaction_base const &) = delete; + transaction_base(transaction_base &&) = delete; + transaction_base &operator=(transaction_base const &) = delete; + transaction_base &operator=(transaction_base &&) = delete; + + virtual ~transaction_base() = 0; + + /// Commit the transaction. + /** Make the effects of this transaction definite. If you destroy a + * transaction without invoking its @ref commit() first, that will implicitly + * abort it. (For the @ref nontransaction class though, "commit" and "abort" + * really don't do anything, hence its name.) + * + * There is, however, a minute risk that you might lose your connection to + * the database at just the wrong moment here. 
In that case, libpqxx may be + * unable to determine whether the database was able to complete the + * transaction, or had to roll it back. In that scenario, @ref commit() will + * throw an in_doubt_error. There is a different transaction class called + * @ref robusttransaction which takes some special precautions to reduce this + * risk. + */ + void commit(); + + /// Abort the transaction. + /** No special effort is required to call this function; it will be called + * implicitly when the transaction is destructed. + */ + void abort(); + + /** + * @ingroup escaping-functions + * + * Use these when writing SQL queries that incorporate C++ values as SQL + * constants. + * + * The functions you see here are just convenience shortcuts to the same + * functions on the connection object. + */ + //@{ + /// Escape string for use as SQL string literal in this transaction. + template [[nodiscard]] auto esc(ARGS &&...args) const + { + return conn().esc(std::forward(args)...); + } + + /// Escape binary data for use as SQL string literal in this transaction. + /** Raw, binary data is treated differently from regular strings. Binary + * strings are never interpreted as text, so they may safely include byte + * values or byte sequences that don't happen to represent valid characters + * in the character encoding being used. + * + * The binary string does not stop at the first zero byte, as is the case + * with textual strings. Instead, it may contain zero bytes anywhere. If + * it happens to contain bytes that look like quote characters, or other + * things that can disrupt their use in SQL queries, they will be replaced + * with special escape sequences. + */ + template [[nodiscard]] auto esc_raw(ARGS &&...args) const + { + return conn().esc_raw(std::forward(args)...); + } + + /// Unescape binary data, e.g. from a table field or notification payload. + /** Takes a binary string as escaped by PostgreSQL, and returns a restored + * copy of the original binary data. + */ + [[nodiscard, deprecated("Use unesc_bin() instead.")]] std::string + unesc_raw(zview text) const + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return conn().unesc_raw(text); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Unescape binary data, e.g. from a table field or notification payload. + /** Takes a binary string as escaped by PostgreSQL, and returns a restored + * copy of the original binary data. + */ + [[nodiscard]] std::basic_string unesc_bin(zview text) + { + return conn().unesc_bin(text); + } + + /// Unescape binary data, e.g. from a table field or notification payload. + /** Takes a binary string as escaped by PostgreSQL, and returns a restored + * copy of the original binary data. + */ + [[nodiscard, deprecated("Use unesc_bin() instead.")]] std::string + unesc_raw(char const *text) const + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return conn().unesc_raw(text); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Unescape binary data, e.g. from a table field or notification payload. + /** Takes a binary string as escaped by PostgreSQL, and returns a restored + * copy of the original binary data. + */ + [[nodiscard]] std::basic_string unesc_bin(char const text[]) + { + return conn().unesc_bin(text); + } + + /// Represent object as SQL string, including quoting & escaping. + /** Nulls are recognized and represented as SQL nulls. 
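A sketch of the escaping shortcuts above, with a made-up `users` table: `esc()` makes a value safe to splice into a string literal, while binary data should go through `unesc_bin()` and `std::basic_string<std::byte>` as described.

```cxx
#include <string>
#include <pqxx/pqxx>

pqxx::result find_user(pqxx::work &tx, std::string const &name)
{
  // esc() escapes quotes and other special characters in `name`.
  return tx.exec(
    "SELECT id FROM users WHERE name = '" + tx.esc(name) + "'");
}
```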
*/ + template [[nodiscard]] std::string quote(T const &t) const + { + return conn().quote(t); + } + + [[deprecated( + "Use std::basic_string instead of binarystring.")]] std::string + quote(binarystring const &t) const + { + return conn().quote(t.bytes_view()); + } + + /// Binary-escape and quote a binary string for use as an SQL constant. + [[deprecated("Use quote(std::basic_string_view).")]] std::string + quote_raw(unsigned char const bin[], std::size_t len) const + { + return quote(binary_cast(bin, len)); + } + + /// Binary-escape and quote a binary string for use as an SQL constant. + [[deprecated("Use quote(std::basic_string_view).")]] std::string + quote_raw(zview bin) const; + +#if defined(PQXX_HAVE_CONCEPTS) + /// Binary-escape and quote a binary string for use as an SQL constant. + /** For binary data you can also just use @ref quote(data). */ + template + [[nodiscard]] std::string quote_raw(DATA const &data) const + { + return conn().quote_raw(data); + } +#endif + + /// Escape an SQL identifier for use in a query. + [[nodiscard]] std::string quote_name(std::string_view identifier) const + { + return conn().quote_name(identifier); + } + + /// Escape string for literal LIKE match. + [[nodiscard]] std::string + esc_like(std::string_view bin, char escape_char = '\\') const + { + return conn().esc_like(bin, escape_char); + } + //@} + + /** + * @name Command execution + * + * There are many functions for executing (or "performing") a command (or + * "query"). This is the most fundamental thing you can do with the library, + * and you always do it from a transaction class. + * + * Command execution can throw many types of exception, including sql_error, + * broken_connection, and many sql_error subtypes such as + * feature_not_supported or insufficient_privilege. But any exception thrown + * by the C++ standard library may also occur here. All exceptions you will + * see libpqxx throw are derived from std::exception. + * + * One unusual feature in libpqxx is that you can give your query a name or + * description. This does not mean anything to the database, but sometimes + * it can help libpqxx produce more helpful error messages, making problems + * in your code easier to debug. + * + * Many of the execution functions used to accept a `desc` argument, a + * human-readable description of the statement for use in error messages. + * This could make failures easier to debug. Future versions will use + * C++20's `std::source_location` to identify the failing statement. + */ + //@{ + + /// Execute a command. + /** + * @param query Query or command to execute. + * @param desc Optional identifier for query, to help pinpoint SQL errors. + * @return A result set describing the query's or command's result. + */ + [[deprecated("The desc parameter is going away.")]] result + exec(std::string_view query, std::string_view desc); + + /// Execute a command. + /** + * @param query Query or command to execute. + * @return A result set describing the query's or command's result. + */ + result exec(std::string_view query) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return exec(query, std::string_view{}); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Execute a command. + /** + * @param query Query or command to execute. + * @param desc Optional identifier for query, to help pinpoint SQL errors. + * @return A result set describing the query's or command's result. 
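Continuing the escaping helpers with a hedged sketch: `quote()` produces a complete SQL literal, `quote_name()` quotes an identifier, and `esc_like()` neutralises LIKE wildcards. The table and column names are assumptions.

```cxx
#include <string>
#include <pqxx/pqxx>

pqxx::result search_titles(
  pqxx::work &tx, std::string const &table, std::string const &term)
{
  auto const query{
    "SELECT * FROM " + tx.quote_name(table) +
    " WHERE title LIKE " + tx.quote("%" + tx.esc_like(term) + "%")};
  return tx.exec(query);
}
```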
+ */ + [[deprecated( + "Pass your query as a std::string_view, not stringstream.")]] result + exec(std::stringstream const &query, std::string_view desc) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return exec(query.str(), desc); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Execute command, which should return zero rows of data. + /** Works like @ref exec, but fails if the result contains data. It still + * returns a result, however, which may contain useful metadata. + * + * @throw unexpected_rows If the query returned the wrong number of rows. + */ + [[deprecated("The desc parameter is going away.")]] result + exec0(zview query, std::string_view desc) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return exec_n(0, query, desc); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Execute command, which should return zero rows of data. + /** Works like @ref exec, but fails if the result contains data. It still + * returns a result, however, which may contain useful metadata. + * + * @throw unexpected_rows If the query returned the wrong number of rows. + */ + result exec0(zview query) { return exec_n(0, query); } + + /// Execute command returning a single row of data. + /** Works like @ref exec, but requires the result to contain exactly one row. + * The row can be addressed directly, without the need to find the first row + * in a result set. + * + * @throw unexpected_rows If the query returned the wrong number of rows. + */ + [[deprecated("The desc parameter is going away.")]] row + exec1(zview query, std::string_view desc) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return exec_n(1, query, desc).front(); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Execute command returning a single row of data. + /** Works like @ref exec, but requires the result to contain exactly one row. + * The row can be addressed directly, without the need to find the first row + * in a result set. + * + * @throw unexpected_rows If the query returned the wrong number of rows. + */ + row exec1(zview query) { return exec_n(1, query).front(); } + + /// Execute command, expect given number of rows. + /** Works like @ref exec, but checks that the result has exactly the expected + * number of rows. + * + * @throw unexpected_rows If the query returned the wrong number of rows. + */ + [[deprecated("The desc parameter is going away.")]] result + exec_n(result::size_type rows, zview query, std::string_view desc); + + /// Execute command, expect given number of rows. + /** Works like @ref exec, but checks that the result has exactly the expected + * number of rows. + * + * @throw unexpected_rows If the query returned the wrong number of rows. + */ + result exec_n(result::size_type rows, zview query) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return exec_n(rows, query, std::string_view{}); +#include "pqxx/internal/ignore-deprecated-post.hxx" + } + + /// Perform query, expecting exactly 1 row with 1 field, and convert it. + /** This is convenience shorthand for querying exactly one value from the + * database. It returns that value, converted to the type you specify. 
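A sketch of the row-count-checking variants; each throws `unexpected_rows` if the result has the wrong shape. The `jobs` table is an assumption.

```cxx
#include <pqxx/pqxx>

void checked_queries(pqxx::work &tx)
{
  // Expect zero rows back.
  tx.exec0("UPDATE jobs SET done = true WHERE id = 7");

  // Expect exactly one row.
  pqxx::row job{tx.exec1("SELECT name, started FROM jobs WHERE id = 7")};

  // Expect an exact number of rows.
  pqxx::result top{tx.exec_n(3, "SELECT id FROM jobs ORDER BY id LIMIT 3")};

  (void) job;
  (void) top;
}
```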
+ */ + template + [[deprecated("The desc parameter is going away.")]] TYPE + query_value(zview query, std::string_view desc) + { +#include "pqxx/internal/ignore-deprecated-pre.hxx" + row const r{exec1(query, desc)}; +#include "pqxx/internal/ignore-deprecated-post.hxx" + if (std::size(r) != 1) + throw usage_error{internal::concat( + "Queried single value from result with ", std::size(r), " columns.")}; + return r[0].as(); + } + + /// Perform query, expecting exactly 1 row with 1 field, and convert it. + /** This is convenience shorthand for querying exactly one value from the + * database. It returns that value, converted to the type you specify. + */ + template TYPE query_value(zview query) + { + row const r{exec1(query)}; + if (std::size(r) != 1) + throw usage_error{internal::concat( + "Queried single value from result with ", std::size(r), " columns.")}; + return r[0].as(); + } + + /// Execute a query, and loop over the results row by row. + /** Converts the rows to `std::tuple`, of the column types you specify. + * + * Use this with a range-based "for" loop. It executes the query, and + * directly maps the resulting rows onto a `std::tuple` of the types you + * specify. It starts before all the data from the server is in, so if your + * network connection to the server breaks while you're iterating, you'll get + * an exception partway through. + * + * The stream lives entirely within the lifetime of the transaction. Make + * sure you destroy the stream before you destroy the transaction. Either + * iterate the stream all the way to the end, or destroy first the stream + * and then the transaction without touching either in any other way. Until + * the stream has finished, the transaction is in a special state where it + * cannot execute queries. + * + * As a special case, tuple may contain `std::string_view` fields, but the + * strings to which they point will only remain valid until you extract the + * next row. After that, the memory holding the string may be overwritten or + * deallocated. + * + * If any of the columns can be null, and the C++ type to which it translates + * does not have a null value, wrap the type in `std::optional` (or if + * you prefer, `std::shared_ptr` or `std::unique_ptr)`. These templates + * do recognise null values, and libpqxx will know how to convert to them. + * + * The connection is in a special state until the iteration finishes. So if + * it does not finish due to a `break` or a `return` or an exception, then + * the entire connection becomes effectively unusable. + * + * Querying in this way is faster than the `exec()` methods for larger + * results (but slower for small ones). You can start processing rows before + * the full result is in. Also, `stream()` scales better in terms of memory + * usage. Where @ref exec() reads the entire result into memory at once, + * `stream()` will read and process one row at at a time. + * + * Your query executes as part of a COPY command, not as a stand-alone query, + * so there are limitations to what you can do in the query. It can be + * either a SELECT or VALUES query; or an INSERT, UPDATE, or DELETE with a + * RETURNING clause. See the documentation for PostgreSQL's COPY command for + * the details: + * + * https://www.postgresql.org/docs/current/sql-copy.html + * + * Iterating in this way does require each of the field types you pass to be + * default-constructible, copy-constructible, and assignable. These + * requirements may be loosened once libpqxx moves on to C++20. 
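A minimal sketch of the streaming interface described above, using the `stream()` member declared next; the `jobs` table is an assumption.

```cxx
#include <iostream>
#include <string_view>
#include <pqxx/pqxx>

void list_jobs(pqxx::work &tx)
{
  for (auto const &[id, name] :
       tx.stream<long, std::string_view>("SELECT id, name FROM jobs"))
  {
    // `name` points into a buffer that is reused for the next row, so use
    // it (or copy it) before the next iteration.
    std::cout << id << '\t' << name << '\n';
  }
}
```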
+ */ + template + [[nodiscard]] auto stream(std::string_view query) & + { + // Tricky: std::make_unique() supports constructors but not RVO functions. + return pqxx::internal::owning_stream_input_iteration{ + std::unique_ptr{ + new stream_from{stream_from::query(*this, query)}}}; + } + + // C++20: Concept like std::invocable, but without specifying param types. + /// Perform a streaming query, and for each result row, call `func`. + /** Here, `func` can be a function, a `std::function`, a lambda, or an + * object that supports the function call operator. Of course `func` must + * have an unambiguous signature; it can't be overloaded or generic. + * + * The `for_each` function executes `query` in a stream using + * @ref pqxx::stream_from. Every time a row of data comes in from the + * server, it converts the row's fields to the types of `func`'s respective + * parameters, and calls `func` with those values. + * + * This will not work for all queries, but straightforward `SELECT` and + * `UPDATE ... RETURNING` queries should work. Consult the documentation for + * @ref pqxx::stream_from and PostgreSQL's underlying `COPY` command for the + * full details. + * + * Streaming a query like this is likely to be slower than the @ref exec() + * functions for small result sets, but faster for large result sets. So if + * performance matters, you'll want to use `for_each` if you query large + * amounts of data, but not if you do lots of queries with small outputs. + */ + template + inline auto for_each(std::string_view query, CALLABLE &&func) + { + using param_types = + pqxx::internal::strip_types_t>; + param_types const *const sample{nullptr}; + auto data_stream{stream_like(query, sample)}; + for (auto const &fields : data_stream) std::apply(func, fields); + } + + /** + * @name Parameterized statements + * + * You'll often need parameters in the queries you execute: "select the + * car with this licence plate." If the parameter is a string, you need to + * quote it and escape any special characters inside it, or it may become a + * target for an SQL injection attack. If it's an integer (for example), + * you need to convert it to a string, but in the database's format, without + * locale-specific niceties like "," separators between the thousands. + * + * Parameterised statements are an easier and safer way to do this. They're + * like prepared statements, but for a single use. You don't need to name + * them, and you don't need to prepare them first. + * + * Your query will include placeholders like `$1` and `$2` etc. in the places + * where you want the arguments to go. Then, you pass the argument values + * and the actual query is constructed for you. + * + * Pass the exact right number of parameters, and in the right order. The + * parameters in the query don't have to be neatly ordered from `$1` to + * `$2` to `$3` - but you must pass the argument for `$1` first, the one + * for `$2` second, etc. + * + * @warning Beware of "nul" bytes. Any string you pass as a parameter will + * end at the first char with value zero. If you pass a string that contains + * a zero byte, the last byte in the value will be the one just before the + * zero. + */ + //@{ + /// Execute an SQL statement with parameters. + template result exec_params(zview query, Args &&...args) + { + params pp(args...); + return internal_exec_params(query, pp.make_c_params()); + } + + // Execute parameterised statement, expect a single-row result. + /** @throw unexpected_rows if the result does not consist of exactly one row. 
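A sketch of parameterised execution and `for_each()`, assuming a made-up `cars` table: `$1` is filled in from the argument, with quoting and escaping handled for you.

```cxx
#include <iostream>
#include <string>
#include <string_view>
#include <pqxx/pqxx>

void find_cars(pqxx::work &tx, std::string const &plate)
{
  pqxx::result r{tx.exec_params(
    "SELECT id, owner FROM cars WHERE licence_plate = $1", plate)};
  for (auto const &row : r)
    std::cout << row["id"].as<long>() << ": "
              << row["owner"].as<std::string>() << '\n';

  // for_each() runs the query as a stream and calls the lambda per row.
  tx.for_each(
    "SELECT id, owner FROM cars",
    [](long id, std::string_view owner)
    { std::cout << id << ": " << owner << '\n'; });
}
```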
+ */ + template row exec_params1(zview query, Args &&...args) + { + return exec_params_n(1, query, std::forward(args)...).front(); + } + + // Execute parameterised statement, expect a result with zero rows. + /** @throw unexpected_rows if the result contains rows. + */ + template result exec_params0(zview query, Args &&...args) + { + return exec_params_n(0, query, std::forward(args)...); + } + + // Execute parameterised statement, expect exactly a given number of rows. + /** @throw unexpected_rows if the result contains the wrong number of rows. + */ + template + result exec_params_n(std::size_t rows, zview query, Args &&...args) + { + auto const r{exec_params(query, std::forward(args)...)}; + check_rowcount_params(rows, std::size(r)); + return r; + } + //@} + + /** + * @name Prepared statements + * + * These are very similar to parameterised statements. The difference is + * that you prepare them in advance, giving them identifying names. You can + * then call them by these names, passing in the argument values appropriate + * for that call. + * + * You prepare a statement on the connection, using + * @ref pqxx::connection::prepare(). But you then call the statement in a + * transaction, using the functions you see here. + * + * Never try to prepare, execute, or unprepare a prepared statement manually + * using direct SQL queries when you also use the libpqxx equivalents. For + * any given statement, either prepare, manage, and execute it through the + * dedicated libpqxx functions; or do it all directly in SQL. Don't mix the + * two, or the code may get confused. + * + * See \ref prepared for a full discussion. + * + * @warning Beware of "nul" bytes. Any string you pass as a parameter will + * end at the first char with value zero. If you pass a string that contains + * a zero byte, the last byte in the value will be the one just before the + * zero. If you need a zero byte, you're dealing with binary strings, not + * regular strings. Represent binary strings on the SQL side as `BYTEA` + * (or as large objects). On the C++ side, use types like + * `std::basic_string` or `std::basic_string_view` + * or (in C++20) `std::vector`. Also, consider large objects on + * the SQL side and @ref blob on the C++ side. + */ + //@{ + + /// Execute a prepared statement, with optional arguments. + template + result exec_prepared(zview statement, Args &&...args) + { + params pp(args...); + return internal_exec_prepared(statement, pp.make_c_params()); + } + + /// Execute a prepared statement, and expect a single-row result. + /** @throw pqxx::unexpected_rows if the result was not exactly 1 row. + */ + template + row exec_prepared1(zview statement, Args &&...args) + { + return exec_prepared_n(1, statement, std::forward(args)...).front(); + } + + /// Execute a prepared statement, and expect a result with zero rows. + /** @throw pqxx::unexpected_rows if the result contained rows. + */ + template + result exec_prepared0(zview statement, Args &&...args) + { + return exec_prepared_n(0, statement, std::forward(args)...); + } + + /// Execute a prepared statement, expect a result with given number of rows. + /** @throw pqxx::unexpected_rows if the result did not contain exactly the + * given number of rows. 
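A sketch of the prepared-statement flow: prepare once on the connection, then execute by name inside a transaction. The statement name and `cars` table are assumptions.

```cxx
#include <pqxx/pqxx>

long find_car(pqxx::connection &conn, char const plate[])
{
  conn.prepare("find_car", "SELECT id FROM cars WHERE licence_plate = $1");

  pqxx::work tx{conn};
  pqxx::row const r{tx.exec_prepared1("find_car", plate)};
  tx.commit();
  return r[0].as<long>();
}
```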
+ */ + template + result + exec_prepared_n(result::size_type rows, zview statement, Args &&...args) + { + auto const r{exec_prepared(statement, std::forward(args)...)}; + check_rowcount_prepared(statement, rows, std::size(r)); + return r; + } + + //@} + + /** + * @name Error/warning output + */ + //@{ + /// Have connection process a warning message. + void process_notice(char const msg[]) const { m_conn.process_notice(msg); } + /// Have connection process a warning message. + void process_notice(zview msg) const { m_conn.process_notice(msg); } + //@} + + /// The connection in which this transaction lives. + [[nodiscard]] constexpr connection &conn() const noexcept { return m_conn; } + + /// Set session variable using SQL "SET" command. + /** @deprecated To set a transaction-local variable, execute an SQL `SET` + * command. To set a session variable, use the connection's + * @ref set_session_var function. + * + * @warning When setting a string value, you must make sure that the string + * is "safe." If you call @ref quote() on the string, it will return a + * safely escaped and quoted version for use as an SQL literal. + * + * @warning This function executes SQL. Do not try to set or get variables + * while a pipeline or table stream is active. + * + * @param var The variable to set. + * @param value The new value to store in the variable. This can be any SQL + * expression. + */ + [[deprecated( + "Set transaction-local variables using SQL SET statements.")]] void + set_variable(std::string_view var, std::string_view value); + + /// Read session variable using SQL "SHOW" command. + /** @warning This executes SQL. Do not try to set or get variables while a + * pipeline or table stream is active. + */ + [[deprecated("Read variables using SQL SHOW statements.")]] std::string + get_variable(std::string_view); + + // C++20: constexpr. + /// Transaction name, if you passed one to the constructor; or empty string. + [[nodiscard]] std::string_view name() const &noexcept { return m_name; } + +protected: + /// Create a transaction (to be called by implementation classes only). + /** The name, if nonempty, must begin with a letter and may contain letters + * and digits only. + */ + transaction_base( + connection &c, std::string_view tname, + std::shared_ptr rollback_cmd) : + m_conn{c}, m_name{tname}, m_rollback_cmd{rollback_cmd} + {} + + /// Create a transaction (to be called by implementation classes only). + /** Its rollback command will be "ROLLBACK". + * + * The name, if nonempty, must begin with a letter and may contain letters + * and digits only. + */ + transaction_base(connection &c, std::string_view tname); + + /// Create a transaction (to be called by implementation classes only). + explicit transaction_base(connection &c); + + /// Register this transaction with the connection. + void register_transaction(); + + /// End transaction. To be called by implementing class' destructor. + void close() noexcept; + + /// To be implemented by derived implementation class: commit transaction. + virtual void do_commit() = 0; + + /// Transaction type-specific way of aborting a transaction. + /** @warning This will become "final", since this function can be called + * from the implementing class destructor. + */ + virtual void do_abort(); + + /// Set the rollback command. + void set_rollback_cmd(std::shared_ptr cmd) + { + m_rollback_cmd = cmd; + } + + /// Execute query on connection directly. 
+ result direct_exec(std::string_view, std::string_view desc = ""sv); + result + direct_exec(std::shared_ptr, std::string_view desc = ""sv); + +private: + enum class status + { + active, + aborted, + committed, + in_doubt + }; + + PQXX_PRIVATE void check_pending_error(); + + result + internal_exec_prepared(zview statement, internal::c_params const &args); + + result internal_exec_params(zview query, internal::c_params const &args); + + /// Throw unexpected_rows if prepared statement returned wrong no. of rows. + void check_rowcount_prepared( + zview statement, result::size_type expected_rows, + result::size_type actual_rows); + + /// Throw unexpected_rows if wrong row count from parameterised statement. + void + check_rowcount_params(std::size_t expected_rows, std::size_t actual_rows); + + /// Describe this transaction to humans, e.g. "transaction 'foo'". + [[nodiscard]] std::string description() const; + + friend class pqxx::internal::gate::transaction_transaction_focus; + PQXX_PRIVATE void register_focus(transaction_focus *); + PQXX_PRIVATE void unregister_focus(transaction_focus *) noexcept; + PQXX_PRIVATE void register_pending_error(zview) noexcept; + PQXX_PRIVATE void register_pending_error(std::string &&) noexcept; + + /// Like @ref stream(), but takes a tuple rather than a parameter pack. + template + auto stream_like(std::string_view query, std::tuple const *) + { + return stream(query); + } + + connection &m_conn; + + /// Current "focus": a pipeline, a nested transaction, a stream... + /** This pointer is used for only one purpose: sanity checks against mistakes + * such as opening one while another is still active. + */ + transaction_focus const *m_focus = nullptr; + + status m_status = status::active; + bool m_registered = false; + std::string m_name; + std::string m_pending_error; + + /// SQL command for aborting this type of transaction. + std::shared_ptr m_rollback_cmd; + + static constexpr std::string_view s_type_name{"transaction"sv}; +}; + + +// C++20: Can borrowed_range help? +/// Forbidden specialisation: underlying buffer immediately goes out of scope. +template<> +std::string_view transaction_base::query_value( + zview query, std::string_view desc) = delete; +/// Forbidden specialisation: underlying buffer immediately goes out of scope. +template<> +zview transaction_base::query_value( + zview query, std::string_view desc) = delete; + +} // namespace pqxx + + +namespace pqxx::internal +{ +/// The SQL command for starting a given type of transaction. +template +extern const zview begin_cmd; + +// These are not static members, so "constexpr" does not imply "inline". 
+template<> +inline constexpr zview begin_cmd{ + "BEGIN"_zv}; +template<> +inline constexpr zview begin_cmd{ + "BEGIN READ ONLY"_zv}; +template<> +inline constexpr zview begin_cmd{ + "BEGIN ISOLATION LEVEL REPEATABLE READ"_zv}; +template<> +inline constexpr zview begin_cmd{ + "BEGIN ISOLATION LEVEL REPEATABLE READ READ ONLY"_zv}; +template<> +inline constexpr zview begin_cmd{ + "BEGIN ISOLATION LEVEL SERIALIZABLE"_zv}; +template<> +inline constexpr zview begin_cmd{ + "BEGIN ISOLATION LEVEL SERIALIZABLE READ ONLY"_zv}; +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transaction_focus b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transaction_focus new file mode 100644 index 000000000..fe78a9bcc --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transaction_focus @@ -0,0 +1,7 @@ +/** + * Transaction focus: types which monopolise a transaction's attention. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/types.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transaction_focus.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transaction_focus.hxx new file mode 100644 index 000000000..0707e3cc4 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transaction_focus.hxx @@ -0,0 +1,89 @@ +/** Transaction focus: types which monopolise a transaction's attention. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_TRANSACTION_FOCUS +#define PQXX_H_TRANSACTION_FOCUS + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include "pqxx/util.hxx" + +namespace pqxx +{ +/// Base class for things that monopolise a transaction's attention. +/** You probably won't need to use this class. But it can be useful to _know_ + * that a given libpqxx class is derived from it. + * + * Pipelines, SQL statements, and data streams are examples of classes derived + * from `transaction_focus`. For any given transaction, only one object of + * such a class can be active at any given time. + */ +class PQXX_LIBEXPORT transaction_focus +{ +public: + transaction_focus( + transaction_base &t, std::string_view cname, std::string_view oname) : + m_trans{t}, m_classname{cname}, m_name{oname} + {} + + transaction_focus( + transaction_base &t, std::string_view cname, std::string &&oname) : + m_trans{t}, m_classname{cname}, m_name{std::move(oname)} + {} + + transaction_focus(transaction_base &t, std::string_view cname) : + m_trans{t}, m_classname{cname} + {} + + transaction_focus() = delete; + transaction_focus(transaction_focus const &) = delete; + transaction_focus &operator=(transaction_focus const &) = delete; + + /// Class name, for human consumption. + [[nodiscard]] constexpr std::string_view classname() const noexcept + { + return m_classname; + } + + /// Name for this object, if the caller passed one; empty string otherwise. + [[nodiscard]] std::string_view name() const &noexcept { return m_name; } + + [[nodiscard]] std::string description() const + { + return pqxx::internal::describe_object(m_classname, m_name); + } + + /// Can't move a transaction_focus. 
+ /** Moving the transaction_focus would break the transaction's reference back + * to the object. + */ + transaction_focus(transaction_focus &&) = delete; + + /// Can't move a transaction_focus. + /** Moving the transaction_focus would break the transaction's reference back + * to the object. + */ + transaction_focus &operator=(transaction_focus &&) = delete; + +protected: + void register_me(); + void unregister_me() noexcept; + void reg_pending_error(std::string const &) noexcept; + bool registered() const noexcept { return m_registered; } + + transaction_base &m_trans; + +private: + bool m_registered = false; + std::string_view m_classname; + std::string m_name; +}; +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transactor b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transactor new file mode 100644 index 000000000..29d1b9640 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transactor @@ -0,0 +1,8 @@ +/** pqxx::transactor class. + * + * pqxx::transactor is a framework-style wrapper for safe transactions. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/transactor.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transactor.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transactor.hxx new file mode 100644 index 000000000..eefd04ba1 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/transactor.hxx @@ -0,0 +1,147 @@ +/* Transactor framework, a wrapper for safely retryable transactions. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/transactor instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_TRANSACTOR +#define PQXX_H_TRANSACTOR + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include + +#include "pqxx/connection.hxx" +#include "pqxx/transaction.hxx" + +namespace pqxx +{ +/** + * @defgroup transactor Transactor framework + * + * Sometimes a transaction can fail for completely transient reasons, such as a + * conflict with another transaction in SERIALIZABLE isolation. The right way + * to handle those failures is often just to re-run the transaction from + * scratch. + * + * For example, your REST API might be handling each HTTP request in its own + * database transaction, and if this kind of transient failure happens, you + * simply want to "replay" the whole request, in a fresh transaction. + * + * You won't necessarily want to execute the exact same SQL commands with the + * exact same data. Some of your SQL statements may depend on state that can + * vary between retries. Data in the database may already have changed, for + * instance. So instead of dumbly replaying the SQL, you re-run the same + * application code that produced those SQL commands, from the start. + * + * The transactor framework makes it a little easier for you to do this safely, + * and avoid typical pitfalls. You encapsulate the work that you want to do + * into a callable that you pass to the @ref perform function. + * + * Here's how it works. 
You write your transaction code as a lambda or + * function, which creates its own transaction object, does its work, and + * commits at the end. You pass that callback to @ref pqxx::perform, which + * runs it for you. + * + * If there's a failure inside your callback, there will be an exception. Your + * transaction object goes out of scope and gets destroyed, so that it aborts + * implicitly. Seeing this, @ref perform tries running your callback again. It + * stops doing that when the callback succeeds, or when it has failed too many + * times, or when there's an error that leaves the database in an unknown + * state, such as a lost connection just while we're waiting for the database + * to confirm a commit. It all depends on the type of exception. + * + * The callback takes no arguments. If you're using lambdas, the easy way to + * pass arguments is for the lambda to "capture" them from your variables. Or, + * if you're using functions, you may want to use `std::bind`. + * + * Once your callback succeeds, it can return a result, and @ref perform will + * return that result back to you. + */ +//@{ + +/// Simple way to execute a transaction with automatic retry. +/** + * Executes your transaction code as a callback. Repeats it until it completes + * normally, or it throws an error other than the few libpqxx-generated + * exceptions that the framework understands, or after a given number of failed + * attempts, or if the transaction ends in an "in-doubt" state. + * + * (An in-doubt state is one where libpqxx cannot determine whether the server + * finally committed a transaction or not. This can happen if the network + * connection to the server is lost just while we're waiting for its reply to + * a "commit" statement. The server may have completed the commit, or not, but + * it can't tell you because there's no longer a connection. + * + * Using this still takes a bit of care. If your callback makes use of data + * from the database, you'll probably have to query that data within your + * callback. If the attempt to perform your callback fails, and the framework + * tries again, you'll be in a new transaction and the data in the database may + * have changed under your feet. + * + * Also be careful about changing variables or data structures from within + * your callback. The run may still fail, and perhaps get run again. The + * ideal way to do it (in most cases) is to return your result from your + * callback, and change your program's data state only after @ref perform + * completes successfully. + * + * @param callback Transaction code that can be called with no arguments. + * @param attempts Maximum number of times to attempt performing callback. + * Must be greater than zero. + * @return Whatever your callback returns. + */ +template +inline auto perform(TRANSACTION_CALLBACK &&callback, int attempts = 3) + -> std::invoke_result_t +{ + if (attempts <= 0) + throw std::invalid_argument{ + "Zero or negative number of attempts passed to pqxx::perform()."}; + + for (; attempts > 0; --attempts) + { + try + { + return std::invoke(callback); + } + catch (in_doubt_error const &) + { + // Not sure whether transaction went through or not. The last thing in + // the world that we should do now is try again! + throw; + } + catch (statement_completion_unknown const &) + { + // Not sure whether our last statement succeeded. Don't risk running it + // again. + throw; + } + catch (broken_connection const &) + { + // Connection failed. 
May be worth retrying, if the transactor opens its + // own connection. + if (attempts <= 1) + throw; + continue; + } + catch (transaction_rollback const &) + { + // Some error that may well be transient, such as serialization failure + // or deadlock. Worth retrying. + if (attempts <= 1) + throw; + continue; + } + } + throw pqxx::internal_error{"No outcome reached on perform()."}; +} +} // namespace pqxx +//@} +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/types b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/types new file mode 100644 index 000000000..23a5caae1 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/types @@ -0,0 +1,7 @@ +/** + * Basic typedefs and forward declarations. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/types.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/types.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/types.hxx new file mode 100644 index 000000000..f95b598f8 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/types.hxx @@ -0,0 +1,173 @@ +/* Basic type aliases and forward declarations. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_TYPES +#define PQXX_H_TYPES + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include + +#if defined(PQXX_HAVE_CONCEPTS) && __has_include() +# include +#endif + + +namespace pqxx +{ +/// Number of rows in a result set. +using result_size_type = int; + +/// Difference between result sizes. +using result_difference_type = int; + +/// Number of fields in a row of database data. +using row_size_type = int; + +/// Difference between row sizes. +using row_difference_type = int; + +/// Number of bytes in a field of database data. +using field_size_type = std::size_t; + +/// Number of bytes in a large object. +using large_object_size_type = int64_t; + + +// Forward declarations, to help break compilation dependencies. +// These won't necessarily include all classes in libpqxx. +class binarystring; +class connection; +class const_result_iterator; +class const_reverse_result_iterator; +class const_reverse_row_iterator; +class const_row_iterator; +class dbtransaction; +class field; +class largeobjectaccess; +class notification_receiver; +struct range_error; +class result; +class row; +class stream_from; +class transaction_base; + +/// Marker for @ref stream_from constructors: "stream from table." +/** @deprecated Use @ref stream_from::table() instead. + */ +struct from_table_t +{}; + +/// Marker for @ref stream_from constructors: "stream from query." +/** @deprecated Use @ref stream_from::query() instead. + */ +struct from_query_t +{}; + + +/// Format code: is data text or binary? +/** Binary-compatible with libpq's format codes. + */ +enum class format : int +{ + text = 0, + binary = 1, +}; + + +/// Remove any constness, volatile, and reference-ness from a type. +/** @deprecated In C++20 we'll replace this with std::remove_cvref. + */ +template +using strip_t = std::remove_cv_t>; + + +#if defined(PQXX_HAVE_CONCEPTS) +/// The type of a container's elements. 
+/** At the time of writing there's a similar thing in `std::experimental`, + * which we may or may not end up using for this. + */ +template +using value_type = strip_t()))>; +#else // PQXX_HAVE_CONCEPTS +/// The type of a container's elements. +/** At the time of writing there's a similar thing in `std::experimental`, + * which we may or may not end up using for this. + */ +template +using value_type = strip_t()))>; +#endif // PQXX_HAVE_CONCEPTS + + +#if defined(PQXX_HAVE_CONCEPTS) +/// Concept: Any type that we can read as a string of `char`. +template +concept char_string = std::ranges::contiguous_range and + std::same_as < strip_t>, +char > ; + +/// Concept: Anything we can iterate to get things we can read as strings. +template +concept char_strings = + std::ranges::range and char_string>>; + +/// Concept: Anything we might want to treat as binary data. +template +concept potential_binary = std::ranges::contiguous_range and + (sizeof(value_type) == 1); +#endif // PQXX_HAVE_CONCEPTS + + +// C++20: Retire these compatibility definitions. +#if defined(PQXX_HAVE_CONCEPTS) + +/// Template argument type for a range. +/** This is a concept, so only available in C++20 or better. In pre-C++20 + * environments it's just an alias for @ref typename. + */ +# define PQXX_RANGE_ARG std::ranges::range + +/// Template argument type for @ref char_string. +/** This is a concept, so only available in C++20 or better. In pre-C++20 + * environments it's just an alias for @ref typename. + */ +# define PQXX_CHAR_STRING_ARG pqxx::char_string + +/// Template argument type for @ref char_strings +/** This is a concept, so only available in C++20 or better. In pre-C++20 + * environments it's just an alias for @ref typename. + */ +# define PQXX_CHAR_STRINGS_ARG pqxx::char_strings + +#else // PQXX_HAVE_CONCEPTS + +/// Template argument type for a range. +/** This is a concept, so only available in C++20 or better. In pre-C++20 + * environments it's just an alias for @ref typename. + */ +# define PQXX_RANGE_ARG typename + +/// Template argument type for @ref char_string. +/** This is a concept, so only available in C++20 or better. In pre-C++20 + * environments it's just an alias for @ref typename. + */ +# define PQXX_CHAR_STRING_ARG typename + +/// Template argument type for @ref char_strings +/** This is a concept, so only available in C++20 or better. In pre-C++20 + * environments it's just an alias for @ref typename. + */ +# define PQXX_CHAR_STRINGS_ARG typename + +#endif // PQXX_HAVE_CONCEPTS +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/util b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/util new file mode 100644 index 000000000..6d85ab611 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/util @@ -0,0 +1,6 @@ +/** Various utility definitions for libpqxx. + */ +// Actual definitions in .hxx file so editors and such recognize file type +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/util.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/util.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/util.hxx new file mode 100644 index 000000000..4aa5ecf57 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/util.hxx @@ -0,0 +1,521 @@ +/* Various utility definitions for libpqxx. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/util instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. 
+ * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_UTIL +#define PQXX_H_UTIL + +#if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if __has_include() +# include +#endif + +#include "pqxx/except.hxx" +#include "pqxx/internal/encodings.hxx" +#include "pqxx/types.hxx" +#include "pqxx/version.hxx" + + +/// The home of all libpqxx classes, functions, templates, etc. +namespace pqxx +{} + +#include + + +/// Internal items for libpqxx' own use. Do not use these yourself. +namespace pqxx::internal +{ + +// C++20: Retire wrapper. +/// Same as `std::cmp_less`, or a workaround where that's not available. +template +inline constexpr bool cmp_less(LEFT lhs, RIGHT rhs) noexcept +{ +#if defined(PQXX_HAVE_CMP) + return std::cmp_less(lhs, rhs); +#else + // We need a variable just because lgtm.com gives off a false positive + // warning when we compare the values directly. It considers that a + // "self-comparison." + constexpr bool left_signed{std::is_signed_v}; + if constexpr (left_signed == std::is_signed_v) + return lhs < rhs; + else if constexpr (std::is_signed_v) + return (lhs <= 0) ? true : (std::make_unsigned_t(lhs) < rhs); + else + return (rhs <= 0) ? false : (lhs < std::make_unsigned_t(rhs)); +#endif +} + + +// C++20: Retire wrapper. +/// C++20 std::cmp_greater, or workaround if not available. +template +inline constexpr bool cmp_greater(LEFT lhs, RIGHT rhs) noexcept +{ +#if defined(PQXX_HAVE_CMP) + return std::cmp_greater(lhs, rhs); +#else + return cmp_less(rhs, lhs); +#endif +} + + +// C++20: Retire wrapper. +/// C++20 std::cmp_less_equal, or workaround if not available. +template +inline constexpr bool cmp_less_equal(LEFT lhs, RIGHT rhs) noexcept +{ +#if defined(PQXX_HAVE_CMP) + return std::cmp_less_equal(lhs, rhs); +#else + return not cmp_less(rhs, lhs); +#endif +} + + +// C++20: Retire wrapper. +/// C++20 std::cmp_greater_equal, or workaround if not available. +template +inline constexpr bool cmp_greater_equal(LEFT lhs, RIGHT rhs) noexcept +{ +#if defined(PQXX_HAVE_CMP) + return std::cmp_greater_equal(lhs, rhs); +#else + return not cmp_less(lhs, rhs); +#endif +} + + +/// Efficiently concatenate two strings. +/** This is a special case of concatenate(), needed because dependency + * management does not let us use that function here. + */ +[[nodiscard]] inline std::string cat2(std::string_view x, std::string_view y) +{ + std::string buf; + auto const xs{std::size(x)}, ys{std::size(y)}; + buf.resize(xs + ys); + x.copy(std::data(buf), xs); + y.copy(std::data(buf) + xs, ys); + return buf; +} +} // namespace pqxx::internal + + +namespace pqxx +{ +using namespace std::literals; + +/// Suppress compiler warning about an unused item. +template inline constexpr void ignore_unused(T &&...) noexcept +{} + + +/// Cast a numeric value to another type, or throw if it underflows/overflows. +/** Both types must be arithmetic types, and they must either be both integral + * or both floating-point types. 
+ */ +template +inline TO check_cast(FROM value, std::string_view description) +{ + static_assert(std::is_arithmetic_v); + static_assert(std::is_arithmetic_v); + static_assert(std::is_integral_v == std::is_integral_v); + + // The rest of this code won't quite work for bool, but bool is trivially + // convertible to other arithmetic types as far as I can see. + if constexpr (std::is_same_v) + return static_cast(value); + + // Depending on our "if constexpr" conditions, this parameter may not be + // needed. Some compilers will warn. + ignore_unused(description); + + using from_limits = std::numeric_limits; + using to_limits = std::numeric_limits; + if constexpr (std::is_signed_v) + { + if constexpr (std::is_signed_v) + { + if (value < to_limits::lowest()) + throw range_error{internal::cat2("Cast underflow: "sv, description)}; + } + else + { + // FROM is signed, but TO is not. Treat this as a special case, because + // there may not be a good broader type in which the compiler can even + // perform our check. + if (value < 0) + throw range_error{internal::cat2( + "Casting negative value to unsigned type: "sv, description)}; + } + } + else + { + // No need to check: the value is unsigned so can't fall below the range + // of the TO type. + } + + if constexpr (std::is_integral_v) + { + using unsigned_from = std::make_unsigned_t; + using unsigned_to = std::make_unsigned_t; + constexpr auto from_max{static_cast((from_limits::max)())}; + constexpr auto to_max{static_cast((to_limits::max)())}; + if constexpr (from_max > to_max) + { + if (internal::cmp_greater(value, to_max)) + throw range_error{internal::cat2("Cast overflow: "sv, description)}; + } + } + else if constexpr ((from_limits::max)() > (to_limits::max)()) + { + if (value > (to_limits::max)()) + throw range_error{internal::cat2("Cast overflow: ", description)}; + } + + return static_cast(value); +} + + +/** Check library version at link time. + * + * Ensures a failure when linking an application against a radically + * different libpqxx version than the one against which it was compiled. + * + * Sometimes application builds fail in unclear ways because they compile + * using headers from libpqxx version X, but then link against libpqxx + * binary version Y. A typical scenario would be one where you're building + * against a libpqxx which you have built yourself, but a different version + * is installed on the system. + * + * The check_library_version template is declared for any library version, + * but only actually defined for the version of the libpqxx binary against + * which the code is linked. + * + * If the library binary is a different version than the one declared in + * these headers, then this call will fail to link: there will be no + * definition for the function with these exact template parameter values. + * There will be a definition, but the version in the parameter values will + * be different. + */ +inline PQXX_PRIVATE void check_version() noexcept +{ + // There is no particular reason to do this here in @ref connection, except + // to ensure that every meaningful libpqxx client will execute it. The call + // must be in the execution path somewhere or the compiler won't try to link + // it. We can't use it to initialise a global or class-static variable, + // because a smart compiler might resolve it at compile time. + // + // On the other hand, we don't want to make a useless function call too + // often for performance reasons. A local static variable is initialised + // only on the definition's first execution. 
Compilers will be well + // optimised for this behaviour, so there's a minimal one-time cost. + static auto const version_ok{internal::PQXX_VERSION_CHECK()}; + ignore_unused(version_ok); +} + + +/// Descriptor of library's thread-safety model. +/** This describes what the library knows about various risks to thread-safety. + */ +struct PQXX_LIBEXPORT thread_safety_model +{ + /// Is the underlying libpq build thread-safe? + bool safe_libpq = false; + + /// Is Kerberos thread-safe? + /** @warning Is currently always `false`. + * + * If your application uses Kerberos, all accesses to libpqxx or Kerberos + * must be serialized. Confine their use to a single thread, or protect it + * with a global lock. + */ + bool safe_kerberos = false; + + /// A human-readable description of any thread-safety issues. + std::string description; +}; + + +/// Describe thread safety available in this build. +[[nodiscard]] PQXX_LIBEXPORT thread_safety_model describe_thread_safety(); + + +#if defined(PQXX_HAVE_CONCEPTS) +# define PQXX_POTENTIAL_BINARY_ARG pqxx::potential_binary +#else +# define PQXX_POTENTIAL_BINARY_ARG typename +#endif + + +/// Cast binary data to a type that libpqxx will recognise as binary. +/** There are many different formats for storing binary data in memory. You + * may have yours as a `std::string`, or a `std::vector`, or one of + * many other types. + * + * But for libpqxx to recognise your data as binary, it needs to be a + * `std::basic_string`, or a `std::basic_string_view`; + * or in C++20 or better, any contiguous block of `std::byte`. + * + * Use `binary_cast` as a convenience helper to cast your data as a + * `std::basic_string_view`. + * + * @warning There are two things you should be aware of! First, the data must + * be contiguous in memory. In C++20 the compiler will enforce this, but in + * C++17 it's your own problem. Second, you must keep the object where you + * store the actual data alive for as long as you might use this function's + * return value. + */ +template +std::basic_string_view binary_cast(TYPE const &data) +{ + static_assert(sizeof(value_type) == 1); + return { + reinterpret_cast( + const_cast const *>( + std::data(data))), + std::size(data)}; +} + + +#if defined(PQXX_HAVE_CONCEPTS) +template +concept char_sized = (sizeof(CHAR) == 1); +# define PQXX_CHAR_SIZED_ARG char_sized +#else +# define PQXX_CHAR_SIZED_ARG typename +#endif + +/// Construct a type that libpqxx will recognise as binary. +/** Takes a data pointer and a size, without being too strict about their + * types, and constructs a `std::basic_string_view` pointing to + * the same data. + * + * This makes it a little easier to turn binary data, in whatever form you + * happen to have it, into binary data as libpqxx understands it. + */ +template +std::basic_string_view binary_cast(CHAR const *data, SIZE size) +{ + static_assert(sizeof(CHAR) == 1); + return { + reinterpret_cast(data), + check_cast(size, "binary data size")}; +} + + +/// The "null" oid. +constexpr oid oid_none{0}; +} // namespace pqxx + + +/// Private namespace for libpqxx's internal use; do not access. +/** This namespace hides definitions internal to libpqxx. These are not + * supposed to be used by client programs, and they may change at any time + * without notice. + * + * Conversely, if you find something in this namespace tremendously useful, by + * all means do lodge a request for its publication. + * + * @warning Here be dragons! 
+ */ +namespace pqxx::internal +{ +using namespace std::literals; + + +/// A safer and more generic replacement for `std::isdigit`. +/** Turns out `std::isdigit` isn't as easy to use as it sounds. It takes an + * `int`, but requires it to be nonnegative. Which means it's an outright + * liability on systems where `char` is signed. + */ +template inline constexpr bool is_digit(CHAR c) noexcept +{ + return (c >= '0') and (c <= '9'); +} + + +/// Describe an object for humans, based on class name and optional name. +/** Interprets an empty name as "no name given." + */ +[[nodiscard]] std::string +describe_object(std::string_view class_name, std::string_view name); + + +/// Check validity of registering a new "guest" in a "host." +/** The host might be e.g. a connection, and the guest a transaction. The + * host can only have one guest at a time, so it is an error to register a new + * guest while the host already has a guest. + * + * If the new registration is an error, this function throws a descriptive + * exception. + * + * Pass the old guest (if any) and the new guest (if any), for both, a type + * name (at least if the guest is not null), and optionally an object name + * (but which may be omitted if the caller did not assign one). + */ +void check_unique_register( + void const *old_guest, std::string_view old_class, std::string_view old_name, + void const *new_guest, std::string_view new_class, + std::string_view new_name); + + +/// Like @ref check_unique_register, but for un-registering a guest. +/** Pass the guest which was registered, as well as the guest which is being + * unregistered, so that the function can check that they are the same one. + */ +void check_unique_unregister( + void const *old_guest, std::string_view old_class, std::string_view old_name, + void const *new_guest, std::string_view new_class, + std::string_view new_name); + + +/// Compute buffer size needed to escape binary data for use as a BYTEA. +/** This uses the hex-escaping format. The return value includes room for the + * "\x" prefix. + */ +inline constexpr std::size_t size_esc_bin(std::size_t binary_bytes) noexcept +{ + return 2 + (2 * binary_bytes) + 1; +} + + +/// Compute binary size from the size of its escaped version. +/** Do not include a terminating zero in `escaped_bytes`. + */ +inline constexpr std::size_t size_unesc_bin(std::size_t escaped_bytes) noexcept +{ + return (escaped_bytes - 2) / 2; +} + + +// TODO: Use actual binary type for "data". +/// Hex-escape binary data into a buffer. +/** The buffer must be able to accommodate + * `size_esc_bin(std::size(binary_data))` bytes, and the function will write + * exactly that number of bytes into the buffer. This includes a trailing + * zero. + */ +void PQXX_LIBEXPORT +esc_bin(std::basic_string_view binary_data, char buffer[]) noexcept; + + +/// Hex-escape binary data into a std::string. +std::string PQXX_LIBEXPORT +esc_bin(std::basic_string_view binary_data); + + +/// Reconstitute binary data from its escaped version. +void PQXX_LIBEXPORT +unesc_bin(std::string_view escaped_data, std::byte buffer[]); + + +/// Reconstitute binary data from its escaped version. +std::basic_string + PQXX_LIBEXPORT unesc_bin(std::string_view escaped_data); + + +/// Transitional: std::ssize(), or custom implementation if not available. 
+template auto ssize(T const &c) +{ +#if defined(__cpp_lib_ssize) && __cplusplus >= __cpp_lib_ssize + return std::ssize(c); +#else + using signed_t = std::make_signed_t; + return static_cast(std::size(c)); +#endif // __cpp_lib_ssize +} + + +/// Helper for determining a function's parameter types. +/** This function has no definition. It's not meant to be actually called. + * It's just there for pattern-matching in the compiler, so we can use its + * hypothetical return value. + */ +template +std::tuple args_f(RETURN (&func)(ARGS...)); + + +/// Helper for determining a `std::function`'s parameter types. +/** This function has no definition. It's not meant to be actually called. + * It's just there for pattern-matching in the compiler, so we can use its + * hypothetical return value. + */ +template +std::tuple args_f(std::function const &); + + +/// Helper for determining a member function's parameter types. +/** This function has no definition. It's not meant to be actually called. + * It's just there for pattern-matching in the compiler, so we can use its + * hypothetical return value. + */ +template +std::tuple member_args_f(RETURN (CLASS::*)(ARGS...)); + + +/// Helper for determining a const member function's parameter types. +/** This function has no definition. It's not meant to be actually called. + * It's just there for pattern-matching in the compiler, so we can use its + * hypothetical return value. + */ +template +std::tuple member_args_f(RETURN (CLASS::*)(ARGS...) const); + + +/// Helper for determining a callable type's parameter types. +/** This specialisation should work for lambdas. + * + * This function has no definition. It's not meant to be actually called. + * It's just there for pattern-matching in the compiler, so we can use its + * hypothetical return value. + */ +template +auto args_f(CALLABLE const &f) + -> decltype(member_args_f(&CALLABLE::operator())); + + +/// A callable's parameter types, as a tuple. +template +using args_t = decltype(args_f(std::declval())); + + +/// Helper: Apply `strip_t` to each of a tuple type's component types. +/** This function has no definition. It is not meant to be called, only to be + * used to deduce the right types. + */ +template +std::tuple...> strip_types(std::tuple const &); + + +/// Take a tuple type and apply @ref strip_t to its component types. +template +using strip_types_t = decltype(strip_types(std::declval())); +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/version b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/version new file mode 100644 index 000000000..8dd5e48d4 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/version @@ -0,0 +1,7 @@ +/** libpqxx version info. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/version.hxx" +#include "pqxx/internal/header-post.hxx" + diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/version.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/version.hxx new file mode 100644 index 000000000..a159f1bed --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/version.hxx @@ -0,0 +1,55 @@ +/* Version info for libpqxx. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/version instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. 
If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_VERSION + +# if !defined(PQXX_HEADER_PRE) +# error "Include libpqxx headers as , not ." +# endif + +/// Full libpqxx version string. +# define PQXX_VERSION "7.7.3" +/// Library ABI version. +# define PQXX_ABI "7.7" + +/// Major version number. +# define PQXX_VERSION_MAJOR 7 +/// Minor version number. +# define PQXX_VERSION_MINOR 7 + +# define PQXX_VERSION_CHECK check_pqxx_version_7_7 + +namespace pqxx::internal +{ +/// Library version check stub. +/** Helps detect version mismatches between libpqxx headers and the libpqxx + * library binary. + * + * Sometimes users run into trouble linking their code against libpqxx because + * they build their own libpqxx, but the system also has a different version + * installed. The declarations in the headers against which they compile their + * code will differ from the ones used to build the libpqxx version they're + * using, leading to confusing link errors. The solution is to generate a link + * error when the libpqxx binary is not the same version as the libpqxx headers + * used to compile the code. + * + * This function's definition is in the libpqxx binary, so it's based on the + * version as found in the binary. The headers contain a call to the function, + * whose name contains the libpqxx version as found in the headers. (The + * library build process will use its own local headers even if another version + * of the headers is installed on the system.) + * + * If the libpqxx binary was compiled for a different version than the user's + * code, linking will fail with an error: `check_pqxx_version_*_*` will not + * exist for the given version number. + */ +PQXX_LIBEXPORT int PQXX_VERSION_CHECK() noexcept; +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/zview b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/zview new file mode 100644 index 000000000..66ea2a625 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/zview @@ -0,0 +1,6 @@ +/** Zero-terminated string view class. + */ +// Actual definitions in .hxx file so editors and such recognize file type. +#include "pqxx/internal/header-pre.hxx" +#include "pqxx/zview.hxx" +#include "pqxx/internal/header-post.hxx" diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/zview.hxx b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/zview.hxx new file mode 100644 index 000000000..36a779f51 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/include/pqxx/zview.hxx @@ -0,0 +1,163 @@ +/* Zero-terminated string view. + * + * DO NOT INCLUDE THIS FILE DIRECTLY; include pqxx/zview instead. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#ifndef PQXX_H_ZVIEW +#define PQXX_H_ZVIEW + +#include +#include +#include + +#include "pqxx/types.hxx" + + +namespace pqxx +{ +/// Marker-type wrapper: zero-terminated `std::string_view`. +/** @warning Use this only if the underlying string is zero-terminated. + * + * When you construct a zview, you are promising that if the data pointer is + * non-null, the underlying string is zero-terminated. It otherwise behaves + * exactly like a std::string_view. 
+ * + * The terminating zero is not "in" the string, so it does not count as part of + * the view's length. + * + * The added guarantee lets the view be used as a C-style string, which often + * matters since libpqxx builds on top of a C library. For this reason, zview + * also adds a @ref c_str method. + */ +class zview : public std::string_view +{ +public: + constexpr zview() noexcept = default; + + /// Convenience overload: construct using pointer and signed length. + constexpr zview(char const text[], std::ptrdiff_t len) : + std::string_view{text, static_cast(len)} + {} + + /// Convenience overload: construct using pointer and signed length. + constexpr zview(char text[], std::ptrdiff_t len) : + std::string_view{text, static_cast(len)} + {} + + /// Explicitly promote a `string_view` to a `zview`. + explicit constexpr zview(std::string_view other) noexcept : + std::string_view{other} + {} + + /// Construct from any initialiser you might use for `std::string_view`. + /** @warning Only do this if you are sure that the string is zero-terminated. + */ + template + explicit constexpr zview(Args &&...args) : + std::string_view(std::forward(args)...) + {} + + // C++20: constexpr. + /// @warning There's an implicit conversion from `std::string`. + zview(std::string const &str) noexcept : + std::string_view{str.c_str(), str.size()} + {} + + /// Construct a `zview` from a C-style string. + /** @warning This scans the string to discover its length. So if you need to + * do it many times, it's probably better to create the `zview` once and + * re-use it. + */ + constexpr zview(char const str[]) : std::string_view{str} {} + + /// Construct a `zview` from a string literal. + /** A C++ string literal ("foo") normally looks a lot like a pointer to + * char const, but that's not really true. It's actually an array of char, + * which _devolves_ to a pointer when you pass it. + * + * For the purpose of creating a `zview` there is one big difference: if we + * know the array's size, we don't need to scan through the string in order + * to find out its length. + */ + template + constexpr zview(char const (&literal)[size]) : zview(literal, size - 1) + {} + + /// Either a null pointer, or a zero-terminated text buffer. + [[nodiscard]] constexpr char const *c_str() const &noexcept + { + return data(); + } +}; + + +/// Support @ref zview literals. +/** You can "import" this selectively into your namespace, without pulling in + * all of the @ref pqxx namespace: + * + * ```cxx + * using pqxx::operator"" _zv; + * ``` + */ +constexpr zview operator"" _zv(char const str[], std::size_t len) noexcept +{ + return zview{str, len}; +} +} // namespace pqxx + + +#if defined(PQXX_HAVE_CONCEPTS) +/// A zview is a view. +template<> inline constexpr bool std::ranges::enable_view{true}; + + +/// A zview is a borrowed range. +template<> +inline constexpr bool std::ranges::enable_borrowed_range{true}; + +namespace pqxx::internal +{ +/// Concept: T is a known zero-terminated string type. +/** There's no unified API for these string types. It's just a check for some + * known types. Any code that makes use of the concept will still have to + * support each of these individually. + */ +template +concept ZString = std::is_convertible_v < strip_t, +char const * > or std::is_convertible_v, zview> or + std::is_convertible_v; +} // namespace pqxx::internal +#endif // PQXX_HAVE_CONCEPTS + + +namespace pqxx::internal +{ +/// Get a raw C string pointer. 
+inline constexpr char const *as_c_string(char const str[]) noexcept +{ + return str; +} +/// Get a raw C string pointer. +template +inline constexpr char const *as_c_string(char (&str)[N]) noexcept +{ + return str; +} +/// Get a raw C string pointer. +inline constexpr char const *as_c_string(pqxx::zview str) noexcept +{ + return str.c_str(); +} +// C++20: Make this constexpr. +/// Get a raw C string pointer. +inline char const *as_c_string(std::string const &str) noexcept +{ + return str.c_str(); +} +} // namespace pqxx::internal +#endif diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/lib/cmake/libpqxx/libpqxx-config-version.cmake b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/lib/cmake/libpqxx/libpqxx-config-version.cmake new file mode 100644 index 000000000..c47d6956d --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/lib/cmake/libpqxx/libpqxx-config-version.cmake @@ -0,0 +1,70 @@ +# This is a basic version file for the Config-mode of find_package(). +# It is used by write_basic_package_version_file() as input file for configure_file() +# to create a version-file which can be installed along a config.cmake file. +# +# The created file sets PACKAGE_VERSION_EXACT if the current version string and +# the requested version string are exactly the same and it sets +# PACKAGE_VERSION_COMPATIBLE if the current version is >= requested version, +# but only if the requested major version is the same as the current one. +# The variable CVF_VERSION must be set before calling configure_file(). + + +set(PACKAGE_VERSION "7.7.3") + +if(PACKAGE_VERSION VERSION_LESS PACKAGE_FIND_VERSION) + set(PACKAGE_VERSION_COMPATIBLE FALSE) +else() + + if("7.7.3" MATCHES "^([0-9]+)\\.") + set(CVF_VERSION_MAJOR "${CMAKE_MATCH_1}") + if(NOT CVF_VERSION_MAJOR VERSION_EQUAL 0) + string(REGEX REPLACE "^0+" "" CVF_VERSION_MAJOR "${CVF_VERSION_MAJOR}") + endif() + else() + set(CVF_VERSION_MAJOR "7.7.3") + endif() + + if(PACKAGE_FIND_VERSION_RANGE) + # both endpoints of the range must have the expected major version + math (EXPR CVF_VERSION_MAJOR_NEXT "${CVF_VERSION_MAJOR} + 1") + if (NOT PACKAGE_FIND_VERSION_MIN_MAJOR STREQUAL CVF_VERSION_MAJOR + OR ((PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "INCLUDE" AND NOT PACKAGE_FIND_VERSION_MAX_MAJOR STREQUAL CVF_VERSION_MAJOR) + OR (PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "EXCLUDE" AND NOT PACKAGE_FIND_VERSION_MAX VERSION_LESS_EQUAL CVF_VERSION_MAJOR_NEXT))) + set(PACKAGE_VERSION_COMPATIBLE FALSE) + elseif(PACKAGE_FIND_VERSION_MIN_MAJOR STREQUAL CVF_VERSION_MAJOR + AND ((PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "INCLUDE" AND PACKAGE_VERSION VERSION_LESS_EQUAL PACKAGE_FIND_VERSION_MAX) + OR (PACKAGE_FIND_VERSION_RANGE_MAX STREQUAL "EXCLUDE" AND PACKAGE_VERSION VERSION_LESS PACKAGE_FIND_VERSION_MAX))) + set(PACKAGE_VERSION_COMPATIBLE TRUE) + else() + set(PACKAGE_VERSION_COMPATIBLE FALSE) + endif() + else() + if(PACKAGE_FIND_VERSION_MAJOR STREQUAL CVF_VERSION_MAJOR) + set(PACKAGE_VERSION_COMPATIBLE TRUE) + else() + set(PACKAGE_VERSION_COMPATIBLE FALSE) + endif() + + if(PACKAGE_FIND_VERSION STREQUAL PACKAGE_VERSION) + set(PACKAGE_VERSION_EXACT TRUE) + endif() + endif() +endif() + + +# if the installed project requested no architecture check, don't perform the check +if("FALSE") + return() +endif() + +# if the installed or the using project don't have CMAKE_SIZEOF_VOID_P set, ignore it: +if("${CMAKE_SIZEOF_VOID_P}" STREQUAL "" OR "8" STREQUAL "") + return() +endif() + +# check that the installed version has the same 32/64bit-ness as the one which is currently searching: 
+if(NOT CMAKE_SIZEOF_VOID_P STREQUAL "8") + math(EXPR installedBits "8 * 8") + set(PACKAGE_VERSION "${PACKAGE_VERSION} (${installedBits}bit)") + set(PACKAGE_VERSION_UNSUITABLE TRUE) +endif() diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/lib/cmake/libpqxx/libpqxx-config.cmake b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/lib/cmake/libpqxx/libpqxx-config.cmake new file mode 100644 index 000000000..cb25a05f2 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/lib/cmake/libpqxx/libpqxx-config.cmake @@ -0,0 +1,4 @@ +include(CMakeFindDependencyMacro) +find_dependency(PostgreSQL) + +include("${CMAKE_CURRENT_LIST_DIR}/libpqxx-targets.cmake") diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/lib/cmake/libpqxx/libpqxx-targets-noconfig.cmake b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/lib/cmake/libpqxx/libpqxx-targets-noconfig.cmake new file mode 100644 index 000000000..980f46098 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/lib/cmake/libpqxx/libpqxx-targets-noconfig.cmake @@ -0,0 +1,19 @@ +#---------------------------------------------------------------- +# Generated CMake target import file. +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Import target "libpqxx::pqxx" for configuration "" +set_property(TARGET libpqxx::pqxx APPEND PROPERTY IMPORTED_CONFIGURATIONS NOCONFIG) +set_target_properties(libpqxx::pqxx PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES_NOCONFIG "CXX" + IMPORTED_LOCATION_NOCONFIG "${_IMPORT_PREFIX}/lib/libpqxx-7.7.a" + ) + +list(APPEND _IMPORT_CHECK_TARGETS libpqxx::pqxx ) +list(APPEND _IMPORT_CHECK_FILES_FOR_libpqxx::pqxx "${_IMPORT_PREFIX}/lib/libpqxx-7.7.a" ) + +# Commands beyond this point should not need to know the version. +set(CMAKE_IMPORT_FILE_VERSION) diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/lib/cmake/libpqxx/libpqxx-targets.cmake b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/lib/cmake/libpqxx/libpqxx-targets.cmake new file mode 100644 index 000000000..c7b525b18 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/lib/cmake/libpqxx/libpqxx-targets.cmake @@ -0,0 +1,99 @@ +# Generated by CMake + +if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.6) + message(FATAL_ERROR "CMake >= 2.6.0 required") +endif() +cmake_policy(PUSH) +cmake_policy(VERSION 2.6...3.20) +#---------------------------------------------------------------- +# Generated CMake target import file. +#---------------------------------------------------------------- + +# Commands may need to know the format version. +set(CMAKE_IMPORT_FILE_VERSION 1) + +# Protect against multiple inclusion, which would fail when already imported targets are added once more. 
+set(_targetsDefined) +set(_targetsNotDefined) +set(_expectedTargets) +foreach(_expectedTarget libpqxx::pqxx) + list(APPEND _expectedTargets ${_expectedTarget}) + if(NOT TARGET ${_expectedTarget}) + list(APPEND _targetsNotDefined ${_expectedTarget}) + endif() + if(TARGET ${_expectedTarget}) + list(APPEND _targetsDefined ${_expectedTarget}) + endif() +endforeach() +if("${_targetsDefined}" STREQUAL "${_expectedTargets}") + unset(_targetsDefined) + unset(_targetsNotDefined) + unset(_expectedTargets) + set(CMAKE_IMPORT_FILE_VERSION) + cmake_policy(POP) + return() +endif() +if(NOT "${_targetsDefined}" STREQUAL "") + message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_targetsDefined}\nTargets not yet defined: ${_targetsNotDefined}\n") +endif() +unset(_targetsDefined) +unset(_targetsNotDefined) +unset(_expectedTargets) + + +# Compute the installation prefix relative to this file. +get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +if(_IMPORT_PREFIX STREQUAL "/") + set(_IMPORT_PREFIX "") +endif() + +# Create imported target libpqxx::pqxx +add_library(libpqxx::pqxx STATIC IMPORTED) + +set_target_properties(libpqxx::pqxx PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include" + INTERFACE_LINK_LIBRARIES "/usr/lib/aarch64-linux-gnu/libpq.so" +) + +if(CMAKE_VERSION VERSION_LESS 2.8.12) + message(FATAL_ERROR "This file relies on consumers using CMake 2.8.12 or greater.") +endif() + +# Load information for each installed configuration. +get_filename_component(_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) +file(GLOB CONFIG_FILES "${_DIR}/libpqxx-targets-*.cmake") +foreach(f ${CONFIG_FILES}) + include(${f}) +endforeach() + +# Cleanup temporary variables. +set(_IMPORT_PREFIX) + +# Loop over all imported files and verify that they actually exist +foreach(target ${_IMPORT_CHECK_TARGETS} ) + foreach(file ${_IMPORT_CHECK_FILES_FOR_${target}} ) + if(NOT EXISTS "${file}" ) + message(FATAL_ERROR "The imported target \"${target}\" references the file + \"${file}\" +but this file does not exist. Possible reasons include: +* The file was deleted, renamed, or moved to another location. +* An install or uninstall procedure did not complete successfully. +* The installation package was faulty and contained + \"${CMAKE_CURRENT_LIST_FILE}\" +but not all the files it references. +") + endif() + endforeach() + unset(_IMPORT_CHECK_FILES_FOR_${target}) +endforeach() +unset(_IMPORT_CHECK_TARGETS) + +# This file does not depend on other imported targets which have +# been exported from the same project but in a separate export set. + +# Commands beyond this point should not need to know the version. 
+set(CMAKE_IMPORT_FILE_VERSION) +cmake_policy(POP) diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/lib/libpqxx-7.7.a b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/lib/libpqxx-7.7.a new file mode 100644 index 000000000..2cb705a34 Binary files /dev/null and b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/lib/libpqxx-7.7.a differ diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/lib/libpqxx.a b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/lib/libpqxx.a new file mode 120000 index 000000000..d9fcdab85 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/lib/libpqxx.a @@ -0,0 +1 @@ +libpqxx-7.7.a \ No newline at end of file diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/accessing-results.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/accessing-results.md new file mode 100644 index 000000000..920fb6f3b --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/accessing-results.md @@ -0,0 +1,157 @@ +Accessing results and result rows {#accessing-results} +--------------------------------- + +When you execute a query using one of the transaction `exec` functions, you +normally get a `result` object back. A `result` is a container of `row`s. + +(There are exceptions. The `exec1` functions expect exactly one row of data, +so they return just a `row`, not a full `result`.) + +Result objects are an all-or-nothing affair. The `exec` function waits until +it's received all the result data, and then gives it to you in the form of the +`result`. _(There is a faster, easier way of executing simple queries, so see +"streaming rows" below as well.)_ + +For example, your code might do: + +```cxx + pqxx::result r = tx.exec("SELECT * FROM mytable"); +``` + +Now, how do you access the data inside `r`? + +Result sets act as standard C++ containers of rows. Rows act as standard +C++ containers of fields. So the easiest way to go through them is: + +```cxx + for (auto const &row: r) + { + for (auto const &field: row) std::cout << field.c_str() << '\t'; + std::cout << '\n'; + } +``` + +But results and rows also support other kinds of access. Array-style +indexing, for instance, such as `r[rownum]`: + +```cxx + std::size_t const num_rows = std::size(r); + for (std::size_t rownum=0u; rownum < num_rows; ++rownum) + { + pqxx::row const row = r[rownum]; + std::size_t const num_cols = std::size(row); + for (std::size_t colnum=0u; colnum < num_cols; ++colnum) + { + pqxx::field const field = row[colnum]; + std::cout << field.c_str() << '\t'; + } + + std::cout << '\n'; + } +``` + +Every row in the result has the same number of columns, so you don't need to +look up the number of fields again for each one: + +```cxx + std::size_t const num_rows = std::size(r); + std::size_t const num_cols = r.columns(); + for (std::size_t rownum=0u; rownum < num_rows; ++rownum) + { + pqxx::row const row = r[rownum]; + for (std::size_t colnum=0u; colnum < num_cols; ++colnum) + { + pqxx::field const field = row[colnum]; + std::cout << field.c_str() << '\t'; + } + + std::cout << '\n'; + } +``` + +You can even address a field by indexing the `row` using the field's _name:_ + +```cxx + std::cout << row["salary"] << '\n'; +``` + +But try not to do that if speed matters, because looking up the column by name +takes time. At least you'd want to look up the column index before your loop +and then use numerical indexes inside the loop. 
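+
+If that lookup cost matters, one option (a minimal sketch, reusing the
+`mytable` and `salary` names from the examples above) is to resolve the column
+name to a number once, before the loop:
+
+```cxx
+    pqxx::result r = tx.exec("SELECT * FROM mytable");
+
+    // Look the column up by name just once...
+    auto const salary_col = r.column_number("salary");
+
+    // ...and use the numeric index inside the loop.
+    for (auto const &row: r)
+      std::cout << row[salary_col] << '\n';
+```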
+ +For C++23 or better, there's also a two-dimensional array access operator: + +```cxx + for (std::size_t rownum=0u; rownum < num_rows; ++rownum) + { + for (std::size_t colnum=0u; colnum < num_cols; ++colnum) + std::cout result[rownum, colnum].c_str() << '\t'; + std::cout << '\n'; + } +``` + +And of course you can use classic "begin/end" loops: + +```cxx + for (auto row = std::begin(r); row != std::end(r); row++) + { + for (auto field = std::begin(row); field != std::end(row); field++) + std::cout << field->c_str() << '\t'; + std::cout << '\n'; + } +``` + +Result sets are immutable, so all iterators on results and rows are actually +`const_iterator`s. There are also `const_reverse_iterator` types, which +iterate backwards from `rbegin()` to `rend()` exclusive. + +All these iterator types provide one extra bit of convenience that you won't +normally find in C++ iterators: referential transparency. You don't need to +dereference them to get to the row or field they refer to. That is, instead +of `row->end()` you can also choose to say `row.end()`. Similarly, you +may prefer `field.c_str()` over `field->c_str()`. + +This becomes really helpful with the array-indexing operator. With regular +C++ iterators you would need ugly expressions like `(*row)[0]` or +`row->operator[](0)`. With the iterator types defined by the result and +row classes you can simply say `row[0]`. + + +Streaming rows +-------------- + +There's another way to go through the rows coming out of a query. It's +usually easier and faster, but there are drawbacks. + +**One,** you start getting rows before all the data has come in from the +database. That speeds things up, but what happens if you lose your network +connection while transferring the data? Your application may already have +processed some of the data before finding out that the rest isn't coming. If +that is a problem for your application, streaming may not be the right choice. + +**Two,** streaming only works for some types of query. The `stream()` function +wraps your query in a PostgreSQL `COPY` command, and `COPY` only supports a few +commands: `SELECT`, `VALUES`, `or an `INSERT`, `UPDATE`, or `DELETE` with a +`RETURNING` clause. See the `COPY` documentation here: +https://www.postgresql.org/docs/current/sql-copy.html + +**Three,** when you convert a field to a "view" type (such as +`std::string_view` or `std::basic_string_view`), the view points to +underlying data which only stays valid until you iterate to the next row or +exit the loop. So if you want to use that data for longer than a single +iteration of the streaming loop, you'll have to store it somewhere yourself. + +Now for the good news. Streaming does make it very easy to query data and loop +over it: + +```cxx + for (auto [id, name, x, y] : + tx.stream( + "SELECT id, name, x, y FROM point")) + process(id + 1, "point-" + name, x * 10.0, y * 10.0); +``` + +The conversion to C++ types (here `int`, `std::string_view`, and two `float`s) +is built into the function. You never even see `row` objects, `field` objects, +iterators, or conversion methods. You just put in your query and you receive +your data. 
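+
+Caveat **Three** above is easy to trip over in practice. A minimal sketch
+(reusing the hypothetical `point` table from the example above): the row's C++
+types are spelled out as template arguments to `stream()`, and any
+`std::string_view` you want to keep beyond the current iteration gets copied
+into an owning `std::string` first.
+
+```cxx
+    std::vector<std::string> names;
+    for (auto [name] : tx.stream<std::string_view>("SELECT name FROM point"))
+    {
+      // The view only stays valid until the next row, so copy what you keep.
+      names.emplace_back(name);
+    }
+```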
diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/binary-data.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/binary-data.md new file mode 100644 index 000000000..20da8dc0c --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/binary-data.md @@ -0,0 +1,56 @@ +Binary data {#binary} +=========== + +The database has two ways of storing binary data: `BYTEA` is like a string, but +containing bytes rather than text characters. And _large objects_ are more +like a separate table containing binary objects. + +Generally you'll want to use `BYTEA` for reasonably-sized values, and large +objects for very large values. + +That's the database side. On the C++ side, in libpqxx, all binary data must be +either `std::basic_string` or `std::basic_string_view`; +or if you're building in C++20 or better, anything that's a block of +contiguous `std::byte` in memory. + +So for example, if you want to write a large object, you'd create a +`pqxx::blob` object. And you might use that to write data in the form of +`std::basic_string_view`. + +Your particular binary data may look different though. You may have it in a +`std::string`, or a `std::vector`, or a pointer to `char` +accompanied by a size (which could be signed or unsigned, and of any of a few +different widths). Sometimes that's your choice, or sometimes some other +library will dictate what form it takes. + +So long as it's _basically_ still a block of bytes though, you can use +`pqxx::binary_cast` to construct a `std::basic_string_view` from it. + +There are two forms of `binary_cast`. One takes a single argument that must +support `std::data()` and `std::size()`: + + std::string hi{"Hello binary world"}; + my_blob.write(pqxx::binary_cast(hi); + +The other takes a pointer and a size: + + char const greeting[] = "Hello binary world"; + char const *hi = greeting; + my_blob.write(pqxx::binary_cast(hi, sizeof(greeting))); + + +Caveats +------- + +There are some restrictions on `binary_cast` that you must be aware of. + +First, your data must of a type that gives us _bytes._ So: `char`, +`unsigned char`, `signed char`, `int8_t`, `uint8_t`, or of course `std::byte`. +You can't feed in a vector of `double`, or anything like that. + +Second, the data must be laid out as a contiguous block in memory. If there's +no `std::data()` implementation for your type, it's not suitable. + +Third, `binary_cast` only constructs something like a `std::string_view`. It +does not make a copy of your actual data. So, make sure that your data remains +alive and in the same place while you're using it. diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/datatypes.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/datatypes.md new file mode 100644 index 000000000..bc14c8b90 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/datatypes.md @@ -0,0 +1,373 @@ +Supporting additional data types {#datatypes} +================================ + +Communication with the database mostly happens in a text format. When you +include an integer value in a query, you use `to_string` to convert it to that +text format. When you get a query result field "as a float," it converts from +the text format to a floating-point type. These conversions are everywhere in +libpqxx. + +The conversion sydstem supports many built-in types, but it is also extensible. 
+You can "teach" libpqxx (in the scope of your own application) to convert +additional types of values to and from PostgreSQL's string format. + +This is massively useful, but it's not for the faint of heart. You'll need to +specialise some templates. And, **the API for doing this can change with any +major libpqxx release.** + + +Converting types +---------------- + +In your application, a conversion is driven entirely by a C++ type you specify. +The value's SQL type has nothing to do with it, nor is there anything in the +string that would identify its type. + +So, if you've SELECTed a 64-bit integer from the database, and you try to +convert it to a C++ "short," one of two things will happen: either the number +is small enough to fit in your `short` (and it just works), or else it throws a +conversion exception. + +Or, your database table might have a text column, but a given field may contain +a string that _looks_ just like a number. You can convert that value to an +integer type just fine. Or to a floating-point type. All that matters to the +conversion is the actual value, and the type. + +In some cases the templates for these conversions can tell the type from the +arguments you pass them: + + auto x = to_string(99); + +In other cases you may need to instantiate template explicitly: + + auto y = from_string("99"); + + +Supporting a new type +--------------------- + +Let's say you have some other SQL type which you want to be able to store in, +or retrieve from, the database. What would it take to support that? + +Sometimes you do not need _complete_ support. You might need a conversion _to_ +a string but not _from_ a string, for example. The conversion is defined at +compile time, so don't be too afraid to be incomplete. If you leave out one of +these steps, it's not going to crash at run time or mess up your data. The +worst that can happen is that your code won't build. + +So what do you need for a complete conversion? + +First off, of course, you need a C++ type. It may be your own, but it +doesn't have to be. It could be a type from a third-party library, or even one +from the standard library that libpqxx does not yet support. + +You also specialise the `pqxx::type_name` variable to specify the type's name. +This is important for all code which mentions your type in human-readable text, +such as error messages. + +Then, does your type have a built-in null value? You specialise the +`pqxx::nullness` template to specify the details. + +Finally, you specialise the `pqxx::string_traits` template. This is where you +define the actual conversions. + +Let's go through these steps one by one. + + +Your type +--------- + +You'll need a type for which the conversions are not yet defined, because the +C++ type is what determines the right conversion. One type, one set of +conversions. + +The type doesn't have to be one that you create. The conversion logic was +designed such that you can build it around any type. So you can just as +easily build a conversion for a type that's defined somewhere else. There's +no need to include any special methods or other members inside it. That's also +how libpqxx can support converting built-in types like `int`. + +By the way, if the type is an enum, you don't need to do any of this. Just +invoke the preprocessor macro `PQXX_DECLARE_ENUM_CONVERSION`, from the global +namespace near the top of your translation unit, and pass the type as an +argument. 
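+
+For example, a minimal sketch (the `colour` enum here is hypothetical; the
+macro invocation goes at global namespace scope, near the top of the file):
+
+    #include <pqxx/pqxx>
+
+    enum class colour { red, green, blue };
+
+    // Defines the string conversions for "colour" in terms of its
+    // underlying integral type.
+    PQXX_DECLARE_ENUM_CONVERSION(colour);
+
+With that in place, the usual conversions (`pqxx::to_string`, a field's
+`as<colour>()`, and so on) apply to `colour` as they do to other supported
+types.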
+ +The library also provides specialisations for `std::optional`, +`std::shared_ptr`, and `std::unique_ptr`. If you have conversions for +`T`, you'll also have conversions for those. + + +Specialise `type_name` +---------------------- + +When errors happen during conversion, libpqxx will compose error messages for +the user. Sometimes these will include the name of the type that's being +converted. + +To tell libpqxx the name of each type, there's a template variable called +`pqxx::type_name`. For any given type `T`, it should have a specialisation +that provides that `T`'s human-readable name: + + namespace pqxx + { + template<> std::string const type_name{"T"}; + } + +(Yes, this means that you need to define something inside the pqxx namespace. +Future versions of libpqxx may move this into a separate namespace.) + +Define this early on in your translation unit, before any code that might cause +libpqxx to need the name. That way, the libpqxx code which needs to know the +type's name can see your definition. + + +Specialise `nullness` +--------------------- + +A struct template `pqxx::nullness` defines whether your type has a natural +"null value" built in. If so, it also provides member functions for producing +and recognising null values. + +The simplest scenario is also the most common: most types don't have a null +value built in. In that case, derive your nullness traits from +`pqxx::no_null`: + + namespace pqxx + { + template<> struct nullness : pqxx::no_null {}; + } + +(Here again you're defining this in the pqxx namespace.) + +If your type does have a natural null value, the definition gets a little more +complex: + + namespace pqxx + { + template<> struct nullness + { + static constexpr bool has_null{true}; + static constexpr bool always_null{false}; + + static bool is_null(T const &value) + { + // Return whether "value" is null. + return ...; + } + + [[nodiscard]] static T null() + { + // Return a null value. + return ...; + } + }; + } + +You may be wondering why there's a function to produce a null value, but also a +function to check whether a value is null. Why not just compare the value to +the result of `null()`? Because two null values may not be equal. `T` may +have several different null values. Or it may override the comparison +operator, similar to SQL where NULL is not equal to NULL. + +As a third case, your type may be one that _always_ represents a null value. +This is the case for `std::nullptr_t` and `std::nullopt_t`. In that case, you +set `nullness::always_null` to `true` (as well as `has_null` of course), +and you won't need to define any actual conversions. + + +Specialise `string_traits` +------------------------- + +This part is more work. (You can skip it for types that are _always_ null, +but those will be rare.) Specialise the `pqxx::string_traits` template: + + namespace pqxx + { + template<> struct string_traits + { + static T from_string(std::string_view text); + static zview to_buf(char *begin, char *end, T const &value); + static char *into_buf(char *begin, char *end, T const &value); + static std::size_t size_buffer(T const &value) noexcept; + }; + } + +You'll also need to write those member functions, or as many of them as needed +to get your code to build. + + +### `from_string` + +We start off simple: `from_string` parses a string as a value of `T`, and +returns that value. + +The string may not be zero-terminated; it's just the `string_view` from +beginning to end (exclusive). 
In your tests, cover cases where the string +does not end in a zero byte. + +It's perfectly possible that the string isn't actually a `T` value. Mistakes +happen. In that case, throw a `pqxx::conversion_error`. + +(Of course it's also possible that you run into some other error, so it's fine +to throw different exceptions as well. But when it's definitely "this is not +the right format for a `T`," throw `conversion_error`.) + + +### `to_buf` + +In this function, you convert a value of `T` into a string that the postgres +server will understand. + +The caller will provide you with a buffer where you can write the string, if +you need it: from `begin` to `end` exclusive. It's a half-open interval, so +don't access `*end`. + +If the buffer is insufficient for you to do the conversion, throw a +`pqxx::conversion_overrun`. It doesn't have to be exact: you can be a little +pessimistic and demand a bit more space than you need. Just be sure to throw +the exception if there's any risk of overrunning the buffer. + +You don't _have_ to use the buffer for this function though. For example, +`pqxx::string_traits::to_buf` returns a compile-time constant string and +ignores the buffer. + +Even if you do use the buffer, your string does not _have_ to start at the +beginning of the buffer. For example, the integer conversions start by writing +the _least_ significant digit to the _end_ of the buffer, and then writes the +more significant digits before it. It was just more convenient. + +Return a `pqxx::zview`. This is basically a `std::string_view`, but with one +difference: a `zview` guarantees that there will be a valid zero byte right +after the `string_view`. The zero byte is not counted as part of its size, but +it will be there. + +Expressed in code, this rule must hold: + + void invariant(zview z) + { + assert(z[std::size(z)] == 0); + } + +Make sure you write your trailing zero _before_ the `end`. If the trailing +zero doesn't fit in the buffer, then there's just not enough room to perform +the conversion. + +Beware of locales when converting. If you use standard library features like +`sprintf`, they may obey whatever locale is currently set on the system. That +means that a simple integer like 1000000 may come out as "1000000" on your +system, but as "1,000,000" on mine, or as "1.000.000" for somebody else, and on +an Indian system it may be "1,00,000". Values coming from or going to the +database should be in non-localised formats. You can use libpqxx functions for +those conversions: `pqxx::from_string`, `pqxx::to_string`, `pqxx::to_buf`. + + +### `into_buf` + +This is a stricter version of `to_buf`. All the same requirements apply, but +in addition you must write your string into the buffer provided, starting +_exactly_ at `begin`. + +That's why this function returns just a simple pointer: the address right +behind the trailing zero. If the caller wants to use the string, they can +find it at `begin`. If they want to write a different value into the rest of +the buffer, they can start at the location you returned. + + +### `size_buffer` + +Here you estimate how much buffer space you need for converting a `T` to a +string. Be precise if you can, but pessimistic if you must. It's usually +better to waste a few unnecessary bytes than to spend a lot of time computing +the exact buffer space you need. And failing the conversion because you +under-budgeted the buffer is worst of all. + +Include the trailing zero in the buffer size. 
If your `to_buf` takes more
+space than just what's needed to store the result, include that too.
+
+Make `size_buffer` a `constexpr` function if you can.  It can allow the caller
+to allocate the buffer on the stack, with a size known at compile time.
+
+
+Optional: Specialise `is_unquoted_safe`
+---------------------------------------
+
+When converting arrays or composite values to strings, libpqxx may need to
+quote values and escape any special characters.  This takes time.
+
+Some types though, such as integral or floating-point types, can never have
+any special characters such as quotes, commas, or backslashes in their string
+representations.  In such cases, there's no need to quote or escape such values
+in arrays or composite types.
+
+If your type is like that, you can tell libpqxx about this by defining:
+
+    namespace pqxx
+    {
+    template<> inline constexpr bool is_unquoted_safe<T>{true};
+    }
+
+The code that converts this type of field to strings in an array or a composite
+type can then use a simpler, more efficient variant of the code.  It's always
+safe to leave this out; it's _just_ an optimisation for when you're completely
+sure that it's safe.
+
+Do not do this if a string representation of your type may contain a comma;
+semicolon; parenthesis; brace; quote; backslash; newline; or any other
+character that might need escaping.
+
+
+Optional: Specialise `param_format`
+-----------------------------------
+
+This one you don't generally need to worry about.  Read on if you're writing a
+type which represents raw binary data, or if you're writing a template where
+_some specialisations_ may contain raw binary data.
+
+When you call parameterised statements, or prepared statements with parameters,
+libpqxx needs to pass your parameters on to libpq, the underlying C-level
+PostgreSQL client library.
+
+There are two formats for doing that: _text_ and _binary._  In the first, we
+represent all values as strings, and the server then converts them into its own
+internal binary representation.  That's what the string conversions are all
+about, and it's what we do for almost all types of parameters.
+
+But we do it differently when the parameter is a contiguous series of raw bytes
+and the corresponding SQL type is `BYTEA`.  There is a text format for those,
+but we bypass it for efficiency.  The server can use the binary data in the
+exact same form, without any conversion or extra processing.  The binary data
+is also twice as compact during transport.
+
+(People sometimes ask why we can't just treat all types as binary.  However the
+general case isn't so clear-cut.  The binary formats are not documented, there
+are no guarantees that they will be platform-independent or that they will
+remain stable, and there's no really solid way to detect when we might get the
+format wrong.  But also, the conversions aren't necessarily as straightforward
+and efficient as they sound.  So, for the general case, libpqxx sticks with the
+text formats.  Raw binary data alone stands out as a clear win.)
+
+Long story short, the machinery for passing parameters needs to know: is this
+parameter a binary string, or not?  In the normal case it can assume "no," and
+that's what it does.  The text format is always a safe choice; we just try to
+use the binary format where it's faster.
+
+The `param_format` function template is what makes the decision.  We specialise
+it for types which may be binary strings, and use the default for all other
+types.
+
+"Types which _may_ be binary"? 
You might think we know whether a type is a +binary type or not. But there are some complications with generic types. + +Templates like `std::shared_ptr`, `std::optional`, and so on act like +"wrappers" for another type. A `std::optional` is binary if `T` is binary. +Otherwise, it's not. If you're building support for a template of this nature, +you'll probably want to implement `param_format` for it. + +The decision to use binary format is made based on a given object, not +necessarily based on the type in general. Look at `std::variant`. If you have +a `std::variant` type which can hold an `int` or a binary string, is that a +binary parameter? We can't decide without knowing the individual object. + +Containers are another hard case. Should we pass `std::vector` in binary? +Even when `T` is a binary type, we don't currently have any way to pass an +array in binary format, so we always pass it as text. diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/escaping.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/escaping.md new file mode 100644 index 000000000..2ad9fe3db --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/escaping.md @@ -0,0 +1,74 @@ +String escaping {#escaping} +=============== + +Writing queries as strings is easy. But sometimes you need a variable in +there: `"SELECT id FROM user WHERE name = '" + name + "'"`. + +This is dangerous. See the bug? If `name` can contain quotes, you may have +an SQL injection vulnerability there, where users can enter nasty stuff like +"`.'; DROP TABLE user`". Or if you're lucky, it's just a nasty bug that you +discover when `name` happens to be "d'Arcy". + +So, you'll need to _escape_ the `name` before you insert it. This is where +quotes and other problematic characters are marked as "this is just a character +in the string, not the end of the string." There are +[several functions](@ref escaping-functions) in libpqxx to do this for you. + + +SQL injection +------------- + +To understand what SQL injection vulnerabilities are and why they should be +prevented, imagine you use the following SQL statement somewhere in your +program: + + TX.exec( + "SELECT number,amount " + "FROM accounts " + "WHERE allowed_to_see('" + userid + "','" + password + "')"); + +This shows a logged-in user important information on all accounts he is +authorized to view. The userid and password strings are variables entered +by the user himself. + +Now, if the user is actually an attacker who knows (or can guess) the +general shape of this SQL statement, imagine he enters the following +password: + + x') OR ('x' = 'x + +Does that make sense to you? Probably not. But if this is inserted into +the SQL string by the C++ code above, the query becomes: + + SELECT number,amount + FROM accounts + WHERE allowed_to_see('user','x') OR ('x' = 'x') + +Is this what you wanted to happen? Probably not! The neat `allowed_to_see()` +clause is completely circumvented by the "`OR ('x' = 'x')`" clause, which is +always `true`. Therefore, the attacker will get to see all accounts in the +database! 
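+
+An alternative that avoids escaping altogether (covered under @ref parameters)
+is to pass the values as statement parameters, so they never become part of
+the SQL text at all.  A minimal sketch of the same hypothetical query using
+`exec_params`:
+
+    TX.exec_params(
+        "SELECT number,amount "
+        "FROM accounts "
+        "WHERE allowed_to_see($1, $2)",
+        userid, password);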
+ + +Using the esc functions +----------------------- + +Here's how you can fix the problem in the example above: + + TX.exec( + "SELECT number,amount " + "FROM accounts " + "WHERE allowed_to_see('" + TX.esc(userid) + "', " + "'" + TX.esc(password) + "')"); + +Now, the quotes embedded in the attacker's string will be neatly escaped so +they can't "break out" of the quoted SQL string they were meant to go into: + + SELECT number,amount + FROM accounts + WHERE allowed_to_see('user', 'x'') OR (''x'' = ''x') + +If you look carefully, you'll see that thanks to the added escape characters +(a single-quote is escaped in SQL by doubling it) all we get is a very +strange-looking password string--but not a change in the SQL statement. + diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/getting-started.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/getting-started.md new file mode 100644 index 000000000..1b87b881f --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/getting-started.md @@ -0,0 +1,142 @@ +Getting started {#getting-started} +=============== + +The most basic three types in libpqxx are the _connection_, the _transaction_, +and the _result_. + +They fit together as follows: +* You connect to the database by creating a `pqxx::connection` object (see + @ref connections). + +* You create a transaction object (see @ref transactions) operating on that + connection. You'll usually want the `pqxx::work` variety. + + Once you're done you call the transaction's `commit` function to make its + work final. If you don't call this, the work will be rolled back when the + transaction object is destroyed. + +* Until then, use the transaction's `exec`, `query_value`, and `stream` + functions (and variants) to execute SQL statements. You pass the statements + themselves in as simple strings. (See @ref streams for more about data + streaming). + +* Most of the `exec` functions return a `pqxx::result` object, which acts + as a standard container of rows: `pqxx::row`. + + Each row in a result, in turn, acts as a container of fields: `pqxx::field`. + See @ref accessing-results for more about results, rows, and fields. + +* Each field's data is stored internally as a text string, in a format defined + by PostgreSQL. You can convert field or row values using their `as()` and + `to()` member functions. + +* After you've closed the transaction, the connection is free to run a next + transaction. + +Here's a very basic example. It connects to the default database (you'll +need to have one set up), queries it for a very simple result, converts it to +an `int`, and prints it out. It also contains some basic error handling. + + #include + #include + + int main() + { + try + { + // Connect to the database. In practice we may have to pass some + // arguments to say where the database server is, and so on. + // The constructor parses options exactly like libpq's + // PQconnectdb/PQconnect, see: + // https://www.postgresql.org/docs/10/static/libpq-connect.html + pqxx::connection c; + + // Start a transaction. In libpqxx, you always work in one. + pqxx::work w(c); + + // work::exec1() executes a query returning a single row of data. + // We'll just ask the database to return the number 1 to us. + pqxx::row r = w.exec1("SELECT 1"); + + // Commit your transaction. If an exception occurred before this + // point, execution will have left the block, and the transaction will + // have been destroyed along the way. 
In that case, the failed + // transaction would implicitly abort instead of getting to this point. + w.commit(); + + // Look at the first and only field in the row, parse it as an integer, + // and print it. + // + // "r[0]" returns the first field, which has an "as<...>()" member + // function template to convert its contents from their string format + // to a type of your choice. + std::cout << r[0].as() << std::endl; + } + catch (std::exception const &e) + { + std::cerr << e.what() << std::endl; + return 1; + } + } + +This prints the number 1. Notice that you can keep the result object around +after you've closed the transaction or even the connection. There are +situations where you can't do it, but generally it's fine. If you're +interested: you can install your own callbacks for receiving error messages +from the database, and in that case you'll have to keep the connection object +alive. But otherwise, it's nice to be able to "fire and forget" your +connection and deal with the data. + +You can also convert an entire row to a series of C++-side types in one go, +using the @c as member function on the row: + + pqxx::connection c; + pqxx::work w(c); + pqxx::row r = w.exec1("SELECT 1, 2, 'Hello'"); + auto [one, two, hello] = r.as(); + std::cout << (one + two) << ' ' << std::strlen(hello) << std::endl; + +Here's a slightly more complicated example. It takes an argument from the +command line and retrieves a string with that value. The interesting part is +that it uses the escaping-and-quoting function `quote` to embed this +string value in SQL safely. It also reads the result field's value as a +plain C-style string using its `c_str` function. + + #include + #include + #include + + int main(int argc, char *argv[]) + { + try + { + if (!argv[1]) throw std::runtime_error("Give me a string!"); + + pqxx::connection c; + pqxx::work w(c); + + // work::exec() returns a full result set, which can consist of any + // number of rows. + pqxx::result r = w.exec("SELECT " + w.quote(argv[1])); + + // End our transaction here. We can still use the result afterwards. + w.commit(); + + // Print the first field of the first row. Read it as a C string, + // just like std::string::c_str() does. + std::cout << r[0][0].c_str() << std::endl; + } + catch (std::exception const &e) + { + std::cerr << e.what() << std::endl; + return 1; + } + } + +You can find more about converting field values to native types, or +converting values to strings for use with libpqxx, under +@ref stringconversion. More about getting to the rows and fields of a +result is under @ref accessing-results. + +If you want to handle exceptions thrown by libpqxx in more detail, for +example to print the SQL contents of a query that failed, see @ref exception. diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/mainpage.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/mainpage.md new file mode 100644 index 000000000..5d4b8f9b2 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/mainpage.md @@ -0,0 +1,28 @@ +libpqxx {#mainpage} +======= + +@version 7.7.3 +@author Jeroen T. Vermeulen +@see http://pqxx.org +@see https://github.com/jtv/libpqxx + +Welcome to libpqxx, the C++ API to the PostgreSQL database management system. + +Compiling this package requires PostgreSQL to be installed -- including the +C headers for client development. The library builds on top of PostgreSQL's +standard C API, libpq. The libpq headers are not needed to compile client +programs, however. 
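+
+As a small taste of that: `pqxx::sql_error`, thrown when a statement fails,
+remembers the offending statement, which you can retrieve through its
+`query()` member function.  A minimal sketch (the failing query is made up):
+
+    try
+    {
+      pqxx::connection c;
+      pqxx::work w(c);
+      w.exec("SELECT oops");
+    }
+    catch (pqxx::sql_error const &e)
+    {
+      // Print both the error and the SQL that caused it.
+      std::cerr << e.what() << '\n' << "Query was: " << e.query() << '\n';
+    }
+    catch (std::exception const &e)
+    {
+      std::cerr << e.what() << '\n';
+    }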
+ +For a quick introduction to installing and using libpqxx, see the README.md +file. The latest information can be found at http://pqxx.org/ + + +Some links that should help you find your bearings: +* @ref getting-started +* @ref thread-safety +* @ref connections +* @ref transactions +* @ref escaping +* @ref performance +* @ref transactor +* @ref datatypes diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/parameters.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/parameters.md new file mode 100644 index 000000000..7ac792025 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/parameters.md @@ -0,0 +1,90 @@ +Statement parameters {#parameters} +==================== + +When you execute a prepared statement (see @ref prepared), or a parameterised +statement (using functions like `pqxx::connection::exec_params`), you may write +special _placeholders_ in the query text. They look like `$1`, `$2`, and so +on. + +If you execute the query and pass parameter values, the call will respectively +substitute the first where it finds `$1`, the second where it finds `$2`, et +cetera. + +Doing this saves you work. If you don't use statement parameters, you'll need +to quote and escape your values (see `connection::quote()` and friends) as you +insert them into your query as literal values. + +Or if you forget to do that, you leave yourself open to horrible +[SQL injection attacks](https://xkcd.com/327/). Trust me, I was born in a town +whose name started with an apostrophe! + +Statement parameters save you this work. With these parameters you can pass +your values as-is, and they will go across the wire to the database in a safe +format. + +In some cases it may even be faster! When a parameter represents binary data +(as in the SQL `BYTEA` type), libpqxx will send it directly as binary, which is +a bit more efficient. If you insert the binary data directly in your query +text, your CPU will have some extra work to do, converting the data into a text +format, escaping it, and adding quotes. + + +Dynamic parameter lists +----------------------- + +In rare cases you may just not know how many parameters you'll pass into your +statement when you call it. + +For these situations, have a look at `params`. It lets you compose your +parameters list on the fly, even add whole ranges of parameters at a time. + +You can pass a `params` into your statement as a normal parameter. It will +fill in all the parameter values it contains into that position of the +statement's overall parameter list. + +So if you call your statement passing a regular parameter `a`, a +`params` containing just a parameter `b`, and another regular parameter `c`, +then your call will pass parameters `a`, `b`, and `c`. Or if the params object +is empty, it will pass just `a` and `c`. If the params object contains `x` and +`y`, your call will pass `a, x, y, c`. + +You can mix static and dynamic parameters freely. Don't go overboard though: +complexity is where bugs happen! + + +Generating placeholders +----------------------- + +If your code gets particularly complex, it may sometimes happen that it becomes +hard to track which parameter value belongs with which placeholder. Did you +intend to pass this numeric value as `$7`, or as `$8`? The answer may depend +on an `if` that happened earlier in a different function. + +(Generally if things get that complex, it's a good idea to look for simpler +solutions. 
But especially when performance matters, sometimes you can't avoid
+complexity like that.)
+
+There's a little helper class called `placeholders`.  You can use it as a
+counter which produces those placeholder strings, `$1`, `$2`, `$3`, et cetera.
+When you start generating a complex statement, you can create both a `params`
+and a `placeholders`:
+
+    pqxx::params values;
+    pqxx::placeholders name;
+
+Let's say you've got some complex code to generate the conditions for an SQL
+"WHERE" clause.  You'll generally want to do these things close together in
+your code, so that you don't accidentally update one part and forget another:
+
+    if (extra_clause)
+    {
+      // Extend the query text, using the current placeholder.
+      query += " AND x = " + name.get();
+      // Add the parameter value.
+      values.append(my_x);
+      // Move on to the next placeholder value.
+      name.next();
+    }
+
+Depending on the starting value of `name`, this might add to `query` a fragment
+like "` AND x = $3`" or "` AND x = $5`".
diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/performance.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/performance.md
new file mode 100644
index 000000000..6c403684f
--- /dev/null
+++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/performance.md
@@ -0,0 +1,24 @@
+Performance features {#performance}
+====================
+
+If your program's database interaction is not as efficient as it needs to be,
+the first place to look is usually the SQL you're executing.  But libpqxx
+has a few specialized features to help you squeeze more performance out
+of how you issue commands and retrieve data:
+
+* @ref streams.  Use these as a faster way to transfer data between your
+  code and the database.
+* `std::string_view` and `pqxx::zview`.  In places where traditional C++ worked
+  with `std::string`, see whether `std::string_view` or `pqxx::zview` will
+  do.  Of course that means that you'll have to look at the data's lifetime
+  more carefully, but it'll save the computer a lot of copying.
+* @ref prepared.  These can be executed many times without the server
+  parsing and planning them anew each time.  They also save you having to
+  escape string parameters.
+* `pqxx::pipeline` lets you send queries to the database in batches, and
+  continue other processing while they are executing.
+* `pqxx::connecting` lets you start setting up a database connection, but
+  without blocking the thread.
+
+As always of course, don't risk the quality of your code for optimizations
+that you don't need!
diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/prepared-statement.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/prepared-statement.md
new file mode 100644
index 000000000..5193866a6
--- /dev/null
+++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/prepared-statement.md
@@ -0,0 +1,125 @@
+Prepared statements {#prepared}
+===================
+
+Prepared statements are SQL queries that you define once and then invoke
+as many times as you like, typically with varying parameters.  It's basically
+a function that you can define ad hoc.
+
+If you have an SQL statement that you're going to execute many times in
+quick succession, it may be more efficient to prepare it once and reuse it.
+This saves the database backend the effort of parsing complex SQL and
+figuring out an efficient execution plan.  Another nice side effect is that
+you don't need to worry about escaping parameters.
Some corporate coding
+standards require all SQL parameters to be passed in this way, to reduce the
+risk of programmer mistakes leaving room for SQL injections.
+
+
+Preparing a statement
+---------------------
+
+You create a prepared statement by preparing it on the connection (using the
+`pqxx::connection::prepare` functions), passing an identifier and its SQL text.
+
+The identifier is the name by which the prepared statement will be known; it
+should consist of ASCII letters, digits, and underscores only, and start with
+an ASCII letter.  The name is case-sensitive.
+
+```cxx
+    void prepare_my_statement(pqxx::connection &c)
+    {
+      c.prepare(
+          "my_statement",
+          "SELECT * FROM Employee WHERE name = 'Xavier'");
+    }
+```
+
+Once you've done this, you'll be able to call `my_statement` from any
+transaction you execute on the same connection.  For this, use the
+`pqxx::transaction_base::exec_prepared` functions.
+
+```cxx
+    pqxx::result execute_my_statement(pqxx::transaction_base &t)
+    {
+      return t.exec_prepared("my_statement");
+    }
+```
+
+
+Parameters
+----------
+
+Did I mention that prepared statements can have parameters?  The query text
+can contain `$1`, `$2` etc. as placeholders for parameter values that you
+will provide when you invoke the prepared statement.
+
+See @ref parameters for more about this.  And here's a simple example of
+preparing a statement and invoking it with parameters:
+
+```cxx
+    void prepare_find(pqxx::connection &c)
+    {
+      // Prepare a statement called "find" that looks for employees with a
+      // given name (parameter 1) whose salary exceeds a given number
+      // (parameter 2).
+      c.prepare(
+          "find",
+          "SELECT * FROM Employee WHERE name = $1 AND salary > $2");
+    }
+```
+
+This example looks up the prepared statement "find," passes `name` and
+`min_salary` as parameters, and invokes the statement with those values:
+
+```cxx
+    pqxx::result execute_find(
+      pqxx::transaction_base &t, std::string name, int min_salary)
+    {
+      return t.exec_prepared("find", name, min_salary);
+    }
+```
+
+
+A special prepared statement
+----------------------------
+
+There is one special case: the _nameless_ prepared statement.  You may prepare
+a statement without a name, i.e. whose name is an empty string.  The unnamed
+statement can be redefined at any time, without un-preparing it first.
+
+
+Performance note
+----------------
+
+Don't assume that using prepared statements will speed up your application.
+There are cases where prepared statements are actually slower than plain SQL.
+
+The reason is that the backend can often produce a better execution plan when
+it knows the statement's actual parameter values.
+
+For example, say you've got a web application and you're querying for users
+with status "inactive" who have email addresses in a given domain name X.  If
+X is a very popular provider, the best way for the database engine to plan the
+query may be to list the inactive users first and then filter for the email
+addresses you're looking for.  But in other cases, it may be much faster to
+find matching email addresses first and then see which of their owners are
+"inactive."  A prepared statement must be planned to fit either case, but a
+direct query will be optimised based on table statistics, partial indexes, etc.
+
+
+Zero bytes
+----------
+
+@warning Beware of "nul" bytes!
+
+Any string you pass as a parameter will end at the _first char with value
+zero._  If you pass a string that contains a zero byte, the last byte in the
+value will be the one just before the zero.
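+
+For example, this sketch (the `memo` table and a transaction `tx` are
+hypothetical) would store just "AB", because, per the warning above, the
+conversion stops at the embedded zero byte:
+
+    std::string with_nul("AB\0CD", 5);  // 5 bytes, with a zero after "AB".
+    tx.exec_params("INSERT INTO memo(body) VALUES ($1)", with_nul);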
+ +So, if you need a zero byte in a string, consider that it's really a _binary +string,_ which is not the same thing as a text string. SQL represents binary +data as the `BYTEA` type, or in binary large objects ("blobs"). + +In libpqxx, you represent binary data as a range of `std::byte`. They must be +contiguous in memory, so that libpqxx can pass pointers to the underlying C +library. So you might use `std::basic_string`, or +`std::basic_string_view`, or `std::vector`. diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/streams.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/streams.md new file mode 100644 index 000000000..3df4d6126 --- /dev/null +++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/streams.md @@ -0,0 +1,107 @@ +Streams {#streams} +======= + +Most of the time it's fine to retrieve data from the database using `SELECT` +queries, and store data using `INSERT`. But for those cases where efficiency +matters, there are two classes to help you do this better: `stream_from` and +`stream_to`. They're less flexible than SQL queries, and there's the risk of +losing your connection while you're in mid-stream, but you get some speed and +memory efficiencies in return. + +Both stream classes do data conversion for you: `stream_from` receives values +from the database in PostgreSQL's text format, and converts them to the C++ +types you specify. Likewise, `stream_to` converts C++ values you provide to +PostgreSQL's text format for transfer. (On its end, the database of course +converts values to and from their SQL types.) + + +Null values +----------- + +So how do you deal with nulls? It depends on the C++ type you're using. Some +types may have a built-in null value. For instance, if you have a +`char const *` value and you convert it to an SQL string, then converting a +`nullptr` will produce a NULL SQL value. + +But what do you do about C++ types which don't have a built-in null value, such +as `int`? The trick is to wrap it in `std::optional`. The difference between +`int` and `std::optional` is that the former always has an `int` value, +and the latter doesn't have to. + +Actually it's not just `std::optional`. You can do the same thing with +`std::unique_ptr` or `std::shared_ptr`. A smart pointer is less efficient than +`std::optional` in most situations because they allocate their value on the +heap, but sometimes that's what you want in order to save moving or copying +large values around. + +This part is not generic though. It won't work with just any smart-pointer +type, just the ones which are explicitly supported: `shared_ptr` and +`unique_ptr`. If you really need to, you can build support for additional +wrappers and smart pointers by copying the implementation patterns from the +existing smart-pointer support. + + +stream\_from +------------ + +Use `stream_from` to read data directly from the database. It's faster than +the transaction's `exec` functions if the result contains enough rows. But +also, you won't need to keep your full result set in memory. That can really +matter with larger data sets. + +And, you can start processing your data right after the first row of data comes +in from the server. With `exec()` you need to wait to receive all data, and +then you begin processing. With `stream_from` you can be processing data on +the client side while the server is still sending you the rest. + +You don't actually need to create a `stream_from` object yourself, though you +can. 
Two shorthand functions, @ref pqxx::transaction_base::stream
+and @ref pqxx::transaction_base::for_each, can create the streams for you with
+a minimum of overhead.
+
+Not all kinds of queries will work in a stream.  Internally the streams make
+use of PostgreSQL's `COPY` command, so see the PostgreSQL documentation for
+`COPY` for the exact limitations.  Basic `SELECT` and `UPDATE ... RETURNING`
+queries should just work.
+
+As you read a row, the stream converts its fields to a tuple type containing
+the value types you ask for:
+
+    auto stream = pqxx::stream_from::query(
+        tx, "SELECT name, points FROM score");
+    std::tuple<std::string, int> row;
+    while (stream >> row)
+      process(row);
+    stream.complete();
+
+As the stream reads each row, it converts that row's data into your tuple,
+goes through your loop body, and then promptly forgets that row's data.  This
+means you can easily process more data than will fit in memory.
+
+
+stream\_to
+----------
+
+Use `stream_to` to write data directly to a database table.  This saves you
+having to perform an `INSERT` for every row, and so it can be significantly
+faster if you want to insert more than just one or two rows at a time.
+
+As with `stream_from`, you can specify the table and the columns, and not much
+else.  You insert tuple-like objects of your choice:
+
+    pqxx::stream_to stream{
+        tx,
+        "score",
+        std::vector<std::string>{"name", "points"}};
+    for (auto const &entry: scores)
+      stream << entry;
+    stream.complete();
+
+Each row is processed as you provide it, and not retained in memory after that.
+
+The call to `complete()` is more important here than it is for `stream_from`.
+It's a lot like a "commit" or "abort" at the end of a transaction.  If you omit
+it, it will be done automatically during the stream's destructor.  But since
+destructors can't throw exceptions, any failures at that stage won't be visible
+in your code.  So, always call `complete()` on a `stream_to` to close it off
+properly!
diff --git a/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/thread-safety.md b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/thread-safety.md
new file mode 100644
index 000000000..07c7f9984
--- /dev/null
+++ b/ext/libpqxx-7.7.3/install/ubuntu22.04/arm64/share/doc/libpqxx/thread-safety.md
@@ -0,0 +1,29 @@
+Thread safety {#thread-safety}
+=============
+
+This library does not contain any locking code to protect objects against
+simultaneous modification in multi-threaded programs.  Therefore it is up
+to you, the user of the library, to ensure that your threaded client
+programs perform no conflicting operations concurrently.
+
+Most of the time this isn't hard.  Result sets are immutable, so you can
+share them between threads without problem.  The main rule is:
+
+@li Treat a connection, together with any and all objects related to it, as
+a "world" of its own.  You should generally make sure that the same "world"
+is never accessed by another thread while you're doing anything non-const
+in there.
+
+That means: don't issue a query on a transaction while you're also opening
+a subtransaction, don't access a cursor while you may also be committing,
+and so on.
+
+In particular, cursors are tricky.  It's easy to perform a non-const
+operation without noticing.  So, if you're going to share cursors or
+cursor-related objects between threads, lock very conservatively!
+
+Use `pqxx::describe_thread_safety` to find out at runtime what level of
+thread safety is implemented in your build and version of libpqxx. 
It +returns a `pqxx::thread_safety_model` describing what you can and cannot rely +on. A command-line utility `tools/pqxxthreadsafety` prints out the same +information. diff --git a/ext/libpqxx-7.7.3/libpqxx.pc.in b/ext/libpqxx-7.7.3/libpqxx.pc.in new file mode 100644 index 000000000..eb7dcff49 --- /dev/null +++ b/ext/libpqxx-7.7.3/libpqxx.pc.in @@ -0,0 +1,10 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: libpqxx +Description: C++ client API for the PostgreSQL database management system. +Version: @VERSION@ +Libs: -L${libdir} -lpqxx +Cflags: -I${includedir} diff --git a/ext/libpqxx-7.7.3/requirements.json b/ext/libpqxx-7.7.3/requirements.json new file mode 100644 index 000000000..28f8f8ad1 --- /dev/null +++ b/ext/libpqxx-7.7.3/requirements.json @@ -0,0 +1,9 @@ +{ + "description": "Minimum versions needed of various things.", + "c++": "17", + "libpq": "9.6", + "postgresql": "9.6", + "gcc": "8", + "clang": "11", + "msvc": "2019" +} diff --git a/ext/libpqxx-7.7.3/src/CMakeLists.txt b/ext/libpqxx-7.7.3/src/CMakeLists.txt new file mode 100644 index 000000000..1d697ab7c --- /dev/null +++ b/ext/libpqxx-7.7.3/src/CMakeLists.txt @@ -0,0 +1,91 @@ +if(NOT PostgreSQL_FOUND) + if(POLICY CMP0074) + cmake_policy(PUSH) + # CMP0074 is `OLD` by `cmake_minimum_required(VERSION 3.7)`, + # sets `NEW` to enable support CMake variable `PostgreSQL_ROOT`. + cmake_policy(SET CMP0074 NEW) + endif() + + find_package(PostgreSQL REQUIRED) + + if(POLICY CMP0074) + cmake_policy(POP) + endif() +endif() + +# When setting up the include paths, mention the binary tree's include +# directory *before* the source tree's include directory. If the source tree +# happens to contain autoconf-generated config headers, we should still prefer +# the ones in the binary tree. +macro(library_target_setup tgt) + target_include_directories(${tgt} + PUBLIC + $ + $ + $ + PRIVATE + ${PostgreSQL_INCLUDE_DIRS} + ) + target_link_libraries(${tgt} PRIVATE ${PostgreSQL_LIBRARIES}) + if(WIN32) + target_link_libraries(${tgt} PUBLIC wsock32 ws2_32) + endif() + install(TARGETS ${tgt} EXPORT libpqxx-targets + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + ) + + get_target_property(name ${tgt} NAME) + get_target_property(output_name ${tgt} OUTPUT_NAME) + if(NOT CMAKE_HOST_WIN32) + # Create library symlink + get_target_property(target_type ${tgt} TYPE) + if(target_type STREQUAL "SHARED_LIBRARY") + set(library_prefix ${CMAKE_SHARED_LIBRARY_PREFIX}) + set(library_suffix ${CMAKE_SHARED_LIBRARY_SUFFIX}) + elseif(target_type STREQUAL "STATIC_LIBRARY") + set(library_prefix ${CMAKE_STATIC_LIBRARY_PREFIX}) + set(library_suffix ${CMAKE_STATIC_LIBRARY_SUFFIX}) + endif() + + list(APPEND noop_command "${CMAKE_COMMAND}" "-E" "true") + list(APPEND create_symlink_command "${CMAKE_COMMAND}" "-E" "create_symlink" "${library_prefix}${output_name}${library_suffix}" "${library_prefix}${name}${library_suffix}") + # `add_custom_command()` does nothing if the `OUTPUT_NAME` and `NAME` + # properties are equal, otherwise it creates library symlink. 
+ add_custom_command(TARGET ${tgt} POST_BUILD + COMMAND "$,${noop_command},${create_symlink_command}>" + VERBATIM + COMMAND_EXPAND_LISTS + ) + install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${library_prefix}${name}${library_suffix} + DESTINATION ${CMAKE_INSTALL_LIBDIR} + ) + endif() +endmacro() + +file(GLOB CXX_SOURCES *.cxx) + +add_library(pqxx ${CXX_SOURCES}) + +get_target_property(pqxx_target_type pqxx TYPE) +if(pqxx_target_type STREQUAL "SHARED_LIBRARY") + target_compile_definitions(pqxx PUBLIC PQXX_SHARED) +endif() + +set_target_properties( + pqxx PROPERTIES + OUTPUT_NAME $,pqxx,pqxx-${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}> +) +library_target_setup(pqxx) + +# install pkg-config file +set(prefix ${CMAKE_INSTALL_PREFIX}) +set(exec_prefix \${prefix}) +set(libdir "\${prefix}/${CMAKE_INSTALL_LIBDIR}") +set(includedir "\${prefix}/${CMAKE_INSTALL_INCLUDEDIR}") +set(VERSION ${PROJECT_VERSION}) +configure_file(${PROJECT_SOURCE_DIR}/libpqxx.pc.in ${PROJECT_BINARY_DIR}/libpqxx.pc) +install(FILES ${PROJECT_BINARY_DIR}/libpqxx.pc + DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig +) diff --git a/ext/libpqxx-7.7.3/src/Makefile.am b/ext/libpqxx-7.7.3/src/Makefile.am new file mode 100644 index 000000000..dfd520941 --- /dev/null +++ b/ext/libpqxx-7.7.3/src/Makefile.am @@ -0,0 +1,44 @@ +lib_LTLIBRARIES = libpqxx.la +libpqxx_la_SOURCES = \ + array.cxx \ + binarystring.cxx \ + blob.cxx \ + connection.cxx \ + cursor.cxx \ + encodings.cxx \ + errorhandler.cxx \ + except.cxx \ + field.cxx \ + largeobject.cxx \ + notification.cxx \ + params.cxx \ + pipeline.cxx \ + result.cxx \ + robusttransaction.cxx \ + sql_cursor.cxx \ + strconv.cxx \ + stream_from.cxx \ + stream_to.cxx \ + subtransaction.cxx \ + time.cxx \ + transaction.cxx \ + transaction_base.cxx \ + row.cxx \ + util.cxx \ + version.cxx \ + wait.cxx + +libpqxx_version = -release $(PQXX_ABI) + +libpqxx_la_LDFLAGS = $(libpqxx_version) \ + -rpath $(libdir) \ + ${POSTGRES_LIB} + +AM_CPPFLAGS = \ + -I$(top_srcdir)/include -I$(top_builddir)/include ${POSTGRES_INCLUDE} + +# Override automatically generated list of default includes. It contains only +# unnecessary entries, and incorrectly mentions include/pqxx directly. +DEFAULT_INCLUDES= + +MAINTAINERCLEANFILES=Makefile.in diff --git a/ext/libpqxx-7.7.3/src/Makefile.in b/ext/libpqxx-7.7.3/src/Makefile.in new file mode 100644 index 000000000..3c0152727 --- /dev/null +++ b/ext/libpqxx-7.7.3/src/Makefile.in @@ -0,0 +1,809 @@ +# Makefile.in generated by automake 1.16.4 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2021 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +subdir = src +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/config/m4/libtool.m4 \ + $(top_srcdir)/config/m4/ltoptions.m4 \ + $(top_srcdir)/config/m4/ltsugar.m4 \ + $(top_srcdir)/config/m4/ltversion.m4 \ + $(top_srcdir)/config/m4/lt~obsolete.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) +mkinstalldirs = $(SHELL) $(top_srcdir)/config/mkinstalldirs +CONFIG_HEADER = $(top_builddir)/include/pqxx/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(libdir)" +LTLIBRARIES = $(lib_LTLIBRARIES) +libpqxx_la_LIBADD = +am_libpqxx_la_OBJECTS = array.lo binarystring.lo blob.lo connection.lo \ + cursor.lo encodings.lo errorhandler.lo except.lo field.lo \ + largeobject.lo notification.lo params.lo pipeline.lo result.lo \ + robusttransaction.lo sql_cursor.lo strconv.lo stream_from.lo \ + stream_to.lo subtransaction.lo time.lo transaction.lo \ + transaction_base.lo row.lo util.lo version.lo wait.lo +libpqxx_la_OBJECTS = $(am_libpqxx_la_OBJECTS) +AM_V_lt = $(am__v_lt_@AM_V@) +am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) +am__v_lt_0 = --silent +am__v_lt_1 = +libpqxx_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ + $(CXXFLAGS) $(libpqxx_la_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +depcomp = $(SHELL) $(top_srcdir)/config/depcomp +am__maybe_remake_depfiles = depfiles +am__depfiles_remade = ./$(DEPDIR)/array.Plo \ + ./$(DEPDIR)/binarystring.Plo ./$(DEPDIR)/blob.Plo \ + ./$(DEPDIR)/connection.Plo ./$(DEPDIR)/cursor.Plo \ + ./$(DEPDIR)/encodings.Plo ./$(DEPDIR)/errorhandler.Plo \ + ./$(DEPDIR)/except.Plo ./$(DEPDIR)/field.Plo \ + ./$(DEPDIR)/largeobject.Plo ./$(DEPDIR)/notification.Plo \ + ./$(DEPDIR)/params.Plo ./$(DEPDIR)/pipeline.Plo \ + ./$(DEPDIR)/result.Plo ./$(DEPDIR)/robusttransaction.Plo \ + ./$(DEPDIR)/row.Plo ./$(DEPDIR)/sql_cursor.Plo \ + ./$(DEPDIR)/strconv.Plo ./$(DEPDIR)/stream_from.Plo \ + ./$(DEPDIR)/stream_to.Plo ./$(DEPDIR)/subtransaction.Plo \ + ./$(DEPDIR)/time.Plo ./$(DEPDIR)/transaction.Plo \ + ./$(DEPDIR)/transaction_base.Plo ./$(DEPDIR)/util.Plo \ + ./$(DEPDIR)/version.Plo ./$(DEPDIR)/wait.Plo +am__mv = mv -f +CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) +LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CXXFLAGS) $(CXXFLAGS) +AM_V_CXX = $(am__v_CXX_@AM_V@) +am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) +am__v_CXX_0 = @echo " CXX " $@; +am__v_CXX_1 = +CXXLD = $(CXX) +CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ + $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) +am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) +am__v_CXXLD_0 = @echo " CXXLD " $@; +am__v_CXXLD_1 = +SOURCES = $(libpqxx_la_SOURCES) +DIST_SOURCES = $(libpqxx_la_SOURCES) +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. 
This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. +am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/config/depcomp \ + $(top_srcdir)/config/mkinstalldirs +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CSCOPE = @CSCOPE@ +CTAGS = @CTAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DOXYGEN = @DOXYGEN@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ETAGS = @ETAGS@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GREP = @GREP@ +HAVE_DOT = @HAVE_DOT@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MKDIR = @MKDIR@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PG_CONFIG = @PG_CONFIG@ +PKG_CONFIG = @PKG_CONFIG@ +POSTGRES_INCLUDE = @POSTGRES_INCLUDE@ +PQXXVERSION = @PQXXVERSION@ +PQXX_ABI = @PQXX_ABI@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir 
= @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +with_postgres_lib = @with_postgres_lib@ +lib_LTLIBRARIES = libpqxx.la +libpqxx_la_SOURCES = \ + array.cxx \ + binarystring.cxx \ + blob.cxx \ + connection.cxx \ + cursor.cxx \ + encodings.cxx \ + errorhandler.cxx \ + except.cxx \ + field.cxx \ + largeobject.cxx \ + notification.cxx \ + params.cxx \ + pipeline.cxx \ + result.cxx \ + robusttransaction.cxx \ + sql_cursor.cxx \ + strconv.cxx \ + stream_from.cxx \ + stream_to.cxx \ + subtransaction.cxx \ + time.cxx \ + transaction.cxx \ + transaction_base.cxx \ + row.cxx \ + util.cxx \ + version.cxx \ + wait.cxx + +libpqxx_version = -release $(PQXX_ABI) +libpqxx_la_LDFLAGS = $(libpqxx_version) \ + -rpath $(libdir) \ + ${POSTGRES_LIB} + +AM_CPPFLAGS = \ + -I$(top_srcdir)/include -I$(top_builddir)/include ${POSTGRES_INCLUDE} + + +# Override automatically generated list of default includes. It contains only +# unnecessary entries, and incorrectly mentions include/pqxx directly. +DEFAULT_INCLUDES = +MAINTAINERCLEANFILES = Makefile.in +all: all-am + +.SUFFIXES: +.SUFFIXES: .cxx .lo .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu src/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu src/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +install-libLTLIBRARIES: $(lib_LTLIBRARIES) + @$(NORMAL_INSTALL) + @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ + list2=; for p in $$list; do \ + if test -f $$p; then \ + list2="$$list2 $$p"; \ + else :; fi; \ + done; \ + test -z "$$list2" || { \ + echo " $(MKDIR_P) '$(DESTDIR)$(libdir)'"; \ + $(MKDIR_P) "$(DESTDIR)$(libdir)" || exit 1; \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ + } + +uninstall-libLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ + for p in $$list; do \ + $(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ + done + +clean-libLTLIBRARIES: + -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) + 
@list='$(lib_LTLIBRARIES)'; \ + locs=`for p in $$list; do echo $$p; done | \ + sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \ + sort -u`; \ + test -z "$$locs" || { \ + echo rm -f $${locs}; \ + rm -f $${locs}; \ + } + +libpqxx.la: $(libpqxx_la_OBJECTS) $(libpqxx_la_DEPENDENCIES) $(EXTRA_libpqxx_la_DEPENDENCIES) + $(AM_V_CXXLD)$(libpqxx_la_LINK) -rpath $(libdir) $(libpqxx_la_OBJECTS) $(libpqxx_la_LIBADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/array.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/binarystring.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/blob.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/connection.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cursor.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/encodings.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/errorhandler.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/except.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/field.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/largeobject.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/notification.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/params.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pipeline.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/result.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/robusttransaction.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/row.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sql_cursor.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/strconv.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/stream_from.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/stream_to.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/subtransaction.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/time.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/transaction.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/transaction_base.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/util.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/version.Plo@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/wait.Plo@am__quote@ # am--include-marker + +$(am__depfiles_remade): + @$(MKDIR_P) $(@D) + @echo '# dummy' >$@-t && $(am__mv) $@-t $@ + +am--depfiles: $(am__depfiles_remade) + +.cxx.o: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ +@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' 
libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< + +.cxx.obj: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ +@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ +@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.cxx.lo: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ +@am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-am +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-am + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-am + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am + +distdir-am: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(LTLIBRARIES) +installdirs: + for dir in "$(DESTDIR)$(libdir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \ + mostlyclean-am + +distclean: distclean-am + -rm -f ./$(DEPDIR)/array.Plo + -rm -f ./$(DEPDIR)/binarystring.Plo + -rm -f ./$(DEPDIR)/blob.Plo + -rm -f ./$(DEPDIR)/connection.Plo + -rm -f ./$(DEPDIR)/cursor.Plo + -rm -f ./$(DEPDIR)/encodings.Plo + -rm -f ./$(DEPDIR)/errorhandler.Plo + -rm -f ./$(DEPDIR)/except.Plo + -rm -f ./$(DEPDIR)/field.Plo + -rm -f ./$(DEPDIR)/largeobject.Plo + -rm -f ./$(DEPDIR)/notification.Plo + -rm -f ./$(DEPDIR)/params.Plo + -rm -f ./$(DEPDIR)/pipeline.Plo + -rm -f ./$(DEPDIR)/result.Plo + -rm -f ./$(DEPDIR)/robusttransaction.Plo + -rm -f ./$(DEPDIR)/row.Plo + -rm -f ./$(DEPDIR)/sql_cursor.Plo + -rm -f ./$(DEPDIR)/strconv.Plo + -rm -f ./$(DEPDIR)/stream_from.Plo + -rm -f ./$(DEPDIR)/stream_to.Plo + -rm -f ./$(DEPDIR)/subtransaction.Plo + -rm -f ./$(DEPDIR)/time.Plo + -rm -f ./$(DEPDIR)/transaction.Plo + -rm -f ./$(DEPDIR)/transaction_base.Plo + -rm -f ./$(DEPDIR)/util.Plo + -rm -f ./$(DEPDIR)/version.Plo + -rm -f ./$(DEPDIR)/wait.Plo + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: install-libLTLIBRARIES + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f ./$(DEPDIR)/array.Plo + -rm -f ./$(DEPDIR)/binarystring.Plo + -rm -f ./$(DEPDIR)/blob.Plo + -rm -f ./$(DEPDIR)/connection.Plo + -rm -f ./$(DEPDIR)/cursor.Plo + -rm -f ./$(DEPDIR)/encodings.Plo + -rm -f 
./$(DEPDIR)/errorhandler.Plo + -rm -f ./$(DEPDIR)/except.Plo + -rm -f ./$(DEPDIR)/field.Plo + -rm -f ./$(DEPDIR)/largeobject.Plo + -rm -f ./$(DEPDIR)/notification.Plo + -rm -f ./$(DEPDIR)/params.Plo + -rm -f ./$(DEPDIR)/pipeline.Plo + -rm -f ./$(DEPDIR)/result.Plo + -rm -f ./$(DEPDIR)/robusttransaction.Plo + -rm -f ./$(DEPDIR)/row.Plo + -rm -f ./$(DEPDIR)/sql_cursor.Plo + -rm -f ./$(DEPDIR)/strconv.Plo + -rm -f ./$(DEPDIR)/stream_from.Plo + -rm -f ./$(DEPDIR)/stream_to.Plo + -rm -f ./$(DEPDIR)/subtransaction.Plo + -rm -f ./$(DEPDIR)/time.Plo + -rm -f ./$(DEPDIR)/transaction.Plo + -rm -f ./$(DEPDIR)/transaction_base.Plo + -rm -f ./$(DEPDIR)/util.Plo + -rm -f ./$(DEPDIR)/version.Plo + -rm -f ./$(DEPDIR)/wait.Plo + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-libLTLIBRARIES + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ + clean-generic clean-libLTLIBRARIES clean-libtool cscopelist-am \ + ctags ctags-am distclean distclean-compile distclean-generic \ + distclean-libtool distclean-tags distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-libLTLIBRARIES install-man install-pdf \ + install-pdf-am install-ps install-ps-am install-strip \ + installcheck installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags tags-am uninstall uninstall-am uninstall-libLTLIBRARIES + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/ext/libpqxx-7.7.3/src/array.cxx b/ext/libpqxx-7.7.3/src/array.cxx new file mode 100644 index 000000000..e35aaddce --- /dev/null +++ b/ext/libpqxx-7.7.3/src/array.cxx @@ -0,0 +1,240 @@ +/** Handling of SQL arrays. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#include "pqxx-source.hxx" + +#include +#include +#include +#include + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/array.hxx" +#include "pqxx/except.hxx" +#include "pqxx/internal/array-composite.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/strconv.hxx" +#include "pqxx/util.hxx" + +#include "pqxx/internal/header-post.hxx" + + +namespace pqxx +{ +/// Scan to next glyph in the buffer. Assumes there is one. +[[nodiscard]] std::string::size_type +array_parser::scan_glyph(std::string::size_type pos) const +{ + return m_scan(std::data(m_input), std::size(m_input), pos); +} + + +/// Scan to next glyph in a substring. Assumes there is one. +std::string::size_type array_parser::scan_glyph( + std::string::size_type pos, std::string::size_type end) const +{ + return m_scan(std::data(m_input), end, pos); +} + + +/// Find the end of a single-quoted SQL string in an SQL array. +/** Call this while pointed at the opening quote. + * + * Returns the offset of the first character after the closing quote. 
+ */ +std::string::size_type array_parser::scan_single_quoted_string() const +{ + assert(m_input[m_pos] == '\''); + auto const sz{std::size(m_input)}; + auto here{pqxx::internal::find_char<'\\', '\''>(m_scan, m_input, m_pos + 1)}; + while (here < sz) + { + char const c{m_input[here]}; + // Consume the slash or quote that we found. + ++here; + if (c == '\'') + { + // Single quote. + + // At end? + if (here >= sz) + return here; + + // SQL escapes single quotes by doubling them. Terrible idea, but it's + // what we have. Inspect the next character to find out whether this + // is the closing quote, or an escaped one inside the string. + if (m_input[here] != '\'') + return here; + // Check against embedded "'" byte in a multichar byte. If we do have a + // multibyte char, then we're still out of the string. + if (scan_glyph(here, sz) > here + 1) + PQXX_UNLIKELY return here; + + // We have a second quote. Consume it as well. + ++here; + } + else + { + assert(c == '\\'); + // Backslash escape. Skip ahead by one more character. + here = scan_glyph(here, sz); + } + // Race on to the next quote or backslash. + here = pqxx::internal::find_char<'\\', '\''>(m_scan, m_input, here); + } + throw argument_error{internal::concat("Null byte in SQL string: ", m_input)}; +} + + +/// Parse a single-quoted SQL string: un-quote it and un-escape it. +std::string +array_parser::parse_single_quoted_string(std::string::size_type end) const +{ + std::string output; + // Maximum output size is same as the input size, minus the opening and + // closing quotes. In the worst case, the real number could be half that. + // Usually it'll be a pretty close estimate. + output.reserve(end - m_pos - 2); + // XXX: find_char<'\\', '\''>(). + for (auto here = m_pos + 1, next = scan_glyph(here, end); here < end - 1; + here = next, next = scan_glyph(here, end)) + { + if (next - here == 1 and (m_input[here] == '\'' or m_input[here] == '\\')) + { + // Skip escape. (Performance-wise, we bet that these are relatively + // rare.) + PQXX_UNLIKELY + here = next; + next = scan_glyph(here, end); + } + + output.append(std::data(m_input) + here, std::data(m_input) + next); + } + + return output; +} + + +/// Find the end of a double-quoted SQL string in an SQL array. +std::string::size_type array_parser::scan_double_quoted_string() const +{ + return pqxx::internal::scan_double_quoted_string( + std::data(m_input), std::size(m_input), m_pos, m_scan); +} + + +/// Parse a double-quoted SQL string: un-quote it and un-escape it. +std::string +array_parser::parse_double_quoted_string(std::string::size_type end) const +{ + return pqxx::internal::parse_double_quoted_string( + std::data(m_input), end, m_pos, m_scan); +} + + +/// Find the end of an unquoted string in an SQL array. +/** Assumes UTF-8 or an ASCII-superset single-byte encoding. + */ +std::string::size_type array_parser::scan_unquoted_string() const +{ + return pqxx::internal::scan_unquoted_string<',', ';', '}'>( + std::data(m_input), std::size(m_input), m_pos, m_scan); +} + + +/// Parse an unquoted SQL string. +/** Here, the special unquoted value NULL means a null value, not a string + * that happens to spell "NULL". 
+ */ +std::string +array_parser::parse_unquoted_string(std::string::size_type end) const +{ + return pqxx::internal::parse_unquoted_string( + std::data(m_input), end, m_pos, m_scan); +} + + +array_parser::array_parser( + std::string_view input, internal::encoding_group enc) : + m_input(input), m_scan(internal::get_glyph_scanner(enc)) +{} + + +std::pair array_parser::get_next() +{ + std::string value; + + if (m_pos >= std::size(m_input)) + return std::make_pair(juncture::done, value); + + juncture found; + std::string::size_type end; + + if (scan_glyph(m_pos) - m_pos > 1) + { + // Non-ASCII unquoted string. + end = scan_unquoted_string(); + value = parse_unquoted_string(end); + found = juncture::string_value; + } + else + switch (m_input[m_pos]) + { + case '\0': throw failure{"Unexpected zero byte in array."}; + case '{': + found = juncture::row_start; + end = scan_glyph(m_pos); + break; + case '}': + found = juncture::row_end; + end = scan_glyph(m_pos); + break; + case '\'': + found = juncture::string_value; + end = scan_single_quoted_string(); + value = parse_single_quoted_string(end); + break; + case '"': + found = juncture::string_value; + end = scan_double_quoted_string(); + value = parse_double_quoted_string(end); + break; + default: + end = scan_unquoted_string(); + value = parse_unquoted_string(end); + if (value == "NULL") + { + // In this one situation, as a special case, NULL means a null field, + // not a string that happens to spell "NULL". + value.clear(); + found = juncture::null_value; + } + else + { + // The normal case: we just parsed an unquoted string. The value is + // what we need. + PQXX_LIKELY + found = juncture::string_value; + } + break; + } + + // Skip a trailing field separator, if present. + if (end < std::size(m_input)) + { + auto next{scan_glyph(end)}; + if (next - end == 1 and (m_input[end] == ',' or m_input[end] == ';')) + PQXX_UNLIKELY + end = next; + } + + m_pos = end; + return std::make_pair(found, value); +} +} // namespace pqxx diff --git a/ext/libpqxx-7.7.3/src/binarystring.cxx b/ext/libpqxx-7.7.3/src/binarystring.cxx new file mode 100644 index 000000000..936437006 --- /dev/null +++ b/ext/libpqxx-7.7.3/src/binarystring.cxx @@ -0,0 +1,108 @@ +/** Implementation of bytea (binary string) conversions. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#include "pqxx-source.hxx" + +#include +#include +#include +#include +#include +#include + +extern "C" +{ +#include +} + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/binarystring.hxx" +#include "pqxx/field.hxx" +#include "pqxx/strconv.hxx" + +#include "pqxx/internal/header-post.hxx" + + +namespace +{ +/// Copy data to a heap-allocated buffer. 
+std::shared_ptr + PQXX_COLD copy_to_buffer(void const *data, std::size_t len) +{ + void *const output{malloc(len + 1)}; + if (output == nullptr) + throw std::bad_alloc{}; + static_cast(output)[len] = '\0'; + memcpy(static_cast(output), data, len); + return {static_cast(output), std::free}; +} +} // namespace + + +PQXX_COLD pqxx::binarystring::binarystring(field const &F) +{ + unsigned char const *data{ + reinterpret_cast(F.c_str())}; + m_buf = + std::shared_ptr{PQunescapeBytea(data, &m_size), PQfreemem}; + if (m_buf == nullptr) + throw std::bad_alloc{}; +} + + +pqxx::binarystring::binarystring(std::string_view s) : + m_buf{copy_to_buffer(std::data(s), std::size(s))}, m_size{std::size(s)} +{} + + +pqxx::binarystring::binarystring(void const *binary_data, std::size_t len) : + m_buf{copy_to_buffer(binary_data, len)}, m_size{len} +{} + + +bool pqxx::binarystring::operator==(binarystring const &rhs) const noexcept +{ + return (std::size(rhs) == size()) and + (std::memcmp(data(), std::data(rhs), size()) == 0); +} + + +pqxx::binarystring & +pqxx::binarystring::operator=(binarystring const &rhs) = default; + +PQXX_COLD pqxx::binarystring::const_reference +pqxx::binarystring::at(size_type n) const +{ + if (n >= m_size) + { + if (m_size == 0) + throw std::out_of_range{"Accessing empty binarystring"}; + throw std::out_of_range{ + "binarystring index out of range: " + to_string(n) + + " (should be below " + to_string(m_size) + ")"}; + } + return data()[n]; +} + + +PQXX_COLD void pqxx::binarystring::swap(binarystring &rhs) +{ + m_buf.swap(rhs.m_buf); + + // This part very obviously can't go wrong, so do it last + auto const s{m_size}; + m_size = rhs.m_size; + rhs.m_size = s; +} + + +std::string pqxx::binarystring::str() const +{ + return std::string{get(), m_size}; +} diff --git a/ext/libpqxx-7.7.3/src/blob.cxx b/ext/libpqxx-7.7.3/src/blob.cxx new file mode 100644 index 000000000..1492d9107 --- /dev/null +++ b/ext/libpqxx-7.7.3/src/blob.cxx @@ -0,0 +1,337 @@ +#include "pqxx-source.hxx" + +#include +#include +#include + +#include + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/blob.hxx" +#include "pqxx/except.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/internal/gates/connection-largeobject.hxx" + +#include "pqxx/internal/header-post.hxx" + + +namespace +{ +constexpr int INV_WRITE{0x00020000}, INV_READ{0x00040000}; +} // namespace + + +pqxx::internal::pq::PGconn * +pqxx::blob::raw_conn(pqxx::connection *conn) noexcept +{ + pqxx::internal::gate::connection_largeobject gate{*conn}; + return gate.raw_connection(); +} + + +pqxx::internal::pq::PGconn * +pqxx::blob::raw_conn(pqxx::dbtransaction const &tx) noexcept +{ + return raw_conn(&tx.conn()); +} + + +std::string pqxx::blob::errmsg(connection const *conn) +{ + pqxx::internal::gate::const_connection_largeobject gate{*conn}; + return gate.error_message(); +} + + +pqxx::blob pqxx::blob::open_internal(dbtransaction &tx, oid id, int mode) +{ + auto &conn{tx.conn()}; + int fd{lo_open(raw_conn(&conn), id, mode)}; + if (fd == -1) + throw pqxx::failure{internal::concat( + "Could not open binary large object ", id, ": ", errmsg(&conn))}; + return {conn, fd}; +} + + +pqxx::oid pqxx::blob::create(dbtransaction &tx, oid id) +{ + oid actual_id{lo_create(raw_conn(tx), id)}; + if (actual_id == 0) + throw failure{internal::concat( + "Could not create binary large object: ", errmsg(&tx.conn()))}; + return actual_id; +} + + +void pqxx::blob::remove(dbtransaction &tx, oid id) +{ + if (id == 0) + throw usage_error{"Trying to delete binary large 
object without an ID."}; + if (lo_unlink(raw_conn(tx), id) == -1) + throw failure{internal::concat( + "Could not delete large object ", id, ": ", errmsg(&tx.conn()))}; +} + + +pqxx::blob pqxx::blob::open_r(dbtransaction &tx, oid id) +{ + return open_internal(tx, id, INV_READ); +} + + +pqxx::blob pqxx::blob::open_w(dbtransaction &tx, oid id) +{ + return open_internal(tx, id, INV_WRITE); +} + + +pqxx::blob pqxx::blob::open_rw(dbtransaction &tx, oid id) +{ + return open_internal(tx, id, INV_READ | INV_WRITE); +} + + +pqxx::blob::blob(blob &&other) : + m_conn{std::exchange(other.m_conn, nullptr)}, + m_fd{std::exchange(other.m_fd, -1)} +{} + + +pqxx::blob &pqxx::blob::operator=(blob &&other) +{ + if (m_fd != -1) + lo_close(raw_conn(m_conn), m_fd); + m_conn = std::exchange(other.m_conn, nullptr); + m_fd = std::exchange(other.m_fd, -1); + return *this; +} + + +pqxx::blob::~blob() +{ + try + { + close(); + } + catch (std::exception const &e) + { + if (m_conn != nullptr) + PQXX_UNLIKELY + m_conn->process_notice(internal::concat( + "Failure while closing binary large object: ", e.what(), "\n")); + } +} + + +void pqxx::blob::close() +{ + if (m_fd != -1) + { + lo_close(raw_conn(m_conn), m_fd); + m_fd = -1; + m_conn = nullptr; + } +} + + +std::size_t pqxx::blob::raw_read(std::byte buf[], std::size_t size) +{ + if (m_conn == nullptr) + throw usage_error{"Attempt to read from a closed binary large object."}; + if (size > chunk_limit) + throw range_error{ + "Reads from a binary large object must be less than 2 GB at once."}; + auto data{reinterpret_cast(buf)}; + int received{lo_read(raw_conn(m_conn), m_fd, data, size)}; + if (received < 0) + throw failure{ + internal::concat("Could not read from binary large object: ", errmsg())}; + return static_cast(received); +} + + +std::size_t +pqxx::blob::read(std::basic_string &buf, std::size_t size) +{ + buf.resize(size); + auto const received{raw_read(std::data(buf), size)}; + buf.resize(received); + return static_cast(received); +} + + +void pqxx::blob::raw_write(std::byte const buf[], std::size_t size) +{ + if (m_conn == nullptr) + throw usage_error{"Attempt to write to a closed binary large object."}; + if (size > chunk_limit) + throw range_error{ + "Writes to a binary large object must be less than 2 GB at once."}; + auto ptr{reinterpret_cast(buf)}; + int written{lo_write(raw_conn(m_conn), m_fd, ptr, size)}; + if (written < 0) + throw failure{ + internal::concat("Write to binary large object failed: ", errmsg())}; +} + + +void pqxx::blob::resize(std::int64_t size) +{ + if (m_conn == nullptr) + throw usage_error{"Attempt to resize a closed binary large object."}; + if (lo_truncate64(raw_conn(m_conn), m_fd, size) < 0) + throw failure{ + internal::concat("Binary large object truncation failed: ", errmsg())}; +} + + +std::int64_t pqxx::blob::tell() const +{ + if (m_conn == nullptr) + throw usage_error{"Attempt to tell() a closed binary large object."}; + std::int64_t offset{lo_tell64(raw_conn(m_conn), m_fd)}; + if (offset < 0) + throw failure{internal::concat( + "Error reading binary large object position: ", errmsg())}; + return offset; +} + + +std::int64_t pqxx::blob::seek(std::int64_t offset, int whence) +{ + if (m_conn == nullptr) + throw usage_error{"Attempt to seek() a closed binary large object."}; + std::int64_t seek_result{lo_lseek64(raw_conn(m_conn), m_fd, offset, whence)}; + if (seek_result < 0) + throw failure{internal::concat( + "Error during seek on binary large object: ", errmsg())}; + return seek_result; +} + + +std::int64_t 
pqxx::blob::seek_abs(std::int64_t offset) +{ + return this->seek(offset, SEEK_SET); +} + + +std::int64_t pqxx::blob::seek_rel(std::int64_t offset) +{ + return this->seek(offset, SEEK_CUR); +} + + +std::int64_t pqxx::blob::seek_end(std::int64_t offset) +{ + return this->seek(offset, SEEK_END); +} + + +pqxx::oid pqxx::blob::from_buf( + dbtransaction &tx, std::basic_string_view data, oid id) +{ + oid actual_id{create(tx, id)}; + try + { + open_w(tx, actual_id).write(data); + } + catch (std::exception const &) + { + try + { + remove(tx, id); + } + catch (std::exception const &e) + { + try + { + tx.conn().process_notice(internal::concat( + "Could not clean up partially created large object ", id, ": ", + e.what())); + } + catch (std::exception const &) + {} + } + throw; + } + return actual_id; +} + + +void pqxx::blob::append_from_buf( + dbtransaction &tx, std::basic_string_view data, oid id) +{ + if (std::size(data) > chunk_limit) + throw range_error{ + "Writes to a binary large object must be less than 2 GB at once."}; + blob b{open_w(tx, id)}; + b.seek_end(); + b.write(data); +} + + +void pqxx::blob::to_buf( + dbtransaction &tx, oid id, std::basic_string &buf, + std::size_t max_size) +{ + open_r(tx, id).read(buf, max_size); +} + + +std::size_t pqxx::blob::append_to_buf( + dbtransaction &tx, oid id, std::int64_t offset, + std::basic_string &buf, std::size_t append_max) +{ + if (append_max > chunk_limit) + throw range_error{ + "Reads from a binary large object must be less than 2 GB at once."}; + auto b{open_r(tx, id)}; + b.seek_abs(offset); + auto const org_size{std::size(buf)}; + buf.resize(org_size + append_max); + try + { + auto here{reinterpret_cast(std::data(buf) + org_size)}; + auto chunk{static_cast( + lo_read(b.raw_conn(b.m_conn), b.m_fd, here, append_max))}; + buf.resize(org_size + chunk); + return chunk; + } + catch (std::exception const &) + { + buf.resize(org_size); + throw; + } +} + + +pqxx::oid pqxx::blob::from_file(dbtransaction &tx, char const path[]) +{ + auto id{lo_import(raw_conn(tx), path)}; + if (id == 0) + throw failure{internal::concat( + "Could not import '", path, "' as a binary large object: ", errmsg(tx))}; + return id; +} + + +pqxx::oid pqxx::blob::from_file(dbtransaction &tx, char const path[], oid id) +{ + auto actual_id{lo_import_with_oid(raw_conn(tx), path, id)}; + if (actual_id == 0) + throw failure{internal::concat( + "Could not import '", path, "' as binary large object ", id, ": ", + errmsg(tx))}; + return actual_id; +} + + +void pqxx::blob::to_file(dbtransaction &tx, oid id, char const path[]) +{ + if (lo_export(raw_conn(tx), id, path) < 0) + throw failure{internal::concat( + "Could not export binary large object ", id, " to file '", path, + "': ", errmsg(tx))}; +} diff --git a/ext/libpqxx-7.7.3/src/connection.cxx b/ext/libpqxx-7.7.3/src/connection.cxx new file mode 100644 index 000000000..bef8c79aa --- /dev/null +++ b/ext/libpqxx-7.7.3/src/connection.cxx @@ -0,0 +1,1274 @@ +/** Implementation of the pqxx::connection class. + * + * pqxx::connection encapsulates a connection to a database. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#include "pqxx-source.hxx" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// For fcntl(). 
+#if __has_include() +# include +#endif +#if __has_include() +# include +#endif + +// For ioctlsocket(). +#if defined(_WIN32) && __has_include() +# include +#endif + +extern "C" +{ +#include +} + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/binarystring.hxx" +#include "pqxx/internal/wait.hxx" +#include "pqxx/nontransaction.hxx" +#include "pqxx/notification.hxx" +#include "pqxx/pipeline.hxx" +#include "pqxx/result.hxx" +#include "pqxx/strconv.hxx" +#include "pqxx/transaction.hxx" + +#include "pqxx/internal/gates/errorhandler-connection.hxx" +#include "pqxx/internal/gates/result-connection.hxx" +#include "pqxx/internal/gates/result-creation.hxx" + +#include "pqxx/internal/header-post.hxx" + + +extern "C" +{ + // The PQnoticeProcessor that receives an error or warning from libpq and + // sends it to the appropriate connection for processing. + void pqxx_notice_processor(void *conn, char const *msg) noexcept + { + reinterpret_cast(conn)->process_notice(msg); + } + + + // There's no way in libpq to disable a connection's notice processor. So, + // set an inert one to get the same effect. + void inert_notice_processor(void *, char const *) noexcept {} +} // extern "C" + +using namespace std::literals; + +std::string PQXX_COLD +pqxx::encrypt_password(char const user[], char const password[]) +{ + std::unique_ptr> p{ + PQencryptPassword(password, user), PQfreemem}; + return {p.get()}; +} + + +pqxx::connection::connection(connection &&rhs) : + m_conn{rhs.m_conn}, m_unique_id{rhs.m_unique_id} +{ + rhs.check_movable(); + rhs.m_conn = nullptr; +} + + +pqxx::connection::connection( + connection::connect_mode, zview connection_string) : + m_conn{PQconnectStart(connection_string.c_str())} +{ + if (m_conn == nullptr) + throw std::bad_alloc{}; + if (status() == CONNECTION_BAD) + throw pqxx::broken_connection{PQerrorMessage(m_conn)}; +} + + +std::pair pqxx::connection::poll_connect() +{ + switch (PQconnectPoll(m_conn)) + { + case PGRES_POLLING_FAILED: + throw pqxx::broken_connection{PQerrorMessage(m_conn)}; + case PGRES_POLLING_READING: return std::make_pair(true, false); + case PGRES_POLLING_WRITING: return std::make_pair(false, true); + case PGRES_POLLING_OK: + if (not is_open()) + throw pqxx::broken_connection{PQerrorMessage(m_conn)}; + return std::make_pair(false, false); + case PGRES_POLLING_ACTIVE: + throw internal_error{ + "Nonblocking connection poll returned obsolete 'active' state."}; + default: + throw internal_error{ + "Nonblocking connection poll returned unknown value."}; + } +} + +void pqxx::connection::complete_init() +{ + if (m_conn == nullptr) + throw std::bad_alloc{}; + try + { + if (not is_open()) + throw broken_connection{PQerrorMessage(m_conn)}; + + set_up_state(); + } + catch (std::exception const &) + { + PQfinish(m_conn); + m_conn = nullptr; + throw; + } +} + + +void pqxx::connection::init(char const options[]) +{ + m_conn = PQconnectdb(options); + complete_init(); +} + + +void pqxx::connection::init(char const *params[], char const *values[]) +{ + m_conn = PQconnectdbParams(params, values, 0); + complete_init(); +} + + +void pqxx::connection::check_movable() const +{ + if (m_trans) + throw pqxx::usage_error{"Moving a connection with a transaction open."}; + if (not std::empty(m_errorhandlers)) + throw pqxx::usage_error{ + "Moving a connection with error handlers registered."}; + if (not std::empty(m_receivers)) + throw pqxx::usage_error{ + "Moving a connection with notification receivers registered."}; +} + + +void pqxx::connection::check_overwritable() const +{ + if 
(m_trans) + throw pqxx::usage_error{ + "Moving a connection onto one with a transaction open."}; + if (not std::empty(m_errorhandlers)) + throw pqxx::usage_error{ + "Moving a connection onto one with error handlers registered."}; + if (not std::empty(m_receivers)) + throw usage_error{ + "Moving a connection onto one " + "with notification receivers registered."}; +} + + +pqxx::connection &pqxx::connection::operator=(connection &&rhs) +{ + check_overwritable(); + rhs.check_movable(); + + close(); + + m_conn = std::exchange(rhs.m_conn, nullptr); + m_unique_id = rhs.m_unique_id; + + return *this; +} + + +pqxx::result pqxx::connection::make_result( + internal::pq::PGresult *pgr, std::shared_ptr const &query, + std::string_view desc) +{ + if (pgr == nullptr) + { + if (is_open()) + throw failure(err_msg()); + else + throw broken_connection{"Lost connection to the database server."}; + } + internal::encoding_group enc; + try + { + enc = internal::enc_group(encoding_id()); + } + catch (std::exception const &) + { + // Don't let the PGresult leak. + // TODO: Can we just accept a unique_ptr instead? + internal::clear_result(pgr); + throw; + } + auto const r{pqxx::internal::gate::result_creation::create(pgr, query, enc)}; + pqxx::internal::gate::result_creation{r}.check_status(desc); + return r; +} + + +int PQXX_COLD pqxx::connection::backendpid() const &noexcept +{ + return (m_conn == nullptr) ? 0 : PQbackendPID(m_conn); +} + + +namespace +{ +PQXX_PURE int socket_of(::pqxx::internal::pq::PGconn const *c) noexcept +{ + return (c == nullptr) ? -1 : PQsocket(c); +} +} // namespace + + +int pqxx::connection::sock() const &noexcept +{ + return socket_of(m_conn); +} + + +int PQXX_COLD pqxx::connection::protocol_version() const noexcept +{ + return (m_conn == nullptr) ? 0 : PQprotocolVersion(m_conn); +} + + +int PQXX_COLD pqxx::connection::server_version() const noexcept +{ + return PQserverVersion(m_conn); +} + + +void pqxx::connection::set_variable( + std::string_view var, std::string_view value) & +{ + exec(internal::concat("SET ", quote_name(var), "=", value)); +} + + +std::string pqxx::connection::get_variable(std::string_view var) +{ + return exec(internal::concat("SHOW ", quote_name(var))) + .at(0) + .at(0) + .as(std::string{}); +} + + +std::string pqxx::connection::get_var(std::string_view var) +{ + // (Variables can't be null, so far as I can make out.) + return exec(internal::concat("SHOW "sv, quote_name(var)))[0][0] + .as(); +} + + +/** Set up various parts of logical connection state that may need to be + * recovered because the physical connection to the database was lost and is + * being reset, or that may not have been initialized yet. + */ +void pqxx::connection::set_up_state() +{ + if (auto const proto_ver{protocol_version()}; proto_ver < 3) + { + if (proto_ver == 0) + throw broken_connection{"No connection."}; + else + throw feature_not_supported{ + "Unsupported frontend/backend protocol version; 3.0 is the minimum."}; + } + + if (server_version() <= 90000) + throw feature_not_supported{ + "Unsupported server version; 9.0 is the minimum."}; + + // The default notice processor in libpq writes to stderr. Ours does + // nothing. + // If the caller registers an error handler, this gets replaced with an + // error handler that walks down the connection's chain of handlers. We + // don't do that by default because there's a danger: libpq may call the + // notice processor via a result object, even after the connection has been + // destroyed and the handlers list no longer exists. 
+ PQXX_LIKELY + PQsetNoticeProcessor(m_conn, inert_notice_processor, nullptr); +} + + +bool pqxx::connection::is_open() const noexcept +{ + return status() == CONNECTION_OK; +} + + +void pqxx::connection::process_notice_raw(char const msg[]) noexcept +{ + if ((msg == nullptr) or (*msg == '\0')) + return; + auto const rbegin = std::crbegin(m_errorhandlers), + rend = std::crend(m_errorhandlers); + for (auto i{rbegin}; (i != rend) and (**i)(msg); ++i) + ; +} + + +void pqxx::connection::process_notice(char const msg[]) noexcept +{ + if (msg == nullptr) + return; + zview const view{msg}; + if (std::empty(view)) + return; + else if (msg[std::size(view) - 1] == '\n') + process_notice_raw(msg); + else + // Newline is missing. Let the zview version of the code add it. + PQXX_UNLIKELY + process_notice(view); +} + + +void pqxx::connection::process_notice(zview msg) noexcept +{ + if (std::empty(msg)) + return; + else if (msg[std::size(msg) - 1] == '\n') + process_notice_raw(msg.c_str()); + else + try + { + // Add newline. + std::string buf; + buf.reserve(std::size(msg) + 1); + buf.assign(msg); + buf.push_back('\n'); + process_notice_raw(buf.c_str()); + } + catch (std::exception const &) + { + // If nothing else works, try writing the message without the newline. + PQXX_UNLIKELY + process_notice_raw(msg.c_str()); + } +} + + +void PQXX_COLD pqxx::connection::trace(FILE *out) noexcept +{ + if (m_conn) + { + if (out) + PQtrace(m_conn, out); + else + PQuntrace(m_conn); + } +} + + +void PQXX_COLD pqxx::connection::add_receiver(pqxx::notification_receiver *n) +{ + if (n == nullptr) + throw argument_error{"Null receiver registered"}; + + // Add to receiver list and attempt to start listening. + auto const p{m_receivers.find(n->channel())}; + auto const new_value{receiver_list::value_type{n->channel(), n}}; + + if (p == std::end(m_receivers)) + { + // Not listening on this event yet, start doing so. + auto const lq{std::make_shared( + internal::concat("LISTEN ", quote_name(n->channel())))}; + make_result(PQexec(m_conn, lq->c_str()), lq, *lq); + m_receivers.insert(new_value); + } + else + { + m_receivers.insert(p, new_value); + } +} + + +void PQXX_COLD +pqxx::connection::remove_receiver(pqxx::notification_receiver *T) noexcept +{ + if (T == nullptr) + return; + + try + { + auto needle{ + std::pair{T->channel(), T}}; + auto R{m_receivers.equal_range(needle.first)}; + auto i{find(R.first, R.second, needle)}; + + if (i == R.second) + { + PQXX_UNLIKELY + process_notice(internal::concat( + "Attempt to remove unknown receiver '", needle.first, "'")); + } + else + { + // Erase first; otherwise a notification for the same receiver may yet + // come in and wreak havoc. Thanks Dragan Milenkovic. 
+ bool const gone{R.second == ++R.first}; + m_receivers.erase(i); + if (gone) + exec(internal::concat("UNLISTEN ", quote_name(needle.first)).c_str()); + } + } + catch (std::exception const &e) + { + PQXX_UNLIKELY + process_notice(e.what()); + } +} + + +bool pqxx::connection::consume_input() noexcept +{ + return PQconsumeInput(m_conn) != 0; +} + + +bool pqxx::connection::is_busy() const noexcept +{ + return PQisBusy(m_conn) != 0; +} + + +void PQXX_COLD pqxx::connection::cancel_query() +{ + using pointer = std::unique_ptr>; + pointer cancel{PQgetCancel(m_conn), PQfreeCancel}; + if (cancel == nullptr) + PQXX_UNLIKELY + throw std::bad_alloc{}; + + std::array errbuf; + auto const err{errbuf.data()}; + auto const c{ + PQcancel(cancel.get(), err, static_cast(std::size(errbuf)))}; + if (c == 0) + PQXX_UNLIKELY + throw pqxx::sql_error{std::string{err, std::size(errbuf)}, "[cancel]"}; +} + + +namespace +{ +// C++20: std::span? +/// Get error string for a given @c errno value. +template +char const *PQXX_COLD +error_string(int err_num, std::array &buffer) +{ + // Not entirely clear whether strerror_s will be in std or global namespace. + using namespace std; + +#if defined(PQXX_HAVE_STERROR_S) || defined(PQXX_HAVE_STRERROR_R) +# if defined(PQXX_HAVE_STRERROR_S) + auto const err_result{strerror_s(std::data(buffer), BYTES, err_num)}; +# else + auto const err_result{strerror_r(err_num, std::data(buffer), BYTES)}; +# endif + if constexpr (std::is_same_v, char *>) + { + // GNU version of strerror_r; returns the error string, which may or may + // not reside within buffer. + return err_result; + } + else + { + // Either strerror_s or POSIX strerror_r; returns an error code. + // Sorry for being lazy here: Not reporting error string for the case + // where we can't retrieve an error string. + if (err_result == 0) + return std::data(buffer); + else + return "Compound errors."; + } + +#else + // Fallback case, hopefully for no actual platforms out there. + pqxx::ignore_unused(err_num, buffer); + return "(No error information available.)"; +#endif +} +} // namespace + + +#if defined(_WIN32) || __has_include() +void pqxx::connection::set_blocking(bool block) & +{ + auto const fd{sock()}; +# if defined _WIN32 + unsigned long mode{not block}; + if (::ioctlsocket(fd, FIONBIO, &mode) != 0) + { + std::array errbuf; + char const *err{error_string(WSAGetLastError(), errbuf)}; + throw broken_connection{ + internal::concat("Could not set socket's blocking mode: ", err)}; + } +# else // _WIN32 + std::array errbuf; + auto flags{::fcntl(fd, F_GETFL, 0)}; + if (flags == -1) + { + char const *const err{error_string(errno, errbuf)}; + throw broken_connection{ + internal::concat("Could not get socket state: ", err)}; + } + if (block) + flags |= O_NONBLOCK; + else + flags &= ~O_NONBLOCK; + if (::fcntl(fd, F_SETFL, flags) == -1) + { + char const *const err{error_string(errno, errbuf)}; + throw broken_connection{ + internal::concat("Could not set socket's blocking mode: ", err)}; + } +# endif // _WIN32 +} +#endif // defined(_WIN32) || __has_include() + + +void PQXX_COLD +pqxx::connection::set_verbosity(error_verbosity verbosity) &noexcept +{ + PQsetErrorVerbosity(m_conn, static_cast(verbosity)); +} + + +namespace +{ +/// Unique pointer to PGnotify. +using notify_ptr = std::unique_ptr>; + + +/// Get one notification from a connection, or null. 
+notify_ptr get_notif(pqxx::internal::pq::PGconn *conn) +{ + return notify_ptr(PQnotifies(conn), PQfreemem); +} +} // namespace + + +int pqxx::connection::get_notifs() +{ + if (not consume_input()) + throw broken_connection{"Connection lost."}; + + // Even if somehow we receive notifications during our transaction, don't + // deliver them. + if (m_trans) + PQXX_UNLIKELY + return 0; + + int notifs = 0; + for (auto N{get_notif(m_conn)}; N.get(); N = get_notif(m_conn)) + { + notifs++; + + auto const Hit{m_receivers.equal_range(std::string{N->relname})}; + if (Hit.second != Hit.first) + { + std::string payload{N->extra}; + for (auto i{Hit.first}; i != Hit.second; ++i) try + { + (*i->second)(payload, N->be_pid); + } + catch (std::exception const &e) + { + try + { + process_notice(internal::concat( + "Exception in notification receiver '", i->first, + "': ", e.what(), "\n")); + } + catch (std::bad_alloc const &) + { + // Out of memory. Try to get the message out in a more robust way. + process_notice( + "Exception in notification receiver, " + "and also ran out of memory\n"); + } + catch (std::exception const &) + { + process_notice( + "Exception in notification receiver " + "(compounded by other error)\n"); + } + } + } + + N.reset(); + } + return notifs; +} + + +char const *PQXX_COLD pqxx::connection::dbname() const +{ + return PQdb(m_conn); +} + + +char const *PQXX_COLD pqxx::connection::username() const +{ + return PQuser(m_conn); +} + + +char const *PQXX_COLD pqxx::connection::hostname() const +{ + return PQhost(m_conn); +} + + +char const *PQXX_COLD pqxx::connection::port() const +{ + return PQport(m_conn); +} + + +char const *pqxx::connection::err_msg() const noexcept +{ + return (m_conn == nullptr) ? "No connection to database" : + PQerrorMessage(m_conn); +} + + +void PQXX_COLD pqxx::connection::register_errorhandler(errorhandler *handler) +{ + // Set notice processor on demand, i.e. only when the caller actually + // registers an error handler. + // We do this just to make it less likely that users fall into the trap + // where a result object may hold a notice processor derived from its parent + // connection which has already been destroyed. Our notice processor goes + // through the connection's list of error handlers. If the connection object + // has already been destroyed though, that list no longer exists. + // By setting the notice processor on demand, we absolve users who never + // register an error handler from ahving to care about this nasty subtlety. + if (std::empty(m_errorhandlers)) + PQsetNoticeProcessor(m_conn, pqxx_notice_processor, this); + m_errorhandlers.push_back(handler); +} + + +void PQXX_COLD +pqxx::connection::unregister_errorhandler(errorhandler *handler) noexcept +{ + // The errorhandler itself will take care of nulling its pointer to this + // connection. 
+ m_errorhandlers.remove(handler); + if (std::empty(m_errorhandlers)) + PQsetNoticeProcessor(m_conn, inert_notice_processor, nullptr); +} + + +std::vector + PQXX_COLD pqxx::connection::get_errorhandlers() const +{ + return {std::begin(m_errorhandlers), std::end(m_errorhandlers)}; +} + + +pqxx::result +pqxx::connection::exec(std::string_view query, std::string_view desc) +{ + return exec(std::make_shared(query), desc); +} + + +pqxx::result pqxx::connection::exec( + std::shared_ptr query, std::string_view desc) +{ + auto const res{make_result(PQexec(m_conn, query->c_str()), query, desc)}; + get_notifs(); + return res; +} + + +std::string pqxx::connection::encrypt_password( + char const user[], char const password[], char const *algorithm) +{ +#if defined(PQXX_HAVE_PQENCRYPTPASSWORDCONN) + { + auto const buf{PQencryptPasswordConn(m_conn, password, user, algorithm)}; + std::unique_ptr> ptr{ + buf, [](char const *x) { PQfreemem(const_cast(x)); }}; + return std::string(ptr.get()); + } +#else + { + // No PQencryptPasswordConn. Fall back on the old PQencryptPassword... + // unless the caller selects a different algorithm. + if (algorithm != nullptr and std::strcmp(algorithm, "md5") != 0) + throw feature_not_supported{ + "Could not encrypt password: available libpq version does not support " + "algorithms other than md5."}; +# include "pqxx/internal/ignore-deprecated-pre.hxx" + return pqxx::encrypt_password(user, password); +# include "pqxx/internal/ignore-deprecated-post.hxx" + } +#endif // PQXX_HAVE_PQENCRYPTPASSWORDCONN +} + + +void pqxx::connection::prepare(char const name[], char const definition[]) & +{ + auto const q{std::make_shared( + pqxx::internal::concat("[PREPARE ", name, "]"))}; + + auto const r{ + make_result(PQprepare(m_conn, name, definition, 0, nullptr), q, *q)}; +} + + +void pqxx::connection::prepare(char const definition[]) & +{ + this->prepare("", definition); +} + + +void pqxx::connection::unprepare(std::string_view name) +{ + exec(internal::concat("DEALLOCATE ", quote_name(name))); +} + + +pqxx::result pqxx::connection::exec_prepared( + std::string_view statement, internal::c_params const &args) +{ + auto const q{std::make_shared(statement)}; + auto const pq_result{PQexecPrepared( + m_conn, q->c_str(), + check_cast(std::size(args.values), "exec_prepared"sv), + args.values.data(), args.lengths.data(), + reinterpret_cast(args.formats.data()), + static_cast(format::text))}; + auto const r{make_result(pq_result, q, statement)}; + get_notifs(); + return r; +} + + +void pqxx::connection::close() +{ + try + { + if (m_trans) + PQXX_UNLIKELY + process_notice(internal::concat( + "Closing connection while ", + internal::describe_object("transaction"sv, m_trans->name()), + " is still open.")); + + if (not std::empty(m_receivers)) + { + PQXX_UNLIKELY + process_notice("Closing connection with outstanding receivers."); + m_receivers.clear(); + } + + std::list old_handlers; + m_errorhandlers.swap(old_handlers); + auto const rbegin{std::crbegin(old_handlers)}, + rend{std::crend(old_handlers)}; + for (auto i{rbegin}; i != rend; ++i) + pqxx::internal::gate::errorhandler_connection{**i}.unregister(); + + PQfinish(m_conn); + m_conn = nullptr; + } + catch (std::exception const &) + { + m_conn = nullptr; + throw; + } +} + + +int pqxx::connection::status() const noexcept +{ + return PQstatus(m_conn); +} + + +namespace +{ +/// Return a name for t, if t is non-null and has a name; or empty string. +std::string_view get_name(pqxx::transaction_base const *t) +{ + return (t == nullptr) ? 
""sv : t->name(); +} +} // namespace + + +void pqxx::connection::register_transaction(transaction_base *t) +{ + internal::check_unique_register( + m_trans, "transaction", get_name(m_trans), t, "transaction", get_name(t)); + m_trans = t; +} + + +void pqxx::connection::unregister_transaction(transaction_base *t) noexcept +{ + try + { + internal::check_unique_unregister( + m_trans, "transaction", get_name(m_trans), t, "transaction", + get_name(t)); + } + catch (std::exception const &e) + { + process_notice(e.what()); + } + m_trans = nullptr; +} + + +std::pair>, std::size_t> +pqxx::connection::read_copy_line() +{ + char *buf{nullptr}; + + // Allocate once, re-use across invocations. + static auto const q{std::make_shared("[END COPY]")}; + + auto const line_len{PQgetCopyData(m_conn, &buf, false)}; + switch (line_len) + { + case -2: // Error. + throw failure{ + internal::concat("Reading of table data failed: ", err_msg())}; + + case -1: // End of COPY. + make_result(PQgetResult(m_conn), q, *q); + return {}; + + case 0: // "Come back later." + throw internal_error{"table read inexplicably went asynchronous"}; + + default: // Success, got buffer size. + // Line size includes a trailing zero, which we ignore. + auto const text_len{static_cast(line_len) - 1}; + return std::make_pair( + std::unique_ptr>{buf, PQfreemem}, + text_len); + } +} + + +void pqxx::connection::write_copy_line(std::string_view line) +{ + static std::string const err_prefix{"Error writing to table: "}; + auto const size{check_cast( + internal::ssize(line), "Line in stream_to is too long to process."sv)}; + if (PQputCopyData(m_conn, line.data(), size) <= 0) + PQXX_UNLIKELY + throw failure{err_prefix + err_msg()}; + if (PQputCopyData(m_conn, "\n", 1) <= 0) + PQXX_UNLIKELY + throw failure{err_prefix + err_msg()}; +} + + +void pqxx::connection::end_copy_write() +{ + int res{PQputCopyEnd(m_conn, nullptr)}; + switch (res) + { + case -1: + throw failure{internal::concat("Write to table failed: ", err_msg())}; + case 0: throw internal_error{"table write is inexplicably asynchronous"}; + case 1: + // Normal termination. Retrieve result object. + break; + + default: + throw internal_error{ + internal::concat("unexpected result ", res, " from PQputCopyEnd()")}; + } + + static auto const q{std::make_shared("[END COPY]")}; + make_result(PQgetResult(m_conn), q, *q); +} + + +void pqxx::connection::start_exec(char const query[]) +{ + if (PQsendQuery(m_conn, query) == 0) + PQXX_UNLIKELY + throw failure{err_msg()}; +} + + +pqxx::internal::pq::PGresult *pqxx::connection::get_result() +{ + return PQgetResult(m_conn); +} + + +size_t pqxx::connection::esc_to_buf(std::string_view text, char *buf) const +{ + int err{0}; + auto const copied{ + PQescapeStringConn(m_conn, buf, text.data(), std::size(text), &err)}; + if (err) + PQXX_UNLIKELY + throw argument_error{err_msg()}; + return copied; +} + + +std::string pqxx::connection::esc(std::string_view text) const +{ + std::string buf; + buf.resize(2 * std::size(text) + 1); + auto const copied{esc_to_buf(text, buf.data())}; + buf.resize(copied); + return buf; +} + + +std::string PQXX_COLD +pqxx::connection::esc_raw(unsigned char const bin[], std::size_t len) const +{ + return pqxx::internal::esc_bin(binary_cast(bin, len)); +} + + +std::string +pqxx::connection::esc_raw(std::basic_string_view bin) const +{ + return pqxx::internal::esc_bin(bin); +} + + +std::string PQXX_COLD pqxx::connection::unesc_raw(char const text[]) const +{ + if (text[0] == '\\' and text[1] == 'x') + { + // Hex-escaped format. 
+ std::string buf; + buf.resize(pqxx::internal::size_unesc_bin(std::strlen(text))); + pqxx::internal::unesc_bin( + std::string_view{text}, reinterpret_cast(buf.data())); + return buf; + } + else + { + // Legacy escape format. + // TODO: Remove legacy support. + std::size_t len; + auto bytes{const_cast( + reinterpret_cast(text))}; + std::unique_ptr> const + ptr{PQunescapeBytea(bytes, &len), PQfreemem}; + return std::string{ptr.get(), ptr.get() + len}; + } +} + + +std::string PQXX_COLD +pqxx::connection::quote_raw(unsigned char const bin[], std::size_t len) const +{ + return internal::concat("'", esc_raw(binary_cast(bin, len)), "'::bytea"); +} + + +std::string +pqxx::connection::quote_raw(std::basic_string_view bytes) const +{ + return internal::concat("'", esc_raw(bytes), "'::bytea"); +} + + +std::string PQXX_COLD pqxx::connection::quote(binarystring const &b) const +{ + return quote(b.bytes_view()); +} + + +std::string pqxx::connection::quote(std::basic_string_view b) const +{ + return internal::concat("'", esc_raw(b), "'::bytea"); +} + + +std::string pqxx::connection::quote_name(std::string_view identifier) const +{ + std::unique_ptr> buf{ + PQescapeIdentifier(m_conn, identifier.data(), std::size(identifier)), + PQfreemem}; + if (buf.get() == nullptr) + PQXX_UNLIKELY + throw failure{err_msg()}; + return std::string{buf.get()}; +} + + +std::string pqxx::connection::quote_table(std::string_view table_name) const +{ + return this->quote_name(table_name); +} + + +std::string pqxx::connection::quote_table(table_path path) const +{ + return separated_list( + ".", std::begin(path), std::end(path), + [this](auto name) { return this->quote_name(*name); }); +} + + +std::string +pqxx::connection::esc_like(std::string_view text, char escape_char) const +{ + std::string out; + out.reserve(std::size(text)); + internal::for_glyphs( + internal::enc_group(encoding_id()), + [&out, escape_char](char const *gbegin, char const *gend) { + if ((gend - gbegin == 1) and (*gbegin == '_' or *gbegin == '%')) + // We're not expecting a lot of wildcards in a string. Usually. + PQXX_UNLIKELY + out.push_back(escape_char); + + for (; gbegin != gend; ++gbegin) out.push_back(*gbegin); + }, + text.data(), std::size(text)); + return out; +} + + +int pqxx::connection::await_notification() +{ + int notifs = get_notifs(); + if (notifs == 0) + { + PQXX_LIKELY + internal::wait_fd(socket_of(m_conn), true, false, 10, 0); + notifs = get_notifs(); + } + return notifs; +} + + +int pqxx::connection::await_notification( + std::time_t seconds, long microseconds) +{ + int notifs = get_notifs(); + if (notifs == 0) + { + PQXX_LIKELY + internal::wait_fd( + socket_of(m_conn), true, false, + check_cast(seconds, "Seconds out of range."), + check_cast(microseconds, "Microseconds out of range.")); + return get_notifs(); + } + return notifs; +} + + +std::string pqxx::connection::adorn_name(std::string_view n) +{ + auto const id{to_string(++m_unique_id)}; + if (std::empty(n)) + return pqxx::internal::concat("x", id); + else + return pqxx::internal::concat(n, "_", id); +} + + +std::string pqxx::connection::get_client_encoding() const +{ + return internal::name_encoding(encoding_id()); +} + + +void PQXX_COLD pqxx::connection::set_client_encoding(char const encoding[]) & +{ + switch (auto const retval{PQsetClientEncoding(m_conn, encoding)}; retval) + { + case 0: + // OK. 
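A minimal usage sketch of the quoting helpers defined above (quote_name(), quote(), esc()); it assumes an open pqxx::connection and a made-up table name, and the transaction-level wrappers simply forward to these connection methods:

#include <iostream>
#include <pqxx/pqxx>

// Sketch: build a statement with a properly quoted identifier and value.
void show_user(pqxx::connection &cx, std::string const &user_input)
{
  pqxx::work tx{cx};
  auto const query = "SELECT id FROM " + cx.quote_name("app_users") +
                     " WHERE name = " + cx.quote(user_input);
  for (auto const &row : tx.exec(query))
    std::cout << row[0].as<long>() << '\n';
  tx.commit();
}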
+ PQXX_LIKELY + break; + case -1: + PQXX_UNLIKELY + if (is_open()) + throw failure{"Setting client encoding failed."}; + else + throw broken_connection{"Lost connection to the database server."}; + default: + PQXX_UNLIKELY + throw internal_error{internal::concat( + "Unexpected result from PQsetClientEncoding: ", retval)}; + } +} + + +int pqxx::connection::encoding_id() const +{ + int const enc{PQclientEncoding(m_conn)}; + if (enc == -1) + { + // PQclientEncoding does not query the database, but it does check for + // broken connections. And unfortunately, we check the encoding right + // *before* checking a query result for failure. So, we need to handle + // connection failure here and it will apply in lots of places. + // TODO: Make pqxx::result::result(...) do all the checking. + PQXX_UNLIKELY + if (is_open()) + throw failure{"Could not obtain client encoding."}; + else + throw broken_connection{"Lost connection to the database server."}; + } + PQXX_LIKELY + return enc; +} + + +pqxx::result pqxx::connection::exec_params( + std::string_view query, internal::c_params const &args) +{ + auto const q{std::make_shared(query)}; + auto const pq_result{PQexecParams( + m_conn, q->c_str(), + check_cast(std::size(args.values), "exec_params"sv), nullptr, + args.values.data(), args.lengths.data(), + reinterpret_cast(args.formats.data()), + static_cast(format::text))}; + auto const r{make_result(pq_result, q)}; + get_notifs(); + return r; +} + + +namespace +{ +/// Get the prevailing default value for a connection parameter. +char const *get_default(PQconninfoOption const &opt) noexcept +{ + if (opt.envvar == nullptr) + { + // There's no environment variable for this setting. The only default is + // the one that was compiled in. + return opt.compiled; + } + // As of C++11, std::getenv() uses thread-local storage, so it should be + // thread-safe. MSVC still warns about it though. +#if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable : 4996) +#endif + char const *var{std::getenv(opt.envvar)}; +#if defined(_MSC_VER) +# pragma warning(pop) +#endif + if (var == nullptr) + { + // There's an environment variable for this setting, but it's not set. + return opt.compiled; + } + + // The environment variable is the prevailing default. 
+ return var; +} +} // namespace + + +std::string pqxx::connection::connection_string() const +{ + if (m_conn == nullptr) + PQXX_UNLIKELY + throw usage_error{"Can't get connection string: connection is not open."}; + + std::unique_ptr< + PQconninfoOption, std::function> const params{ + PQconninfo(m_conn), PQconninfoFree}; + if (params.get() == nullptr) + PQXX_UNLIKELY + throw std::bad_alloc{}; + + std::string buf; + for (std::size_t i{0}; params.get()[i].keyword != nullptr; ++i) + { + auto const param{params.get()[i]}; + if (param.val != nullptr) + { + auto const default_val{get_default(param)}; + if ( + (default_val == nullptr) or (std::strcmp(param.val, default_val) != 0)) + { + if (not std::empty(buf)) + buf.push_back(' '); + buf += param.keyword; + buf.push_back('='); + buf += param.val; + } + } + } + return buf; +} + + +#if defined(_WIN32) || __has_include() +pqxx::connecting::connecting(zview connection_string) : + m_conn{connection::connect_nonblocking, connection_string} +{} +#endif // defined(_WIN32) || __has_include( + + +#if defined(_WIN32) || __has_include() +void pqxx::connecting::process() & +{ + auto const [reading, writing]{m_conn.poll_connect()}; + m_reading = reading; + m_writing = writing; +} +#endif // defined(_WIN32) || __has_include( + + +#if defined(_WIN32) || __has_include() +pqxx::connection pqxx::connecting::produce() && +{ + if (!done()) + throw usage_error{ + "Tried to produce a nonblocking connection before it was done."}; + m_conn.complete_init(); + return std::move(m_conn); +} +#endif // defined(_WIN32) || __has_include( diff --git a/ext/libpqxx-7.7.3/src/cursor.cxx b/ext/libpqxx-7.7.3/src/cursor.cxx new file mode 100644 index 000000000..28e8ba42f --- /dev/null +++ b/ext/libpqxx-7.7.3/src/cursor.cxx @@ -0,0 +1,339 @@ +/** Implementation of libpqxx STL-style cursor classes. + * + * These classes wrap SQL cursors in STL-like interfaces. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#include "pqxx-source.hxx" + +#include + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/cursor.hxx" +#include "pqxx/internal/gates/icursor_iterator-icursorstream.hxx" +#include "pqxx/internal/gates/icursorstream-icursor_iterator.hxx" +#include "pqxx/result.hxx" +#include "pqxx/strconv.hxx" +#include "pqxx/transaction.hxx" + +#include "pqxx/internal/header-post.hxx" + + +pqxx::cursor_base::difference_type pqxx::cursor_base::all() noexcept +{ + // Implemented out-of-line so we don't fall afoul of Visual Studio defining + // min() and max() macros, which turn this expression into malformed code: + return std::numeric_limits::max() - 1; +} + + +pqxx::cursor_base::difference_type pqxx::cursor_base::backward_all() noexcept +{ + // Implemented out-of-line so we don't fall afoul of Visual Studio defining + // min() and max() macros, which turn this expression into malformed code: + return std::numeric_limits::min() + 1; +} + + +pqxx::cursor_base::cursor_base( + connection &context, std::string_view Name, bool embellish_name) : + m_name{embellish_name ? 
context.adorn_name(Name) : Name} +{} + + +pqxx::result::size_type +pqxx::internal::obtain_stateless_cursor_size(sql_cursor &cur) +{ + if (cur.endpos() == -1) + cur.move(cursor_base::all()); + return result::size_type(cur.endpos() - 1); +} + + +pqxx::result pqxx::internal::stateless_cursor_retrieve( + sql_cursor &cur, result::difference_type size, + result::difference_type begin_pos, result::difference_type end_pos) +{ + if (begin_pos < 0 or begin_pos > size) + throw range_error{"Starting position out of range"}; + + if (end_pos < -1) + end_pos = -1; + else if (end_pos > size) + end_pos = size; + + if (begin_pos == end_pos) + return cur.empty_result(); + + int const direction{((begin_pos < end_pos) ? 1 : -1)}; + cur.move((begin_pos - direction) - (cur.pos() - 1)); + return cur.fetch(end_pos - begin_pos); +} + + +pqxx::icursorstream::icursorstream( + transaction_base &context, std::string_view query, std::string_view basename, + difference_type sstride) : + m_cur{ + context, + query, + basename, + cursor_base::forward_only, + cursor_base::read_only, + cursor_base::owned, + false}, + m_stride{sstride}, + m_realpos{0}, + m_reqpos{0}, + m_iterators{nullptr}, + m_done{false} +{ + set_stride(sstride); +} + + +pqxx::icursorstream::icursorstream( + transaction_base &context, field const &cname, difference_type sstride, + cursor_base::ownership_policy op) : + m_cur{context, cname.c_str(), op}, + m_stride{sstride}, + m_realpos{0}, + m_reqpos{0}, + m_iterators{nullptr}, + m_done{false} +{ + set_stride(sstride); +} + + +void pqxx::icursorstream::set_stride(difference_type stride) & +{ + if (stride < 1) + throw argument_error{ + internal::concat("Attempt to set cursor stride to ", stride)}; + m_stride = stride; +} + + +pqxx::result pqxx::icursorstream::fetchblock() +{ + result const r{m_cur.fetch(m_stride)}; + m_realpos += std::size(r); + if (std::empty(r)) + m_done = true; + return r; +} + + +pqxx::icursorstream &pqxx::icursorstream::ignore(std::streamsize n) & +{ + auto offset{m_cur.move(difference_type(n))}; + m_realpos += offset; + if (offset < n) + m_done = true; + return *this; +} + + +pqxx::icursorstream::size_type pqxx::icursorstream::forward(size_type n) +{ + m_reqpos += difference_type(n) * m_stride; + return icursorstream::size_type(m_reqpos); +} + + +void pqxx::icursorstream::insert_iterator(icursor_iterator *i) noexcept +{ + pqxx::internal::gate::icursor_iterator_icursorstream{*i}.set_next( + m_iterators); + if (m_iterators != nullptr) + pqxx::internal::gate::icursor_iterator_icursorstream{*m_iterators} + .set_prev(i); + m_iterators = i; +} + + +void pqxx::icursorstream::remove_iterator(icursor_iterator *i) const noexcept +{ + pqxx::internal::gate::icursor_iterator_icursorstream igate{*i}; + if (i == m_iterators) + { + m_iterators = igate.get_next(); + if (m_iterators != nullptr) + pqxx::internal::gate::icursor_iterator_icursorstream{*m_iterators} + .set_prev(nullptr); + } + else + { + auto prev{igate.get_prev()}, next{igate.get_next()}; + pqxx::internal::gate::icursor_iterator_icursorstream{*prev}.set_next(next); + if (next != nullptr) + pqxx::internal::gate::icursor_iterator_icursorstream{*next}.set_prev( + prev); + } + igate.set_prev(nullptr); + igate.set_next(nullptr); +} + + +void pqxx::icursorstream::service_iterators(difference_type topos) +{ + if (topos < m_realpos) + return; + + using todolist = std::multimap; + todolist todo; + for (icursor_iterator *i{m_iterators}, *next; i != nullptr; i = next) + { + pqxx::internal::gate::icursor_iterator_icursorstream gate{*i}; + auto const 
ipos{gate.pos()}; + if (ipos >= m_realpos and ipos <= topos) + todo.insert(todolist::value_type(ipos, i)); + next = gate.get_next(); + } + auto const todo_end = std::end(todo); + for (auto i{std::begin(todo)}; i != todo_end;) + { + auto const readpos{i->first}; + if (readpos > m_realpos) + ignore(readpos - m_realpos); + result const r{fetchblock()}; + for (; i != todo_end and i->first == readpos; ++i) + pqxx::internal::gate::icursor_iterator_icursorstream{*i->second}.fill(r); + } +} + + +pqxx::icursor_iterator::icursor_iterator() noexcept : m_pos{0} {} + + +pqxx::icursor_iterator::icursor_iterator(istream_type &s) noexcept : + m_stream{&s}, + m_pos{difference_type( + pqxx::internal::gate::icursorstream_icursor_iterator(s).forward(0))} +{ + pqxx::internal::gate::icursorstream_icursor_iterator{*m_stream} + .insert_iterator(this); +} + + +pqxx::icursor_iterator::icursor_iterator(icursor_iterator const &rhs) noexcept + : + m_stream{rhs.m_stream}, m_here{rhs.m_here}, m_pos{rhs.m_pos} +{ + if (m_stream != nullptr) + pqxx::internal::gate::icursorstream_icursor_iterator{*m_stream} + .insert_iterator(this); +} + + +pqxx::icursor_iterator::~icursor_iterator() noexcept +{ + if (m_stream != nullptr) + pqxx::internal::gate::icursorstream_icursor_iterator{*m_stream} + .remove_iterator(this); +} + + +pqxx::icursor_iterator pqxx::icursor_iterator::operator++(int) +{ + icursor_iterator old{*this}; + m_pos = difference_type( + pqxx::internal::gate::icursorstream_icursor_iterator{*m_stream}.forward()); + m_here.clear(); + return old; +} + + +pqxx::icursor_iterator &pqxx::icursor_iterator::operator++() +{ + m_pos = difference_type( + pqxx::internal::gate::icursorstream_icursor_iterator{*m_stream}.forward()); + m_here.clear(); + return *this; +} + + +pqxx::icursor_iterator &pqxx::icursor_iterator::operator+=(difference_type n) +{ + if (n <= 0) + { + PQXX_UNLIKELY + if (n == 0) + return *this; + throw argument_error{"Advancing icursor_iterator by negative offset."}; + } + PQXX_LIKELY + m_pos = difference_type( + pqxx::internal::gate::icursorstream_icursor_iterator{*m_stream}.forward( + icursorstream::size_type(n))); + m_here.clear(); + return *this; +} + + +pqxx::icursor_iterator & +pqxx::icursor_iterator::operator=(icursor_iterator const &rhs) noexcept +{ + if (rhs.m_stream == m_stream) + { + PQXX_UNLIKELY + m_here = rhs.m_here; + m_pos = rhs.m_pos; + } + else + { + PQXX_LIKELY + if (m_stream != nullptr) + pqxx::internal::gate::icursorstream_icursor_iterator{*m_stream} + .remove_iterator(this); + m_here = rhs.m_here; + m_pos = rhs.m_pos; + m_stream = rhs.m_stream; + if (m_stream != nullptr) + pqxx::internal::gate::icursorstream_icursor_iterator{*m_stream} + .insert_iterator(this); + } + return *this; +} + + +bool pqxx::icursor_iterator::operator==(icursor_iterator const &rhs) const +{ + if (m_stream == rhs.m_stream) + return pos() == rhs.pos(); + if (m_stream != nullptr and rhs.m_stream != nullptr) + return false; + refresh(); + rhs.refresh(); + return std::empty(m_here) and std::empty(rhs.m_here); +} + + +bool pqxx::icursor_iterator::operator<(icursor_iterator const &rhs) const +{ + if (m_stream == rhs.m_stream) + return pos() < rhs.pos(); + refresh(); + rhs.refresh(); + return not std::empty(m_here); +} + + +void pqxx::icursor_iterator::refresh() const +{ + if (m_stream != nullptr) + pqxx::internal::gate::icursorstream_icursor_iterator{*m_stream} + .service_iterators(pos()); +} + + +void pqxx::icursor_iterator::fill(result const &r) +{ + m_here = r; +} diff --git a/ext/libpqxx-7.7.3/src/encodings.cxx 
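A usage sketch for the cursor stream implemented above; it assumes the stream-style operator>> and boolean test declared in pqxx/cursor.hxx, and the query is only an example:

#include <iostream>
#include <pqxx/pqxx>

// Sketch: read a large result set in blocks of 50 rows per fetch.
void dump_series(pqxx::transaction_base &tx)
{
  pqxx::icursorstream cur{tx, "SELECT generate_series(1, 1000)", "series_cur", 50};
  pqxx::result block;
  while (cur >> block)
    for (auto const &row : block)
      std::cout << row[0].as<int>() << '\n';
}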
b/ext/libpqxx-7.7.3/src/encodings.cxx new file mode 100644 index 000000000..99f038724 --- /dev/null +++ b/ext/libpqxx-7.7.3/src/encodings.cxx @@ -0,0 +1,839 @@ +/** Implementation of string encodings support + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#include "pqxx-source.hxx" + +#include +#include +#include +#include + +extern "C" +{ +#include +} + + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/except.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/internal/encodings.hxx" +#include "pqxx/strconv.hxx" + +#include "pqxx/internal/header-post.hxx" + + +namespace pqxx +{ +PQXX_DECLARE_ENUM_CONVERSION(pqxx::internal::encoding_group); +} + + +// Internal helper functions +namespace +{ +/// Extract byte from buffer, return as unsigned char. +constexpr PQXX_PURE unsigned char +get_byte(char const buffer[], std::size_t offset) noexcept +{ + return static_cast(buffer[offset]); +} + + +[[noreturn]] void throw_for_encoding_error( + char const *encoding_name, char const buffer[], std::size_t start, + std::size_t count) +{ + std::stringstream s; + s << "Invalid byte sequence for encoding " << encoding_name << " at byte " + << start << ": " << std::hex << std::setw(2) << std::setfill('0'); + for (std::size_t i{0}; i < count; ++i) + { + s << "0x" << static_cast(get_byte(buffer, start + i)); + if (i + 1 < count) + s << " "; + } + throw pqxx::argument_error{s.str()}; +} + + +/// Does value lie between bottom and top, inclusive? +constexpr PQXX_PURE bool +between_inc(unsigned char value, unsigned bottom, unsigned top) +{ + return value >= bottom and value <= top; +} + + +/* +EUC-JP and EUC-JIS-2004 represent slightly different code points but iterate +the same: + * https://en.wikipedia.org/wiki/Extended_Unix_Code#EUC-JP + * http://x0213.org/codetable/index.en.html +*/ +PQXX_PURE std::size_t next_seq_for_euc_jplike( + char const buffer[], std::size_t buffer_len, std::size_t start, + char const encoding_name[]) +{ + if (start >= buffer_len) + return std::string::npos; + + auto const byte1{get_byte(buffer, start)}; + if (byte1 < 0x80) + return start + 1; + + if (start + 2 > buffer_len) + PQXX_UNLIKELY + throw_for_encoding_error(encoding_name, buffer, start, 1); + + auto const byte2{get_byte(buffer, start + 1)}; + if (byte1 == 0x8e) + { + if (not between_inc(byte2, 0xa1, 0xfe)) + PQXX_UNLIKELY + throw_for_encoding_error(encoding_name, buffer, start, 2); + + return start + 2; + } + + if (between_inc(byte1, 0xa1, 0xfe)) + { + if (not between_inc(byte2, 0xa1, 0xfe)) + PQXX_UNLIKELY + throw_for_encoding_error(encoding_name, buffer, start, 2); + + return start + 2; + } + + if (byte1 == 0x8f and start + 3 <= buffer_len) + { + auto const byte3{get_byte(buffer, start + 2)}; + if ( + not between_inc(byte2, 0xa1, 0xfe) or not between_inc(byte3, 0xa1, 0xfe)) + PQXX_UNLIKELY + throw_for_encoding_error(encoding_name, buffer, start, 3); + + return start + 3; + } + + throw_for_encoding_error(encoding_name, buffer, start, 1); +} + +/* +As far as I can tell, for the purposes of iterating the only difference between +SJIS and SJIS-2004 is increased range in the first byte of two-byte sequences +(0xEF increased to 0xFC). Officially, that is; apparently the version of SJIS +used by Postgres has the same range as SJIS-2004. 
They both have increased +range over the documented versions, not having the even/odd restriction for the +first byte in 2-byte sequences. +*/ +// https://en.wikipedia.org/wiki/Shift_JIS#Shift_JIS_byte_map +// http://x0213.org/codetable/index.en.html +PQXX_PURE std::size_t next_seq_for_sjislike( + char const buffer[], std::size_t buffer_len, std::size_t start, + char const *encoding_name) +{ + if (start >= buffer_len) + return std::string::npos; + + auto const byte1{get_byte(buffer, start)}; + if (byte1 < 0x80 or between_inc(byte1, 0xa1, 0xdf)) + return start + 1; + + if ( + not between_inc(byte1, 0x81, 0x9f) and not between_inc(byte1, 0xe0, 0xfc)) + PQXX_UNLIKELY + throw_for_encoding_error(encoding_name, buffer, start, 1); + + if (start + 2 > buffer_len) + PQXX_UNLIKELY + throw_for_encoding_error(encoding_name, buffer, start, buffer_len - start); + + auto const byte2{get_byte(buffer, start + 1)}; + if (byte2 == 0x7f) + PQXX_UNLIKELY + throw_for_encoding_error(encoding_name, buffer, start, 2); + + if (between_inc(byte2, 0x40, 0x9e) or between_inc(byte2, 0x9f, 0xfc)) + return start + 2; + + PQXX_UNLIKELY + throw_for_encoding_error(encoding_name, buffer, start, 2); +} +} // namespace + + +// Implement template specializations first. +namespace pqxx::internal +{ +template struct glyph_scanner +{ + PQXX_PURE static std::size_t + call(char const buffer[], std::size_t buffer_len, std::size_t start); +}; + +template<> struct glyph_scanner +{ + static PQXX_PURE constexpr std::size_t + call(char const /* buffer */[], std::size_t buffer_len, std::size_t start) + { + if (start >= buffer_len) + return std::string::npos; + else + return start + 1; + } +}; + +// https://en.wikipedia.org/wiki/Big5#Organization +template<> +PQXX_PURE std::size_t glyph_scanner::call( + char const buffer[], std::size_t buffer_len, std::size_t start) +{ + if (start >= buffer_len) + return std::string::npos; + + auto const byte1{get_byte(buffer, start)}; + if (byte1 < 0x80) + return start + 1; + + if (not between_inc(byte1, 0x81, 0xfe) or (start + 2 > buffer_len)) + PQXX_UNLIKELY + throw_for_encoding_error("BIG5", buffer, start, 1); + + auto const byte2{get_byte(buffer, start + 1)}; + if ( + not between_inc(byte2, 0x40, 0x7e) and not between_inc(byte2, 0xa1, 0xfe)) + PQXX_UNLIKELY + throw_for_encoding_error("BIG5", buffer, start, 2); + + return start + 2; +} + +/* +The PostgreSQL documentation claims that the EUC_* encodings are 1-3 bytes +each, but other documents explain that the EUC sets can contain 1-(2,3,4) bytes +depending on the specific extension: + EUC_CN : 1-2 + EUC_JP : 1-3 + EUC_JIS_2004: 1-2 + EUC_KR : 1-2 + EUC_TW : 1-4 +*/ + +// https://en.wikipedia.org/wiki/GB_2312#EUC-CN +template<> +PQXX_PURE std::size_t glyph_scanner::call( + char const buffer[], std::size_t buffer_len, std::size_t start) +{ + if (start >= buffer_len) + return std::string::npos; + + auto const byte1{get_byte(buffer, start)}; + if (byte1 < 0x80) + return start + 1; + + if (not between_inc(byte1, 0xa1, 0xf7) or start + 2 > buffer_len) + PQXX_UNLIKELY + throw_for_encoding_error("EUC_CN", buffer, start, 1); + + auto const byte2{get_byte(buffer, start + 1)}; + if (not between_inc(byte2, 0xa1, 0xfe)) + PQXX_UNLIKELY + throw_for_encoding_error("EUC_CN", buffer, start, 2); + + return start + 2; +} + + +template<> +PQXX_PURE std::size_t glyph_scanner::call( + char const buffer[], std::size_t buffer_len, std::size_t start) +{ + return next_seq_for_euc_jplike(buffer, buffer_len, start, "EUC_JP"); +} + + +template<> +PQXX_PURE std::size_t 
glyph_scanner::call( + char const buffer[], std::size_t buffer_len, std::size_t start) +{ + return next_seq_for_euc_jplike(buffer, buffer_len, start, "EUC_JIS_2004"); +} + + +// https://en.wikipedia.org/wiki/Extended_Unix_Code#EUC-KR +template<> +PQXX_PURE std::size_t glyph_scanner::call( + char const buffer[], std::size_t buffer_len, std::size_t start) +{ + if (start >= buffer_len) + return std::string::npos; + + auto const byte1{get_byte(buffer, start)}; + if (byte1 < 0x80) + return start + 1; + + if (not between_inc(byte1, 0xa1, 0xfe) or start + 2 > buffer_len) + PQXX_UNLIKELY + throw_for_encoding_error("EUC_KR", buffer, start, 1); + + auto const byte2{get_byte(buffer, start + 1)}; + if (not between_inc(byte2, 0xa1, 0xfe)) + PQXX_UNLIKELY + throw_for_encoding_error("EUC_KR", buffer, start, 1); + + return start + 2; +} + +// https://en.wikipedia.org/wiki/Extended_Unix_Code#EUC-TW +template<> +PQXX_PURE std::size_t glyph_scanner::call( + char const buffer[], std::size_t buffer_len, std::size_t start) +{ + if (start >= buffer_len) + PQXX_UNLIKELY + return std::string::npos; + + auto const byte1{get_byte(buffer, start)}; + if (byte1 < 0x80) + return start + 1; + + if (start + 2 > buffer_len) + PQXX_UNLIKELY + throw_for_encoding_error("EUC_KR", buffer, start, 1); + + auto const byte2{get_byte(buffer, start + 1)}; + if (between_inc(byte1, 0xa1, 0xfe)) + { + if (not between_inc(byte2, 0xa1, 0xfe)) + PQXX_UNLIKELY + throw_for_encoding_error("EUC_KR", buffer, start, 2); + + return start + 2; + } + + if (byte1 != 0x8e or start + 4 > buffer_len) + PQXX_UNLIKELY + throw_for_encoding_error("EUC_KR", buffer, start, 1); + + if ( + between_inc(byte2, 0xa1, 0xb0) and + between_inc(get_byte(buffer, start + 2), 0xa1, 0xfe) and + between_inc(get_byte(buffer, start + 3), 0xa1, 0xfe)) + return start + 4; + + PQXX_UNLIKELY + throw_for_encoding_error("EUC_KR", buffer, start, 4); +} + +// https://en.wikipedia.org/wiki/GB_18030#Mapping +template<> +PQXX_PURE std::size_t glyph_scanner::call( + char const buffer[], std::size_t buffer_len, std::size_t start) +{ + if (start >= buffer_len) + return std::string::npos; + + auto const byte1{get_byte(buffer, start)}; + if (byte1 < 0x80) + return start + 1; + if (byte1 == 0x80) + throw_for_encoding_error("GB18030", buffer, start, buffer_len - start); + + if (start + 2 > buffer_len) + PQXX_UNLIKELY + throw_for_encoding_error("GB18030", buffer, start, buffer_len - start); + + auto const byte2{get_byte(buffer, start + 1)}; + if (between_inc(byte2, 0x40, 0xfe)) + { + if (byte2 == 0x7f) + PQXX_UNLIKELY + throw_for_encoding_error("GB18030", buffer, start, 2); + + return start + 2; + } + + if (start + 4 > buffer_len) + PQXX_UNLIKELY + throw_for_encoding_error("GB18030", buffer, start, buffer_len - start); + + if ( + between_inc(byte2, 0x30, 0x39) and + between_inc(get_byte(buffer, start + 2), 0x81, 0xfe) and + between_inc(get_byte(buffer, start + 3), 0x30, 0x39)) + return start + 4; + + PQXX_UNLIKELY + throw_for_encoding_error("GB18030", buffer, start, 4); +} + +// https://en.wikipedia.org/wiki/GBK_(character_encoding)#Encoding +template<> +PQXX_PURE std::size_t glyph_scanner::call( + char const buffer[], std::size_t buffer_len, std::size_t start) +{ + if (start >= buffer_len) + return std::string::npos; + + auto const byte1{get_byte(buffer, start)}; + if (byte1 < 0x80) + return start + 1; + + if (start + 2 > buffer_len) + PQXX_UNLIKELY + throw_for_encoding_error("GBK", buffer, start, 1); + + auto const byte2{get_byte(buffer, start + 1)}; + if ( + (between_inc(byte1, 0xa1, 0xa9) 
and between_inc(byte2, 0xa1, 0xfe)) or + (between_inc(byte1, 0xb0, 0xf7) and between_inc(byte2, 0xa1, 0xfe)) or + (between_inc(byte1, 0x81, 0xa0) and between_inc(byte2, 0x40, 0xfe) and + byte2 != 0x7f) or + (between_inc(byte1, 0xaa, 0xfe) and between_inc(byte2, 0x40, 0xa0) and + byte2 != 0x7f) or + (between_inc(byte1, 0xa8, 0xa9) and between_inc(byte2, 0x40, 0xa0) and + byte2 != 0x7f) or + (between_inc(byte1, 0xaa, 0xaf) and between_inc(byte2, 0xa1, 0xfe)) or + (between_inc(byte1, 0xf8, 0xfe) and between_inc(byte2, 0xa1, 0xfe)) or + (between_inc(byte1, 0xa1, 0xa7) and between_inc(byte2, 0x40, 0xa0) and + byte2 != 0x7f)) + return start + 2; + + PQXX_UNLIKELY + throw_for_encoding_error("GBK", buffer, start, 2); +} + +/* +The PostgreSQL documentation claims that the JOHAB encoding is 1-3 bytes, but +"CJKV Information Processing" describes it (actually just the Hangul portion) +as "three five-bit segments" that reside inside 16 bits (2 bytes). + +CJKV Information Processing by Ken Lunde, pg. 269: + + https://bit.ly/2BEOu5V +*/ +template<> +PQXX_PURE std::size_t glyph_scanner::call( + char const buffer[], std::size_t buffer_len, std::size_t start) +{ + if (start >= buffer_len) + return std::string::npos; + + auto const byte1{get_byte(buffer, start)}; + if (byte1 < 0x80) + return start + 1; + + if (start + 2 > buffer_len) + PQXX_UNLIKELY + throw_for_encoding_error("JOHAB", buffer, start, 1); + + auto const byte2{get_byte(buffer, start)}; + if ( + (between_inc(byte1, 0x84, 0xd3) and + (between_inc(byte2, 0x41, 0x7e) or between_inc(byte2, 0x81, 0xfe))) or + ((between_inc(byte1, 0xd8, 0xde) or between_inc(byte1, 0xe0, 0xf9)) and + (between_inc(byte2, 0x31, 0x7e) or between_inc(byte2, 0x91, 0xfe)))) + return start + 2; + + PQXX_UNLIKELY + throw_for_encoding_error("JOHAB", buffer, start, 2); +} + +/* +PostgreSQL's MULE_INTERNAL is the emacs rather than Xemacs implementation; +see the server/mb/pg_wchar.h PostgreSQL header file. +This is implemented according to the description in said header file, but I was +unable to get it to successfully iterate a MULE-encoded test CSV generated +using PostgreSQL 9.2.23. Use this at your own risk. 
+*/ +template<> +PQXX_PURE std::size_t glyph_scanner::call( + char const buffer[], std::size_t buffer_len, std::size_t start) +{ + if (start >= buffer_len) + return std::string::npos; + + auto const byte1{get_byte(buffer, start)}; + if (byte1 < 0x80) + return start + 1; + + if (start + 2 > buffer_len) + PQXX_UNLIKELY + throw_for_encoding_error("MULE_INTERNAL", buffer, start, 1); + + auto const byte2{get_byte(buffer, start + 1)}; + if (between_inc(byte1, 0x81, 0x8d) and byte2 >= 0xa0) + return start + 2; + + if (start + 3 > buffer_len) + PQXX_UNLIKELY + throw_for_encoding_error("MULE_INTERNAL", buffer, start, 2); + + if ( + ((byte1 == 0x9a and between_inc(byte2, 0xa0, 0xdf)) or + (byte1 == 0x9b and between_inc(byte2, 0xe0, 0xef)) or + (between_inc(byte1, 0x90, 0x99) and byte2 >= 0xa0)) and + (byte2 >= 0xa0)) + return start + 3; + + if (start + 4 > buffer_len) + PQXX_UNLIKELY + throw_for_encoding_error("MULE_INTERNAL", buffer, start, 3); + + if ( + ((byte1 == 0x9c and between_inc(byte2, 0xf0, 0xf4)) or + (byte1 == 0x9d and between_inc(byte2, 0xf5, 0xfe))) and + get_byte(buffer, start + 2) >= 0xa0 and + get_byte(buffer, start + 4) >= 0xa0) + return start + 4; + + PQXX_UNLIKELY + throw_for_encoding_error("MULE_INTERNAL", buffer, start, 4); +} + +template<> +PQXX_PURE std::size_t glyph_scanner::call( + char const buffer[], std::size_t buffer_len, std::size_t start) +{ + return next_seq_for_sjislike(buffer, buffer_len, start, "SJIS"); +} + +template<> +PQXX_PURE std::size_t glyph_scanner::call( + char const buffer[], std::size_t buffer_len, std::size_t start) +{ + return next_seq_for_sjislike(buffer, buffer_len, start, "SHIFT_JIS_2004"); +} + +// https://en.wikipedia.org/wiki/Unified_Hangul_Code +template<> +PQXX_PURE std::size_t glyph_scanner::call( + char const buffer[], std::size_t buffer_len, std::size_t start) +{ + if (start >= buffer_len) + return std::string::npos; + + auto const byte1{get_byte(buffer, start)}; + if (byte1 < 0x80) + return start + 1; + + if (start + 2 > buffer_len) + PQXX_UNLIKELY + throw_for_encoding_error("UHC", buffer, start, buffer_len - start); + + auto const byte2{get_byte(buffer, start + 1)}; + if (between_inc(byte1, 0x80, 0xc6)) + { + if ( + between_inc(byte2, 0x41, 0x5a) or between_inc(byte2, 0x61, 0x7a) or + between_inc(byte2, 0x80, 0xfe)) + return start + 2; + + PQXX_UNLIKELY + throw_for_encoding_error("UHC", buffer, start, 2); + } + + if (between_inc(byte1, 0xa1, 0xfe)) + { + if (not between_inc(byte2, 0xa1, 0xfe)) + PQXX_UNLIKELY + throw_for_encoding_error("UHC", buffer, start, 2); + + return start + 2; + } + + throw_for_encoding_error("UHC", buffer, start, 1); +} + +// https://en.wikipedia.org/wiki/UTF-8#Description +template<> +PQXX_PURE std::size_t glyph_scanner::call( + char const buffer[], std::size_t buffer_len, std::size_t start) +{ + if (start >= buffer_len) + return std::string::npos; + + auto const byte1{get_byte(buffer, start)}; + if (byte1 < 0x80) + return start + 1; + + if (start + 2 > buffer_len) + PQXX_UNLIKELY + throw_for_encoding_error("UTF8", buffer, start, buffer_len - start); + + auto const byte2{get_byte(buffer, start + 1)}; + if (between_inc(byte1, 0xc0, 0xdf)) + { + if (not between_inc(byte2, 0x80, 0xbf)) + PQXX_UNLIKELY + throw_for_encoding_error("UTF8", buffer, start, 2); + + return start + 2; + } + + if (start + 3 > buffer_len) + PQXX_UNLIKELY + throw_for_encoding_error("UTF8", buffer, start, buffer_len - start); + + auto const byte3{get_byte(buffer, start + 2)}; + if (between_inc(byte1, 0xe0, 0xef)) + { + if (between_inc(byte2, 
0x80, 0xbf) and between_inc(byte3, 0x80, 0xbf)) + return start + 3; + + PQXX_UNLIKELY + throw_for_encoding_error("UTF8", buffer, start, 3); + } + + if (start + 4 > buffer_len) + PQXX_UNLIKELY + throw_for_encoding_error("UTF8", buffer, start, buffer_len - start); + + if (between_inc(byte1, 0xf0, 0xf7)) + { + if ( + between_inc(byte2, 0x80, 0xbf) and between_inc(byte3, 0x80, 0xbf) and + between_inc(get_byte(buffer, start + 3), 0x80, 0xbf)) + return start + 4; + + PQXX_UNLIKELY + throw_for_encoding_error("UTF8", buffer, start, 4); + } + + PQXX_UNLIKELY + throw_for_encoding_error("UTF8", buffer, start, 1); +} + + +PQXX_PURE char const *name_encoding(int encoding_id) +{ + return pg_encoding_to_char(encoding_id); +} + + +encoding_group enc_group(int libpq_enc_id) +{ + return enc_group(name_encoding(libpq_enc_id)); +} + + +encoding_group enc_group(std::string_view encoding_name) +{ + struct mapping + { + std::string_view const name; + encoding_group const group; + constexpr mapping(std::string_view n, encoding_group g) : name{n}, group{g} + {} + constexpr bool operator<(mapping const &rhs) const + { + return name < rhs.name; + } + }; + + // C++20: Once compilers are ready, go full constexpr, leave to the compiler. + auto const sz{std::size(encoding_name)}; + if (sz > 0u) + switch (encoding_name[0]) + { + case 'B': + if (encoding_name == "BIG5"sv) + return encoding_group::BIG5; + PQXX_UNLIKELY + break; + case 'E': + // C++20: Use string_view::starts_with(). + if ((sz >= 6u) and (encoding_name.substr(0, 4) == "EUC_"sv)) + { + auto const subtype{encoding_name.substr(4)}; + static constexpr std::array subtypes{ + mapping{"CN"sv, encoding_group::EUC_CN}, + mapping{"JIS_2004"sv, encoding_group::EUC_JIS_2004}, + mapping{"JP"sv, encoding_group::EUC_JP}, + mapping{"KR"sv, encoding_group::EUC_KR}, + mapping{"TW"sv, encoding_group::EUC_TW}, + }; + for (auto const &m : subtypes) + if (m.name == subtype) + return m.group; + } + PQXX_UNLIKELY + break; + case 'G': + if (encoding_name == "GB18030"sv) + return encoding_group::GB18030; + else if (encoding_name == "GBK"sv) + return encoding_group::GBK; + PQXX_UNLIKELY + break; + case 'I': + // We know iso-8859-X, where 5 <= X < 9. They're all monobyte encodings. + if ((sz == 10) and (encoding_name.substr(0, 9) == "ISO_8859_"sv)) + { + char const subtype{encoding_name[9]}; + if (('5' <= subtype) and (subtype < '9')) + return encoding_group::MONOBYTE; + } + PQXX_UNLIKELY + break; + case 'J': + if (encoding_name == "JOHAB"sv) + return encoding_group::JOHAB; + PQXX_UNLIKELY + break; + case 'K': + if ((encoding_name == "KOI8R"sv) or (encoding_name == "KOI8U"sv)) + return encoding_group::MONOBYTE; + PQXX_UNLIKELY + break; + case 'L': + // We know LATIN1 through LATIN10. 
+ if (encoding_name.substr(0, 5) == "LATIN"sv) + { + auto const subtype{encoding_name.substr(5)}; + if (subtype.size() == 1) + { + char const n{subtype[0]}; + if (('1' <= n) and (n <= '9')) + return encoding_group::MONOBYTE; + } + else if (subtype == "10"sv) + { + return encoding_group::MONOBYTE; + } + } + PQXX_UNLIKELY + break; + case 'M': + if (encoding_name == "MULE_INTERNAL"sv) + return encoding_group::MULE_INTERNAL; + PQXX_UNLIKELY + break; + case 'S': + if (encoding_name == "SHIFT_JIS_2004"sv) + return encoding_group::SHIFT_JIS_2004; + else if (encoding_name == "SJIS"sv) + return encoding_group::SJIS; + else if (encoding_name == "SQL_ASCII"sv) + return encoding_group::MONOBYTE; + PQXX_UNLIKELY + break; + case 'U': + if (encoding_name == "UHC"sv) + return encoding_group::UHC; + else if (encoding_name == "UTF8"sv) + return encoding_group::UTF8; + PQXX_UNLIKELY + break; + case 'W': + if (encoding_name.substr(0, 3) == "WIN"sv) + { + auto const subtype{encoding_name.substr(3)}; + static constexpr std::array subtypes{ + "866"sv, "874"sv, "1250"sv, "1251"sv, "1252"sv, "1253"sv, + "1254"sv, "1255"sv, "1256"sv, "1257"sv, "1258"sv, + }; + for (auto const n : subtypes) + if (n == subtype) + return encoding_group::MONOBYTE; + } + PQXX_UNLIKELY + break; + default: PQXX_UNLIKELY break; + } + PQXX_UNLIKELY + throw std::invalid_argument{ + internal::concat("Unrecognized encoding: '", encoding_name, "'.")}; +} + + +/// Look up instantiation @c T::call at runtime. +/** Here, "T" is a struct template with a static member function "call", whose + * type is "F". + * + * The return value is a pointer to the "call" member function for the + * instantiation of T for encoding group enc. + */ +template class T, typename F> +constexpr inline F *for_encoding(encoding_group enc) +{ +#define CASE_GROUP(ENC) \ + case encoding_group::ENC: return T::call + + switch (enc) + { + PQXX_LIKELY CASE_GROUP(MONOBYTE); + CASE_GROUP(BIG5); + CASE_GROUP(EUC_CN); + CASE_GROUP(EUC_JP); + CASE_GROUP(EUC_JIS_2004); + CASE_GROUP(EUC_KR); + CASE_GROUP(EUC_TW); + CASE_GROUP(GB18030); + CASE_GROUP(GBK); + CASE_GROUP(JOHAB); + CASE_GROUP(MULE_INTERNAL); + CASE_GROUP(SJIS); + CASE_GROUP(SHIFT_JIS_2004); + CASE_GROUP(UHC); + PQXX_LIKELY CASE_GROUP(UTF8); + } + PQXX_UNLIKELY + throw pqxx::usage_error{ + internal::concat("Unsupported encoding group code ", enc, ".")}; + +#undef CASE_GROUP +} + + +PQXX_PURE glyph_scanner_func *get_glyph_scanner(encoding_group enc) +{ + return for_encoding(enc); +} + + +template struct char_finder +{ + constexpr static PQXX_PURE std::size_t + call(std::string_view haystack, char needle, std::size_t start) + { + auto const buffer{std::data(haystack)}; + auto const size{std::size(haystack)}; + for (auto here{start}; here + 1 <= size; + here = glyph_scanner::call(buffer, size, here)) + { + if (haystack[here] == needle) + return here; + } + return std::string::npos; + } +}; + + +template struct string_finder +{ + PQXX_PURE constexpr static PQXX_PURE std::size_t + call(std::string_view haystack, std::string_view needle, std::size_t start) + { + auto const buffer{std::data(haystack)}; + auto const size{std::size(haystack)}; + auto const needle_size{std::size(needle)}; + for (auto here{start}; here + needle_size <= size; + here = glyph_scanner::call(buffer, size, here)) + { + if (std::memcmp(buffer + here, std::data(needle), needle_size) == 0) + return here; + } + return std::string::npos; + } +}; + +#undef DISPATCH_ENCODING_OPERATION +} // namespace pqxx::internal diff --git 
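The scanners above advance one glyph at a time using fixed lead-byte ranges. Below is a self-contained sketch of the UTF-8 stepping rule they implement (a lead byte below 0x80 is a single byte; 0xc0-0xdf, 0xe0-0xef and 0xf0-0xf7 start two-, three- and four-byte sequences), written independently of the internal glyph_scanner API:

#include <cstddef>
#include <stdexcept>
#include <string_view>

// Sketch: return the offset just past the UTF-8 sequence starting at 'start'.
std::size_t next_utf8_glyph(std::string_view buf, std::size_t start)
{
  if (start >= buf.size())
    return std::string_view::npos;
  auto const lead = static_cast<unsigned char>(buf[start]);
  std::size_t len;
  if (lead < 0x80) len = 1;
  else if (lead >= 0xc0 and lead <= 0xdf) len = 2;
  else if (lead >= 0xe0 and lead <= 0xef) len = 3;
  else if (lead >= 0xf0 and lead <= 0xf7) len = 4;
  else throw std::invalid_argument{"Invalid UTF-8 lead byte."};
  if (start + len > buf.size())
    throw std::invalid_argument{"Truncated UTF-8 sequence."};
  // A full validator would also check that each continuation byte is 0x80-0xbf.
  return start + len;
}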
a/ext/libpqxx-7.7.3/src/errorhandler.cxx b/ext/libpqxx-7.7.3/src/errorhandler.cxx new file mode 100644 index 000000000..de120ff3d --- /dev/null +++ b/ext/libpqxx-7.7.3/src/errorhandler.cxx @@ -0,0 +1,43 @@ +/** Implementation of pqxx::errorhandler and helpers. + * + * pqxx::errorhandler allows programs to receive errors and warnings. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#include "pqxx-source.hxx" + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/connection.hxx" +#include "pqxx/errorhandler.hxx" +#include "pqxx/internal/gates/connection-errorhandler.hxx" + +#include "pqxx/internal/header-post.hxx" + + +pqxx::errorhandler::errorhandler(connection &conn) : m_home{&conn} +{ + pqxx::internal::gate::connection_errorhandler{*m_home}.register_errorhandler( + this); +} + + +pqxx::errorhandler::~errorhandler() +{ + unregister(); +} + + +void pqxx::errorhandler::unregister() noexcept +{ + if (m_home != nullptr) + { + pqxx::internal::gate::connection_errorhandler connection_gate{*m_home}; + m_home = nullptr; + connection_gate.unregister_errorhandler(this); + } +} diff --git a/ext/libpqxx-7.7.3/src/except.cxx b/ext/libpqxx-7.7.3/src/except.cxx new file mode 100644 index 000000000..4974e8ac0 --- /dev/null +++ b/ext/libpqxx-7.7.3/src/except.cxx @@ -0,0 +1,126 @@ +/** Implementation of libpqxx exception classes. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#include "pqxx-source.hxx" + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/except.hxx" +#include "pqxx/internal/concat.hxx" + +#include "pqxx/internal/header-post.hxx" + +pqxx::failure::failure(std::string const &whatarg) : + std::runtime_error{whatarg} +{} + + +pqxx::broken_connection::broken_connection() : + failure{"Connection to database failed."} +{} + + +pqxx::broken_connection::broken_connection(std::string const &whatarg) : + failure{whatarg} +{} + + +pqxx::variable_set_to_null::variable_set_to_null() : + variable_set_to_null{ + "Attempt to set a variable to null. This is not allowed."} +{} + + +pqxx::variable_set_to_null::variable_set_to_null(std::string const &whatarg) : + failure{whatarg} +{} + + +pqxx::sql_error::sql_error( + std::string const &whatarg, std::string const &Q, char const sqlstate[]) : + failure{whatarg}, m_query{Q}, m_sqlstate{sqlstate ? 
sqlstate : ""} +{} + + +pqxx::sql_error::~sql_error() noexcept = default; + + +PQXX_PURE std::string const &pqxx::sql_error::query() const noexcept +{ + return m_query; +} + + +PQXX_PURE std::string const &pqxx::sql_error::sqlstate() const noexcept +{ + return m_sqlstate; +} + + +pqxx::in_doubt_error::in_doubt_error(std::string const &whatarg) : + failure{whatarg} +{} + + +pqxx::transaction_rollback::transaction_rollback( + std::string const &whatarg, std::string const &q, char const sqlstate[]) : + sql_error{whatarg, q, sqlstate} +{} + + +pqxx::serialization_failure::serialization_failure( + std::string const &whatarg, std::string const &q, char const sqlstate[]) : + transaction_rollback{whatarg, q, sqlstate} +{} + + +pqxx::statement_completion_unknown::statement_completion_unknown( + std::string const &whatarg, std::string const &q, char const sqlstate[]) : + transaction_rollback{whatarg, q, sqlstate} +{} + + +pqxx::deadlock_detected::deadlock_detected( + std::string const &whatarg, std::string const &q, char const sqlstate[]) : + transaction_rollback{whatarg, q, sqlstate} +{} + + +pqxx::internal_error::internal_error(std::string const &whatarg) : + std::logic_error{internal::concat("libpqxx internal error: ", whatarg)} +{} + + +pqxx::usage_error::usage_error(std::string const &whatarg) : + std::logic_error{whatarg} +{} + + +pqxx::argument_error::argument_error(std::string const &whatarg) : + invalid_argument{whatarg} +{} + + +pqxx::conversion_error::conversion_error(std::string const &whatarg) : + domain_error{whatarg} +{} + + +pqxx::conversion_overrun::conversion_overrun(std::string const &whatarg) : + conversion_error{whatarg} +{} + + +pqxx::range_error::range_error(std::string const &whatarg) : + out_of_range{whatarg} +{} + + +pqxx::blob_already_exists::blob_already_exists(std::string const &whatarg) : + failure{whatarg} +{} diff --git a/ext/libpqxx-7.7.3/src/field.cxx b/ext/libpqxx-7.7.3/src/field.cxx new file mode 100644 index 000000000..f5026ced2 --- /dev/null +++ b/ext/libpqxx-7.7.3/src/field.cxx @@ -0,0 +1,80 @@ +/** Implementation of the pqxx::field class. + * + * pqxx::field refers to a field in a query result. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. 
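A usage sketch for the errorhandler registration implemented a little earlier; it assumes the virtual bool operator()(char const[]) noexcept interface declared in pqxx/errorhandler.hxx:

#include <iostream>
#include <pqxx/pqxx>

// Sketch: log server notices; returning true lets older handlers run too.
class notice_logger final : public pqxx::errorhandler
{
public:
  explicit notice_logger(pqxx::connection &cx) : pqxx::errorhandler{cx} {}

  bool operator()(char const msg[]) noexcept override
  {
    std::cerr << "server says: " << msg;
    return true;
  }
};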
+ */ +#include "pqxx-source.hxx" + +#include + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/field.hxx" +#include "pqxx/internal/libpq-forward.hxx" +#include "pqxx/result.hxx" +#include "pqxx/row.hxx" + +#include "pqxx/internal/header-post.hxx" + + +pqxx::field::field(pqxx::row const &r, pqxx::row::size_type c) noexcept : + m_col{c}, m_home{r.m_result}, m_row{r.m_index} +{} + + +bool PQXX_COLD pqxx::field::operator==(field const &rhs) const +{ + if (is_null() and rhs.is_null()) + return true; + if (is_null() != rhs.is_null()) + return false; + auto const s{size()}; + return (s == std::size(rhs)) and (std::memcmp(c_str(), rhs.c_str(), s) == 0); +} + + +char const *pqxx::field::name() const & +{ + return home().column_name(col()); +} + + +pqxx::oid pqxx::field::type() const +{ + return home().column_type(col()); +} + + +pqxx::oid pqxx::field::table() const +{ + return home().column_table(col()); +} + + +pqxx::row::size_type pqxx::field::table_column() const +{ + return home().table_column(col()); +} + + +char const *pqxx::field::c_str() const & +{ + return home().get_value(idx(), col()); +} + + +bool pqxx::field::is_null() const noexcept +{ + return home().get_is_null(idx(), col()); +} + + +pqxx::field::size_type pqxx::field::size() const noexcept +{ + return home().get_length(idx(), col()); +} diff --git a/ext/libpqxx-7.7.3/src/largeobject.cxx b/ext/libpqxx-7.7.3/src/largeobject.cxx new file mode 100644 index 000000000..c57de3c73 --- /dev/null +++ b/ext/libpqxx-7.7.3/src/largeobject.cxx @@ -0,0 +1,322 @@ +/** Implementation of the Large Objects interface. + * + * Allows direct access to large objects, as well as though I/O streams. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#include "pqxx-source.hxx" + +#include +#include +#include + +extern "C" +{ +#include +} + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/connection.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/internal/gates/connection-largeobject.hxx" +#include "pqxx/largeobject.hxx" + +#include "pqxx/internal/header-post.hxx" + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + +namespace +{ +constexpr inline int PQXX_COLD std_mode_to_pq_mode(std::ios::openmode mode) +{ + /// Mode bits, copied from libpq-fs.h so that we no longer need that header. + constexpr int INV_WRITE{0x00020000}, INV_READ{0x00040000}; + + return ((mode & std::ios::in) ? INV_READ : 0) | + ((mode & std::ios::out) ? INV_WRITE : 0); +} + + +constexpr int PQXX_COLD std_dir_to_pq_dir(std::ios::seekdir dir) noexcept +{ + if constexpr ( + static_cast(std::ios::beg) == int(SEEK_SET) and + static_cast(std::ios::cur) == int(SEEK_CUR) and + static_cast(std::ios::end) == int(SEEK_END)) + { + // Easy optimisation: they're the same constants. This is actually the + // case for the gcc I'm using. + return dir; + } + else + switch (dir) + { + case std::ios::beg: return SEEK_SET; break; + case std::ios::cur: return SEEK_CUR; break; + case std::ios::end: return SEEK_END; break; + + // Shouldn't happen, but may silence compiler warning. + default: return dir; break; + } +} +} // namespace + + +PQXX_COLD pqxx::largeobject::largeobject(dbtransaction &t) +{ + // (Mode is ignored as of postgres 8.1.) 
+ m_id = lo_creat(raw_connection(t), 0); + if (m_id == oid_none) + { + int const err{errno}; + if (err == ENOMEM) + throw std::bad_alloc{}; + throw failure{internal::concat( + "Could not create large object: ", reason(t.conn(), err))}; + } +} + + +PQXX_COLD +pqxx::largeobject::largeobject(dbtransaction &t, std::string_view file) +{ + m_id = lo_import(raw_connection(t), std::data(file)); + if (m_id == oid_none) + { + int const err{errno}; + if (err == ENOMEM) + throw std::bad_alloc{}; + throw failure{internal::concat( + "Could not import file '", file, + "' to large object: ", reason(t.conn(), err))}; + } +} + + +PQXX_COLD pqxx::largeobject::largeobject(largeobjectaccess const &o) noexcept : + m_id{o.id()} +{} + + +void PQXX_COLD +pqxx::largeobject::to_file(dbtransaction &t, std::string_view file) const +{ + if (id() == oid_none) + throw usage_error{"No object selected."}; + if (lo_export(raw_connection(t), id(), std::data(file)) == -1) + { + int const err{errno}; + if (err == ENOMEM) + throw std::bad_alloc{}; + throw failure{internal::concat( + "Could not export large object ", m_id, " to file '", file, + "': ", reason(t.conn(), err))}; + } +} + + +void PQXX_COLD pqxx::largeobject::remove(dbtransaction &t) const +{ + if (id() == oid_none) + throw usage_error{"No object selected."}; + if (lo_unlink(raw_connection(t), id()) == -1) + { + int const err{errno}; + if (err == ENOMEM) + throw std::bad_alloc{}; + throw failure{internal::concat( + "Could not delete large object ", m_id, ": ", reason(t.conn(), err))}; + } +} + + +pqxx::internal::pq::PGconn *PQXX_COLD +pqxx::largeobject::raw_connection(dbtransaction const &t) +{ + return pqxx::internal::gate::connection_largeobject{t.conn()} + .raw_connection(); +} + + +std::string PQXX_COLD +pqxx::largeobject::reason(connection const &c, int err) const +{ + if (err == ENOMEM) + return "Out of memory"; + return pqxx::internal::gate::const_connection_largeobject{c}.error_message(); +} + + +PQXX_COLD +pqxx::largeobjectaccess::largeobjectaccess(dbtransaction &t, openmode mode) : + largeobject{t}, m_trans{t} +{ + open(mode); +} + + +PQXX_COLD pqxx::largeobjectaccess::largeobjectaccess( + dbtransaction &t, oid o, openmode mode) : + largeobject{o}, m_trans{t} +{ + open(mode); +} + + +PQXX_COLD pqxx::largeobjectaccess::largeobjectaccess( + dbtransaction &t, largeobject o, openmode mode) : + largeobject{o}, m_trans{t} +{ + open(mode); +} + + +PQXX_COLD pqxx::largeobjectaccess::largeobjectaccess( + dbtransaction &t, std::string_view file, openmode mode) : + largeobject{t, file}, m_trans{t} +{ + open(mode); +} + + +pqxx::largeobjectaccess::size_type PQXX_COLD +pqxx::largeobjectaccess::seek(size_type dest, seekdir dir) +{ + auto const res{cseek(dest, dir)}; + if (res == -1) + { + int const err{errno}; + if (err == ENOMEM) + throw std::bad_alloc{}; + if (id() == oid_none) + throw usage_error{"No object selected."}; + throw failure{ + internal::concat("Error seeking in large object: ", reason(err))}; + } + + return res; +} + + +pqxx::largeobjectaccess::pos_type PQXX_COLD +pqxx::largeobjectaccess::cseek(off_type dest, seekdir dir) noexcept +{ + return lo_lseek64(raw_connection(), m_fd, dest, std_dir_to_pq_dir(dir)); +} + + +pqxx::largeobjectaccess::pos_type PQXX_COLD +pqxx::largeobjectaccess::cwrite(char const buf[], std::size_t len) noexcept +{ + return std::max(lo_write(raw_connection(), m_fd, buf, len), -1); +} + + +pqxx::largeobjectaccess::pos_type PQXX_COLD +pqxx::largeobjectaccess::cread(char buf[], std::size_t len) noexcept +{ + return 
std::max(lo_read(raw_connection(), m_fd, buf, len), -1); +} + + +pqxx::largeobjectaccess::pos_type PQXX_COLD +pqxx::largeobjectaccess::ctell() const noexcept +{ + return lo_tell64(raw_connection(), m_fd); +} + + +void PQXX_COLD +pqxx::largeobjectaccess::write(char const buf[], std::size_t len) +{ + if (id() == oid_none) + throw usage_error{"No object selected."}; + if (auto const bytes{cwrite(buf, len)}; internal::cmp_less(bytes, len)) + { + int const err{errno}; + if (err == ENOMEM) + throw std::bad_alloc{}; + if (bytes < 0) + throw failure{internal::concat( + "Error writing to large object #", id(), ": ", reason(err))}; + if (bytes == 0) + throw failure{internal::concat( + "Could not write to large object #", id(), ": ", reason(err))}; + + throw failure{internal::concat( + "Wanted to write ", len, " bytes to large object #", id(), + "; could only write ", bytes, ".")}; + } +} + + +pqxx::largeobjectaccess::size_type PQXX_COLD +pqxx::largeobjectaccess::read(char buf[], std::size_t len) +{ + if (id() == oid_none) + throw usage_error{"No object selected."}; + auto const bytes{cread(buf, len)}; + if (bytes < 0) + { + int const err{errno}; + if (err == ENOMEM) + throw std::bad_alloc{}; + throw failure{internal::concat( + "Error reading from large object #", id(), ": ", reason(err))}; + } + return bytes; +} + + +void PQXX_COLD pqxx::largeobjectaccess::open(openmode mode) +{ + if (id() == oid_none) + throw usage_error{"No object selected."}; + m_fd = lo_open(raw_connection(), id(), std_mode_to_pq_mode(mode)); + if (m_fd < 0) + { + int const err{errno}; + if (err == ENOMEM) + throw std::bad_alloc{}; + throw failure{internal::concat( + "Could not open large object ", id(), ": ", reason(err))}; + } +} + + +void PQXX_COLD pqxx::largeobjectaccess::close() noexcept +{ + if (m_fd >= 0) + lo_close(raw_connection(), m_fd); +} + + +pqxx::largeobjectaccess::size_type PQXX_COLD +pqxx::largeobjectaccess::tell() const +{ + auto const res{ctell()}; + if (res == -1) + throw failure{reason(errno)}; + return res; +} + + +std::string PQXX_COLD pqxx::largeobjectaccess::reason(int err) const +{ + if (m_fd == -1) + return "No object opened."; + return largeobject::reason(m_trans.conn(), err); +} + + +void PQXX_COLD pqxx::largeobjectaccess::process_notice(zview s) noexcept +{ + m_trans.process_notice(s); +} + +#include "pqxx/internal/ignore-deprecated-post.hxx" diff --git a/ext/libpqxx-7.7.3/src/notification.cxx b/ext/libpqxx-7.7.3/src/notification.cxx new file mode 100644 index 000000000..eb8d5e1a1 --- /dev/null +++ b/ext/libpqxx-7.7.3/src/notification.cxx @@ -0,0 +1,35 @@ +/** Implementation of the pqxx::notification_receiever class. + * + * pqxx::notification_receiver processes notifications. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. 
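A usage sketch for the large-object wrapper implemented above (the newer pqxx::blob interface supersedes it, but the calls below match the members shown here); the buffer size and contents are made up:

#include <algorithm>
#include <ios>
#include <pqxx/pqxx>

// Sketch: create a large object, write bytes into it, read a few back.
pqxx::oid store_blob(pqxx::dbtransaction &tx, char const data[], std::size_t len)
{
  pqxx::largeobjectaccess lo{tx, std::ios::in | std::ios::out};
  lo.write(data, len);
  lo.seek(0, std::ios::beg);
  char buf[16];
  lo.read(buf, std::min<std::size_t>(sizeof buf, len));
  return lo.id();
}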
+ */ +#include "pqxx-source.hxx" + +#include + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/internal/gates/connection-notification_receiver.hxx" +#include "pqxx/notification.hxx" + +#include "pqxx/internal/header-post.hxx" + + +pqxx::notification_receiver::notification_receiver( + connection &c, std::string_view channel) : + m_conn{c}, m_channel{channel} +{ + pqxx::internal::gate::connection_notification_receiver{c}.add_receiver(this); +} + + +pqxx::notification_receiver::~notification_receiver() +{ + pqxx::internal::gate::connection_notification_receiver{this->conn()} + .remove_receiver(this); +} diff --git a/ext/libpqxx-7.7.3/src/params.cxx b/ext/libpqxx-7.7.3/src/params.cxx new file mode 100644 index 000000000..bd3fc108c --- /dev/null +++ b/ext/libpqxx-7.7.3/src/params.cxx @@ -0,0 +1,122 @@ +/* Implementations related to prepared and parameterised statements. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#include "pqxx-source.hxx" + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/params.hxx" + +#include "pqxx/internal/header-post.hxx" + + +void pqxx::internal::c_params::reserve(std::size_t n) & +{ + values.reserve(n); + lengths.reserve(n); + formats.reserve(n); +} + + +void pqxx::params::reserve(std::size_t n) & +{ + m_params.reserve(n); +} + + +void pqxx::params::append() & +{ + m_params.emplace_back(nullptr); +} + + +void pqxx::params::append(zview value) & +{ + m_params.emplace_back(value); +} + + +void pqxx::params::append(std::string const &value) & +{ + m_params.emplace_back(value); +} + + +void pqxx::params::append(std::string &&value) & +{ + m_params.emplace_back(std::move(value)); +} + + +void pqxx::params::append(params const &value) & +{ + this->reserve(std::size(value.m_params) + std::size(this->m_params)); + for (auto const ¶m : value.m_params) m_params.emplace_back(param); +} + + +void pqxx::params::append(std::basic_string_view value) & +{ + m_params.emplace_back(value); +} + + +void pqxx::params::append(std::basic_string const &value) & +{ + m_params.emplace_back(value); +} + + +void pqxx::params::append(std::basic_string &&value) & +{ + m_params.emplace_back(std::move(value)); +} + + +void PQXX_COLD pqxx::params::append(binarystring const &value) & +{ + m_params.push_back(entry{value.bytes_view()}); +} + + +void pqxx::params::append(params &&value) & +{ + this->reserve(std::size(value.m_params) + std::size(this->m_params)); + for (auto const ¶m : value.m_params) + m_params.emplace_back(std::move(param)); + value.m_params.clear(); +} + + +pqxx::internal::c_params pqxx::params::make_c_params() const +{ + pqxx::internal::c_params p; + p.reserve(std::size(m_params)); + for (auto const ¶m : m_params) + std::visit( + [&p](auto const &value) { + using T = strip_t; + + if constexpr (std::is_same_v) + { + p.values.push_back(nullptr); + p.lengths.push_back(0); + } + else + { + p.values.push_back(reinterpret_cast(std::data(value))); + p.lengths.push_back( + check_cast(internal::ssize(value), s_overflow)); + } + + p.formats.push_back(param_format(value)); + }, + param); + + return p; +} diff --git a/ext/libpqxx-7.7.3/src/pipeline.cxx b/ext/libpqxx-7.7.3/src/pipeline.cxx new file mode 100644 index 000000000..854b1a70e --- /dev/null +++ b/ext/libpqxx-7.7.3/src/pipeline.cxx @@ -0,0 +1,448 @@ +/** Implementation of the pqxx::pipeline class. 
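A usage sketch for the parameter pack built above, assuming the documented libpqxx 7 pattern of passing a params object to exec_params; the table and column names are made up:

#include <pqxx/pqxx>

// Sketch: positional parameters $1 and $2 come from the params object.
pqxx::result find_user(pqxx::work &tx, std::string const &name)
{
  pqxx::params args;
  args.reserve(2);
  args.append(name);       // $1
  args.append("active");   // $2
  return tx.exec_params(
    "SELECT id FROM app_users WHERE name = $1 AND status = $2", args);
}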
+ * + * Throughput-optimized query interface. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#include "pqxx-source.hxx" + +#include + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/dbtransaction.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/internal/gates/connection-pipeline.hxx" +#include "pqxx/internal/gates/result-creation.hxx" +#include "pqxx/internal/gates/result-pipeline.hxx" +#include "pqxx/pipeline.hxx" +#include "pqxx/separated_list.hxx" + +#include "pqxx/internal/header-post.hxx" + + +namespace +{ +std::string const theSeparator{"; "}; +std::string const theDummyValue{"1"}; +std::string const theDummyQuery{"SELECT " + theDummyValue + theSeparator}; +} // namespace + + +void pqxx::pipeline::init() +{ + m_encoding = internal::enc_group(m_trans.conn().encoding_id()); + m_issuedrange = make_pair(std::end(m_queries), std::end(m_queries)); + attach(); +} + + +pqxx::pipeline::~pipeline() noexcept +{ + try + { + cancel(); + } + catch (std::exception const &) + {} + detach(); +} + + +void pqxx::pipeline::attach() +{ + if (not registered()) + register_me(); +} + + +void pqxx::pipeline::detach() +{ + if (registered()) + unregister_me(); +} + + +pqxx::pipeline::query_id pqxx::pipeline::insert(std::string_view q) & +{ + attach(); + query_id const qid{generate_id()}; + auto const i{m_queries.insert(std::make_pair(qid, Query(q))).first}; + + if (m_issuedrange.second == std::end(m_queries)) + { + m_issuedrange.second = i; + if (m_issuedrange.first == std::end(m_queries)) + m_issuedrange.first = i; + } + m_num_waiting++; + + if (m_num_waiting > m_retain) + { + if (have_pending()) + receive_if_available(); + if (not have_pending()) + issue(); + } + + return qid; +} + + +void pqxx::pipeline::complete() +{ + if (have_pending()) + receive(m_issuedrange.second); + if (m_num_waiting and (m_error == qid_limit())) + { + issue(); + receive(std::end(m_queries)); + } + detach(); +} + + +void pqxx::pipeline::flush() +{ + if (not std::empty(m_queries)) + { + if (have_pending()) + receive(m_issuedrange.second); + m_issuedrange.first = m_issuedrange.second = std::end(m_queries); + m_num_waiting = 0; + m_dummy_pending = false; + m_queries.clear(); + } + detach(); +} + + +void PQXX_COLD pqxx::pipeline::cancel() +{ + while (have_pending()) + { + pqxx::internal::gate::connection_pipeline(m_trans.conn()).cancel_query(); + auto canceled_query{m_issuedrange.first}; + ++m_issuedrange.first; + m_queries.erase(canceled_query); + } +} + + +bool pqxx::pipeline::is_finished(pipeline::query_id q) const +{ + if (m_queries.find(q) == std::end(m_queries)) + throw std::logic_error{ + internal::concat("Requested status for unknown query '", q, "'.")}; + return (QueryMap::const_iterator(m_issuedrange.first) == + std::end(m_queries)) or + (q < m_issuedrange.first->first and q < m_error); +} + + +std::pair pqxx::pipeline::retrieve() +{ + if (std::empty(m_queries)) + throw std::logic_error{"Attempt to retrieve result from empty pipeline."}; + return retrieve(std::begin(m_queries)); +} + + +int pqxx::pipeline::retain(int retain_max) & +{ + if (retain_max < 0) + throw range_error{internal::concat( + "Attempt to make pipeline retain ", retain_max, " queries")}; + + int const oldvalue{m_retain}; + m_retain = retain_max; + + if (m_num_waiting >= m_retain) + resume(); + + return oldvalue; +} + + +void 
pqxx::pipeline::resume() & +{ + if (have_pending()) + receive_if_available(); + if (not have_pending() and m_num_waiting) + { + issue(); + receive_if_available(); + } +} + + +pqxx::pipeline::query_id pqxx::pipeline::generate_id() +{ + if (m_q_id == qid_limit()) + throw std::overflow_error{"Too many queries went through pipeline."}; + ++m_q_id; + return m_q_id; +} + + +void pqxx::pipeline::issue() +{ + // Retrieve that null result for the last query, if needed. + obtain_result(); + + // Don't issue anything if we've encountered an error. + if (m_error < qid_limit()) + return; + + // Start with oldest query (lowest id) not in previous issue range. + auto oldest{m_issuedrange.second}; + + // Construct cumulative query string for entire batch. + auto cum{separated_list( + theSeparator, oldest, std::end(m_queries), + [](QueryMap::const_iterator i) { return i->second.query; })}; + auto const num_issued{ + QueryMap::size_type(std::distance(oldest, std::end(m_queries)))}; + bool const prepend_dummy{num_issued > 1}; + if (prepend_dummy) + cum = theDummyQuery + cum; + + pqxx::internal::gate::connection_pipeline{m_trans.conn()}.start_exec( + cum.c_str()); + + // Since we managed to send out these queries, update state to reflect this. + m_dummy_pending = prepend_dummy; + m_issuedrange.first = oldest; + m_issuedrange.second = std::end(m_queries); + m_num_waiting -= check_cast(num_issued, "pipeline issue()"sv); +} + + +void PQXX_COLD pqxx::pipeline::internal_error(std::string const &err) +{ + set_error_at(0); + throw pqxx::internal_error{err}; +} + + +bool pqxx::pipeline::obtain_result(bool expect_none) +{ + pqxx::internal::gate::connection_pipeline gate{m_trans.conn()}; + auto const r{gate.get_result()}; + if (r == nullptr) + { + if (have_pending() and not expect_none) + { + PQXX_UNLIKELY + set_error_at(m_issuedrange.first->first); + m_issuedrange.second = m_issuedrange.first; + } + return false; + } + + result const res{pqxx::internal::gate::result_creation::create( + r, std::begin(m_queries)->second.query, m_encoding)}; + + if (not have_pending()) + { + PQXX_UNLIKELY + set_error_at(std::begin(m_queries)->first); + throw std::logic_error{ + "Got more results from pipeline than there were queries."}; + } + + // Must be the result for the oldest pending query. + if (not std::empty(m_issuedrange.first->second.res)) + PQXX_UNLIKELY + internal_error("Multiple results for one query."); + + m_issuedrange.first->second.res = res; + ++m_issuedrange.first; + + return true; +} + + +void pqxx::pipeline::obtain_dummy() +{ + // Allocate once, re-use across invocations. + static auto const text{ + std::make_shared("[DUMMY PIPELINE QUERY]")}; + + pqxx::internal::gate::connection_pipeline gate{m_trans.conn()}; + auto const r{gate.get_result()}; + m_dummy_pending = false; + + if (r == nullptr) + PQXX_UNLIKELY + internal_error("Pipeline got no result from backend when it expected one."); + + result R{pqxx::internal::gate::result_creation::create(r, text, m_encoding)}; + + bool OK{false}; + try + { + pqxx::internal::gate::result_creation{R}.check_status(); + OK = true; + } + catch (sql_error const &) + {} + if (OK) + { + PQXX_LIKELY + if (std::size(R) > 1) + PQXX_UNLIKELY + internal_error("Unexpected result for dummy query in pipeline."); + + if (R.at(0).at(0).as() != theDummyValue) + PQXX_UNLIKELY + internal_error("Dummy query in pipeline returned unexpected value."); + return; + } + + // TODO: Can we actually re-issue statements after a failure? + /* Execution of this batch failed. 
+ * + * When we send multiple statements in one go, the backend treats them as a + * single transaction. So the entire batch was effectively rolled back. + * + * Since none of the queries in the batch were actually executed, we can + * afford to replay them one by one until we find the exact query that + * caused the error. This gives us not only a more specific error message + * to report, but also tells us which query to report it for. + */ + // First, give the whole batch the same syntax error message, in case all + // else is going to fail. + for (auto i{m_issuedrange.first}; i != m_issuedrange.second; ++i) + i->second.res = R; + + // Remember where the end of this batch was + auto const stop{m_issuedrange.second}; + + // Retrieve that null result for the last query, if needed + obtain_result(true); + + // Reset internal state to forget botched batch attempt + m_num_waiting += check_cast( + std::distance(m_issuedrange.first, stop), "pipeline obtain_dummy()"sv); + m_issuedrange.second = m_issuedrange.first; + + // Issue queries in failed batch one at a time. + unregister_me(); + try + { + do { + m_num_waiting--; + auto const query{*m_issuedrange.first->second.query}; + auto &holder{m_issuedrange.first->second}; + holder.res = m_trans.exec(query); + pqxx::internal::gate::result_creation{holder.res}.check_status(); + ++m_issuedrange.first; + } while (m_issuedrange.first != stop); + } + catch (std::exception const &) + { + auto const thud{m_issuedrange.first->first}; + ++m_issuedrange.first; + m_issuedrange.second = m_issuedrange.first; + auto q{m_issuedrange.first}; + set_error_at((q == std::end(m_queries)) ? thud + 1 : q->first); + } +} + + +std::pair +pqxx::pipeline::retrieve(pipeline::QueryMap::iterator q) +{ + if (q == std::end(m_queries)) + throw std::logic_error{"Attempt to retrieve result for unknown query."}; + + if (q->first >= m_error) + throw std::runtime_error{ + "Could not complete query in pipeline due to error in earlier query."}; + + // If query hasn't issued yet, do it now. + if ( + m_issuedrange.second != std::end(m_queries) and + (q->first >= m_issuedrange.second->first)) + { + if (have_pending()) + receive(m_issuedrange.second); + if (m_error == qid_limit()) + issue(); + } + + // If result not in yet, get it; else get at least whatever's convenient. + if (have_pending()) + { + if (q->first >= m_issuedrange.first->first) + { + auto suc{q}; + ++suc; + receive(suc); + } + else + { + receive_if_available(); + } + } + + if (q->first >= m_error) + throw std::runtime_error{ + "Could not complete query in pipeline due to error in earlier query."}; + + // Don't leave the backend idle if there are queries waiting to be issued. 
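+  // (In other words: only when nothing is currently in flight, no earlier
+  // query has failed (m_error == qid_limit()), and queries are still waiting
+  // do we call issue() here, so the server can keep working while the caller
+  // processes the result we are about to return.)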
+ if (m_num_waiting and not have_pending() and (m_error == qid_limit())) + issue(); + + result const R{q->second.res}; + auto const P{std::make_pair(q->first, R)}; + + m_queries.erase(q); + + pqxx::internal::gate::result_creation{R}.check_status(); + return P; +} + + +void pqxx::pipeline::get_further_available_results() +{ + pqxx::internal::gate::connection_pipeline gate{m_trans.conn()}; + while (not gate.is_busy() and obtain_result()) + if (not gate.consume_input()) + throw broken_connection{}; +} + + +void pqxx::pipeline::receive_if_available() +{ + pqxx::internal::gate::connection_pipeline gate{m_trans.conn()}; + if (not gate.consume_input()) + throw broken_connection{}; + if (gate.is_busy()) + return; + + if (m_dummy_pending) + obtain_dummy(); + if (have_pending()) + get_further_available_results(); +} + + +void pqxx::pipeline::receive(pipeline::QueryMap::const_iterator stop) +{ + if (m_dummy_pending) + obtain_dummy(); + + while (obtain_result() and + QueryMap::const_iterator{m_issuedrange.first} != stop) + ; + + // Also haul in any remaining "targets of opportunity". + if (QueryMap::const_iterator{m_issuedrange.first} == stop) + get_further_available_results(); +} diff --git a/ext/libpqxx-7.7.3/src/pqxx-source.hxx b/ext/libpqxx-7.7.3/src/pqxx-source.hxx new file mode 100644 index 000000000..1a8f5fb11 --- /dev/null +++ b/ext/libpqxx-7.7.3/src/pqxx-source.hxx @@ -0,0 +1,30 @@ +/* Compiler settings for compiling libpqxx itself. + * + * Include this header in every source file that goes into the libpqxx library + * binary, and nowhere else. + * + * To ensure this, include this file once, as the very first header, in each + * compilation unit for the library. + * + * DO NOT INCLUDE THIS FILE when building client programs. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ + +// Workarounds & definitions needed to compile libpqxx into a library. +#include "pqxx/config-internal-compiler.h" + +#ifdef _WIN32 + +# ifdef PQXX_SHARED +// We're building libpqxx as a shared library. +# undef PQXX_LIBEXPORT +# define PQXX_LIBEXPORT __declspec(dllexport) +# define PQXX_PRIVATE __declspec() +# endif // PQXX_SHARED + +#endif // _WIN32 diff --git a/ext/libpqxx-7.7.3/src/result.cxx b/ext/libpqxx-7.7.3/src/result.cxx new file mode 100644 index 000000000..86a739004 --- /dev/null +++ b/ext/libpqxx-7.7.3/src/result.cxx @@ -0,0 +1,536 @@ +/** Implementation of the pqxx::result class and support classes. + * + * pqxx::result represents the set of result rows from a database query + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#include "pqxx-source.hxx" + +#include +#include +#include + +extern "C" +{ +#include +} + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/except.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/internal/result_iterator.hxx" +#include "pqxx/result.hxx" +#include "pqxx/row.hxx" + +#include "pqxx/internal/header-post.hxx" + + +namespace pqxx +{ +PQXX_DECLARE_ENUM_CONVERSION(ExecStatusType); +} + +std::string const pqxx::result::s_empty_string; + + +/// C++ wrapper for libpq's PQclear. 
+void pqxx::internal::clear_result(pq::PGresult const *data) +{ + PQclear(const_cast(data)); +} + + +pqxx::result::result( + pqxx::internal::pq::PGresult *rhs, std::shared_ptr query, + internal::encoding_group enc) : + m_data{make_data_pointer(rhs)}, m_query{query}, m_encoding(enc) +{} + + +bool pqxx::result::operator==(result const &rhs) const noexcept +{ + if (&rhs == this) + PQXX_UNLIKELY return true; + auto const s{size()}; + if (std::size(rhs) != s) + return false; + for (size_type i{0}; i < s; ++i) + if ((*this)[i] != rhs[i]) + return false; + return true; +} + + +pqxx::result::const_reverse_iterator pqxx::result::rbegin() const +{ + return const_reverse_iterator{end()}; +} + + +pqxx::result::const_reverse_iterator pqxx::result::crbegin() const +{ + return rbegin(); +} + + +pqxx::result::const_reverse_iterator pqxx::result::rend() const +{ + return const_reverse_iterator{begin()}; +} + + +pqxx::result::const_reverse_iterator pqxx::result::crend() const +{ + return rend(); +} + + +pqxx::result::const_iterator pqxx::result::begin() const noexcept +{ + return {this, 0}; +} + + +pqxx::result::const_iterator pqxx::result::cbegin() const noexcept +{ + return begin(); +} + + +pqxx::result::size_type pqxx::result::size() const noexcept +{ + return (m_data.get() == nullptr) ? + 0 : + static_cast(PQntuples(m_data.get())); +} + + +bool pqxx::result::empty() const noexcept +{ + return (m_data.get() == nullptr) or (PQntuples(m_data.get()) == 0); +} + + +pqxx::result::reference pqxx::result::front() const noexcept +{ + return row{*this, 0, columns()}; +} + + +pqxx::result::reference pqxx::result::back() const noexcept +{ + return row{*this, size() - 1, columns()}; +} + + +void pqxx::result::swap(result &rhs) noexcept +{ + m_data.swap(rhs.m_data); + m_query.swap(rhs.m_query); +} + + +pqxx::row pqxx::result::operator[](result_size_type i) const noexcept +{ + return row{*this, i, columns()}; +} + + +#if defined(PQXX_HAVE_MULTIDIMENSIONAL_SUBSCRIPT) +pqxx::field pqxx::result::operator[]( + result_size_type row_num, row_size_type col_num) const noexcept +{ + return {*this, row_num, field_num}; +} +#endif + + +pqxx::row pqxx::result::at(pqxx::result::size_type i) const +{ + if (i >= size()) + throw range_error{"Row number out of range."}; + return operator[](i); +} + + +pqxx::field pqxx::result::at( + pqxx::result_size_type row_num, pqxx::row_size_type col_num) const +{ + if (row_num >= size()) + throw range_error{"Row number out of range."}; + if (col_num >= columns()) + throw range_error{"Column out of range."}; + return {*this, row_num, col_num}; +} + + +namespace +{ +/// C string comparison. +inline bool equal(char const lhs[], char const rhs[]) +{ + return strcmp(lhs, rhs) == 0; +} +} // namespace + +void PQXX_COLD pqxx::result::throw_sql_error( + std::string const &Err, std::string const &Query) const +{ + // Try to establish more precise error type, and throw corresponding + // type of exception. + char const *const code{PQresultErrorField(m_data.get(), PG_DIAG_SQLSTATE)}; + if (code == nullptr) + { + // No SQLSTATE at all. Can this even happen? + // Let's assume the connection is no longer usable. + throw broken_connection{Err}; + } + + switch (code[0]) + { + PQXX_UNLIKELY + case '\0': + // SQLSTATE is empty. We may have seen this happen in one + // circumstance: a client-side socket timeout (while using the + // tcp_user_timeout connection option). Unfortunately in that case the + // connection was just fine, so we had no real way of detecting the + // problem. 
(Trying to continue to use the connection does break + // though, so I feel justified in panicking.) + throw broken_connection{Err}; + + case '0': + switch (code[1]) + { + case 'A': throw feature_not_supported{Err, Query, code}; + case '8': throw broken_connection{Err}; + case 'L': + case 'P': throw insufficient_privilege{Err, Query, code}; + } + break; + case '2': + switch (code[1]) + { + case '2': throw data_exception{Err, Query, code}; + case '3': + if (equal(code, "23001")) + throw restrict_violation{Err, Query, code}; + if (equal(code, "23502")) + throw not_null_violation{Err, Query, code}; + if (equal(code, "23503")) + throw foreign_key_violation{Err, Query, code}; + if (equal(code, "23505")) + throw unique_violation{Err, Query, code}; + if (equal(code, "23514")) + throw check_violation{Err, Query, code}; + throw integrity_constraint_violation{Err, Query, code}; + case '4': throw invalid_cursor_state{Err, Query, code}; + case '6': throw invalid_sql_statement_name{Err, Query, code}; + } + break; + case '3': + switch (code[1]) + { + case '4': throw invalid_cursor_name{Err, Query, code}; + } + break; + case '4': + switch (code[1]) + { + case '0': + if (equal(code, "40000")) + throw transaction_rollback{Err, Query, code}; + if (equal(code, "40001")) + throw serialization_failure{Err, Query, code}; + if (equal(code, "40003")) + throw statement_completion_unknown{Err, Query, code}; + if (equal(code, "40P01")) + throw deadlock_detected{Err, Query, code}; + break; + case '2': + if (equal(code, "42501")) + throw insufficient_privilege{Err, Query}; + if (equal(code, "42601")) + throw syntax_error{Err, Query, code, errorposition()}; + if (equal(code, "42703")) + throw undefined_column{Err, Query, code}; + if (equal(code, "42883")) + throw undefined_function{Err, Query, code}; + if (equal(code, "42P01")) + throw undefined_table{Err, Query, code}; + } + break; + case '5': + switch (code[1]) + { + case '3': + if (equal(code, "53100")) + throw disk_full{Err, Query, code}; + if (equal(code, "53200")) + throw out_of_memory{Err, Query, code}; + if (equal(code, "53300")) + throw too_many_connections{Err}; + throw insufficient_resources{Err, Query, code}; + } + break; + + case 'P': + if (equal(code, "P0001")) + throw plpgsql_raise{Err, Query, code}; + if (equal(code, "P0002")) + throw plpgsql_no_data_found{Err, Query, code}; + if (equal(code, "P0003")) + throw plpgsql_too_many_rows{Err, Query, code}; + throw plpgsql_error{Err, Query, code}; + } + + // Unknown error code. + throw sql_error{Err, Query, code}; +} + +void pqxx::result::check_status(std::string_view desc) const +{ + if (auto err{status_error()}; not std::empty(err)) + { + PQXX_UNLIKELY + if (not std::empty(desc)) + err = pqxx::internal::concat("Failure during '", desc, "': ", err); + throw_sql_error(err, query()); + } +} + + +std::string pqxx::result::status_error() const +{ + if (m_data.get() == nullptr) + throw failure{"No result set given."}; + + std::string err; + + switch (PQresultStatus(m_data.get())) + { + case PGRES_EMPTY_QUERY: // The string sent to the backend was empty. + case PGRES_COMMAND_OK: // Successful completion, no result data. + case PGRES_TUPLES_OK: // The query successfully executed. + break; + + case PGRES_COPY_OUT: // Copy Out (from server) data transfer started. + case PGRES_COPY_IN: // Copy In (to server) data transfer started. + break; + + case PGRES_BAD_RESPONSE: // The server's response was not understood. 
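+  // (This status and the two below share one handler: the text from
+  // PQresultErrorMessage() becomes `err`, which check_status() above then
+  // hands to throw_sql_error() together with the query.)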
+ case PGRES_NONFATAL_ERROR: + case PGRES_FATAL_ERROR: err = PQresultErrorMessage(m_data.get()); break; + + default: + throw internal_error{internal::concat( + "pqxx::result: Unrecognized response code ", + PQresultStatus(m_data.get()))}; + } + return err; +} + + +char const *pqxx::result::cmd_status() const noexcept +{ + return PQcmdStatus(const_cast(m_data.get())); +} + + +std::string const &pqxx::result::query() const &noexcept +{ + return (m_query.get() == nullptr) ? s_empty_string : *m_query; +} + + +pqxx::oid pqxx::result::inserted_oid() const +{ + if (m_data.get() == nullptr) + throw usage_error{ + "Attempt to read oid of inserted row without an INSERT result"}; + return PQoidValue(const_cast(m_data.get())); +} + + +pqxx::result::size_type pqxx::result::affected_rows() const +{ + auto const rows_str{ + PQcmdTuples(const_cast(m_data.get()))}; + return (rows_str[0] == '\0') ? 0 : size_type(atoi(rows_str)); +} + + +char const *pqxx::result::get_value( + pqxx::result::size_type row, pqxx::row::size_type col) const +{ + return PQgetvalue(m_data.get(), row, col); +} + + +bool pqxx::result::get_is_null( + pqxx::result::size_type row, pqxx::row::size_type col) const +{ + return PQgetisnull(m_data.get(), row, col) != 0; +} + +pqxx::field::size_type pqxx::result::get_length( + pqxx::result::size_type row, pqxx::row::size_type col) const noexcept +{ + return static_cast( + PQgetlength(m_data.get(), row, col)); +} + + +pqxx::oid pqxx::result::column_type(row::size_type col_num) const +{ + oid const t{PQftype(m_data.get(), col_num)}; + if (t == oid_none) + throw argument_error{internal::concat( + "Attempt to retrieve type of nonexistent column ", col_num, + " of query result.")}; + return t; +} + + +pqxx::row::size_type pqxx::result::column_number(zview col_name) const +{ + auto const n{PQfnumber( + const_cast(m_data.get()), col_name.c_str())}; + if (n == -1) + throw argument_error{ + internal::concat("Unknown column name: '", col_name, "'.")}; + + return static_cast(n); +} + + +pqxx::oid pqxx::result::column_table(row::size_type col_num) const +{ + oid const t{PQftable(m_data.get(), col_num)}; + + /* If we get oid_none, it may be because the column is computed, or because + * we got an invalid row number. + */ + if (t == oid_none and col_num >= columns()) + throw argument_error{internal::concat( + "Attempt to retrieve table ID for column ", col_num, " out of ", + columns())}; + + return t; +} + + +pqxx::row::size_type pqxx::result::table_column(row::size_type col_num) const +{ + auto const n{row::size_type(PQftablecol(m_data.get(), col_num))}; + if (n != 0) + PQXX_LIKELY + return n - 1; + + // Failed. Now find out why, so we can throw a sensible exception. 
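+  // (Three diagnoses follow: a column index beyond columns(), a result that
+  // was never initialised, and a column that is not derived directly from a
+  // table column.)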
+ auto const col_str{to_string(col_num)}; + if (col_num > columns()) + throw range_error{ + internal::concat("Invalid column index in table_column(): ", col_str)}; + + if (m_data.get() == nullptr) + throw usage_error{internal::concat( + "Can't query origin of column ", col_str, + ": result is not initialized.")}; + + throw usage_error{internal::concat( + "Can't query origin of column ", col_str, + ": not derived from table column.")}; +} + + +int pqxx::result::errorposition() const +{ + int pos{-1}; + if (m_data.get()) + { + auto const p{PQresultErrorField( + const_cast(m_data.get()), + PG_DIAG_STATEMENT_POSITION)}; + if (p) + pos = from_string(p); + } + return pos; +} + + +char const *pqxx::result::column_name(pqxx::row::size_type number) const & +{ + auto const n{PQfname(m_data.get(), number)}; + if (n == nullptr) + { + PQXX_UNLIKELY + if (m_data.get() == nullptr) + throw usage_error{"Queried column name on null result."}; + throw range_error{internal::concat( + "Invalid column number: ", number, " (maximum is ", (columns() - 1), + ").")}; + } + return n; +} + + +pqxx::row::size_type pqxx::result::columns() const noexcept +{ + auto ptr{const_cast(m_data.get())}; + return (ptr == nullptr) ? 0 : row::size_type(PQnfields(ptr)); +} + + +// const_result_iterator + +pqxx::const_result_iterator pqxx::const_result_iterator::operator++(int) +{ + const_result_iterator old{*this}; + m_index++; + return old; +} + + +pqxx::const_result_iterator pqxx::const_result_iterator::operator--(int) +{ + const_result_iterator old{*this}; + m_index--; + return old; +} + + +pqxx::result::const_iterator +pqxx::result::const_reverse_iterator::base() const noexcept +{ + iterator_type tmp{*this}; + return ++tmp; +} + + +pqxx::const_reverse_result_iterator +pqxx::const_reverse_result_iterator::operator++(int) +{ + const_reverse_result_iterator tmp{*this}; + iterator_type::operator--(); + return tmp; +} + + +pqxx::const_reverse_result_iterator +pqxx::const_reverse_result_iterator::operator--(int) +{ + const_reverse_result_iterator tmp{*this}; + iterator_type::operator++(); + return tmp; +} + + +template<> std::string pqxx::to_string(field const &value) +{ + return {value.c_str(), std::size(value)}; +} diff --git a/ext/libpqxx-7.7.3/src/robusttransaction.cxx b/ext/libpqxx-7.7.3/src/robusttransaction.cxx new file mode 100644 index 000000000..5bbfd64f8 --- /dev/null +++ b/ext/libpqxx-7.7.3/src/robusttransaction.cxx @@ -0,0 +1,221 @@ +/** Implementation of the pqxx::robusttransaction class. + * + * pqxx::robusttransaction is a slower but safer transaction class. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#include "pqxx-source.hxx" + +#include +#include +#include +#include + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/connection.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/internal/wait.hxx" +#include "pqxx/nontransaction.hxx" +#include "pqxx/result.hxx" +#include "pqxx/robusttransaction.hxx" + +#include "pqxx/internal/header-post.hxx" + + +using namespace std::literals; + +namespace +{ +using pqxx::operator"" _zv; + +/// Statuses in which we may find our transaction. +/** There's also "in the future," but it manifests as an error, not as an + * actual status. 
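+ * (For illustration: parse_status() below maps the strings "committed",
+ * "aborted" and "in progress", as returned by PostgreSQL's txid_status(),
+ * onto tx_committed, tx_aborted and tx_in_progress; anything else counts
+ * as tx_unknown.)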
+ */ +enum tx_stat +{ + tx_unknown, + tx_committed, + tx_aborted, + tx_in_progress, +}; + + +constexpr auto committed{"committed"_zv}, aborted{"aborted"_zv}, + in_progress{"in progress"_zv}; + + +/// Parse a nonempty transaction status string. +constexpr tx_stat parse_status(std::string_view text) noexcept +{ + switch (text[0]) + { + case 'a': + if (text == aborted) + PQXX_LIKELY return tx_aborted; + break; + case 'c': + if (text == committed) + PQXX_LIKELY return tx_committed; + break; + case 'i': + if (text == in_progress) + PQXX_LIKELY return tx_in_progress; + break; + } + return tx_unknown; +} + + +tx_stat query_status(std::string const &xid, std::string const &conn_str) +{ + static std::string const name{"robusttxck"sv}; + auto const query{pqxx::internal::concat("SELECT txid_status(", xid, ")")}; + pqxx::connection c{conn_str}; + pqxx::nontransaction w{c, name}; + auto const status_row{w.exec1(query)}; + auto const status_field{status_row[0]}; + if (std::size(status_field) == 0) + throw pqxx::internal_error{"Transaction status string is empty."}; + auto const status{parse_status(status_field.as())}; + if (status == tx_unknown) + throw pqxx::internal_error{pqxx::internal::concat( + "Unknown transaction status string: ", status_field.view())}; + return status; +} +} // namespace + + +void pqxx::internal::basic_robusttransaction::init(zview begin_command) +{ + static auto const txid_q{ + std::make_shared("SELECT txid_current()"sv)}; + m_backendpid = conn().backendpid(); + direct_exec(begin_command); + direct_exec(txid_q)[0][0].to(m_xid); +} + + +pqxx::internal::basic_robusttransaction::basic_robusttransaction( + connection &c, zview begin_command, std::string_view tname) : + dbtransaction(c, tname), m_conn_string{c.connection_string()} +{ + init(begin_command); +} + + +pqxx::internal::basic_robusttransaction::basic_robusttransaction( + connection &c, zview begin_command) : + dbtransaction(c), m_conn_string{c.connection_string()} +{ + init(begin_command); +} + + +pqxx::internal::basic_robusttransaction::~basic_robusttransaction() = default; + + +void pqxx::internal::basic_robusttransaction::do_commit() +{ + static auto const check_constraints_q{ + std::make_shared("SET CONSTRAINTS ALL IMMEDIATE"sv)}, + commit_q{std::make_shared("COMMIT"sv)}; + // Check constraints before sending the COMMIT to the database, so as to + // minimise our in-doubt window. + try + { + direct_exec(check_constraints_q); + } + catch (std::exception const &) + { + do_abort(); + throw; + } + + // Here comes the in-doubt window. If we lose our connection here, we'll be + // left clueless as to what happened on the backend. It may have received + // the commit command and completed the transaction, and ended up with a + // success it could not report back to us. Or it may have noticed the broken + // connection and aborted the transaction. It may even still be executing + // the commit, only to fail later. + // + // All this uncertainty requires some special handling, and that s what makes + // robusttransaction what it is. + try + { + direct_exec(commit_q); + + // If we make it here, great. Normal, successful commit. + return; + } + catch (broken_connection const &) + { + // Oops, lost connection at the crucial moment. Fall through to in-doubt + // handling below. + } + catch (std::exception const &) + { + if (conn().is_open()) + { + // Commit failed, for some other reason. + do_abort(); + throw; + } + // Otherwise, fall through to in-doubt handling. + } + + // If we get here, we're in doubt. Figure out what happened. 
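+  // (Rough sketch of the recovery below: reconnect and poll query_status(),
+  // i.e. "SELECT txid_status(...)", pausing between attempts, for at most
+  // max_attempts tries; a definite "committed" or "aborted" answer ends the
+  // wait, and if no verdict is reached the in_doubt_error at the end of this
+  // function reports the transaction's fate as unknown.)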
+ + int const max_attempts{500}; + static_assert(max_attempts > 0); + + tx_stat stat; + for (int attempts{0}; attempts < max_attempts; + ++attempts, pqxx::internal::wait_for(300u)) + { + stat = tx_unknown; + try + { + stat = query_status(m_xid, m_conn_string); + } + catch (pqxx::broken_connection const &) + { + // Swallow the error. Pause and retry. + } + switch (stat) + { + case tx_unknown: + // We were unable to reconnect and query transaction status. + // Stay in it for another attempt. + return; + case tx_committed: + // Success! We're done. + return; + case tx_aborted: + // Aborted. We're done. + do_abort(); + return; + case tx_in_progress: + // The transaction is still running. Stick around until we know what + // transpires. + break; + } + } + + // Okay, this has taken too long. Give up, report in-doubt state. + throw in_doubt_error{internal::concat( + "Transaction ", name(), " (with transaction ID ", m_xid, + ") " + "lost connection while committing. It's impossible to tell whether " + "it committed, or aborted, or is still running. " + "Attempts to find out its outcome have failed. " + "The backend process on the server had process ID ", + m_backendpid, + ". " + "You may be able to check what happened to that process.")}; +} diff --git a/ext/libpqxx-7.7.3/src/row.cxx b/ext/libpqxx-7.7.3/src/row.cxx new file mode 100644 index 000000000..f0974c839 --- /dev/null +++ b/ext/libpqxx-7.7.3/src/row.cxx @@ -0,0 +1,250 @@ +/** Implementation of the pqxx::result class and support classes. + * + * pqxx::result represents the set of result rows from a database query. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. 
+ */ +#include "pqxx-source.hxx" + +#include +#include + +extern "C" +{ +#include +} + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/except.hxx" +#include "pqxx/result.hxx" +#include "pqxx/row.hxx" + +#include "pqxx/internal/header-post.hxx" + + +pqxx::row::row( + result const &r, result::size_type index, size_type cols) noexcept : + m_result{r}, m_index{index}, m_end{cols} +{} + + +pqxx::row::const_iterator pqxx::row::begin() const noexcept +{ + return {*this, m_begin}; +} + + +pqxx::row::const_iterator pqxx::row::cbegin() const noexcept +{ + return begin(); +} + + +pqxx::row::const_iterator pqxx::row::end() const noexcept +{ + return {*this, m_end}; +} + + +pqxx::row::const_iterator pqxx::row::cend() const noexcept +{ + return end(); +} + + +pqxx::row::reference pqxx::row::front() const noexcept +{ + return field{m_result, m_index, m_begin}; +} + + +pqxx::row::reference pqxx::row::back() const noexcept +{ + return field{m_result, m_index, m_end - 1}; +} + + +pqxx::row::const_reverse_iterator pqxx::row::rbegin() const +{ + return const_reverse_row_iterator{end()}; +} + + +pqxx::row::const_reverse_iterator pqxx::row::crbegin() const +{ + return rbegin(); +} + + +pqxx::row::const_reverse_iterator pqxx::row::rend() const +{ + return const_reverse_row_iterator{begin()}; +} + + +pqxx::row::const_reverse_iterator pqxx::row::crend() const +{ + return rend(); +} + + +bool pqxx::row::operator==(row const &rhs) const noexcept +{ + if (&rhs == this) + return true; + auto const s{size()}; + if (std::size(rhs) != s) + return false; + for (size_type i{0}; i < s; ++i) + if ((*this)[i] != rhs[i]) + return false; + return true; +} + + +pqxx::row::reference pqxx::row::operator[](size_type i) const noexcept +{ + return field{m_result, m_index, m_begin + i}; +} + + +pqxx::row::reference pqxx::row::operator[](zview col_name) const +{ + return at(col_name); +} + + +void pqxx::row::swap(row &rhs) noexcept +{ + auto const i{m_index}; + auto const b{m_begin}; + auto const e{m_end}; + m_result.swap(rhs.m_result); + m_index = rhs.m_index; + m_begin = rhs.m_begin; + m_end = rhs.m_end; + rhs.m_index = i; + rhs.m_begin = b; + rhs.m_end = e; +} + + +pqxx::field pqxx::row::at(zview col_name) const +{ + return {m_result, m_index, m_begin + column_number(col_name)}; +} + + +pqxx::field pqxx::row::at(pqxx::row::size_type i) const +{ + if (i >= size()) + throw range_error{"Invalid field number."}; + + return operator[](i); +} + + +pqxx::oid pqxx::row::column_type(size_type col_num) const +{ + return m_result.column_type(m_begin + col_num); +} + + +pqxx::oid pqxx::row::column_table(size_type col_num) const +{ + return m_result.column_table(m_begin + col_num); +} + + +pqxx::row::size_type pqxx::row::table_column(size_type col_num) const +{ + return m_result.table_column(m_begin + col_num); +} + + +pqxx::row::size_type pqxx::row::column_number(zview col_name) const +{ + auto const n{m_result.column_number(col_name)}; + if (n >= m_end) + throw argument_error{ + "Column '" + std::string{col_name} + "' falls outside slice."}; + if (n >= m_begin) + return n - m_begin; + + // This deals with a really nasty possibility: that the column name occurs + // twice - once before the beginning of the slice, and once inside the slice. + char const *const adapted_name{m_result.column_name(n)}; + for (auto i{m_begin}; i < m_end; ++i) + if (strcmp(adapted_name, m_result.column_name(i)) == 0) + return i - m_begin; + + // Didn't find any? Recurse just to produce the same error message. 
+ return result{}.column_number(col_name); +} + + +pqxx::row PQXX_COLD pqxx::row::slice(size_type sbegin, size_type send) const +{ + if (sbegin > send or send > size()) + throw range_error{"Invalid field range."}; + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + row res{*this}; +#include "pqxx/internal/ignore-deprecated-post.hxx" + res.m_begin = m_begin + sbegin; + res.m_end = m_begin + send; + return res; +} + + +bool PQXX_COLD pqxx::row::empty() const noexcept +{ + return m_begin == m_end; +} + + +pqxx::const_row_iterator pqxx::const_row_iterator::operator++(int) +{ + auto const old{*this}; + m_col++; + return old; +} + + +pqxx::const_row_iterator pqxx::const_row_iterator::operator--(int) +{ + auto const old{*this}; + m_col--; + return old; +} + + +pqxx::const_row_iterator +pqxx::const_reverse_row_iterator::base() const noexcept +{ + iterator_type tmp{*this}; + return ++tmp; +} + + +pqxx::const_reverse_row_iterator +pqxx::const_reverse_row_iterator::operator++(int) +{ + auto tmp{*this}; + operator++(); + return tmp; +} + + +pqxx::const_reverse_row_iterator +pqxx::const_reverse_row_iterator::operator--(int) +{ + auto tmp{*this}; + operator--(); + return tmp; +} diff --git a/ext/libpqxx-7.7.3/src/sql_cursor.cxx b/ext/libpqxx-7.7.3/src/sql_cursor.cxx new file mode 100644 index 000000000..1e5d6a790 --- /dev/null +++ b/ext/libpqxx-7.7.3/src/sql_cursor.cxx @@ -0,0 +1,276 @@ +/** Implementation of libpqxx STL-style cursor classes. + * + * These classes wrap SQL cursors in STL-like interfaces. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#include "pqxx-source.hxx" + +#include + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/cursor.hxx" +#include "pqxx/internal/encodings.hxx" +#include "pqxx/internal/gates/connection-sql_cursor.hxx" +#include "pqxx/internal/gates/transaction-sql_cursor.hxx" + +#include "pqxx/internal/header-post.hxx" + + +using namespace std::literals; + +namespace +{ +/// Is this character a "useless trailing character" in a query? +/** A character is "useless" at the end of a query if it is either whitespace + * or a semicolon. + */ +inline bool useless_trail(char c) +{ + return isspace(c) or c == ';'; +} + + +/// Find end of nonempty query, stripping off any trailing semicolon. +/** When executing a normal query, a trailing semicolon is meaningless but + * won't hurt. That's why we can't rule out that some code may include one. + * + * But for cursor queries, a trailing semicolon is a problem. The query gets + * embedded in a larger statement, which a semicolon would break into two. + * We'll have to remove it if present. + * + * A trailing semicolon may not actually be at the end. It could be masked by + * subsequent whitespace. If there's also a comment though, that's the + * caller's own lookout. We can't guard against every possible mistake, and + * text processing is actually remarkably sensitive to mistakes in a + * multi-encoding world. + * + * If there is a trailing semicolon, this function returns its offset. If + * there are more than one, it returns the offset of the first one. If there + * is no trailing semicolon, it returns the length of the query string. + * + * The query must be nonempty. 
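+ * Illustrative example (assuming a single-byte encoding, so the backward
+ * scan applies): for the text "SELECT 1;  " this returns 8, the offset of
+ * the ';', so the caller keeps just "SELECT 1" before embedding the query
+ * in a DECLARE ... CURSOR statement.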
+ */ +std::string::size_type +find_query_end(std::string_view query, pqxx::internal::encoding_group enc) +{ + auto const text{std::data(query)}; + auto const size{std::size(query)}; + std::string::size_type end; + if (enc == pqxx::internal::encoding_group::MONOBYTE) + { + // This is an encoding where we can scan backwards from the end. + for (end = std::size(query); end > 0 and useless_trail(text[end - 1]); + --end) + ; + } + else + { + // Complex encoding. We only know how to iterate forwards, so start from + // the beginning. + end = 0; + + pqxx::internal::for_glyphs( + enc, + [text, &end](char const *gbegin, char const *gend) { + if (gend - gbegin > 1 or not useless_trail(*gbegin)) + end = std::string::size_type(gend - text); + }, + text, size); + } + + return end; +} +} // namespace + + +pqxx::internal::sql_cursor::sql_cursor( + transaction_base &t, std::string_view query, std::string_view cname, + cursor_base::access_policy ap, cursor_base::update_policy up, + cursor_base::ownership_policy op, bool hold) : + cursor_base{t.conn(), cname}, + m_home{t.conn()}, + m_adopted{false}, + m_at_end{-1}, + m_pos{0} +{ + if (&t.conn() != &m_home) + throw internal_error{"Cursor in wrong connection"}; + + if (std::empty(query)) + throw usage_error{"Cursor has empty query."}; + auto const enc{enc_group(t.conn().encoding_id())}; + auto const qend{find_query_end(query, enc)}; + if (qend == 0) + throw usage_error{"Cursor has effectively empty query."}; + query.remove_suffix(std::size(query) - qend); + + std::string const cq{internal::concat( + "DECLARE "sv, t.quote_name(name()), " "sv, + ((ap == cursor_base::forward_only) ? "NO "sv : ""sv), "SCROLL CURSOR "sv, + (hold ? "WITH HOLD "sv : ""sv), "FOR "sv, query, " "sv, + ((up == cursor_base::update) ? "FOR UPDATE "sv : "FOR READ ONLY "sv))}; + + t.exec(cq); + + // Now that we're here in the starting position, keep a copy of an empty + // result. That may come in handy later, because we may not be able to + // construct an empty result with all the right metadata due to the weird + // meaning of "FETCH 0." + init_empty_result(t); + + m_ownership = op; +} + + +pqxx::internal::sql_cursor::sql_cursor( + transaction_base &t, std::string_view cname, + cursor_base::ownership_policy op) : + cursor_base{t.conn(), cname, false}, + m_home{t.conn()}, + m_empty_result{}, + m_adopted{true}, + m_at_end{0}, + m_pos{-1} +{ + m_adopted = true; + m_ownership = op; +} + + +void pqxx::internal::sql_cursor::close() noexcept +{ + if (m_ownership == cursor_base::owned) + { + try + { + gate::connection_sql_cursor{m_home}.exec( + internal::concat("CLOSE "sv, m_home.quote_name(name())).c_str()); + } + catch (std::exception const &) + {} + m_ownership = cursor_base::loose; + } +} + + +void pqxx::internal::sql_cursor::init_empty_result(transaction_base &t) +{ + if (pos() != 0) + throw internal_error{"init_empty_result() from bad pos()."}; + m_empty_result = + t.exec(internal::concat("FETCH 0 IN "sv, m_home.quote_name(name()))); +} + + +/// Compute actual displacement based on requested and reported displacements. +pqxx::internal::sql_cursor::difference_type pqxx::internal::sql_cursor::adjust( + difference_type hoped, difference_type actual) +{ + if (actual < 0) + throw internal_error{"Negative rows in cursor movement."}; + if (hoped == 0) + return 0; + int const direction{((hoped < 0) ? 
-1 : 1)}; + bool hit_end{false}; + if (actual != labs(hoped)) + { + if (actual > labs(hoped)) + throw internal_error{"Cursor displacement larger than requested."}; + + // If we see fewer rows than requested, then we've hit an end (on either + // side) of the result set. Wether we make an extra step to a one-past-end + // position or whether we're already there depends on where we were + // previously: if our last move was in the same direction and also fell + // short, we're already at a one-past-end row. + if (m_at_end != direction) + ++actual; + + // If we hit the beginning, make sure our position calculation ends up + // at zero (even if we didn't previously know where we were!), and if we + // hit the other end, register the fact that we now know where the end + // of the result set is. + if (direction > 0) + hit_end = true; + else if (m_pos == -1) + m_pos = actual; + else if (m_pos != actual) + throw internal_error{internal::concat( + "Moved back to beginning, but wrong position: hoped=", hoped, + ", actual=", actual, ", m_pos=", m_pos, ", direction=", direction, + ".")}; + + m_at_end = direction; + } + else + { + m_at_end = 0; + } + + if (m_pos >= 0) + m_pos += direction * actual; + if (hit_end) + { + if (m_endpos >= 0 and m_pos != m_endpos) + throw internal_error{"Inconsistent cursor end positions."}; + m_endpos = m_pos; + } + return direction * actual; +} + + +pqxx::result pqxx::internal::sql_cursor::fetch( + difference_type rows, difference_type &displacement) +{ + if (rows == 0) + { + displacement = 0; + return m_empty_result; + } + auto const query{pqxx::internal::concat( + "FETCH "sv, stridestring(rows), " IN "sv, m_home.quote_name(name()))}; + auto const r{gate::connection_sql_cursor{m_home}.exec(query.c_str())}; + displacement = adjust(rows, difference_type(std::size(r))); + return r; +} + + +pqxx::cursor_base::difference_type pqxx::internal::sql_cursor::move( + difference_type rows, difference_type &displacement) +{ + if (rows == 0) + { + displacement = 0; + return 0; + } + + auto const query{pqxx::internal::concat( + "MOVE "sv, stridestring(rows), " IN "sv, m_home.quote_name(name()))}; + auto const r{gate::connection_sql_cursor{m_home}.exec(query.c_str())}; + auto d{static_cast(r.affected_rows())}; + displacement = adjust(rows, d); + return d; +} + + +std::string pqxx::internal::sql_cursor::stridestring(difference_type n) +{ + /* Some special-casing for ALL and BACKWARD ALL here. We used to use numeric + * "infinities" for difference_type for this (the highest and lowest possible + * values for "long"), but for PostgreSQL 8.0 at least, the backend appears + * to expect a 32-bit number and fails to parse large 64-bit numbers. We + * could change the alias to match this behaviour, but that would break + * if/when Postgres is changed to accept 64-bit displacements. + */ + static std::string const All{"ALL"}, BackAll{"BACKWARD ALL"}; + if (n >= cursor_base::all()) + return All; + else if (n <= cursor_base::backward_all()) + return BackAll; + return to_string(n); +} diff --git a/ext/libpqxx-7.7.3/src/strconv.cxx b/ext/libpqxx-7.7.3/src/strconv.cxx new file mode 100644 index 000000000..2bd206b2a --- /dev/null +++ b/ext/libpqxx-7.7.3/src/strconv.cxx @@ -0,0 +1,785 @@ +/** Implementation of string conversions. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. 
+ */ +#include "pqxx-source.hxx" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if __has_include() +# include +#endif + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/except.hxx" +#include "pqxx/strconv.hxx" + +#include "pqxx/internal/header-post.hxx" + + +namespace +{ +#if !defined(PQXX_HAVE_CHARCONV_FLOAT) +/// Do we have fully functional thread_local support? +/** When building with libcxxrt on clang, you can't create thread_local objects + * of non-POD types. Any attempt will result in a link error. + */ +constexpr bool have_thread_local +{ +# if defined(PQXX_HAVE_THREAD_LOCAL) + true +# else + false +# endif +}; +#endif + + +/// String comparison between string_view. +constexpr inline bool equal(std::string_view lhs, std::string_view rhs) +{ + return lhs.compare(rhs) == 0; +} + + +/// The lowest possible value of integral type T. +template constexpr T bottom{std::numeric_limits::min()}; + +/// The highest possible value of integral type T. +template constexpr T top{std::numeric_limits::max()}; + +/// Write nonnegative integral value at end of buffer. Return start. +/** Assumes a sufficiently large buffer. + * + * Includes a single trailing null byte, right before @c *end. + */ +template constexpr inline char *nonneg_to_buf(char *end, T value) +{ + char *pos = end; + *--pos = '\0'; + do { + *--pos = pqxx::internal::number_to_digit(int(value % 10)); + value = T(value / 10); + } while (value > 0); + return pos; +} + + +/// Write negative version of value at end of buffer. Return start. +/** Like @c nonneg_to_buf, but prefixes a minus sign. + */ +template constexpr inline char *neg_to_buf(char *end, T value) +{ + char *pos = nonneg_to_buf(end, value); + *--pos = '-'; + return pos; +} + + +/// Write lowest possible negative value at end of buffer. +/** Like @c neg_to_buf, but for the special case of the bottom value. + */ +template constexpr inline char *bottom_to_buf(char *end) +{ + static_assert(std::is_signed_v); + + // This is the hard case. In two's-complement systems, which includes + // any modern-day system I can think of, a signed type's bottom value + // has no positive equivalent. Luckily the C++ standards committee can't + // think of any exceptions either, so it's the required representation as + // of C++20. We'll assume it right now, while still on C++17. + static_assert(-(bottom + 1) == top); + + // The unsigned version of T does have the unsigned version of bottom. + using unsigned_t = std::make_unsigned_t; + + // Careful though. If we tried to negate value in order to promote to + // unsigned_t, the value will overflow, which means behaviour is + // undefined. Promotion of a negative value to an unsigned type is + // well-defined, given a representation, so let's do that: + constexpr auto positive{static_cast(bottom)}; + + // As luck would have it, in two's complement, this gives us exactly the + // value we want. + static_assert(positive == top / 2 + 1); + + // So the only thing we need to do differently from the regular negative + // case is to skip that overflowing negation and promote to an unsigned type! + return neg_to_buf(end, positive); +} + + +#if defined(PQXX_HAVE_CHARCONV_INT) || defined(PQXX_HAVE_CHARCONV_FLOAT) +/// Call to_chars, report errors as exceptions, add zero, return pointer. 
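+/// (Illustrative behaviour: converting the int 42 into a sufficiently large
+/// buffer writes the characters '4', '2', '\0' starting at `begin` and
+/// returns the pointer one past the terminating zero; a buffer that is too
+/// small throws conversion_overrun instead.)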
+template +[[maybe_unused]] inline char * +wrap_to_chars(char *begin, char *end, T const &value) +{ + auto res{std::to_chars(begin, end - 1, value)}; + if (res.ec != std::errc()) + PQXX_UNLIKELY + switch (res.ec) + { + case std::errc::value_too_large: + throw pqxx::conversion_overrun{ + "Could not convert " + pqxx::type_name + + " to string: " + "buffer too small (" + + pqxx::to_string(end - begin) + " bytes)."}; + default: + throw pqxx::conversion_error{ + "Could not convert " + pqxx::type_name + " to string."}; + } + // No need to check for overrun here: we never even told to_chars about that + // last byte in the buffer, so it didn't get used up. + *res.ptr++ = '\0'; + return res.ptr; +} +#endif +} // namespace + + +namespace pqxx::internal +{ +template +zview integral_traits::to_buf(char *begin, char *end, T const &value) +{ + static_assert(std::is_integral_v); + auto const space{end - begin}, + need{static_cast(size_buffer(value))}; + if (space < need) + throw conversion_overrun{ + "Could not convert " + type_name + + " to string: " + "buffer too small. " + + pqxx::internal::state_buffer_overrun(space, need)}; + + char *pos; + if constexpr (std::is_unsigned_v) + pos = nonneg_to_buf(end, value); + else if (value >= 0) + pos = nonneg_to_buf(end, value); + else if (value > bottom) + pos = neg_to_buf(end, -value); + else + pos = bottom_to_buf(end); + + return {pos, end - pos - 1}; +} + + +template zview integral_traits::to_buf(char *, char *, short const &); +template zview integral_traits::to_buf( + char *, char *, unsigned short const &); +template zview integral_traits::to_buf(char *, char *, int const &); +template zview +integral_traits::to_buf(char *, char *, unsigned const &); +template zview integral_traits::to_buf(char *, char *, long const &); +template zview +integral_traits::to_buf(char *, char *, unsigned long const &); +template zview +integral_traits::to_buf(char *, char *, long long const &); +template zview integral_traits::to_buf( + char *, char *, unsigned long long const &); + + +template +char *integral_traits::into_buf(char *begin, char *end, T const &value) +{ +#if defined(PQXX_HAVE_CHARCONV_INT) + // This is exactly what to_chars is good at. Trust standard library + // implementers to optimise better than we can. + return wrap_to_chars(begin, end, value); +#else + return generic_into_buf(begin, end, value); +#endif +} + + +template char *integral_traits::into_buf(char *, char *, short const &); +template char *integral_traits::into_buf( + char *, char *, unsigned short const &); +template char *integral_traits::into_buf(char *, char *, int const &); +template char * +integral_traits::into_buf(char *, char *, unsigned const &); +template char *integral_traits::into_buf(char *, char *, long const &); +template char *integral_traits::into_buf( + char *, char *, unsigned long const &); +template char * +integral_traits::into_buf(char *, char *, long long const &); +template char *integral_traits::into_buf( + char *, char *, unsigned long long const &); +} // namespace pqxx::internal + + +namespace pqxx::internal +{ +std::string demangle_type_name(char const raw[]) +{ +#if defined(PQXX_HAVE_CXA_DEMANGLE) + // We've got __cxa_demangle. Use it to get a friendlier type name. + int status{0}; + + // We've seen this fail on FreeBSD 11.3 (see #361). Trying to throw a + // meaningful exception only made things worse. So in case of error, just + // fall back to the raw name. + // + // When __cxa_demangle fails, it's guaranteed to return null. 
+ char *demangled{abi::__cxa_demangle(raw, nullptr, nullptr, &status)}; +#else + static constexpr char *demangled{nullptr}; +#endif + std::string const name{(demangled == nullptr) ? raw : demangled}; + + // Check for nullness to work around jemalloc bug (see #508). + if (demangled != nullptr) + std::free(demangled); + return name; +} + +void PQXX_COLD throw_null_conversion(std::string const &type) +{ + throw conversion_error{"Attempt to convert null to " + type + "."}; +} + + +std::string PQXX_COLD state_buffer_overrun(int have_bytes, int need_bytes) +{ + // We convert these in standard library terms, not for the localisation + // so much as to avoid "error cycles," if these values in turn should fail + // to get enough buffer space. + std::stringstream have, need; + have << have_bytes; + need << need_bytes; + return "Have " + have.str() + " bytes, need " + need.str() + "."; +} +} // namespace pqxx::internal + + +namespace +{ +#if defined(PQXX_HAVE_CHARCONV_INT) || defined(PQXX_HAVE_CHARCONV_FLOAT) +template +[[maybe_unused]] inline TYPE from_string_arithmetic(std::string_view in) +{ + char const *here; + auto const end{std::data(in) + std::size(in)}; + + // Skip whitespace. This is not the proper way to do it, but I see no way + // that any of the supported encodings could ever produce a valid character + // whose byte sequence would confuse this code. + for (here = std::data(in); here < end and (*here == ' ' or *here == '\t'); + ++here) + ; + + TYPE out{}; + auto const res{std::from_chars(here, end, out)}; + if (res.ec == std::errc() and res.ptr == end) + PQXX_LIKELY + return out; + + std::string msg; + if (res.ec == std::errc()) + { + msg = "Could not parse full string."; + } + else + { + switch (res.ec) + { + case std::errc::result_out_of_range: msg = "Value out of range."; break; + case std::errc::invalid_argument: msg = "Invalid argument."; break; + default: break; + } + } + + auto const base{ + "Could not convert '" + std::string(in) + + "' " + "to " + + pqxx::type_name}; + if (std::empty(msg)) + throw pqxx::conversion_error{base + "."}; + else + throw pqxx::conversion_error{base + ": " + msg}; +} +#endif +} // namespace + + +namespace +{ +#if !defined(PQXX_HAVE_CHARCONV_INT) +[[noreturn, maybe_unused]] void PQXX_COLD report_overflow() +{ + throw pqxx::conversion_error{ + "Could not convert string to integer: value out of range."}; +} + +template struct numeric_ten +{ + static inline constexpr T value = 10; +}; + +template struct numeric_high_threshold +{ + static inline constexpr T value = + (std::numeric_limits::max)() / numeric_ten::value; +}; + +template struct numeric_low_threshold +{ + static inline constexpr T value = + (std::numeric_limits::min)() / numeric_ten::value; +}; + +/// Return 10*n, or throw exception if it overflows. +template +[[maybe_unused]] constexpr inline T safe_multiply_by_ten(T n) +{ + using limits = std::numeric_limits; + + if (n > numeric_high_threshold::value) + PQXX_UNLIKELY + report_overflow(); + if constexpr (limits::is_signed) + { + if (numeric_low_threshold::value > n) + PQXX_UNLIKELY + report_overflow(); + } + return T(n * numeric_ten::value); +} + + +/// Add digit d to nonnegative n, or throw exception if it overflows. +template +[[maybe_unused]] constexpr inline T safe_add_digit(T n, T d) +{ + T const high_threshold{static_cast(std::numeric_limits::max() - d)}; + if (n > high_threshold) + PQXX_UNLIKELY + report_overflow(); + return static_cast(n + d); +} + + +/// Subtract digit d to nonpositive n, or throw exception if it overflows. 
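+/// (Illustrative: when from_string_integer() parses "-128" into a signed
+/// 8-bit type, the negative branch steps through 0, -1, -12, -128, using
+/// this function for each subtraction; building +128 first and then negating
+/// would overflow, which is why negative inputs are accumulated downwards.)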
+template +[[maybe_unused]] constexpr inline T safe_sub_digit(T n, T d) +{ + T const low_threshold{static_cast(std::numeric_limits::min() + d)}; + if (n < low_threshold) + PQXX_UNLIKELY + report_overflow(); + return static_cast(n - d); +} + + +/// For use in string parsing: add new numeric digit to intermediate value. +template +[[maybe_unused]] constexpr inline L absorb_digit_positive(L value, R digit) +{ + return safe_add_digit(safe_multiply_by_ten(value), L(digit)); +} + + +/// For use in string parsing: subtract digit from intermediate value. +template +[[maybe_unused]] constexpr inline L absorb_digit_negative(L value, R digit) +{ + return safe_sub_digit(safe_multiply_by_ten(value), L(digit)); +} + + +template +[[maybe_unused]] constexpr T from_string_integer(std::string_view text) +{ + if (std::size(text) == 0) + throw pqxx::conversion_error{ + "Attempt to convert empty string to " + pqxx::type_name + "."}; + + char const *const data{std::data(text)}; + std::size_t i{0}; + + // Skip whitespace. This is not the proper way to do it, but I see no way + // that any of the supported encodings could ever produce a valid character + // whose byte sequence would confuse this code. + // + // Why skip whitespace? Because that's how integral conversions are meant to + // work _for composite types._ I see no clean way to support leading + // whitespace there without putting the code in here. A shame about the + // overhead, modest as it is, for the normal case. + for (; i < std::size(text) and (data[i] == ' ' or data[i] == '\t'); ++i) + ; + if (i == std::size(text)) + throw pqxx::conversion_error{ + "Converting string to " + pqxx::type_name + + ", but it contains only whitespace."}; + + char const initial{data[i]}; + T result{0}; + + if (pqxx::internal::is_digit(initial)) + { + for (; pqxx::internal::is_digit(data[i]); ++i) + result = absorb_digit_positive( + result, pqxx::internal::digit_to_number(data[i])); + } + else if (initial == '-') + { + if constexpr (not std::is_signed_v) + throw pqxx::conversion_error{ + "Attempt to convert negative value to " + pqxx::type_name + "."}; + + ++i; + if (i >= std::size(text)) + throw pqxx::conversion_error{ + "Converting string to " + pqxx::type_name + + ", but it contains only a sign."}; + for (; i < std::size(text) and pqxx::internal::is_digit(data[i]); ++i) + result = absorb_digit_negative( + result, pqxx::internal::digit_to_number(data[i])); + } + else + { + throw pqxx::conversion_error{ + "Could not convert string to " + pqxx::type_name + + ": " + "'" + + std::string{text} + "'."}; + } + + if (i < std::size(text)) + throw pqxx::conversion_error{ + "Unexpected text after " + pqxx::type_name + + ": " + "'" + + std::string{text} + "'."}; + + return result; +} +#endif // !PQXX_HAVE_CHARCONV_INT +} // namespace + + +namespace +{ +[[maybe_unused]] constexpr bool +valid_infinity_string(std::string_view text) noexcept +{ + return equal("infinity", text) or equal("Infinity", text) or + equal("INFINITY", text) or equal("inf", text); +} +} // namespace + + +#if !defined(PQXX_HAVE_CHARCONV_FLOAT) +namespace +{ +/// Wrapper for std::stringstream with C locale. +/** We use this to work around missing std::to_chars for floating-point types. + * + * Initialising the stream (including locale and tweaked precision) seems to + * be expensive. So, create thread-local instances which we re-use. It's a + * lockless way of keeping global variables thread-safe, basically. + * + * The stream initialisation happens once per thread, in the constructor. 
+ * And that's why we need to wrap this in a class. We can't just do it at the + * call site, or we'd still be doing it for every call. + */ +template class dumb_stringstream : public std::stringstream +{ +public: + // Do not initialise the base-class object using "stringstream{}" (with curly + // braces): that breaks on Visual C++. The classic "stringstream()" syntax + // (with parentheses) does work. + PQXX_COLD dumb_stringstream() + { + this->imbue(std::locale::classic()); + this->precision(std::numeric_limits::max_digits10); + } +}; + + +template +inline bool PQXX_COLD from_dumb_stringstream( + dumb_stringstream &s, F &result, std::string_view text) +{ + s.str(std::string{text}); + return static_cast(s >> result); +} + + +// These are hard, and some popular compilers still lack std::from_chars. +template +inline T PQXX_COLD from_string_awful_float(std::string_view text) +{ + if (std::empty(text)) + throw pqxx::conversion_error{ + "Trying to convert empty string to " + pqxx::type_name + "."}; + + bool ok{false}; + T result; + + switch (text[0]) + { + case 'N': + case 'n': + // Accept "NaN," "nan," etc. + ok = + (std::size(text) == 3 and (text[1] == 'A' or text[1] == 'a') and + (text[2] == 'N' or text[2] == 'n')); + result = std::numeric_limits::quiet_NaN(); + break; + + case 'I': + case 'i': + ok = valid_infinity_string(text); + result = std::numeric_limits::infinity(); + break; + + default: + if (text[0] == '-' and valid_infinity_string(text.substr(1))) + { + ok = true; + result = -std::numeric_limits::infinity(); + } + else + { + PQXX_LIKELY + if constexpr (have_thread_local) + { + thread_local dumb_stringstream S; + // Visual Studio 2017 seems to fail on repeated conversions if the + // clear() is done before the seekg(). Still don't know why! See #124 + // and #125. + S.seekg(0); + S.clear(); + ok = from_dumb_stringstream(S, result, text); + } + else + { + dumb_stringstream S; + ok = from_dumb_stringstream(S, result, text); + } + } + break; + } + + if (not ok) + throw pqxx::conversion_error{ + "Could not convert string to numeric value: '" + std::string{text} + + "'."}; + + return result; +} +} // namespace +#endif // !PQXX_HAVE_CHARCONV_FLOAT + + +namespace pqxx::internal +{ +/// Floating-point to_buf implemented in terms of to_string. +template +zview float_traits::to_buf(char *begin, char *end, T const &value) +{ +#if defined(PQXX_HAVE_CHARCONV_FLOAT) + { + // Definitely prefer to let the standard library handle this! + auto const ptr{wrap_to_chars(begin, end, value)}; + return zview{begin, std::size_t(ptr - begin - 1)}; + } +#else + { + // Implement it ourselves. Weird detail: since this workaround is based on + // std::stringstream, which produces a std::string, it's actually easier to + // build the to_buf() on top of the to_string() than the other way around. + if (std::isnan(value)) + return "nan"_zv; + if (std::isinf(value)) + return (value > 0) ? "infinity"_zv : "-infinity"_zv; + auto text{to_string_float(value)}; + auto have{end - begin}; + auto need{std::size(text) + 1}; + if (need > std::size_t(have)) + throw conversion_error{ + "Could not convert floating-point number to string: " + "buffer too small. 
" + + state_buffer_overrun(have, need)}; + text.copy(begin, need); + return zview{begin, std::size(text)}; + } +#endif +} + + +template zview float_traits::to_buf(char *, char *, float const &); +template zview float_traits::to_buf(char *, char *, double const &); +template zview +float_traits::to_buf(char *, char *, long double const &); + + +template +char *float_traits::into_buf(char *begin, char *end, T const &value) +{ +#if defined(PQXX_HAVE_CHARCONV_FLOAT) + return wrap_to_chars(begin, end, value); +#else + return generic_into_buf(begin, end, value); +#endif +} + + +template char *float_traits::into_buf(char *, char *, float const &); +template char *float_traits::into_buf(char *, char *, double const &); +template char * +float_traits::into_buf(char *, char *, long double const &); + + +#if !defined(PQXX_HAVE_CHARCONV_FLOAT) +template +inline std::string PQXX_COLD +to_dumb_stringstream(dumb_stringstream &s, F value) +{ + s.str(""); + s << value; + return s.str(); +} +#endif + + +/// Floating-point implementations for @c pqxx::to_string(). +template std::string to_string_float(T value) +{ +#if defined(PQXX_HAVE_CHARCONV_FLOAT) + { + static constexpr auto space{float_traits::size_buffer(value)}; + std::string buf; + buf.resize(space); + std::string_view const view{ + float_traits::to_buf(std::data(buf), std::data(buf) + space, value)}; + buf.resize(static_cast(std::end(view) - std::begin(view))); + return buf; + } +#else + { + // In this rare case, we can convert to std::string but not to a simple + // buffer. So, implement to_buf in terms of to_string instead of the other + // way around. + if constexpr (have_thread_local) + { + thread_local dumb_stringstream s; + return to_dumb_stringstream(s, value); + } + else + { + dumb_stringstream s; + return to_dumb_stringstream(s, value); + } + } +#endif +} +} // namespace pqxx::internal + + +namespace pqxx::internal +{ +template T integral_traits::from_string(std::string_view text) +{ +#if defined(PQXX_HAVE_CHARCONV_INT) + return from_string_arithmetic(text); +#else + return from_string_integer(text); +#endif +} + +template short integral_traits::from_string(std::string_view); +template unsigned short + integral_traits::from_string(std::string_view); +template int integral_traits::from_string(std::string_view); +template unsigned integral_traits::from_string(std::string_view); +template long integral_traits::from_string(std::string_view); +template unsigned long + integral_traits::from_string(std::string_view); +template long long integral_traits::from_string(std::string_view); +template unsigned long long + integral_traits::from_string(std::string_view); + + +template T float_traits::from_string(std::string_view text) +{ +#if defined(PQXX_HAVE_CHARCONV_FLOAT) + return from_string_arithmetic(text); +#else + return from_string_awful_float(text); +#endif +} + + +template float float_traits::from_string(std::string_view); +template double float_traits::from_string(std::string_view); +template long double float_traits::from_string(std::string_view); + + +template std::string to_string_float(float); +template std::string to_string_float(double); +template std::string to_string_float(long double); +} // namespace pqxx::internal + + +bool pqxx::string_traits::from_string(std::string_view text) +{ + std::optional result; + + switch (std::size(text)) + { + case 0: result = false; break; + + case 1: + switch (text[0]) + { + case 'f': + case 'F': + case '0': result = false; break; + + case 't': + case 'T': + case '1': result = true; break; + + default: 
break; + } + break; + + case 4: + if (equal(text, "true") or equal(text, "TRUE")) + result = true; + break; + + case 5: + if (equal(text, "false") or equal(text, "FALSE")) + result = false; + break; + + default: break; + } + + if (result) + return *result; + else + throw conversion_error{ + "Failed conversion to bool: '" + std::string{text} + "'."}; +} diff --git a/ext/libpqxx-7.7.3/src/stream_from.cxx b/ext/libpqxx-7.7.3/src/stream_from.cxx new file mode 100644 index 000000000..f710300ea --- /dev/null +++ b/ext/libpqxx-7.7.3/src/stream_from.cxx @@ -0,0 +1,327 @@ +/** Implementation of the pqxx::stream_from class. + * + * pqxx::stream_from enables optimized batch reads from a database table. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#include "pqxx-source.hxx" + +#include + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/internal/encodings.hxx" +#include "pqxx/internal/gates/connection-stream_from.hxx" +#include "pqxx/stream_from.hxx" +#include "pqxx/transaction_base.hxx" + +#include "pqxx/internal/header-post.hxx" + + +namespace +{ +pqxx::internal::glyph_scanner_func * +get_scanner(pqxx::transaction_base const &tx) +{ + auto const group{pqxx::internal::enc_group(tx.conn().encoding_id())}; + return pqxx::internal::get_glyph_scanner(group); +} + + +constexpr std::string_view class_name{"stream_from"}; +} // namespace + + +pqxx::stream_from::stream_from( + transaction_base &tx, from_query_t, std::string_view query) : + transaction_focus{tx, class_name}, m_glyph_scanner{get_scanner(tx)} +{ + tx.exec0(internal::concat("COPY ("sv, query, ") TO STDOUT"sv)); + register_me(); +} + + +pqxx::stream_from::stream_from( + transaction_base &tx, from_table_t, std::string_view table) : + transaction_focus{tx, class_name, table}, + m_glyph_scanner{get_scanner(tx)} +{ + tx.exec0(internal::concat("COPY "sv, tx.quote_name(table), " TO STDOUT"sv)); + register_me(); +} + + +pqxx::stream_from::stream_from( + transaction_base &tx, std::string_view table, std::string_view columns, + from_table_t) : + transaction_focus{tx, class_name, table}, + m_glyph_scanner{get_scanner(tx)} +{ + if (std::empty(columns)) + PQXX_UNLIKELY + tx.exec0(internal::concat("COPY "sv, table, " TO STDOUT"sv)); + else PQXX_LIKELY tx.exec0( + internal::concat("COPY "sv, table, "("sv, columns, ") TO STDOUT"sv)); + register_me(); +} + + +pqxx::stream_from::stream_from( + transaction_base &tx, std::string_view unquoted_table, + std::string_view columns, from_table_t, int) : + stream_from{ + tx, tx.conn().quote_table(unquoted_table), columns, from_table} +{} + + +pqxx::stream_from pqxx::stream_from::raw_table( + transaction_base &tx, std::string_view path, std::string_view columns) +{ + return {tx, path, columns, from_table}; +} + + +pqxx::stream_from pqxx::stream_from::table( + transaction_base &tx, table_path path, + std::initializer_list columns) +{ + auto const &conn{tx.conn()}; + return raw_table(tx, conn.quote_table(path), conn.quote_columns(columns)); +} + + +pqxx::stream_from::~stream_from() noexcept +{ + try + { + close(); + } + catch (std::exception const &e) + { + reg_pending_error(e.what()); + } +} + + +pqxx::stream_from::raw_line pqxx::stream_from::get_raw_line() +{ + if (*this) + { + internal::gate::connection_stream_from gate{m_trans.conn()}; + try + { + raw_line line{gate.read_copy_line()}; + if (line.first.get() == 
nullptr) + close(); + return line; + } + catch (std::exception const &) + { + close(); + throw; + } + } + else + { + return {}; + } +} + + +void pqxx::stream_from::close() +{ + if (not m_finished) + { + PQXX_UNLIKELY + m_finished = true; + unregister_me(); + } +} + + +void pqxx::stream_from::complete() +{ + if (m_finished) + return; + try + { + // Flush any remaining lines - libpq will automatically close the stream + // when it hits the end. + bool done{false}; + while (not done) + { + auto [line, size] = get_raw_line(); + ignore_unused(size); + done = not line.get(); + } + } + catch (broken_connection const &) + { + close(); + throw; + } + catch (std::exception const &e) + { + reg_pending_error(e.what()); + } + close(); +} + + +void pqxx::stream_from::parse_line() +{ + if (m_finished) + PQXX_UNLIKELY + return; + auto const next_seq{m_glyph_scanner}; + + m_fields.clear(); + + auto const [line, line_size] = get_raw_line(); + if (line.get() == nullptr) + { + m_finished = true; + return; + } + + if (line_size >= (std::numeric_limits::max() / 2)) + throw range_error{"Stream produced a ridiculously long line."}; + + // Make room for unescaping the line. It's a pessimistic size. + // Unusually, we're storing terminating zeroes *inside* the string. + // This is the only place where we modify m_row. MAKE SURE THE BUFFER DOES + // NOT GET RESIZED while we're working, because we're working with views into + // its buffer. + m_row.resize(line_size + 1); + + char const *line_begin{line.get()}; + char const *line_end{line_begin + line_size}; + char const *read{line_begin}; + + // Output iterator for unescaped text. + char *write{m_row.data()}; + + // The pointer cannot be null at this point. But we initialise field_begin + // with this value, and carry it around the loop, and it can later become + // null. Static analysis in clang-tidy then likes to assume a case where + // field_begin is null, and deduces from this that "write" must have been + // null -- and so it marks "*write" as a null pointer dereference. + // + // This assertion tells clang-tidy just what it needs in order to deduce + // that *write never dereferences a null pointer. + assert(write != nullptr); + + // Beginning of current field in m_row, or nullptr for null fields. + char const *field_begin{write}; + + while (read < line_end) + { + auto const offset{static_cast(read - line_begin)}; + auto const glyph_end{line_begin + next_seq(line_begin, line_size, offset)}; + // XXX: find_char<'\t', '\\'>(). + if (glyph_end == read + 1) + { + // Single-byte character. + char c{*read++}; + switch (c) + { + case '\t': // Field separator. + // End the field. + if (field_begin == nullptr) + { + m_fields.emplace_back(); + } + else + { + // Would love to emplace_back() here, but gcc 9.1 warns about the + // constructor not throwing. It suggests adding "noexcept." Which + // we can hardly do, without std::string_view guaranteeing it. + m_fields.push_back(zview{field_begin, write - field_begin}); + *write++ = '\0'; + } + field_begin = write; + break; + + PQXX_UNLIKELY + case '\\': { + // Escape sequence. + if (read >= line_end) + throw failure{"Row ends in backslash"}; + + c = *read++; + switch (c) + { + case 'N': + // Null value. + if (write != field_begin) + throw failure{"Null sequence found in nonempty field"}; + field_begin = nullptr; + // (If there's any characters _after_ the null we'll just crash.) + break; + + case 'b': // Backspace. 
+ PQXX_UNLIKELY + *write++ = '\b'; + break; + case 'f': // Form feed + PQXX_UNLIKELY + *write++ = '\f'; + break; + case 'n': // Line feed. + *write++ = '\n'; + break; + case 'r': // Carriage return. + *write++ = '\r'; + break; + case 't': // Horizontal tab. + *write++ = '\t'; + break; + case 'v': // Vertical tab. + *write++ = '\v'; + break; + + default: + PQXX_LIKELY + // Regular character ("self-escaped"). + *write++ = c; + break; + } + } + break; + + PQXX_LIKELY + default: *write++ = c; break; + } + } + else + { + // Multi-byte sequence. Never treated specially, so just append. + while (read < glyph_end) *write++ = *read++; + } + } + + // End the last field here. + if (field_begin == nullptr) + { + m_fields.emplace_back(); + } + else + { + m_fields.push_back(zview{field_begin, write - field_begin}); + *write++ = '\0'; + } + + // DO NOT shrink m_row to fit. We're carrying string_views pointing into + // the buffer. (Also, how useful would shrinking really be?) +} + + +std::vector const *pqxx::stream_from::read_row() & +{ + parse_line(); + return m_finished ? nullptr : &m_fields; +} diff --git a/ext/libpqxx-7.7.3/src/stream_to.cxx b/ext/libpqxx-7.7.3/src/stream_to.cxx new file mode 100644 index 000000000..1693ba377 --- /dev/null +++ b/ext/libpqxx-7.7.3/src/stream_to.cxx @@ -0,0 +1,170 @@ +/** Implementation of the pqxx::stream_to class. + * + * pqxx::stream_to enables optimized batch updates to a database table. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#include "pqxx-source.hxx" + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/internal/concat.hxx" +#include "pqxx/internal/gates/connection-stream_to.hxx" +#include "pqxx/stream_from.hxx" +#include "pqxx/stream_to.hxx" + +#include "pqxx/internal/header-post.hxx" + + +namespace +{ +using namespace std::literals; + +void begin_copy( + pqxx::transaction_base &tx, std::string_view table, std::string_view columns) +{ + tx.exec0( + std::empty(columns) ? + pqxx::internal::concat("COPY "sv, table, " FROM STDIN"sv) : + pqxx::internal::concat( + "COPY "sv, table, "("sv, columns, ") FROM STDIN"sv)); +} +} // namespace + + +pqxx::stream_to::~stream_to() noexcept +{ + try + { + complete(); + } + catch (std::exception const &e) + { + reg_pending_error(e.what()); + } +} + + +void pqxx::stream_to::write_raw_line(std::string_view text) +{ + internal::gate::connection_stream_to{m_trans.conn()}.write_copy_line(text); +} + + +void pqxx::stream_to::write_buffer() +{ + if (not std::empty(m_buffer)) + { + // In append_to_buffer() we write a tab after each field. We only want a + // tab _between_ fields. Remove that last one. 
+ assert(m_buffer[std::size(m_buffer) - 1] == '\t'); + m_buffer.resize(std::size(m_buffer) - 1); + } + write_raw_line(m_buffer); + m_buffer.clear(); +} + + +pqxx::stream_to &pqxx::stream_to::operator<<(stream_from &tr) +{ + while (tr) + { + const auto [line, size] = tr.get_raw_line(); + if (line.get() == nullptr) + break; + write_raw_line(std::string_view{line.get(), size}); + } + return *this; +} + + +pqxx::stream_to::stream_to( + transaction_base &tx, std::string_view path, std::string_view columns) : + transaction_focus{tx, s_classname, path}, + m_scanner{get_glyph_scanner( + pqxx::internal::enc_group(tx.conn().encoding_id()))} +{ + begin_copy(tx, path, columns); + register_me(); +} + + +void pqxx::stream_to::complete() +{ + if (!m_finished) + { + m_finished = true; + unregister_me(); + internal::gate::connection_stream_to{m_trans.conn()}.end_copy_write(); + } +} + + +/// Return escape letter for c's backslash sequence, or 0 if not needed. +/** The API is a bit weird: you pass the width of the character, and its first + * byte. That's because we never need to escape a multibyte character anyway. + */ +constexpr char escape(std::size_t width, char c) +{ + if (width == 1u) + switch (c) + { + case '\b': return 'b'; + case '\f': return 'f'; + case '\n': return 'n'; + case '\r': return 'r'; + case '\t': return 't'; + case '\v': return 'v'; + case '\\': return '\\'; + } + + PQXX_LIKELY + return '\0'; +} + + +void pqxx::stream_to::escape_field_to_buffer(std::string_view data) +{ + if (not std::empty(data)) + { + // Mark the beginning of a stretch that we can copy into our buffer in one + // go. It feels like a waste to invoke generic multi-byte copies for every + // individual character in this loop, most of them actually probably only + // one byte long. + std::size_t begin_stretch{0}; + + std::size_t begin_char{0}, end; + // XXX: find_char<'\b', '\f', '\n', '\r', '\t', \v', '\\'>(). + for (end = m_scanner(std::data(data), std::size(data), begin_char); + begin_char < std::size(data); begin_char = end, + end = m_scanner(std::data(data), std::size(data), begin_char)) + { + // Escape sequence letter, if needed. + char const esc{escape(end - begin_char, data[begin_char])}; + if (esc != '\0') + { + // This character needs escaping. So, it ends any trivially copyable + // stretch that we may have been having. + + // Copy the stretch we've built up into our buffer. + m_buffer.append( + std::data(data) + begin_stretch, begin_char - begin_stretch); + + // Escape the current character. + m_buffer.push_back('\\'); + m_buffer.push_back(esc); + + // Start a new stretch, right after the current character. + begin_stretch = end; + } + } + // Copy the final stretch. + m_buffer.append( + std::data(data) + begin_stretch, begin_char - begin_stretch); + } + m_buffer.push_back('\t'); +} diff --git a/ext/libpqxx-7.7.3/src/subtransaction.cxx b/ext/libpqxx-7.7.3/src/subtransaction.cxx new file mode 100644 index 000000000..186ad13e8 --- /dev/null +++ b/ext/libpqxx-7.7.3/src/subtransaction.cxx @@ -0,0 +1,68 @@ +/** Implementation of the pqxx::subtransaction class. + * + * pqxx::transaction is a nested transaction, i.e. one within a transaction + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. 
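[Editor's note, not part of the patch: the subtransaction implementation that follows maps commit and abort onto SAVEPOINT / RELEASE SAVEPOINT / ROLLBACK TO SAVEPOINT. A hedged usage sketch; the function, savepoint name, and table are hypothetical, and the outer transaction is assumed to be a pqxx::work already opened by the caller:

#include <pqxx/pqxx>

// If the INSERT fails, only the subtransaction rolls back (to its savepoint);
// the outer transaction stays usable and the caller can still commit it.
void insert_with_fallback(pqxx::work &tx)
{
  try
  {
    pqxx::subtransaction sub{tx, "attempt"};
    sub.exec0("INSERT INTO events(name) VALUES ('demo')");
    sub.commit();   // RELEASE SAVEPOINT
  }
  catch (pqxx::sql_error const &)
  {
    // Destructor of sub already rolled back to the savepoint.
  }
}]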
+ */ +#include "pqxx-source.hxx" + +#include +#include + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/connection.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/subtransaction.hxx" + +#include "pqxx/internal/header-post.hxx" + + +namespace +{ +using namespace std::literals; +constexpr std::string_view class_name{"subtransaction"sv}; +} // namespace + + +pqxx::subtransaction::subtransaction( + dbtransaction &t, std::string_view tname) : + transaction_focus{t, class_name, t.conn().adorn_name(tname)}, + // We can't initialise the rollback command here, because we don't yet + // have a full object to implement quoted_name(). + dbtransaction{t.conn(), tname, std::shared_ptr{}} +{ + set_rollback_cmd(std::make_shared( + internal::concat("ROLLBACK TO SAVEPOINT ", quoted_name()))); + direct_exec(std::make_shared( + internal::concat("SAVEPOINT ", quoted_name()))); +} + + +namespace +{ +using dbtransaction_ref = pqxx::dbtransaction &; +} + + +pqxx::subtransaction::subtransaction( + subtransaction &t, std::string_view tname) : + subtransaction(dbtransaction_ref(t), tname) +{} + + +pqxx::subtransaction::~subtransaction() noexcept +{ + close(); +} + + +void pqxx::subtransaction::do_commit() +{ + direct_exec(std::make_shared( + internal::concat("RELEASE SAVEPOINT ", quoted_name()))); +} diff --git a/ext/libpqxx-7.7.3/src/time.cxx b/ext/libpqxx-7.7.3/src/time.cxx new file mode 100644 index 000000000..6b3ffdb1c --- /dev/null +++ b/ext/libpqxx-7.7.3/src/time.cxx @@ -0,0 +1,226 @@ +/** Implementation of date/time support. + */ +#include "pqxx-source.hxx" + +#include + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/time.hxx" + +#include "pqxx/internal/header-post.hxx" + +// std::chrono::year_month_day is C++20, so let's worry a bit less about C++17 +// compatibility in this file. +#if defined(PQXX_HAVE_YEAR_MONTH_DAY) +namespace +{ +using namespace std::literals; + + +/// Render the numeric part of a year value into a buffer. +/** Converts the year from "common era" (with a Year Zero) to "anno domini" + * (without a Year Zero). + * + * Doesn't render the sign. When you're rendering a date, you indicate a + * negative year by suffixing "BC" at the very end. + * + * Where @c string_traits::into_buf() returns a pointer to the position right + * after the terminating zero, this function returns a pointer to the character + * right after the last digit. (It may or may not write a terminating zero at + * that position itself.) + */ +inline char * +year_into_buf(char *begin, char *end, std::chrono::year const &value) +{ + int const y{value}; + if (y == int{(std::chrono::year::min)()}) + { + // This is an evil special case: C++ year -32767 translates to 32768 BC, + // which is a number we can't fit into a short. At the moment postgres + // doesn't handle years before 4713 BC, but who knows, right? + static_assert(int{(std::chrono::year::min)()} == -32767); + constexpr auto hardcoded{"32768"sv}; + PQXX_UNLIKELY + begin += hardcoded.copy(begin, std::size(hardcoded)); + } + else + { + // C++ std::chrono::year has a year zero. PostgreSQL does not. So, C++ + // year zero is 1 BC in the postgres calendar; C++ 1 BC is postgres 2 BC, + // and so on. + auto const absy{static_cast(std::abs(y) + int{y <= 0})}; + + // PostgreSQL requires year input to be at least 3 digits long, or it + // won't be able to deduce the date format correctly. However on output + // it always writes years as at least 4 digits, and we'll do the same. + // Dates and times are a dirty, dirty business. 
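[Editor's note, not part of the patch: the calendar shift applied here is easy to get wrong, since std::chrono::year has a year zero and PostgreSQL does not (C++ year 0 is 1 BC, C++ -1 is 2 BC, and so on). A minimal sketch of that shift with an illustrative function name; output formatting is approximate:

#include <chrono>
#include <string>

// Illustrative only: spell a std::chrono::year roughly the way PostgreSQL
// does, shifting across the missing year zero and marking BC years.
std::string postgres_year(std::chrono::year y)
{
  int const n{int{y}};
  bool const bc{n <= 0};
  int const shown{bc ? (1 - n) : n};   // 0 -> 1 BC, -1 -> 2 BC, ...
  std::string out{std::to_string(shown)};
  while (out.size() < 4) out.insert(0, 1, '0');   // Pad to four digits.
  if (bc) out += " BC";
  return out;
}]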
+ if (absy < 1000) + { + PQXX_UNLIKELY + *begin++ = '0'; + if (absy < 100) + *begin++ = '0'; + if (absy < 10) + *begin++ = '0'; + } + begin = pqxx::string_traits::into_buf(begin, end, absy) - 1; + } + return begin; +} + + +/// Parse the numeric part of a year value. +inline int year_from_buf(std::string_view text) +{ + if (std::size(text) < 4) + throw pqxx::conversion_error{ + pqxx::internal::concat("Year field is too small: '", text, "'.")}; + // Parse as int, so we can accommodate 32768 BC which won't fit in a short + // as-is, but equates to 32767 BCE which will. + int const year{pqxx::string_traits::from_string(text)}; + if (year <= 0) + throw pqxx::conversion_error{ + pqxx::internal::concat("Bad year: '", text, "'.")}; + return year; +} + + +/// Render a valid 1-based month number into a buffer. +/* Where @c string_traits::into_buf() returns a pointer to the position right + * after the terminating zero, this function returns a pointer to the character + * right after the last digit. (It may or may not write a terminating zero at + * that position itself.) + */ +inline static char * +month_into_buf(char *begin, std::chrono::month const &value) +{ + unsigned const m{value}; + if (m >= 10) + *begin = '1'; + else + *begin = '0'; + ++begin; + *begin++ = pqxx::internal::number_to_digit(static_cast(m % 10)); + return begin; +} + + +/// Parse a 1-based month value. +inline std::chrono::month month_from_string(std::string_view text) +{ + if ( + not pqxx::internal::is_digit(text[0]) or + not pqxx::internal::is_digit(text[1])) + throw pqxx::conversion_error{ + pqxx::internal::concat("Invalid month: '", text, "'.")}; + return std::chrono::month{unsigned( + (10 * pqxx::internal::digit_to_number(text[0])) + + pqxx::internal::digit_to_number(text[1]))}; +} + + +/// Render a valid 1-based day-of-month value into a buffer. +inline char *day_into_buf(char *begin, std::chrono::day const &value) +{ + unsigned d{value}; + *begin++ = pqxx::internal::number_to_digit(static_cast(d / 10)); + *begin++ = pqxx::internal::number_to_digit(static_cast(d % 10)); + return begin; +} + + +/// Parse a 1-based day-of-month value. +inline std::chrono::day day_from_string(std::string_view text) +{ + if ( + not pqxx::internal::is_digit(text[0]) or + not pqxx::internal::is_digit(text[1])) + throw pqxx::conversion_error{ + pqxx::internal::concat("Bad day in date: '", text, "'.")}; + std::chrono::day const d{unsigned( + (10 * pqxx::internal::digit_to_number(text[0])) + + pqxx::internal::digit_to_number(text[1]))}; + if (not d.ok()) + throw pqxx::conversion_error{ + pqxx::internal::concat("Bad day in date: '", text, "'.")}; + return d; +} + + +/// Look for the dash separating year and month. +/** Assumes that @c text is nonempty. + */ +inline std::size_t find_year_month_separator(std::string_view text) noexcept +{ + // We're looking for a dash. PostgreSQL won't output a negative year, so + // no worries about a leading dash. We could start searching at offset 4, + // but starting at the beginning produces more helpful error messages for + // malformed years. + std::size_t here; + for (here = 0; here < std::size(text) and text[here] != '-'; ++here) + ; + return here; +} + + +/// Componse generic "invalid date" message for given (invalid) date text. 
+std::string make_parse_error(std::string_view text) +{ + return pqxx::internal::concat("Invalid date: '", text, "'."); +} +} // namespace + + +namespace pqxx +{ +char *string_traits::into_buf( + char *begin, char *end, std::chrono::year_month_day const &value) +{ + if (std::size_t(end - begin) < size_buffer(value)) + throw conversion_overrun{"Not enough room in buffer for date."}; + begin = year_into_buf(begin, end, value.year()); + *begin++ = '-'; + begin = month_into_buf(begin, value.month()); + *begin++ = '-'; + begin = day_into_buf(begin, value.day()); + if (int{value.year()} <= 0) + { + PQXX_UNLIKELY + begin += s_bc.copy(begin, std::size(s_bc)); + } + *begin++ = '\0'; + return begin; +} + + +std::chrono::year_month_day +string_traits::from_string(std::string_view text) +{ + // We can't just re-use the std::chrono::year conversions, because the "BC" + // suffix comes at the very end. + if (std::size(text) < 9) + throw conversion_error{make_parse_error(text)}; + bool const is_bc{text.ends_with(s_bc)}; + if (is_bc) + PQXX_UNLIKELY + text = text.substr(0, std::size(text) - std::size(s_bc)); + auto const ymsep{find_year_month_separator(text)}; + if ((std::size(text) - ymsep) != 6) + throw conversion_error{make_parse_error(text)}; + auto const base_year{ + year_from_buf(std::string_view{std::data(text), ymsep})}; + if (base_year == 0) + throw conversion_error{"Year zero conversion."}; + std::chrono::year const y{is_bc ? (-base_year + 1) : base_year}; + auto const m{month_from_string(text.substr(ymsep + 1, 2))}; + if (text[ymsep + 3] != '-') + throw conversion_error{make_parse_error(text)}; + auto const d{day_from_string(text.substr(ymsep + 4, 2))}; + std::chrono::year_month_day const date{y, m, d}; + if (not date.ok()) + throw conversion_error{make_parse_error(text)}; + return date; +} +} // namespace pqxx +#endif diff --git a/ext/libpqxx-7.7.3/src/transaction.cxx b/ext/libpqxx-7.7.3/src/transaction.cxx new file mode 100644 index 000000000..61ed29ef7 --- /dev/null +++ b/ext/libpqxx-7.7.3/src/transaction.cxx @@ -0,0 +1,107 @@ +/** Implementation of the pqxx::transaction class. + * + * pqxx::transaction represents a regular database transaction. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#include "pqxx-source.hxx" + +#include + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/connection.hxx" +#include "pqxx/result.hxx" +#include "pqxx/transaction.hxx" + +#include "pqxx/internal/header-post.hxx" + + +pqxx::internal::basic_transaction::basic_transaction( + connection &c, zview begin_command, std::string_view tname) : + dbtransaction(c, tname) +{ + register_transaction(); + direct_exec(begin_command); +} + + +pqxx::internal::basic_transaction::basic_transaction( + connection &c, zview begin_command, std::string &&tname) : + dbtransaction(c, std::move(tname)) +{ + register_transaction(); + direct_exec(begin_command); +} + + +pqxx::internal::basic_transaction::basic_transaction( + connection &c, zview begin_command) : + dbtransaction(c) +{ + register_transaction(); + direct_exec(begin_command); +} + + +// This should stop the compiler from generating the same vtables and +// destructor in multiple translation units. More importantly, if we don't do +// this, the sanitisers in g++ 7 and clang++ 6 complain about pointers to +// dbtransaction actually pointing to basic_transaction. 
Which is odd, in that +// any basic_transaction pointer should also be a dbtransaction pointer. But, +// apparently the vtable isn't the right one. +pqxx::internal::basic_transaction::~basic_transaction() noexcept = default; + + +void pqxx::internal::basic_transaction::do_commit() +{ + static auto const commit_q{std::make_shared("COMMIT"sv)}; + try + { + direct_exec(commit_q); + } + catch (statement_completion_unknown const &e) + { + // Outcome of "commit" is unknown. This is a disaster: we don't know the + // resulting state of the database. + process_notice(internal::concat(e.what(), "\n")); + + std::string msg{internal::concat( + "WARNING: Commit of transaction '", name(), + "' is unknown. " + "There is no way to tell whether the transaction succeeded " + "or was aborted except to check manually.\n")}; + process_notice(msg); + // Strip newline. It was only needed for process_notice(). + msg.pop_back(); + throw in_doubt_error{std::move(msg)}; + } + catch (std::exception const &e) + { + if (not conn().is_open()) + { + // We've lost the connection while committing. There is just no way of + // telling what happened on the other end. >8-O + process_notice(internal::concat(e.what(), "\n")); + + auto msg{internal::concat( + "WARNING: Connection lost while committing transaction '", name(), + "'. There is no way to tell whether the transaction succeeded " + "or was aborted except to check manually.\n")}; + process_notice(msg); + // Strip newline. It was only needed for process_notice(). + msg.pop_back(); + throw in_doubt_error{std::move(msg)}; + } + else + { + // Commit failed--probably due to a constraint violation or something + // similar. + throw; + } + } +} diff --git a/ext/libpqxx-7.7.3/src/transaction_base.cxx b/ext/libpqxx-7.7.3/src/transaction_base.cxx new file mode 100644 index 000000000..c7d01ac08 --- /dev/null +++ b/ext/libpqxx-7.7.3/src/transaction_base.cxx @@ -0,0 +1,539 @@ +/** Common code and definitions for the transaction classes. + * + * pqxx::transaction_base defines the interface for any abstract class that + * represents a database transaction. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#include "pqxx-source.hxx" + +#include +#include + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/connection.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/internal/encodings.hxx" +#include "pqxx/internal/gates/connection-transaction.hxx" +#include "pqxx/internal/gates/transaction-transaction_focus.hxx" +#include "pqxx/result.hxx" +#include "pqxx/transaction_base.hxx" +#include "pqxx/transaction_focus.hxx" + +#include "pqxx/internal/header-post.hxx" + + +using namespace std::literals; + +namespace +{ +/// Return a query pointer for the command "ROLLBACK". +/** Concentrates constructions so as to minimise the number of allocations. + * This way, the string gets allocated once and then all subsequent invocations + * copy shared_ptr instances to the same string. 
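[Editor's note, not part of the patch: the make_rollback_cmd() helper defined just below relies on a function-local static, which C++11 initialises exactly once and thread-safely, so every transaction shares one allocation of the "ROLLBACK" string. A minimal sketch of the same trick with an illustrative name:

#include <memory>
#include <string>

// All callers receive shared_ptr copies pointing at one shared string;
// the allocation happens only on the first call.
std::shared_ptr<std::string> rollback_command()
{
  static auto const cmd{std::make_shared<std::string>("ROLLBACK")};
  return cmd;
}]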
+ */ +std::shared_ptr make_rollback_cmd() +{ + static auto const cmd{std::make_shared("ROLLBACK")}; + return cmd; +} +} // namespace + +pqxx::transaction_base::transaction_base(connection &c) : + m_conn{c}, m_rollback_cmd{make_rollback_cmd()} +{} + + +pqxx::transaction_base::transaction_base( + connection &c, std::string_view tname) : + m_conn{c}, m_name{tname}, m_rollback_cmd{make_rollback_cmd()} +{} + + +pqxx::transaction_base::~transaction_base() +{ + try + { + if (not std::empty(m_pending_error)) + PQXX_UNLIKELY + process_notice( + internal::concat("UNPROCESSED ERROR: ", m_pending_error, "\n")); + + if (m_registered) + { + m_conn.process_notice( + internal::concat(description(), " was never closed properly!\n")); + pqxx::internal::gate::connection_transaction{conn()} + .unregister_transaction(this); + } + } + catch (std::exception const &e) + { + try + { + process_notice(internal::concat(e.what(), "\n")); + } + catch (std::exception const &) + { + process_notice(e.what()); + } + } +} + + +void pqxx::transaction_base::register_transaction() +{ + pqxx::internal::gate::connection_transaction{conn()}.register_transaction( + this); + m_registered = true; +} + + +void pqxx::transaction_base::commit() +{ + check_pending_error(); + + // Check previous status code. Caller should only call this function if + // we're in "implicit" state, but multiple commits are silently accepted. + switch (m_status) + { + case status::active: // Just fine. This is what we expect. + break; + + case status::aborted: + throw usage_error{internal::concat( + "Attempt to commit previously aborted ", description())}; + + case status::committed: + // Transaction has been committed already. This is not exactly proper + // behaviour, but throwing an exception here would only give the impression + // that an abort is needed--which would only confuse things further at this + // stage. + // Therefore, multiple commits are accepted, though under protest. + m_conn.process_notice( + internal::concat(description(), " committed more than once.\n")); + return; + + case status::in_doubt: + // Transaction may or may not have been committed. The only thing we can + // really do is keep telling the caller that the transaction is in doubt. + throw in_doubt_error{internal::concat( + description(), " committed again while in an indeterminate state.")}; + + default: throw internal_error{"pqxx::transaction: invalid status code."}; + } + + // Tricky one. If stream is nested in transaction but inside the same scope, + // the commit() will come before the stream is closed. Which means the + // commit is premature. Punish this swiftly and without fail to discourage + // the habit from forming. + if (m_focus != nullptr) + throw failure{internal::concat( + "Attempt to commit ", description(), " with ", m_focus->description(), + " still open.")}; + + // Check that we're still connected (as far as we know--this is not an + // absolute thing!) before trying to commit. If the connection was broken + // already, the commit would fail anyway but this way at least we don't + // remain in-doubt as to whether the backend got the commit order at all. 
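[Editor's note, not part of the patch: the commit() implementation below separates "commit failed" from "commit outcome unknown," surfacing the latter as in_doubt_error. A hedged sketch of how calling code might react to the two cases; the function name is hypothetical:

#include <iostream>
#include <pqxx/pqxx>

void commit_carefully(pqxx::work &tx)
{
  try
  {
    tx.commit();
  }
  catch (pqxx::in_doubt_error const &e)
  {
    // Connection dropped mid-commit: the transaction may or may not have
    // gone through. Only a manual check of the data can tell.
    std::cerr << "Commit outcome unknown: " << e.what() << '\n';
  }
  catch (pqxx::failure const &e)
  {
    // Commit definitely failed, e.g. a constraint violation.
    std::cerr << "Commit failed: " << e.what() << '\n';
  }
}]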
+ if (not m_conn.is_open()) + throw broken_connection{ + "Broken connection to backend; cannot complete transaction."}; + + try + { + do_commit(); + m_status = status::committed; + } + catch (in_doubt_error const &) + { + m_status = status::in_doubt; + throw; + } + catch (std::exception const &) + { + m_status = status::aborted; + throw; + } + + close(); +} + + +void pqxx::transaction_base::do_abort() +{ + if (m_rollback_cmd) + direct_exec(m_rollback_cmd); +} + + +void pqxx::transaction_base::abort() +{ + // Check previous status code. Quietly accept multiple aborts to + // simplify emergency bailout code. + switch (m_status) + { + case status::active: + try + { + do_abort(); + } + catch (std::exception const &e) + { + m_conn.process_notice(internal::concat(e.what(), "\n")); + } + break; + + case status::aborted: return; + + case status::committed: + throw usage_error{internal::concat( + "Attempt to abort previously committed ", description())}; + + case status::in_doubt: + // Aborting an in-doubt transaction is probably a reasonably sane response + // to an insane situation. Log it, but do not fail. + m_conn.process_notice(internal::concat( + "Warning: ", description(), + " aborted after going into indeterminate state; " + "it may have been executed anyway.\n")); + return; + + default: throw internal_error{"Invalid transaction status."}; + } + + m_status = status::aborted; + close(); +} + + +std::string PQXX_COLD pqxx::transaction_base::quote_raw(zview bin) const +{ + return conn().quote(binary_cast(bin)); +} + + +namespace +{ +/// Guard command execution against clashes with pipelines and such. +/** A transaction can have only one focus at a time. Command execution is the + * most basic example of a transaction focus. + */ +class PQXX_PRIVATE command : pqxx::transaction_focus +{ +public: + command(pqxx::transaction_base &tx, std::string_view oname) : + transaction_focus{tx, "command"sv, oname} + { + register_me(); + } + + ~command() { unregister_me(); } +}; +} // namespace + +pqxx::result +pqxx::transaction_base::exec(std::string_view query, std::string_view desc) +{ + check_pending_error(); + + command cmd{*this, desc}; + + switch (m_status) + { + case status::active: break; + + case status::committed: + case status::aborted: + case status::in_doubt: { + std::string const n{ + std::empty(desc) ? "" : internal::concat("'", desc, "' ")}; + + throw usage_error{internal::concat( + "Could not execute command ", n, ": transaction is already closed.")}; + } + + default: throw internal_error{"pqxx::transaction: invalid status code."}; + } + + return direct_exec(query, desc); +} + + +pqxx::result pqxx::transaction_base::exec_n( + result::size_type rows, zview query, std::string_view desc) +{ +#include "pqxx/internal/ignore-deprecated-pre.hxx" + result const r{exec(query, desc)}; +#include "pqxx/internal/ignore-deprecated-post.hxx" + if (std::size(r) != rows) + { + std::string const N{ + std::empty(desc) ? 
"" : internal::concat("'", desc, "'")}; + throw unexpected_rows{internal::concat( + "Expected ", rows, " row(s) of data from query ", N, ", got ", + std::size(r), ".")}; + } + return r; +} + + +void pqxx::transaction_base::check_rowcount_prepared( + zview statement, result::size_type expected_rows, + result::size_type actual_rows) +{ + if (actual_rows != expected_rows) + throw unexpected_rows{internal::concat( + "Expected ", expected_rows, " row(s) of data from prepared statement '", + statement, "', got ", actual_rows, ".")}; +} + + +void pqxx::transaction_base::check_rowcount_params( + std::size_t expected_rows, std::size_t actual_rows) +{ + if (actual_rows != expected_rows) + throw unexpected_rows{internal::concat( + "Expected ", expected_rows, + " row(s) of data from parameterised query, got ", actual_rows, ".")}; +} + + +pqxx::result pqxx::transaction_base::internal_exec_prepared( + zview statement, internal::c_params const &args) +{ + command cmd{*this, statement}; + return pqxx::internal::gate::connection_transaction{conn()}.exec_prepared( + statement, args); +} + + +pqxx::result pqxx::transaction_base::internal_exec_params( + zview query, internal::c_params const &args) +{ + command cmd{*this, query}; + return pqxx::internal::gate::connection_transaction{conn()}.exec_params( + query, args); +} + + +void pqxx::transaction_base::set_variable( + std::string_view var, std::string_view value) +{ +#include "pqxx/internal/ignore-deprecated-pre.hxx" + conn().set_variable(var, value); +#include "pqxx/internal/ignore-deprecated-post.hxx" +} + + +std::string pqxx::transaction_base::get_variable(std::string_view var) +{ +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return conn().get_variable(var); +#include "pqxx/internal/ignore-deprecated-post.hxx" +} + + +void pqxx::transaction_base::close() noexcept +{ + try + { + try + { + check_pending_error(); + } + catch (std::exception const &e) + { + m_conn.process_notice(e.what()); + } + + if (m_registered) + { + m_registered = false; + pqxx::internal::gate::connection_transaction{conn()} + .unregister_transaction(this); + } + + if (m_status != status::active) + return; + + if (m_focus != nullptr) + PQXX_UNLIKELY + m_conn.process_notice(internal::concat( + "Closing ", description(), " with ", m_focus->description(), + " still open.\n")); + + try + { + abort(); + } + catch (std::exception const &e) + { + m_conn.process_notice(e.what()); + } + } + catch (std::exception const &e) + { + try + { + m_conn.process_notice(e.what()); + } + catch (std::exception const &) + {} + } +} + + +namespace +{ +[[nodiscard]] std::string_view +get_classname(pqxx::transaction_focus const *focus) +{ + return (focus == nullptr) ? ""sv : focus->classname(); +} + + +[[nodiscard]] std::string_view +get_obj_name(pqxx::transaction_focus const *focus) +{ + return (focus == nullptr) ? 
""sv : focus->name(); +} +} // namespace + + +void pqxx::transaction_base::register_focus(transaction_focus *new_focus) +{ + internal::check_unique_register( + m_focus, get_classname(m_focus), get_obj_name(m_focus), new_focus, + get_classname(new_focus), get_obj_name(new_focus)); + m_focus = new_focus; +} + + +void pqxx::transaction_base::unregister_focus( + transaction_focus *new_focus) noexcept +{ + try + { + pqxx::internal::check_unique_unregister( + m_focus, get_classname(m_focus), get_obj_name(m_focus), new_focus, + get_classname(new_focus), get_obj_name(new_focus)); + m_focus = nullptr; + } + catch (std::exception const &e) + { + m_conn.process_notice(internal::concat(e.what(), "\n")); + } +} + + +pqxx::result pqxx::transaction_base::direct_exec( + std::string_view cmd, std::string_view desc) +{ + check_pending_error(); + return pqxx::internal::gate::connection_transaction{conn()}.exec(cmd, desc); +} + + +pqxx::result pqxx::transaction_base::direct_exec( + std::shared_ptr cmd, std::string_view desc) +{ + check_pending_error(); + return pqxx::internal::gate::connection_transaction{conn()}.exec(cmd, desc); +} + + +void pqxx::transaction_base::register_pending_error(zview err) noexcept +{ + if (std::empty(m_pending_error) and not std::empty(err)) + { + try + { + m_pending_error = err; + } + catch (std::exception const &e) + { + try + { + PQXX_UNLIKELY + process_notice("UNABLE TO PROCESS ERROR\n"); + process_notice(e.what()); + process_notice("ERROR WAS:"); + process_notice(err); + } + catch (...) + {} + } + } +} + + +void pqxx::transaction_base::register_pending_error(std::string &&err) noexcept +{ + if (std::empty(m_pending_error) and not std::empty(err)) + { + try + { + m_pending_error = std::move(err); + } + catch (std::exception const &e) + { + try + { + PQXX_UNLIKELY + process_notice("UNABLE TO PROCESS ERROR\n"); + process_notice(e.what()); + process_notice("ERROR WAS:"); + process_notice(err); + } + catch (...) + {} + } + } +} + + +void pqxx::transaction_base::check_pending_error() +{ + if (not std::empty(m_pending_error)) + { + std::string err; + err.swap(m_pending_error); + throw failure{err}; + } +} + + +std::string pqxx::transaction_base::description() const +{ + return internal::describe_object("transaction", name()); +} + + +void pqxx::transaction_focus::register_me() +{ + pqxx::internal::gate::transaction_transaction_focus{m_trans}.register_focus( + this); + m_registered = true; +} + + +void pqxx::transaction_focus::unregister_me() noexcept +{ + pqxx::internal::gate::transaction_transaction_focus{m_trans} + .unregister_focus(this); + m_registered = false; +} + + +void pqxx::transaction_focus::reg_pending_error( + std::string const &err) noexcept +{ + pqxx::internal::gate::transaction_transaction_focus{m_trans} + .register_pending_error(err); +} diff --git a/ext/libpqxx-7.7.3/src/util.cxx b/ext/libpqxx-7.7.3/src/util.cxx new file mode 100644 index 000000000..fe859bd2a --- /dev/null +++ b/ext/libpqxx-7.7.3/src/util.cxx @@ -0,0 +1,194 @@ +/** Various utility functions. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. 
+ */ +#include "pqxx-source.hxx" + +#include +#include +#include +#include +#include + +extern "C" +{ +#include +} + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/except.hxx" +#include "pqxx/internal/concat.hxx" +#include "pqxx/util.hxx" + +#include "pqxx/internal/header-post.hxx" + + +using namespace std::literals; + +pqxx::thread_safety_model PQXX_COLD pqxx::describe_thread_safety() +{ + thread_safety_model model; + model.safe_libpq = (PQisthreadsafe() != 0); + // Sadly I'm not aware of any way to avoid this just yet. + model.safe_kerberos = false; + + model.description = internal::concat( + (model.safe_libpq ? ""sv : + "Using a libpq build that is not thread-safe.\n"sv), + (model.safe_kerberos ? + ""sv : + "Kerberos is not thread-safe. If your application uses Kerberos, " + "protect all calls to Kerberos or libpqxx using a global lock.\n"sv)); + return model; +} + + +std::string pqxx::internal::describe_object( + std::string_view class_name, std::string_view obj_name) +{ + if (std::empty(obj_name)) + return std::string{class_name}; + else + return pqxx::internal::concat(class_name, " '", obj_name, "'"); +} + + +void pqxx::internal::check_unique_register( + void const *old_guest, std::string_view old_class, std::string_view old_name, + void const *new_guest, std::string_view new_class, std::string_view new_name) +{ + if (new_guest == nullptr) + throw internal_error{"Null pointer registered."}; + + if (old_guest != nullptr) + throw usage_error{ + (old_guest == new_guest) ? + concat("Started twice: ", describe_object(old_class, old_name), ".") : + concat( + "Started new ", describe_object(new_class, new_name), " while ", + describe_object(new_class, new_name), " was still active.")}; +} + + +void pqxx::internal::check_unique_unregister( + void const *old_guest, std::string_view old_class, std::string_view old_name, + void const *new_guest, std::string_view new_class, std::string_view new_name) +{ + if (new_guest != old_guest) + { + PQXX_UNLIKELY + if (new_guest == nullptr) + throw usage_error{concat( + "Expected to close ", describe_object(old_class, old_name), + ", but got null pointer instead.")}; + if (old_guest == nullptr) + throw usage_error{concat( + "Closed while not open: ", describe_object(new_class, new_name))}; + else + throw usage_error{concat( + "Closed ", describe_object(new_class, new_name), + "; expected to close ", describe_object(old_class, old_name))}; + } +} + + +namespace +{ +constexpr char hex_digits[] = {'0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; + + +/// Translate a number (must be between 0 and 16 exclusive) to a hex digit. +constexpr char hex_digit(int c) noexcept +{ + return hex_digits[c]; +} + + +/// Translate a hex digit to a nibble. Return -1 if it's not a valid digit. +constexpr int nibble(int c) noexcept +{ + if (c >= '0' and c <= '9') + PQXX_LIKELY + return c - '0'; + else if (c >= 'a' and c <= 'f') return 10 + (c - 'a'); + else if (c >= 'A' and c <= 'F') return 10 + (c - 'A'); + else return -1; +} +} // namespace + + +void pqxx::internal::esc_bin( + std::basic_string_view binary_data, char buffer[]) noexcept +{ + auto here{buffer}; + *here++ = '\\'; + *here++ = 'x'; + + for (auto const byte : binary_data) + { + auto uc{static_cast(byte)}; + *here++ = hex_digit(uc >> 4); + *here++ = hex_digit(uc & 0x0f); + } + + // (No need to increment further. Facebook's "infer" complains if we do.) 
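[Editor's note, not part of the patch: esc_bin() below writes PostgreSQL's hex format for binary data, a "\x" prefix followed by two hex digits per byte, into a caller-supplied buffer. A standalone sketch of the same encoding that returns a std::string instead; the function name is illustrative:

#include <cstddef>
#include <string>
#include <string_view>

std::string hex_escape(std::basic_string_view<std::byte> data)
{
  static constexpr char digits[]{"0123456789abcdef"};
  std::string out;
  out.reserve(2 + 2 * data.size());
  out += "\\x";
  for (std::byte b : data)
  {
    auto const u{std::to_integer<unsigned>(b)};
    out += digits[u >> 4];     // High nibble.
    out += digits[u & 0x0f];   // Low nibble.
  }
  return out;
}]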
+ *here = '\0'; +} + + +std::string +pqxx::internal::esc_bin(std::basic_string_view binary_data) +{ + auto const bytes{size_esc_bin(std::size(binary_data))}; + std::string buf; + buf.resize(bytes); + esc_bin(binary_data, buf.data()); + // Strip off the trailing zero. + buf.resize(bytes - 1); + return buf; +} + + +void pqxx::internal::unesc_bin( + std::string_view escaped_data, std::byte buffer[]) +{ + auto const in_size{std::size(escaped_data)}; + if (in_size < 2) + throw pqxx::failure{"Binary data appears truncated."}; + if ((in_size % 2) != 0) + throw pqxx::failure{"Invalid escaped binary length."}; + char const *in{escaped_data.data()}; + char const *const end{in + in_size}; + if (*in++ != '\\' or *in++ != 'x') + throw pqxx::failure( + "Escaped binary data did not start with '\\x'`. Is the server or libpq " + "too old?"); + auto out{buffer}; + while (in != end) + { + int hi{nibble(*in++)}; + if (hi < 0) + throw pqxx::failure{"Invalid hex-escaped data."}; + int lo{nibble(*in++)}; + if (lo < 0) + throw pqxx::failure{"Invalid hex-escaped data."}; + *out++ = static_cast((hi << 4) | lo); + } +} + + +std::basic_string +pqxx::internal::unesc_bin(std::string_view escaped_data) +{ + auto const bytes{size_unesc_bin(std::size(escaped_data))}; + std::basic_string buf; + buf.resize(bytes); + unesc_bin(escaped_data, buf.data()); + return buf; +} diff --git a/ext/libpqxx-7.7.3/src/version.cxx b/ext/libpqxx-7.7.3/src/version.cxx new file mode 100644 index 000000000..e216ecd4f --- /dev/null +++ b/ext/libpqxx-7.7.3/src/version.cxx @@ -0,0 +1,27 @@ +/** Version check. + * + * Copyright (c) 2000-2022, Jeroen T. Vermeulen. + * + * See COPYING for copyright license. If you did not receive a file called + * COPYING with this source code, please notify the distributor of this + * mistake, or contact the author. + */ +#include "pqxx-source.hxx" + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/version.hxx" + +#include "pqxx/internal/header-post.hxx" + + +namespace pqxx::internal +{ +// One, single definition of this function. If a call fails to link, and +// (some) other calls do link, then the libpqxx binary was built against a +// different libpqxx version than the code which is being linked against it. +PQXX_LIBEXPORT int PQXX_VERSION_CHECK() noexcept +{ + return 0; +} +} // namespace pqxx::internal diff --git a/ext/libpqxx-7.7.3/src/wait.cxx b/ext/libpqxx-7.7.3/src/wait.cxx new file mode 100644 index 000000000..276d9659e --- /dev/null +++ b/ext/libpqxx-7.7.3/src/wait.cxx @@ -0,0 +1,136 @@ +/** Functions that wait. + */ +#include "pqxx-source.hxx" + +// The header is still broken on MinGW. :-( +#if defined(PQXX_HAVE_SLEEP_FOR) +# include +#endif + +// For WSAPoll(): +#if __has_include() +# include +# define PQXX_HAVE_SELECT +#endif +#if __has_include() +# include +#endif +#if __has_include() +# include +#endif + +// For poll(): +#if __has_include() +# include +#endif + +// For select() on recent POSIX systems. +#if __has_include() +# include +# define PQXX_HAVE_SELECT +#endif + +// For select() on some older POSIX systems. 
+#if __has_include() +# include +# define PQXX_HAVE_SELECT +#endif +#if __has_include() +# include +#endif +#if __has_include() +# include +#endif + + +#include "pqxx/internal/header-pre.hxx" + +#include "pqxx/internal/wait.hxx" +#include "pqxx/util.hxx" + +#include "pqxx/internal/header-post.hxx" + + +namespace +{ +template T to_milli(unsigned seconds, unsigned microseconds) +{ + return pqxx::check_cast( + (seconds * 1000) + (microseconds / 1000), + "Wait timeout value out of bounds."); +} + + +#if defined(PQXX_HAVE_SELECT) +/// Set a bit on an fd_set. +[[maybe_unused]] void set_fdbit(fd_set &bits, int fd) +{ +# ifdef _MSC_VER +// Suppress pointless, unfixable warnings in Visual Studio. +# pragma warning(push) +# pragma warning(disable : 4389) // Signed/unsigned mismatch. +# pragma warning(disable : 4127) // Conditional expression is constant. +# endif + FD_SET(fd, &bits); +# ifdef _MSV_VER +// Restore prevalent warning settings. +# pragma warning(pop) +# endif +} +#endif +} // namespace + + +void pqxx::internal::wait_fd( + int fd, bool for_read, bool for_write, unsigned seconds, + unsigned microseconds) +{ +// WSAPoll is available in winsock2.h only for versions of Windows >= 0x0600 +#if defined(_WIN32) && (_WIN32_WINNT >= 0x0600) + short const events{static_cast( + (for_read ? POLLRDNORM : 0) | (for_write ? POLLWRNORM : 0))}; + WSAPOLLFD fdarray{SOCKET(fd), events, 0}; + WSAPoll(&fdarray, 1u, to_milli(seconds, microseconds)); + // TODO: Check for errors. +#elif defined(PQXX_HAVE_POLL) + auto const events{static_cast( + POLLERR | POLLHUP | POLLNVAL | (for_read ? POLLIN : 0) | + (for_write ? POLLOUT : 0))}; + pollfd pfd{fd, events, 0}; + poll(&pfd, 1, to_milli(seconds, microseconds)); + // TODO: Check for errors. +#else + // No poll()? Our last option is select(). + fd_set read_fds; + FD_ZERO(&read_fds); + if (for_read) + set_fdbit(read_fds, fd); + + fd_set write_fds; + FD_ZERO(&write_fds); + if (for_write) + set_fdbit(write_fds, fd); + + fd_set except_fds; + FD_ZERO(&except_fds); + set_fdbit(except_fds, fd); + + timeval tv = {seconds, microseconds}; + select(fd + 1, &read_fds, &write_fds, &except_fds, &tv); + // TODO: Check for errors. +#endif +} + + +void PQXX_COLD pqxx::internal::wait_for(unsigned int microseconds) +{ +#if defined(PQXX_HAVE_SLEEP_FOR) + std::this_thread::sleep_for(std::chrono::microseconds{microseconds}); +#else + // MinGW still does not have a functioning header. Work around this + // using select(). + // Not worth optimising for though -- they'll have to fix it at some point. 
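[Editor's note, not part of the patch: wait_fd() above chooses between WSAPoll, poll(), and select() depending on the platform. A minimal POSIX sketch of the same readiness wait using poll() only, assuming <poll.h> is available; the function name is illustrative:

#include <poll.h>

// Block until fd is ready for the requested operations or the timeout
// (milliseconds) expires. Returns true when ready, false on timeout.
bool wait_ready(int fd, bool for_read, bool for_write, int timeout_ms)
{
  pollfd pfd{};
  pfd.fd = fd;
  pfd.events = static_cast<short>(
    (for_read ? POLLIN : 0) | (for_write ? POLLOUT : 0));
  int const rc{poll(&pfd, 1, timeout_ms)};
  return rc > 0;   // 0 means timeout; -1 means error (errno is set).
}]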
+ timeval tv{microseconds / 1'000'000u, microseconds % 1'000'000u}; + select(0, nullptr, nullptr, nullptr, &tv); +#endif +} diff --git a/ext/libpqxx-7.7.3/test/CMakeLists.txt b/ext/libpqxx-7.7.3/test/CMakeLists.txt new file mode 100644 index 000000000..331631145 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/CMakeLists.txt @@ -0,0 +1,24 @@ +enable_testing() + +if(NOT PostgreSQL_FOUND) + find_package(PostgreSQL REQUIRED) +endif() + +file(GLOB TEST_SOURCES test*.cxx unit/test_*.cxx runner.cxx) + +add_executable(runner ${TEST_SOURCES}) +target_link_libraries(runner PUBLIC pqxx) +target_include_directories(runner PRIVATE ${PostgreSQL_INCLUDE_DIRS}) +add_test( + NAME runner + WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} + COMMAND runner +) + +if(INSTALL_TEST) + install( + PROGRAMS runner + TYPE BIN + RENAME libpqxx-test-runner + ) +endif() diff --git a/ext/libpqxx-7.7.3/test/Makefile.am b/ext/libpqxx-7.7.3/test/Makefile.am new file mode 100644 index 000000000..e3ce4be7b --- /dev/null +++ b/ext/libpqxx-7.7.3/test/Makefile.am @@ -0,0 +1,129 @@ +# ############################################################################## +# AUTOMATICALLY GENERATED FILE -- DO NOT EDIT. +# +# This file is generated automatically by libpqxx's template2mak.py script, and +# will be rewritten from time to time. +# +# If you modify this file, chances are your modifications will be lost. +# +# The template2mak.py script should be available in the tools directory of the +# libpqxx source archive. +# +# Generated from template './test/Makefile.am.template'. +# ############################################################################## +# Makefile.am is generated automatically for automake whenever test programs are +# added to libpqxx. + +EXTRA_DIST = Makefile.am.template + +# Use the serial test runner, so tests don't get run in parallel. Otherwise, +# output from test failures will be hidden away in log files where we can't +# see them when running in a build slave. +AUTOMAKE_OPTIONS=serial-tests + + +AM_CPPFLAGS=-I$(top_builddir)/include -I$(top_srcdir)/include + +# Override automatically generated list of default includes. It contains only +# unnecessary entries, and incorrectly mentions include/pqxx directly. 
+DEFAULT_INCLUDES= + +noinst_HEADERS = test_helpers.hxx + +CLEANFILES=pqxxlo.txt +MAINTAINERCLEANFILES=Makefile.in + +#TESTS_ENVIRONMENT=PGDATABASE=libpqxx +# PGDATABASE, PGHOST, PGPORT, PGUSER + +runner_SOURCES = \ + test00.cxx \ + test01.cxx \ + test02.cxx \ + test04.cxx \ + test07.cxx \ + test10.cxx \ + test11.cxx \ + test13.cxx \ + test14.cxx \ + test16.cxx \ + test17.cxx \ + test18.cxx \ + test20.cxx \ + test21.cxx \ + test26.cxx \ + test29.cxx \ + test30.cxx \ + test32.cxx \ + test37.cxx \ + test39.cxx \ + test46.cxx \ + test56.cxx \ + test60.cxx \ + test61.cxx \ + test62.cxx \ + test69.cxx \ + test70.cxx \ + test71.cxx \ + test72.cxx \ + test74.cxx \ + test75.cxx \ + test76.cxx \ + test77.cxx \ + test78.cxx \ + test79.cxx \ + test82.cxx \ + test84.cxx \ + test87.cxx \ + test88.cxx \ + test89.cxx \ + test90.cxx \ + unit/test_array.cxx \ + unit/test_binarystring.cxx \ + unit/test_blob.cxx \ + unit/test_cancel_query.cxx \ + unit/test_column.cxx \ + unit/test_composite.cxx \ + unit/test_connection.cxx \ + unit/test_cursor.cxx \ + unit/test_encodings.cxx \ + unit/test_error_verbosity.cxx \ + unit/test_errorhandler.cxx \ + unit/test_escape.cxx \ + unit/test_exceptions.cxx \ + unit/test_field.cxx \ + unit/test_float.cxx \ + unit/test_largeobject.cxx \ + unit/test_nonblocking_connect.cxx \ + unit/test_notification.cxx \ + unit/test_pipeline.cxx \ + unit/test_prepared_statement.cxx \ + unit/test_range.cxx \ + unit/test_read_transaction.cxx \ + unit/test_result_iteration.cxx \ + unit/test_result_slicing.cxx \ + unit/test_row.cxx \ + unit/test_separated_list.cxx \ + unit/test_simultaneous_transactions.cxx \ + unit/test_sql_cursor.cxx \ + unit/test_stateless_cursor.cxx \ + unit/test_strconv.cxx \ + unit/test_stream_from.cxx \ + unit/test_stream_to.cxx \ + unit/test_string_conversion.cxx \ + unit/test_subtransaction.cxx \ + unit/test_test_helpers.cxx \ + unit/test_thread_safety_model.cxx \ + unit/test_time.cxx \ + unit/test_transaction.cxx \ + unit/test_transaction_base.cxx \ + unit/test_transaction_focus.cxx \ + unit/test_transactor.cxx \ + unit/test_type_name.cxx \ + unit/test_zview.cxx \ + runner.cxx + +runner_LDADD = $(top_builddir)/src/libpqxx.la ${POSTGRES_LIB} + +TESTS = runner +check_PROGRAMS = ${TESTS} diff --git a/ext/libpqxx-7.7.3/test/Makefile.am.template b/ext/libpqxx-7.7.3/test/Makefile.am.template new file mode 100644 index 000000000..2a21f6ef7 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/Makefile.am.template @@ -0,0 +1,38 @@ +# Makefile.am is generated automatically for automake whenever test programs are +# added to libpqxx. + +EXTRA_DIST = Makefile.am.template + +# Use the serial test runner, so tests don't get run in parallel. Otherwise, +# output from test failures will be hidden away in log files where we can't +# see them when running in a build slave. +AUTOMAKE_OPTIONS=serial-tests + + +AM_CPPFLAGS=-I$(top_builddir)/include -I$(top_srcdir)/include + +# Override automatically generated list of default includes. It contains only +# unnecessary entries, and incorrectly mentions include/pqxx directly. 
+DEFAULT_INCLUDES= + +noinst_HEADERS = test_helpers.hxx + +CLEANFILES=pqxxlo.txt +MAINTAINERCLEANFILES=Makefile.in + +#TESTS_ENVIRONMENT=PGDATABASE=libpqxx +# PGDATABASE, PGHOST, PGPORT, PGUSER + +runner_SOURCES = \ +###MAKTEMPLATE:FOREACH test/test*.cxx + ###BASENAME###.cxx \ +###MAKTEMPLATE:ENDFOREACH +###MAKTEMPLATE:FOREACH test/unit/test_*.cxx + unit/###BASENAME###.cxx \ +###MAKTEMPLATE:ENDFOREACH + runner.cxx + +runner_LDADD = $(top_builddir)/src/libpqxx.la ${POSTGRES_LIB} + +TESTS = runner +check_PROGRAMS = ${TESTS} diff --git a/ext/libpqxx-7.7.3/test/Makefile.in b/ext/libpqxx-7.7.3/test/Makefile.in new file mode 100644 index 000000000..8ffcca06c --- /dev/null +++ b/ext/libpqxx-7.7.3/test/Makefile.in @@ -0,0 +1,1302 @@ +# Makefile.in generated by automake 1.16.4 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2021 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# ############################################################################## +# AUTOMATICALLY GENERATED FILE -- DO NOT EDIT. +# +# This file is generated automatically by libpqxx's template2mak.py script, and +# will be rewritten from time to time. +# +# If you modify this file, chances are your modifications will be lost. +# +# The template2mak.py script should be available in the tools directory of the +# libpqxx source archive. +# +# Generated from template './test/Makefile.am.template'. +# ############################################################################## +# Makefile.am is generated automatically for automake whenever test programs are +# added to libpqxx. + +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) 
;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +TESTS = runner$(EXEEXT) +check_PROGRAMS = $(am__EXEEXT_1) +subdir = test +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/config/m4/libtool.m4 \ + $(top_srcdir)/config/m4/ltoptions.m4 \ + $(top_srcdir)/config/m4/ltsugar.m4 \ + $(top_srcdir)/config/m4/ltversion.m4 \ + $(top_srcdir)/config/m4/lt~obsolete.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(noinst_HEADERS) \ + $(am__DIST_COMMON) +mkinstalldirs = $(SHELL) $(top_srcdir)/config/mkinstalldirs +CONFIG_HEADER = $(top_builddir)/include/pqxx/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +am__EXEEXT_1 = runner$(EXEEXT) +am__dirstamp = $(am__leading_dot)dirstamp +am_runner_OBJECTS = test00.$(OBJEXT) test01.$(OBJEXT) test02.$(OBJEXT) \ + test04.$(OBJEXT) test07.$(OBJEXT) test10.$(OBJEXT) \ + test11.$(OBJEXT) test13.$(OBJEXT) test14.$(OBJEXT) \ + test16.$(OBJEXT) test17.$(OBJEXT) test18.$(OBJEXT) \ + test20.$(OBJEXT) test21.$(OBJEXT) test26.$(OBJEXT) \ + test29.$(OBJEXT) test30.$(OBJEXT) test32.$(OBJEXT) \ + test37.$(OBJEXT) test39.$(OBJEXT) test46.$(OBJEXT) \ + test56.$(OBJEXT) test60.$(OBJEXT) test61.$(OBJEXT) \ + test62.$(OBJEXT) test69.$(OBJEXT) test70.$(OBJEXT) \ + test71.$(OBJEXT) test72.$(OBJEXT) test74.$(OBJEXT) \ + test75.$(OBJEXT) test76.$(OBJEXT) test77.$(OBJEXT) \ + test78.$(OBJEXT) test79.$(OBJEXT) test82.$(OBJEXT) \ + test84.$(OBJEXT) test87.$(OBJEXT) test88.$(OBJEXT) \ + test89.$(OBJEXT) test90.$(OBJEXT) unit/test_array.$(OBJEXT) \ + unit/test_binarystring.$(OBJEXT) unit/test_blob.$(OBJEXT) \ + unit/test_cancel_query.$(OBJEXT) unit/test_column.$(OBJEXT) \ + unit/test_composite.$(OBJEXT) unit/test_connection.$(OBJEXT) \ + 
unit/test_cursor.$(OBJEXT) unit/test_encodings.$(OBJEXT) \ + unit/test_error_verbosity.$(OBJEXT) \ + unit/test_errorhandler.$(OBJEXT) unit/test_escape.$(OBJEXT) \ + unit/test_exceptions.$(OBJEXT) unit/test_field.$(OBJEXT) \ + unit/test_float.$(OBJEXT) unit/test_largeobject.$(OBJEXT) \ + unit/test_nonblocking_connect.$(OBJEXT) \ + unit/test_notification.$(OBJEXT) unit/test_pipeline.$(OBJEXT) \ + unit/test_prepared_statement.$(OBJEXT) \ + unit/test_range.$(OBJEXT) unit/test_read_transaction.$(OBJEXT) \ + unit/test_result_iteration.$(OBJEXT) \ + unit/test_result_slicing.$(OBJEXT) unit/test_row.$(OBJEXT) \ + unit/test_separated_list.$(OBJEXT) \ + unit/test_simultaneous_transactions.$(OBJEXT) \ + unit/test_sql_cursor.$(OBJEXT) \ + unit/test_stateless_cursor.$(OBJEXT) \ + unit/test_strconv.$(OBJEXT) unit/test_stream_from.$(OBJEXT) \ + unit/test_stream_to.$(OBJEXT) \ + unit/test_string_conversion.$(OBJEXT) \ + unit/test_subtransaction.$(OBJEXT) \ + unit/test_test_helpers.$(OBJEXT) \ + unit/test_thread_safety_model.$(OBJEXT) \ + unit/test_time.$(OBJEXT) unit/test_transaction.$(OBJEXT) \ + unit/test_transaction_base.$(OBJEXT) \ + unit/test_transaction_focus.$(OBJEXT) \ + unit/test_transactor.$(OBJEXT) unit/test_type_name.$(OBJEXT) \ + unit/test_zview.$(OBJEXT) runner.$(OBJEXT) +runner_OBJECTS = $(am_runner_OBJECTS) +runner_DEPENDENCIES = $(top_builddir)/src/libpqxx.la +AM_V_lt = $(am__v_lt_@AM_V@) +am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) +am__v_lt_0 = --silent +am__v_lt_1 = +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +depcomp = $(SHELL) $(top_srcdir)/config/depcomp +am__maybe_remake_depfiles = depfiles +am__depfiles_remade = ./$(DEPDIR)/runner.Po ./$(DEPDIR)/test00.Po \ + ./$(DEPDIR)/test01.Po ./$(DEPDIR)/test02.Po \ + ./$(DEPDIR)/test04.Po ./$(DEPDIR)/test07.Po \ + ./$(DEPDIR)/test10.Po ./$(DEPDIR)/test11.Po \ + ./$(DEPDIR)/test13.Po ./$(DEPDIR)/test14.Po \ + ./$(DEPDIR)/test16.Po ./$(DEPDIR)/test17.Po \ + ./$(DEPDIR)/test18.Po ./$(DEPDIR)/test20.Po \ + ./$(DEPDIR)/test21.Po ./$(DEPDIR)/test26.Po \ + ./$(DEPDIR)/test29.Po ./$(DEPDIR)/test30.Po \ + ./$(DEPDIR)/test32.Po ./$(DEPDIR)/test37.Po \ + ./$(DEPDIR)/test39.Po ./$(DEPDIR)/test46.Po \ + ./$(DEPDIR)/test56.Po ./$(DEPDIR)/test60.Po \ + ./$(DEPDIR)/test61.Po ./$(DEPDIR)/test62.Po \ + ./$(DEPDIR)/test69.Po ./$(DEPDIR)/test70.Po \ + ./$(DEPDIR)/test71.Po ./$(DEPDIR)/test72.Po \ + ./$(DEPDIR)/test74.Po ./$(DEPDIR)/test75.Po \ + ./$(DEPDIR)/test76.Po ./$(DEPDIR)/test77.Po \ + ./$(DEPDIR)/test78.Po ./$(DEPDIR)/test79.Po \ + ./$(DEPDIR)/test82.Po ./$(DEPDIR)/test84.Po \ + ./$(DEPDIR)/test87.Po ./$(DEPDIR)/test88.Po \ + ./$(DEPDIR)/test89.Po ./$(DEPDIR)/test90.Po \ + unit/$(DEPDIR)/test_array.Po \ + unit/$(DEPDIR)/test_binarystring.Po \ + unit/$(DEPDIR)/test_blob.Po \ + unit/$(DEPDIR)/test_cancel_query.Po \ + unit/$(DEPDIR)/test_column.Po unit/$(DEPDIR)/test_composite.Po \ + unit/$(DEPDIR)/test_connection.Po \ + unit/$(DEPDIR)/test_cursor.Po unit/$(DEPDIR)/test_encodings.Po \ + unit/$(DEPDIR)/test_error_verbosity.Po \ + unit/$(DEPDIR)/test_errorhandler.Po \ + unit/$(DEPDIR)/test_escape.Po \ + unit/$(DEPDIR)/test_exceptions.Po unit/$(DEPDIR)/test_field.Po \ + unit/$(DEPDIR)/test_float.Po \ + unit/$(DEPDIR)/test_largeobject.Po \ + unit/$(DEPDIR)/test_nonblocking_connect.Po \ + 
unit/$(DEPDIR)/test_notification.Po \ + unit/$(DEPDIR)/test_pipeline.Po \ + unit/$(DEPDIR)/test_prepared_statement.Po \ + unit/$(DEPDIR)/test_range.Po \ + unit/$(DEPDIR)/test_read_transaction.Po \ + unit/$(DEPDIR)/test_result_iteration.Po \ + unit/$(DEPDIR)/test_result_slicing.Po \ + unit/$(DEPDIR)/test_row.Po \ + unit/$(DEPDIR)/test_separated_list.Po \ + unit/$(DEPDIR)/test_simultaneous_transactions.Po \ + unit/$(DEPDIR)/test_sql_cursor.Po \ + unit/$(DEPDIR)/test_stateless_cursor.Po \ + unit/$(DEPDIR)/test_strconv.Po \ + unit/$(DEPDIR)/test_stream_from.Po \ + unit/$(DEPDIR)/test_stream_to.Po \ + unit/$(DEPDIR)/test_string_conversion.Po \ + unit/$(DEPDIR)/test_subtransaction.Po \ + unit/$(DEPDIR)/test_test_helpers.Po \ + unit/$(DEPDIR)/test_thread_safety_model.Po \ + unit/$(DEPDIR)/test_time.Po unit/$(DEPDIR)/test_transaction.Po \ + unit/$(DEPDIR)/test_transaction_base.Po \ + unit/$(DEPDIR)/test_transaction_focus.Po \ + unit/$(DEPDIR)/test_transactor.Po \ + unit/$(DEPDIR)/test_type_name.Po unit/$(DEPDIR)/test_zview.Po +am__mv = mv -f +CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) +LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CXXFLAGS) $(CXXFLAGS) +AM_V_CXX = $(am__v_CXX_@AM_V@) +am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) +am__v_CXX_0 = @echo " CXX " $@; +am__v_CXX_1 = +CXXLD = $(CXX) +CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ + $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) +am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) +am__v_CXXLD_0 = @echo " CXXLD " $@; +am__v_CXXLD_1 = +SOURCES = $(runner_SOURCES) +DIST_SOURCES = $(runner_SOURCES) +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +HEADERS = $(noinst_HEADERS) +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. 
+am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +am__tty_colors_dummy = \ + mgn= red= grn= lgn= blu= brg= std=; \ + am__color_tests=no +am__tty_colors = { \ + $(am__tty_colors_dummy); \ + if test "X$(AM_COLOR_TESTS)" = Xno; then \ + am__color_tests=no; \ + elif test "X$(AM_COLOR_TESTS)" = Xalways; then \ + am__color_tests=yes; \ + elif test "X$$TERM" != Xdumb && { test -t 1; } 2>/dev/null; then \ + am__color_tests=yes; \ + fi; \ + if test $$am__color_tests = yes; then \ + red=''; \ + grn=''; \ + lgn=''; \ + blu=''; \ + mgn=''; \ + brg=''; \ + std=''; \ + fi; \ +} +am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/config/depcomp \ + $(top_srcdir)/config/mkinstalldirs +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CSCOPE = @CSCOPE@ +CTAGS = @CTAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DOXYGEN = @DOXYGEN@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ETAGS = @ETAGS@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GREP = @GREP@ +HAVE_DOT = @HAVE_DOT@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MKDIR = @MKDIR@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PG_CONFIG = @PG_CONFIG@ +PKG_CONFIG = @PKG_CONFIG@ +POSTGRES_INCLUDE = @POSTGRES_INCLUDE@ +PQXXVERSION = @PQXXVERSION@ +PQXX_ABI = @PQXX_ABI@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = 
@install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +with_postgres_lib = @with_postgres_lib@ +EXTRA_DIST = Makefile.am.template + +# Use the serial test runner, so tests don't get run in parallel. Otherwise, +# output from test failures will be hidden away in log files where we can't +# see them when running in a build slave. +AUTOMAKE_OPTIONS = serial-tests +AM_CPPFLAGS = -I$(top_builddir)/include -I$(top_srcdir)/include + +# Override automatically generated list of default includes. It contains only +# unnecessary entries, and incorrectly mentions include/pqxx directly. +DEFAULT_INCLUDES = +noinst_HEADERS = test_helpers.hxx +CLEANFILES = pqxxlo.txt +MAINTAINERCLEANFILES = Makefile.in + +#TESTS_ENVIRONMENT=PGDATABASE=libpqxx +# PGDATABASE, PGHOST, PGPORT, PGUSER +runner_SOURCES = \ + test00.cxx \ + test01.cxx \ + test02.cxx \ + test04.cxx \ + test07.cxx \ + test10.cxx \ + test11.cxx \ + test13.cxx \ + test14.cxx \ + test16.cxx \ + test17.cxx \ + test18.cxx \ + test20.cxx \ + test21.cxx \ + test26.cxx \ + test29.cxx \ + test30.cxx \ + test32.cxx \ + test37.cxx \ + test39.cxx \ + test46.cxx \ + test56.cxx \ + test60.cxx \ + test61.cxx \ + test62.cxx \ + test69.cxx \ + test70.cxx \ + test71.cxx \ + test72.cxx \ + test74.cxx \ + test75.cxx \ + test76.cxx \ + test77.cxx \ + test78.cxx \ + test79.cxx \ + test82.cxx \ + test84.cxx \ + test87.cxx \ + test88.cxx \ + test89.cxx \ + test90.cxx \ + unit/test_array.cxx \ + unit/test_binarystring.cxx \ + unit/test_blob.cxx \ + unit/test_cancel_query.cxx \ + unit/test_column.cxx \ + unit/test_composite.cxx \ + unit/test_connection.cxx \ + unit/test_cursor.cxx \ + unit/test_encodings.cxx \ + unit/test_error_verbosity.cxx \ + unit/test_errorhandler.cxx \ + unit/test_escape.cxx \ + unit/test_exceptions.cxx \ + unit/test_field.cxx \ + unit/test_float.cxx \ + unit/test_largeobject.cxx \ + unit/test_nonblocking_connect.cxx \ + unit/test_notification.cxx \ + unit/test_pipeline.cxx \ + unit/test_prepared_statement.cxx \ + unit/test_range.cxx \ + unit/test_read_transaction.cxx \ + unit/test_result_iteration.cxx \ + unit/test_result_slicing.cxx \ + unit/test_row.cxx \ + unit/test_separated_list.cxx \ + unit/test_simultaneous_transactions.cxx \ + unit/test_sql_cursor.cxx \ + unit/test_stateless_cursor.cxx \ + unit/test_strconv.cxx \ + unit/test_stream_from.cxx \ + unit/test_stream_to.cxx \ + unit/test_string_conversion.cxx \ + unit/test_subtransaction.cxx \ + unit/test_test_helpers.cxx \ + unit/test_thread_safety_model.cxx \ + unit/test_time.cxx \ + unit/test_transaction.cxx \ + unit/test_transaction_base.cxx \ + unit/test_transaction_focus.cxx \ + unit/test_transactor.cxx \ + unit/test_type_name.cxx \ + unit/test_zview.cxx \ + runner.cxx + +runner_LDADD = $(top_builddir)/src/libpqxx.la ${POSTGRES_LIB} +all: all-am + +.SUFFIXES: +.SUFFIXES: .cxx .lo .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) 
am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu test/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu test/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +clean-checkPROGRAMS: + @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ + echo " rm -f" $$list; \ + rm -f $$list || exit $$?; \ + test -n "$(EXEEXT)" || exit 0; \ + list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ + echo " rm -f" $$list; \ + rm -f $$list +unit/$(am__dirstamp): + @$(MKDIR_P) unit + @: > unit/$(am__dirstamp) +unit/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) unit/$(DEPDIR) + @: > unit/$(DEPDIR)/$(am__dirstamp) +unit/test_array.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_binarystring.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_blob.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_cancel_query.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_column.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_composite.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_connection.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_cursor.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_encodings.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_error_verbosity.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_errorhandler.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_escape.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_exceptions.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_field.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_float.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_largeobject.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_nonblocking_connect.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_notification.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_pipeline.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_prepared_statement.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_range.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_read_transaction.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_result_iteration.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) 
+unit/test_result_slicing.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_row.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_separated_list.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_simultaneous_transactions.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_sql_cursor.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_stateless_cursor.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_strconv.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_stream_from.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_stream_to.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_string_conversion.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_subtransaction.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_test_helpers.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_thread_safety_model.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_time.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_transaction.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_transaction_base.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_transaction_focus.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_transactor.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_type_name.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) +unit/test_zview.$(OBJEXT): unit/$(am__dirstamp) \ + unit/$(DEPDIR)/$(am__dirstamp) + +runner$(EXEEXT): $(runner_OBJECTS) $(runner_DEPENDENCIES) $(EXTRA_runner_DEPENDENCIES) + @rm -f runner$(EXEEXT) + $(AM_V_CXXLD)$(CXXLINK) $(runner_OBJECTS) $(runner_LDADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + -rm -f unit/*.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/runner.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test00.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test01.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test02.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test04.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test07.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test10.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test11.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test13.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test14.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test16.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test17.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test18.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test20.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test21.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/test26.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test29.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test30.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test32.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test37.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test39.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test46.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test56.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test60.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test61.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test62.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test69.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test70.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test71.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test72.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test74.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test75.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test76.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test77.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test78.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test79.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test82.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test84.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test87.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test88.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test89.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/test90.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_array.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_binarystring.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_blob.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_cancel_query.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_column.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_composite.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_connection.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_cursor.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_encodings.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_error_verbosity.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ 
@am__quote@unit/$(DEPDIR)/test_errorhandler.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_escape.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_exceptions.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_field.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_float.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_largeobject.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_nonblocking_connect.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_notification.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_pipeline.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_prepared_statement.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_range.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_read_transaction.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_result_iteration.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_result_slicing.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_row.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_separated_list.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_simultaneous_transactions.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_sql_cursor.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_stateless_cursor.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_strconv.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_stream_from.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_stream_to.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_string_conversion.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_subtransaction.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_test_helpers.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_thread_safety_model.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_time.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_transaction.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_transaction_base.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_transaction_focus.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_transactor.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_type_name.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@unit/$(DEPDIR)/test_zview.Po@am__quote@ # am--include-marker + +$(am__depfiles_remade): + @$(MKDIR_P) $(@D) + @echo '# dummy' >$@-t && $(am__mv) $@-t $@ + 
+am--depfiles: $(am__depfiles_remade) + +.cxx.o: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ +@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< + +.cxx.obj: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ +@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ +@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.cxx.lo: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ +@am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-am +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-am + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-am + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +check-TESTS: $(TESTS) + @failed=0; all=0; xfail=0; xpass=0; skip=0; \ + srcdir=$(srcdir); export srcdir; \ + list=' $(TESTS) '; \ + $(am__tty_colors); \ + if test -n "$$list"; then \ + for tst in $$list; do \ + if test -f ./$$tst; then dir=./; \ + elif test -f $$tst; then dir=; \ + else dir="$(srcdir)/"; fi; \ + if $(TESTS_ENVIRONMENT) $${dir}$$tst $(AM_TESTS_FD_REDIRECT); then \ + all=`expr $$all + 1`; \ + case " $(XFAIL_TESTS) " in \ + *[\ \ ]$$tst[\ \ ]*) \ + xpass=`expr $$xpass + 1`; \ + failed=`expr 
$$failed + 1`; \ + col=$$red; res=XPASS; \ + ;; \ + *) \ + col=$$grn; res=PASS; \ + ;; \ + esac; \ + elif test $$? -ne 77; then \ + all=`expr $$all + 1`; \ + case " $(XFAIL_TESTS) " in \ + *[\ \ ]$$tst[\ \ ]*) \ + xfail=`expr $$xfail + 1`; \ + col=$$lgn; res=XFAIL; \ + ;; \ + *) \ + failed=`expr $$failed + 1`; \ + col=$$red; res=FAIL; \ + ;; \ + esac; \ + else \ + skip=`expr $$skip + 1`; \ + col=$$blu; res=SKIP; \ + fi; \ + echo "$${col}$$res$${std}: $$tst"; \ + done; \ + if test "$$all" -eq 1; then \ + tests="test"; \ + All=""; \ + else \ + tests="tests"; \ + All="All "; \ + fi; \ + if test "$$failed" -eq 0; then \ + if test "$$xfail" -eq 0; then \ + banner="$$All$$all $$tests passed"; \ + else \ + if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ + banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ + fi; \ + else \ + if test "$$xpass" -eq 0; then \ + banner="$$failed of $$all $$tests failed"; \ + else \ + if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ + banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ + fi; \ + fi; \ + dashes="$$banner"; \ + skipped=""; \ + if test "$$skip" -ne 0; then \ + if test "$$skip" -eq 1; then \ + skipped="($$skip test was not run)"; \ + else \ + skipped="($$skip tests were not run)"; \ + fi; \ + test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ + dashes="$$skipped"; \ + fi; \ + report=""; \ + if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ + report="Please report to $(PACKAGE_BUGREPORT)"; \ + test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ + dashes="$$report"; \ + fi; \ + dashes=`echo "$$dashes" | sed s/./=/g`; \ + if test "$$failed" -eq 0; then \ + col="$$grn"; \ + else \ + col="$$red"; \ + fi; \ + echo "$${col}$$dashes$${std}"; \ + echo "$${col}$$banner$${std}"; \ + test -z "$$skipped" || echo "$${col}$$skipped$${std}"; \ + test -z "$$report" || echo "$${col}$$report$${std}"; \ + echo "$${col}$$dashes$${std}"; \ + test "$$failed" -eq 0; \ + else :; fi +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am + +distdir-am: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am + $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) + $(MAKE) $(AM_MAKEFLAGS) check-TESTS +check: check-am +all-am: Makefile $(HEADERS) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + -rm -f unit/$(DEPDIR)/$(am__dirstamp) + -rm -f unit/$(am__dirstamp) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." + -test -z "$(MAINTAINERCLEANFILES)" || rm -f $(MAINTAINERCLEANFILES) +clean: clean-am + +clean-am: clean-checkPROGRAMS clean-generic clean-libtool \ + mostlyclean-am + +distclean: distclean-am + -rm -f ./$(DEPDIR)/runner.Po + -rm -f ./$(DEPDIR)/test00.Po + -rm -f ./$(DEPDIR)/test01.Po + -rm -f ./$(DEPDIR)/test02.Po + -rm -f ./$(DEPDIR)/test04.Po + -rm -f ./$(DEPDIR)/test07.Po + -rm -f ./$(DEPDIR)/test10.Po + -rm -f ./$(DEPDIR)/test11.Po + -rm -f ./$(DEPDIR)/test13.Po + -rm -f ./$(DEPDIR)/test14.Po + -rm -f ./$(DEPDIR)/test16.Po + -rm -f ./$(DEPDIR)/test17.Po + -rm -f ./$(DEPDIR)/test18.Po + -rm -f ./$(DEPDIR)/test20.Po + -rm -f ./$(DEPDIR)/test21.Po + -rm -f ./$(DEPDIR)/test26.Po + -rm -f ./$(DEPDIR)/test29.Po + -rm -f ./$(DEPDIR)/test30.Po + -rm -f ./$(DEPDIR)/test32.Po + -rm -f ./$(DEPDIR)/test37.Po + -rm -f ./$(DEPDIR)/test39.Po + -rm -f ./$(DEPDIR)/test46.Po + -rm -f ./$(DEPDIR)/test56.Po + -rm -f ./$(DEPDIR)/test60.Po + -rm -f ./$(DEPDIR)/test61.Po + -rm -f ./$(DEPDIR)/test62.Po + -rm -f ./$(DEPDIR)/test69.Po + -rm -f ./$(DEPDIR)/test70.Po + -rm -f ./$(DEPDIR)/test71.Po + -rm -f ./$(DEPDIR)/test72.Po + -rm -f ./$(DEPDIR)/test74.Po + -rm -f ./$(DEPDIR)/test75.Po + -rm -f ./$(DEPDIR)/test76.Po + -rm -f ./$(DEPDIR)/test77.Po + -rm -f ./$(DEPDIR)/test78.Po + -rm -f ./$(DEPDIR)/test79.Po + -rm -f ./$(DEPDIR)/test82.Po + -rm -f ./$(DEPDIR)/test84.Po + -rm -f ./$(DEPDIR)/test87.Po + -rm -f ./$(DEPDIR)/test88.Po + -rm -f ./$(DEPDIR)/test89.Po + -rm -f ./$(DEPDIR)/test90.Po + -rm -f unit/$(DEPDIR)/test_array.Po + -rm -f unit/$(DEPDIR)/test_binarystring.Po + -rm -f unit/$(DEPDIR)/test_blob.Po + -rm -f unit/$(DEPDIR)/test_cancel_query.Po + -rm -f unit/$(DEPDIR)/test_column.Po + -rm -f unit/$(DEPDIR)/test_composite.Po + -rm -f unit/$(DEPDIR)/test_connection.Po + -rm -f unit/$(DEPDIR)/test_cursor.Po + -rm -f unit/$(DEPDIR)/test_encodings.Po + -rm -f unit/$(DEPDIR)/test_error_verbosity.Po + -rm -f unit/$(DEPDIR)/test_errorhandler.Po + -rm -f unit/$(DEPDIR)/test_escape.Po + -rm -f 
unit/$(DEPDIR)/test_exceptions.Po + -rm -f unit/$(DEPDIR)/test_field.Po + -rm -f unit/$(DEPDIR)/test_float.Po + -rm -f unit/$(DEPDIR)/test_largeobject.Po + -rm -f unit/$(DEPDIR)/test_nonblocking_connect.Po + -rm -f unit/$(DEPDIR)/test_notification.Po + -rm -f unit/$(DEPDIR)/test_pipeline.Po + -rm -f unit/$(DEPDIR)/test_prepared_statement.Po + -rm -f unit/$(DEPDIR)/test_range.Po + -rm -f unit/$(DEPDIR)/test_read_transaction.Po + -rm -f unit/$(DEPDIR)/test_result_iteration.Po + -rm -f unit/$(DEPDIR)/test_result_slicing.Po + -rm -f unit/$(DEPDIR)/test_row.Po + -rm -f unit/$(DEPDIR)/test_separated_list.Po + -rm -f unit/$(DEPDIR)/test_simultaneous_transactions.Po + -rm -f unit/$(DEPDIR)/test_sql_cursor.Po + -rm -f unit/$(DEPDIR)/test_stateless_cursor.Po + -rm -f unit/$(DEPDIR)/test_strconv.Po + -rm -f unit/$(DEPDIR)/test_stream_from.Po + -rm -f unit/$(DEPDIR)/test_stream_to.Po + -rm -f unit/$(DEPDIR)/test_string_conversion.Po + -rm -f unit/$(DEPDIR)/test_subtransaction.Po + -rm -f unit/$(DEPDIR)/test_test_helpers.Po + -rm -f unit/$(DEPDIR)/test_thread_safety_model.Po + -rm -f unit/$(DEPDIR)/test_time.Po + -rm -f unit/$(DEPDIR)/test_transaction.Po + -rm -f unit/$(DEPDIR)/test_transaction_base.Po + -rm -f unit/$(DEPDIR)/test_transaction_focus.Po + -rm -f unit/$(DEPDIR)/test_transactor.Po + -rm -f unit/$(DEPDIR)/test_type_name.Po + -rm -f unit/$(DEPDIR)/test_zview.Po + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f ./$(DEPDIR)/runner.Po + -rm -f ./$(DEPDIR)/test00.Po + -rm -f ./$(DEPDIR)/test01.Po + -rm -f ./$(DEPDIR)/test02.Po + -rm -f ./$(DEPDIR)/test04.Po + -rm -f ./$(DEPDIR)/test07.Po + -rm -f ./$(DEPDIR)/test10.Po + -rm -f ./$(DEPDIR)/test11.Po + -rm -f ./$(DEPDIR)/test13.Po + -rm -f ./$(DEPDIR)/test14.Po + -rm -f ./$(DEPDIR)/test16.Po + -rm -f ./$(DEPDIR)/test17.Po + -rm -f ./$(DEPDIR)/test18.Po + -rm -f ./$(DEPDIR)/test20.Po + -rm -f ./$(DEPDIR)/test21.Po + -rm -f ./$(DEPDIR)/test26.Po + -rm -f ./$(DEPDIR)/test29.Po + -rm -f ./$(DEPDIR)/test30.Po + -rm -f ./$(DEPDIR)/test32.Po + -rm -f ./$(DEPDIR)/test37.Po + -rm -f ./$(DEPDIR)/test39.Po + -rm -f ./$(DEPDIR)/test46.Po + -rm -f ./$(DEPDIR)/test56.Po + -rm -f ./$(DEPDIR)/test60.Po + -rm -f ./$(DEPDIR)/test61.Po + -rm -f ./$(DEPDIR)/test62.Po + -rm -f ./$(DEPDIR)/test69.Po + -rm -f ./$(DEPDIR)/test70.Po + -rm -f ./$(DEPDIR)/test71.Po + -rm -f ./$(DEPDIR)/test72.Po + -rm -f ./$(DEPDIR)/test74.Po + -rm -f ./$(DEPDIR)/test75.Po + -rm -f ./$(DEPDIR)/test76.Po + -rm -f ./$(DEPDIR)/test77.Po + -rm -f ./$(DEPDIR)/test78.Po + -rm -f ./$(DEPDIR)/test79.Po + -rm -f ./$(DEPDIR)/test82.Po + -rm -f ./$(DEPDIR)/test84.Po + -rm -f ./$(DEPDIR)/test87.Po + -rm -f ./$(DEPDIR)/test88.Po + -rm -f ./$(DEPDIR)/test89.Po + -rm -f ./$(DEPDIR)/test90.Po + -rm -f unit/$(DEPDIR)/test_array.Po + -rm -f unit/$(DEPDIR)/test_binarystring.Po + -rm -f unit/$(DEPDIR)/test_blob.Po + -rm -f unit/$(DEPDIR)/test_cancel_query.Po + -rm -f unit/$(DEPDIR)/test_column.Po + -rm -f unit/$(DEPDIR)/test_composite.Po + -rm -f unit/$(DEPDIR)/test_connection.Po + -rm -f 
unit/$(DEPDIR)/test_cursor.Po + -rm -f unit/$(DEPDIR)/test_encodings.Po + -rm -f unit/$(DEPDIR)/test_error_verbosity.Po + -rm -f unit/$(DEPDIR)/test_errorhandler.Po + -rm -f unit/$(DEPDIR)/test_escape.Po + -rm -f unit/$(DEPDIR)/test_exceptions.Po + -rm -f unit/$(DEPDIR)/test_field.Po + -rm -f unit/$(DEPDIR)/test_float.Po + -rm -f unit/$(DEPDIR)/test_largeobject.Po + -rm -f unit/$(DEPDIR)/test_nonblocking_connect.Po + -rm -f unit/$(DEPDIR)/test_notification.Po + -rm -f unit/$(DEPDIR)/test_pipeline.Po + -rm -f unit/$(DEPDIR)/test_prepared_statement.Po + -rm -f unit/$(DEPDIR)/test_range.Po + -rm -f unit/$(DEPDIR)/test_read_transaction.Po + -rm -f unit/$(DEPDIR)/test_result_iteration.Po + -rm -f unit/$(DEPDIR)/test_result_slicing.Po + -rm -f unit/$(DEPDIR)/test_row.Po + -rm -f unit/$(DEPDIR)/test_separated_list.Po + -rm -f unit/$(DEPDIR)/test_simultaneous_transactions.Po + -rm -f unit/$(DEPDIR)/test_sql_cursor.Po + -rm -f unit/$(DEPDIR)/test_stateless_cursor.Po + -rm -f unit/$(DEPDIR)/test_strconv.Po + -rm -f unit/$(DEPDIR)/test_stream_from.Po + -rm -f unit/$(DEPDIR)/test_stream_to.Po + -rm -f unit/$(DEPDIR)/test_string_conversion.Po + -rm -f unit/$(DEPDIR)/test_subtransaction.Po + -rm -f unit/$(DEPDIR)/test_test_helpers.Po + -rm -f unit/$(DEPDIR)/test_thread_safety_model.Po + -rm -f unit/$(DEPDIR)/test_time.Po + -rm -f unit/$(DEPDIR)/test_transaction.Po + -rm -f unit/$(DEPDIR)/test_transaction_base.Po + -rm -f unit/$(DEPDIR)/test_transaction_focus.Po + -rm -f unit/$(DEPDIR)/test_transactor.Po + -rm -f unit/$(DEPDIR)/test_type_name.Po + -rm -f unit/$(DEPDIR)/test_zview.Po + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: check-am install-am install-strip + +.PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-TESTS \ + check-am clean clean-checkPROGRAMS clean-generic clean-libtool \ + cscopelist-am ctags ctags-am distclean distclean-compile \ + distclean-generic distclean-libtool distclean-tags distdir dvi \ + dvi-am html html-am info info-am install install-am \ + install-data install-data-am install-dvi install-dvi-am \ + install-exec install-exec-am install-html install-html-am \ + install-info install-info-am install-man install-pdf \ + install-pdf-am install-ps install-ps-am install-strip \ + installcheck installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags tags-am uninstall uninstall-am + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/ext/libpqxx-7.7.3/test/runner.cxx b/ext/libpqxx-7.7.3/test/runner.cxx new file mode 100644 index 000000000..38c9849d8 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/runner.cxx @@ -0,0 +1,203 @@ +/* libpqxx test runner. 
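+ *
+ * Runs every registered test, or only the test named on the command line, and
+ * returns the number of failed tests as its exit status.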
+ */
+#include <cassert>
+#include <iostream>
+#include <list>
+#include <map>
+#include <new>
+#include <stdexcept>
+#include <string>
+
+#include <pqxx/pqxx>
+
+#include "test_helpers.hxx"
+
+namespace
+{
+inline std::string deref_field(pqxx::field const &f)
+{
+  return f.c_str();
+}
+} // namespace
+
+
+namespace pqxx::test
+{
+test_failure::test_failure(
+  std::string const &ffile, int fline, std::string const &desc) :
+        std::logic_error(desc), m_file(ffile), m_line(fline)
+{}
+
+test_failure::~test_failure() noexcept = default;
+
+
+/// Drop table, if it exists.
+inline void drop_table(transaction_base &t, std::string const &table)
+{
+  t.exec("DROP TABLE IF EXISTS " + table);
+}
+
+
+[[noreturn]] void
+check_notreached(char const file[], int line, std::string desc)
+{
+  throw test_failure(file, line, desc);
+}
+
+
+void check(
+  char const file[], int line, bool condition, char const text[],
+  std::string const &desc)
+{
+  if (not condition)
+    throw test_failure(
+      file, line, desc + " (failed expression: " + text + ")");
+}
+
+
+void expected_exception(std::string const &message)
+{
+  std::cout << "(Expected) " << message << std::endl;
+}
+
+
+std::string list_row(row Obj)
+{
+  return separated_list(", ", std::begin(Obj), std::end(Obj), deref_field);
+}
+
+
+std::string list_result(result Obj)
+{
+  if (std::empty(Obj))
+    return "<empty>";
+  return "{" +
+         separated_list(
+           "}\n{", std::begin(Obj), std::end(Obj),
+           [](row r) { return list_row(r); }) +
+         "}";
+}
+
+
+std::string list_result_iterator(result::const_iterator Obj)
+{
+  return "<iterator at " + pqxx::to_string(Obj.rownumber()) + ">";
+}
+
+
+void create_pqxxevents(transaction_base &t)
+{
+  t.exec(
+    "CREATE TEMP TABLE pqxxevents(year integer, event varchar) "
+    "ON COMMIT PRESERVE ROWS");
+  t.exec("INSERT INTO pqxxevents(year, event) VALUES (71, 'jtv')");
+  t.exec("INSERT INTO pqxxevents(year, event) VALUES (38, 'time_t overflow')");
+  t.exec(
+    "INSERT INTO pqxxevents(year, event) VALUES (1, '''911'' WTC attack')");
+  t.exec("INSERT INTO pqxxevents(year, event) VALUES (81, 'C:\\>')");
+  t.exec(
+    "INSERT INTO pqxxevents(year, event) VALUES (1978, 'bloody\t\tcold')");
+  t.exec("INSERT INTO pqxxevents(year, event) VALUES (99, '')");
+  t.exec("INSERT INTO pqxxevents(year, event) VALUES (2002, 'libpqxx')");
+  t.exec(
+    "INSERT INTO pqxxevents(year, event) "
+    "VALUES (1989, 'Ode an die Freiheit')");
+  t.exec(
+    "INSERT INTO pqxxevents(year, event) VALUES (2001, 'New millennium')");
+  t.exec("INSERT INTO pqxxevents(year, event) VALUES (1974, '')");
+  t.exec("INSERT INTO pqxxevents(year, event) VALUES (97, 'Asian crisis')");
+  t.exec(
+    "INSERT INTO pqxxevents(year, event) VALUES (2001, 'A Space Odyssey')");
+}
+} // namespace pqxx::test
+
+
+namespace
+{
+std::map<char const *, pqxx::test::testfunc> *all_tests{nullptr};
+} // namespace
+
+
+namespace pqxx::test
+{
+void register_test(char const name[], pqxx::test::testfunc func)
+{
+  if (all_tests == nullptr)
+  {
+    all_tests = new std::map<char const *, pqxx::test::testfunc>();
+  }
+  else
+  {
+    assert(all_tests->find(name) == all_tests->end());
+  }
+  (*all_tests)[name] = func;
+}
+} // namespace pqxx::test
+
+
+int main(int argc, char const *argv[])
+{
+  char const *const test_name{(argc > 1) ? argv[1] : nullptr};
argv[1] : nullptr}; + + int test_count = 0; + std::list failed; + for (auto const &i : *all_tests) + if (test_name == nullptr or std::string{test_name} == std::string{i.first}) + { + std::cout << std::endl << "Running: " << i.first << std::endl; + + bool success = false; + try + { + i.second(); + success = true; + } + catch (pqxx::test::test_failure const &e) + { + std::cerr << "Test failure in " + e.file() + " line " + + pqxx::to_string(e.line()) + << ": " << e.what() << std::endl; + } + catch (std::bad_alloc const &) + { + std::cerr << "Out of memory!" << std::endl; + } + catch (pqxx::feature_not_supported const &e) + { + std::cerr << "Not testing unsupported feature: " << e.what() + << std::endl; + success = true; + --test_count; + } + catch (pqxx::sql_error const &e) + { + std::cerr << "SQL error: " << e.what() << std::endl + << "Query was: " << e.query() << std::endl; + } + catch (std::exception const &e) + { + std::cerr << "Exception: " << e.what() << std::endl; + } + catch (...) + { + std::cerr << "Unknown exception" << std::endl; + } + + if (not success) + { + std::cerr << "FAILED: " << i.first << std::endl; + failed.emplace_back(i.first); + } + ++test_count; + } + + std::cout << "Ran " << test_count << " test(s).\n"; + + if (not std::empty(failed)) + { + std::cerr << "*** " << std::size(failed) << " test(s) failed: ***\n"; + for (auto const &i : failed) std::cerr << "\t" << i << '\n'; + } + + return int(std::size(failed)); +} diff --git a/ext/libpqxx-7.7.3/test/test00.cxx b/ext/libpqxx-7.7.3/test/test00.cxx new file mode 100644 index 000000000..698c9b81b --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test00.cxx @@ -0,0 +1,114 @@ +#include + +#include +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Initial test program for libpqxx. Test functionality that doesn't require a +// running database. + +namespace +{ +template +inline void +strconv(std::string const &type, T const &Obj, std::string const &expected) +{ + std::string const Objstr{to_string(Obj)}; + + PQXX_CHECK_EQUAL(Objstr, expected, "String mismatch for " + type + "."); + T NewObj; + from_string(Objstr, NewObj); + PQXX_CHECK_EQUAL( + to_string(NewObj), expected, "String mismatch for recycled " + type + "."); +} + +// There's no from_string()... +inline void +strconv(std::string const &type, char const Obj[], std::string const &expected) +{ + std::string const Objstr(to_string(Obj)); + PQXX_CHECK_EQUAL(Objstr, expected, "String mismatch for " + type + "."); +} + +constexpr double not_a_number{std::numeric_limits::quiet_NaN()}; + +struct intderef +{ + template int operator()(ITER i) const noexcept + { + return int(*i); + } +}; + + +void test_000() +{ + PQXX_CHECK_EQUAL( + oid_none, 0u, + "InvalidIod is not zero as it used to be. 
This may conceivably " + "cause problems in libpqxx."); + + PQXX_CHECK( + cursor_base::prior() < 0 and cursor_base::backward_all() < 0, + "cursor_base::difference_type appears to be unsigned."); + + constexpr char weird[]{"foo\t\n\0bar"}; + std::string const weirdstr(weird, std::size(weird) - 1); + + // Test string conversions + strconv("char const[]", "", ""); + strconv("char const[]", "foo", "foo"); + strconv("int", 0, "0"); + strconv("int", 100, "100"); + strconv("int", -1, "-1"); + +#if defined(_MSC_VER) + long const long_min{LONG_MIN}, long_max{LONG_MAX}; +#else + long const long_min{std::numeric_limits::min()}, + long_max{std::numeric_limits::max()}; +#endif + + std::stringstream lminstr, lmaxstr, llminstr, llmaxstr, ullmaxstr; + lminstr.imbue(std::locale("C")); + lmaxstr.imbue(std::locale("C")); + llminstr.imbue(std::locale("C")); + llmaxstr.imbue(std::locale("C")); + ullmaxstr.imbue(std::locale("C")); + + lminstr << long_min; + lmaxstr << long_max; + + auto const ullong_max{std::numeric_limits::max()}; + auto const llong_max{std::numeric_limits::max()}, + llong_min{std::numeric_limits::min()}; + + llminstr << llong_min; + llmaxstr << llong_max; + ullmaxstr << ullong_max; + + strconv("long", 0, "0"); + strconv("long", long_min, lminstr.str()); + strconv("long", long_max, lmaxstr.str()); + strconv("double", not_a_number, "nan"); + strconv("string", std::string{}, ""); + strconv("string", weirdstr, weirdstr); + strconv("long long", 0LL, "0"); + strconv("long long", llong_min, llminstr.str()); + strconv("long long", llong_max, llmaxstr.str()); + strconv("unsigned long long", 0ULL, "0"); + strconv("unsigned long long", ullong_max, ullmaxstr.str()); + + std::stringstream ss; + strconv("empty stringstream", ss, ""); + ss << -3.1415; + strconv("stringstream", ss, ss.str()); +} + + +PQXX_REGISTER_TEST(test_000); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test01.cxx b/ext/libpqxx-7.7.3/test/test01.cxx new file mode 100644 index 000000000..84b7f3b78 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test01.cxx @@ -0,0 +1,33 @@ +#include + +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +namespace +{ +// Simple test program for libpqxx. Open connection to database, start +// a transaction, and perform a query inside it. +void test_001() +{ + connection conn; + + // Begin a transaction acting on our current connection. Give it a human- + // readable name so the library can include it in error messages. + work tx{conn, "test1"}; + + // Perform a query on the database, storing result rows in R. + result r(tx.exec("SELECT * FROM pg_tables")); + + // We're expecting to find some tables... + PQXX_CHECK(not std::empty(r), "No tables found."); + + tx.commit(); +} + + +PQXX_REGISTER_TEST(test_001); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test02.cxx b/ext/libpqxx-7.7.3/test/test02.cxx new file mode 100644 index 000000000..f98e18d08 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test02.cxx @@ -0,0 +1,76 @@ +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Example/test program for libpqxx. Perform a query and enumerate its output +// using array indexing. + +namespace +{ +void bad_connect() +{ + connection conn{"totally#invalid@connect$string!?"}; +} + +void test_002() +{ + // Before we really connect, test the expected behaviour of the default + // connection type, where a failure to connect results in an immediate + // exception rather than a silent retry. 
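+  // As a minimal illustration of the behaviour asserted below (this sketch
+  // assumes only that the failure surfaces as a std::exception, which is all
+  // the check relies on), the same thing could be observed directly:
+  //
+  //   try
+  //   {
+  //     pqxx::connection bad{"totally#invalid@connect$string!?"};
+  //   }
+  //   catch (std::exception const &e)
+  //   {
+  //     std::cerr << "Connection failed as expected: " << e.what() << '\n';
+  //   }
+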
+ PQXX_CHECK_THROWS_EXCEPTION( + bad_connect(), "Invalid connection string did not cause exception."); + + // Set up connection to database + std::string ConnectString; + connection C{ConnectString}; + + // Start transaction within context of connection. + work T{C, "test2"}; + + // Perform query within transaction. + result R(T.exec("SELECT * FROM pg_tables")); + + // Let's keep the database waiting as briefly as possible: commit now, + // before we start processing results. We could do this later, or since + // we're not making any changes in the database that need to be committed, + // we could in this case even omit it altogether. + T.commit(); + + // Ah, this version of postgres will tell you which table a column in a + // result came from. Let's just test that functionality... + oid const rtable{R.column_table(0)}; + PQXX_CHECK_EQUAL( + rtable, R.column_table(pqxx::row::size_type(0)), + "Inconsistent answers from column_table()"); + + std::string const rcol{R.column_name(0)}; + oid const crtable{R.column_table(rcol)}; + PQXX_CHECK_EQUAL( + crtable, rtable, "Field looked up by name gives different origin."); + + // Now we've got all that settled, let's process our results. + for (auto const &f : R) + { + oid const ftable{f[0].table()}; + PQXX_CHECK_EQUAL(ftable, rtable, "field::table() is broken."); + + oid const ttable{f.column_table(0)}; + + PQXX_CHECK_EQUAL( + ttable, f.column_table(pqxx::row::size_type(0)), + "Inconsistent pqxx::row::column_table()."); + + PQXX_CHECK_EQUAL(ttable, rtable, "Inconsistent result::column_table()."); + + oid const cttable{f.column_table(rcol)}; + + PQXX_CHECK_EQUAL(cttable, rtable, "pqxx::row::column_table() is broken."); + } +} + + +PQXX_REGISTER_TEST(test_002); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test04.cxx b/ext/libpqxx-7.7.3/test/test04.cxx new file mode 100644 index 000000000..623446398 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test04.cxx @@ -0,0 +1,77 @@ +#include +#include +#include +#include + +#include + +#include + +#include + +#include +#include +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + +// Example program for libpqxx. Send notification to self. + +namespace +{ +int Backend_PID{0}; + + +// Sample implementation of notification receiver. +class TestListener final : public notification_receiver +{ + bool m_done; + +public: + explicit TestListener(connection &conn) : + notification_receiver(conn, "listen"), m_done(false) + {} + + virtual void operator()(std::string const &, int be_pid) override + { + m_done = true; + PQXX_CHECK_EQUAL( + be_pid, Backend_PID, "Notification came from wrong backend process."); + } + + bool done() const { return m_done; } +}; + + +void test_004() +{ + connection conn; + + TestListener L{conn}; + // Trigger our notification receiver. + perform([&conn, &L] { + work tx(conn); + tx.exec0("NOTIFY " + conn.quote_name(L.channel())); + Backend_PID = conn.backendpid(); + tx.commit(); + }); + + int notifs{0}; + for (int i{0}; (i < 20) and not L.done(); ++i) + { + PQXX_CHECK_EQUAL(notifs, 0, "Got unexpected notifications."); + // Sleep for one second. I'm not proud of this, but how does one inject + // a change to the built-in clock in a static language? 
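+    // pqxx::internal::wait_for() takes microseconds, so the call below
+    // sleeps for roughly one second.  A sketch of the same delay using only
+    // the standard library (if one preferred not to touch an internal
+    // helper) would be:
+    //
+    //   std::this_thread::sleep_for(std::chrono::seconds{1});  // <thread>, <chrono>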
+ pqxx::internal::wait_for(1'000'000u); + notifs = conn.get_notifs(); + } + + PQXX_CHECK_NOT_EQUAL(L.done(), false, "No notification received."); + PQXX_CHECK_EQUAL(notifs, 1, "Got too many notifications."); +} + + +PQXX_REGISTER_TEST(test_004); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test07.cxx b/ext/libpqxx-7.7.3/test/test07.cxx new file mode 100644 index 000000000..875ee8d50 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test07.cxx @@ -0,0 +1,133 @@ +#include +#include + +#include +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Example program for libpqxx. Modify the database, retaining transactional +// integrity using the transactor framework. +// +// This assumes the existence of a database table "pqxxevents" containing a +// 2-digit "year" field, which is extended to a 4-digit format by assuming all +// year numbers of 70 or higher are in the 20th century, and all others in the +// 21st, and that no years before 1970 are possible. + +namespace +{ +// Convert year to 4-digit format. +int To4Digits(int Y) +{ + int Result{Y}; + + PQXX_CHECK(Y >= 0, "Negative year: " + to_string(Y)); + if (Y < 70) + Result += 2000; + else if (Y < 100) + Result += 1900; + else if (Y < 1970) + PQXX_CHECK_NOTREACHED("Unexpected year: " + to_string(Y)); + + return Result; +} + + +void test_007() +{ + connection conn; + conn.set_client_encoding("SQL_ASCII"); + + { + work tx{conn}; + test::create_pqxxevents(tx); + tx.commit(); + } + + // Perform (an instantiation of) the UpdateYears transactor we've defined + // in the code above. This is where the work gets done. + std::map conversions; + perform([&conversions, &conn] { + work tx{conn}; + // First select all different years occurring in the table. + result R(tx.exec("SELECT year FROM pqxxevents")); + + // See if we get reasonable type identifier for this column. + oid const rctype{R.column_type(0)}; + PQXX_CHECK_EQUAL( + R.column_type(pqxx::row::size_type(0)), rctype, + "Inconsistent result::column_type()."); + + std::string const rct{to_string(rctype)}; + PQXX_CHECK(rctype > 0, "Got strange type ID for column: " + rct); + + std::string const rcol{R.column_name(0)}; + PQXX_CHECK(not std::empty(rcol), "Didn't get a name for column."); + + oid const rcctype{R.column_type(rcol)}; + PQXX_CHECK_EQUAL( + rcctype, rctype, "Column type is not what it is by name."); + + oid const rawrcctype{R.column_type(rcol)}; + PQXX_CHECK_EQUAL( + rawrcctype, rctype, "Column type by C-style name is different."); + + // Note all different years currently occurring in the table, writing + // them and their correct mappings to conversions. 
+ for (auto const &r : R) + { + int Y{0}; + + // Read year, and if it is non-null, note its converted value + if (r[0] >> Y) + conversions[Y] = To4Digits(Y); + + // See if type identifiers are consistent + oid const tctype{r.column_type(0)}; + + PQXX_CHECK_EQUAL( + tctype, r.column_type(pqxx::row::size_type(0)), + "Inconsistent pqxx::row::column_type()"); + + PQXX_CHECK_EQUAL( + tctype, rctype, + "pqxx::row::column_type() is inconsistent with " + "result::column_type()."); + + oid const ctctype{r.column_type(rcol)}; + + PQXX_CHECK_EQUAL( + ctctype, rctype, "Column type lookup by column name is broken."); + + oid const rawctctype{r.column_type(rcol)}; + + PQXX_CHECK_EQUAL( + rawctctype, rctype, "Column type lookup by C-style name is broken."); + + oid const fctype{r[0].type()}; + PQXX_CHECK_EQUAL(fctype, rctype, "Field type lookup is broken."); + } + + // For each occurring year, write converted date back to whereever it may + // occur in the table. Since we're in a transaction, any changes made by + // others at the same time will not affect us. + for (auto const &c : conversions) + { + auto const query{ + "UPDATE pqxxevents " + "SET year=" + + to_string(c.second) + + " " + "WHERE year=" + + to_string(c.first)}; + R = tx.exec0(query); + } + }); +} + + +PQXX_REGISTER_TEST(test_007); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test10.cxx b/ext/libpqxx-7.7.3/test/test10.cxx new file mode 100644 index 000000000..4cb4aaba1 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test10.cxx @@ -0,0 +1,106 @@ +#include +#include +#include + +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Test program for libpqxx. Open connection to database, start a transaction, +// abort it, and verify that it "never happened." + +namespace +{ +// Let's take a boring year that is not going to be in the "pqxxevents" table +constexpr int BoringYear{1977}; + +std::string const Table("pqxxevents"); + + +// Count events, and boring events, in table +std::pair CountEvents(transaction_base &T) +{ + std::string const events_query{"SELECT count(*) FROM " + Table}; + std::string const boring_query{ + events_query + " WHERE year=" + to_string(BoringYear)}; + return std::make_pair( + T.query_value(events_query), T.query_value(boring_query)); +} + + +// Try adding a record, then aborting it, and check whether the abort was +// performed correctly. +void Test(connection &C, bool ExplicitAbort) +{ + std::pair EventCounts; + + // First run our doomed transaction. This will refuse to run if an event + // exists for our Boring Year. + { + // Begin a transaction acting on our current connection; we'll abort it + // later though. + work Doomed{C, "Doomed"}; + + // Verify that our Boring Year was not yet in the events table + EventCounts = CountEvents(Doomed); + + PQXX_CHECK_EQUAL( + EventCounts.second, 0, "Can't run, boring year is already in table."); + + // Now let's try to introduce a row for our Boring Year + Doomed.exec0( + "INSERT INTO " + Table + + "(year, event) " + "VALUES (" + + to_string(BoringYear) + ", 'yawn')"); + + auto const Recount{CountEvents(Doomed)}; + PQXX_CHECK_EQUAL( + Recount.second, 1, "Wrong # events for " + to_string(BoringYear)); + + PQXX_CHECK_EQUAL( + Recount.first, EventCounts.first + 1, "Number of events changed."); + + // Okay, we've added an entry but we don't really want to. Abort it + // explicitly if requested, or simply let the Transaction object "expire." 
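+    // A work object that reaches the end of its scope without commit() rolls
+    // back automatically, so the implicit case needs no code at all.  A
+    // minimal sketch of that RAII behaviour (hypothetical statement):
+    //
+    //   {
+    //     pqxx::work tx{conn};
+    //     tx.exec0("INSERT INTO pqxxevents(year, event) VALUES (1977, 'yawn')");
+    //   }  // no commit(): the INSERT is rolled back here
+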
+ if (ExplicitAbort) + Doomed.abort(); + + // If now explicit abort requested, Doomed Transaction still ends here + } + + // Now check that we're back in the original state. Note that this may go + // wrong if somebody managed to change the table between our two + // transactions. + work Checkup(C, "Checkup"); + + auto const NewEvents{CountEvents(Checkup)}; + PQXX_CHECK_EQUAL( + NewEvents.first, EventCounts.first, + "Number of events changed. This may be due to a bug in libpqxx, " + "or the test table was modified by some other process."); + + PQXX_CHECK_EQUAL( + NewEvents.second, 0, + "Found unexpected events. This may be due to a bug in libpqxx, " + "or the test table was modified by some other process."); +} + + +void test_abort() +{ + connection conn; + nontransaction t{conn}; + test::create_pqxxevents(t); + connection &c(t.conn()); + t.commit(); + Test(c, true); + Test(c, false); +} + + +PQXX_REGISTER_TEST(test_abort); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test11.cxx b/ext/libpqxx-7.7.3/test/test11.cxx new file mode 100644 index 000000000..4b42a66d5 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test11.cxx @@ -0,0 +1,76 @@ +#include +#include +#include + +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Test program for libpqxx. Query a table and report its metadata. +namespace +{ +void test_011() +{ + connection conn; + work tx{conn}; + std::string const Table{"pg_tables"}; + + result R(tx.exec("SELECT * FROM " + Table)); + + // Print column names + for (pqxx::row::size_type c{0}; c < R.columns(); ++c) + { + std::string N{R.column_name(c)}; + PQXX_CHECK_EQUAL(R.column_number(N), c, "Inconsistent column numbers."); + } + + // If there are rows in R, compare their metadata to R's. + if (not std::empty(R)) + { + PQXX_CHECK_EQUAL(R[0].rownumber(), 0, "Row 0 has wrong number."); + + if (std::size(R) >= 2) + PQXX_CHECK_EQUAL(R[1].rownumber(), 1, "Row 1 has wrong number."); + + // Test result::iterator::swap() + pqxx::result::const_iterator const T1(R[0]), T2(R[1]); + PQXX_CHECK_NOT_EQUAL(T1, T2, "Values are identical--can't test swap()."); + pqxx::result::const_iterator T1s(T1), T2s(T2); + PQXX_CHECK_EQUAL(T1s, T1, "Result iterator copy-construction is wrong."); + PQXX_CHECK_EQUAL( + T2s, T2, "Result iterator copy-construction is inconsistently wrong."); + T1s.swap(T2s); + PQXX_CHECK_NOT_EQUAL(T1s, T1, "Result iterator swap doesn't work."); + PQXX_CHECK_NOT_EQUAL( + T2s, T2, "Result iterator swap inconsistently wrong."); + PQXX_CHECK_EQUAL(T2s, T1, "Result iterator swap is asymmetric."); + PQXX_CHECK_EQUAL( + T1s, T2, "Result iterator swap is inconsistently asymmetric."); + + for (pqxx::row::size_type c{0}; c < std::size(R[0]); ++c) + { + std::string N{R.column_name(c)}; + + PQXX_CHECK_EQUAL( + std::string{R[0].at(c).c_str()}, R[0].at(N).c_str(), + "Field by name != field by number."); + + PQXX_CHECK_EQUAL( + std::string{R[0][c].c_str()}, R[0][N].c_str(), + "at() is inconsistent with operator[]."); + + PQXX_CHECK_EQUAL(R[0][c].name(), N, "Field names are inconsistent."); + + PQXX_CHECK_EQUAL( + std::size(R[0][c]), strlen(R[0][c].c_str()), + "Field size is not what we expected."); + } + } +} + + +PQXX_REGISTER_TEST(test_011); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test13.cxx b/ext/libpqxx-7.7.3/test/test13.cxx new file mode 100644 index 000000000..b3577a5d3 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test13.cxx @@ -0,0 +1,86 @@ +#include +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Test program for libpqxx. 
Verify abort behaviour of transactor. +// +// The program will attempt to add an entry to a table called "pqxxevents", +// with a key column called "year"--and then abort the change. +// +// Note for the superstitious: the numbering for this test program is pure +// coincidence. +namespace +{ +// Let's take a boring year that is not going to be in the "pqxxevents" table +constexpr unsigned int BoringYear = 1977; + + +// Count events and specifically events occurring in Boring Year, leaving the +// former count in the result pair's first member, and the latter in second. +std::pair count_events(connection &conn, std::string const &table) +{ + work tx{conn}; + std::string const count_query{"SELECT count(*) FROM " + table}; + return std::make_pair( + tx.query_value(count_query), + tx.query_value(count_query + " WHERE year=" + to_string(BoringYear))); +} + + +struct deliberate_error : std::exception +{}; + + +void failed_insert(connection &C, std::string const &table) +{ + work tx(C); + result R = tx.exec0( + "INSERT INTO " + table + " VALUES (" + to_string(BoringYear) + + ", " + "'yawn')"); + + PQXX_CHECK_EQUAL(R.affected_rows(), 1, "Bad affected_rows()."); + throw deliberate_error(); +} + + +void test_013() +{ + connection conn; + { + work tx{conn}; + test::create_pqxxevents(tx); + tx.commit(); + } + + std::string const Table{"pqxxevents"}; + + auto const Before{ + perform([&conn, &Table] { return count_events(conn, Table); })}; + PQXX_CHECK_EQUAL( + Before.second, 0, + "Already have event for " + to_string(BoringYear) + "--can't test."); + + quiet_errorhandler d(conn); + PQXX_CHECK_THROWS( + perform([&conn, &Table] { failed_insert(conn, Table); }), deliberate_error, + "Failing transactor failed to throw correct exception."); + + auto const After{ + perform([&conn, &Table] { return count_events(conn, Table); })}; + + PQXX_CHECK_EQUAL( + After.first, Before.first, "abort() didn't reset event count."); + + PQXX_CHECK_EQUAL( + After.second, Before.second, + "abort() didn't reset event count for " + to_string(BoringYear)); +} + + +PQXX_REGISTER_TEST(test_013); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test14.cxx b/ext/libpqxx-7.7.3/test/test14.cxx new file mode 100644 index 000000000..c82f2323c --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test14.cxx @@ -0,0 +1,36 @@ +#include + +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Test nontransaction. + +namespace +{ +void test_014() +{ + connection conn; + + // Begin a "non-transaction" acting on our current connection. This is + // really all the transactional integrity we need since we're only + // performing one query which does not modify the database. + nontransaction tx{conn, "test14"}; + + // The transaction class family also has process_notice() functions. + // These simply pass the notice through to their connection, but this may + // be more convenient in some cases. All process_notice() functions accept + // C++ strings as well as C strings. + tx.process_notice(std::string{"Started nontransaction\n"}); + + // "Commit" the non-transaction. This doesn't really do anything since + // nontransaction doesn't start a backend transaction. 
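+  // Each statement issued through a nontransaction is committed on its own,
+  // as soon as it executes.  A minimal sketch of that autocommit behaviour
+  // (hypothetical statement; any table would do):
+  //
+  //   pqxx::nontransaction nt{conn};
+  //   nt.exec0("INSERT INTO pqxxevents(year, event) VALUES (1974, 'note')");
+  //   // already committed here; nt.commit() has nothing left to do
+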
+ tx.commit(); +} + + +PQXX_REGISTER_TEST(test_014); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test16.cxx b/ext/libpqxx-7.7.3/test/test16.cxx new file mode 100644 index 000000000..211bbba6a --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test16.cxx @@ -0,0 +1,46 @@ +#include + +#include +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Test robusttransaction. +namespace +{ +void test_016() +{ + connection conn; + robusttransaction<> tx{conn}; + result R{tx.exec("SELECT * FROM pg_tables")}; + + result::const_iterator c; + for (c = std::begin(R); c != std::end(R); ++c) + ; + + // See if back() and row comparison work properly + PQXX_CHECK( + std::size(R) >= 2, "Not enough rows in pg_tables to test, sorry!"); + + --c; + + PQXX_CHECK_EQUAL( + c->size(), std::size(R.back()), + "Size mismatch between row iterator and back()."); + + std::string const nullstr; + for (pqxx::row::size_type i{0}; i < c->size(); ++i) + PQXX_CHECK_EQUAL( + c[i].as(nullstr), R.back()[i].as(nullstr), "Value mismatch in back()."); + PQXX_CHECK(*c == R.back(), "Row equality is broken."); + PQXX_CHECK(not(*c != R.back()), "Row inequality is broken."); + + tx.commit(); +} + + +PQXX_REGISTER_TEST(test_016); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test17.cxx b/ext/libpqxx-7.7.3/test/test17.cxx new file mode 100644 index 000000000..7c99a35cc --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test17.cxx @@ -0,0 +1,29 @@ +#include + +#include +#include +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Simple test program for libpqxx. Open connection to database, start +// a dummy transaction to gain nontransactional access, and perform a query. +namespace +{ +void test_017() +{ + connection conn; + perform([&conn] { + nontransaction tx{conn}; + auto const r{tx.exec("SELECT * FROM generate_series(1, 4)")}; + PQXX_CHECK_EQUAL(std::size(r), 4, "Weird query result."); + tx.commit(); + }); +} + + +PQXX_REGISTER_TEST(test_017); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test18.cxx b/ext/libpqxx-7.7.3/test/test18.cxx new file mode 100644 index 000000000..9718d2daa --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test18.cxx @@ -0,0 +1,80 @@ +#include +#include +#include +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Test program for libpqxx. Verify abort behaviour of RobustTransaction. +// +// The program will attempt to add an entry to a table called "pqxxevents", +// with a key column called "year"--and then abort the change. +namespace +{ +// Let's take a boring year that is not going to be in the "pqxxevents" table +constexpr long BoringYear{1977}; + + +// Count events and specifically events occurring in Boring Year, leaving the +// former count in the result pair's first member, and the latter in second. 
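+// The helper below relies on transaction_base::query_value<T>(), which runs a
+// query that must produce exactly one row with one column and converts that
+// single value.  A minimal sketch:
+//
+//   auto const n{tx.query_value<int>("SELECT count(*) FROM pqxxevents")};
+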
+std::pair count_events(connection &conn, std::string const &table) +{ + nontransaction tx{conn}; + std::string const count_query{"SELECT count(*) FROM " + table}; + return std::make_pair( + tx.query_value(count_query), + tx.query_value(count_query + " WHERE year=" + to_string(BoringYear))); +} + + +struct deliberate_error : std::exception +{}; + + +void test_018() +{ + connection conn; + { + work tx{conn}; + test::create_pqxxevents(tx); + tx.commit(); + } + + std::string const Table{"pqxxevents"}; + + auto const Before{ + perform([&conn, &Table] { return count_events(conn, Table); })}; + PQXX_CHECK_EQUAL( + Before.second, 0, + "Already have event for " + to_string(BoringYear) + ", cannot run."); + + { + quiet_errorhandler d{conn}; + PQXX_CHECK_THROWS( + perform([&conn, Table] { + robusttransaction tx{conn}; + tx.exec0( + "INSERT INTO " + Table + " VALUES (" + to_string(BoringYear) + + ", '" + tx.esc("yawn") + "')"); + + throw deliberate_error(); + }), + deliberate_error, + "Not getting expected exception from failing transactor."); + } + + auto const After{ + perform([&conn, &Table] { return count_events(conn, Table); })}; + + PQXX_CHECK_EQUAL(After.first, Before.first, "Event count changed."); + PQXX_CHECK_EQUAL( + After.second, Before.second, + "Event count for " + to_string(BoringYear) + " changed."); +} + + +PQXX_REGISTER_TEST(test_018); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test20.cxx b/ext/libpqxx-7.7.3/test/test20.cxx new file mode 100644 index 000000000..1ddd4b09a --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test20.cxx @@ -0,0 +1,95 @@ +#include + +#include +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Test: nontransaction changes are not rolled back on abort. +namespace +{ +constexpr unsigned long BoringYear{1977}; + + +void test_020() +{ + connection conn; + nontransaction t1{conn}; + test::create_pqxxevents(t1); + + std::string const Table{"pqxxevents"}; + + // Verify our start condition before beginning: there must not be a 1977 + // record already. + result R(t1.exec(("SELECT * FROM " + Table + + " " + "WHERE year=" + + to_string(BoringYear)) + .c_str())); + PQXX_CHECK_EQUAL( + std::size(R), 0, + "Already have a row for " + to_string(BoringYear) + ", cannot test."); + + // (Not needed, but verify that clear() works on empty containers) + R.clear(); + PQXX_CHECK(std::empty(R), "result::clear() is broken."); + + // OK. Having laid that worry to rest, add a record for 1977. + t1.exec0( + "INSERT INTO " + Table + + " VALUES" + "(" + + to_string(BoringYear) + + "," + "'Yawn'" + ")"); + + // Abort T1. Since T1 is a nontransaction, which provides only the + // transaction class interface without providing any form of transactional + // integrity, this is not going to undo our work. 
+ t1.abort(); + + // Verify that our record was added, despite the Abort() + nontransaction t2{conn, "t2"}; + R = t2.exec(("SELECT * FROM " + Table + + " " + "WHERE year=" + + to_string(BoringYear)) + .c_str()); + + PQXX_CHECK_EQUAL( + std::size(R), 1, + "Found wrong number of rows for " + to_string(BoringYear) + "."); + + PQXX_CHECK(R.capacity() >= std::size(R), "Result's capacity is too small."); + + R.clear(); + PQXX_CHECK(std::empty(R), "result::clear() doesn't work."); + + // Now remove our record again + t2.exec0( + "DELETE FROM " + Table + + " " + "WHERE year=" + + to_string(BoringYear)); + + t2.commit(); + + // And again, verify results + nontransaction t3{conn, "t3"}; + + R = t3.exec(("SELECT * FROM " + Table + + " " + "WHERE year=" + + to_string(BoringYear)) + .c_str()); + + PQXX_CHECK_EQUAL(std::size(R), 0, "Record still found after removal."); +} + + +PQXX_REGISTER_TEST(test_020); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test21.cxx b/ext/libpqxx-7.7.3/test/test21.cxx new file mode 100644 index 000000000..1090050d9 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test21.cxx @@ -0,0 +1,70 @@ +#include + +#include +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Simple test program for libpqxx. Open a connection to database, start a +// transaction, and perform a query inside it. +namespace +{ +void test_021() +{ + connection conn; + + std::string const HostName{ + ((conn.hostname() == nullptr) ? "" : conn.hostname())}; + conn.process_notice( + std::string{} + "database=" + conn.dbname() + + ", " + "username=" + + conn.username() + + ", " + "hostname=" + + HostName + + ", " + "port=" + + to_string(conn.port()) + + ", " + "backendpid=" + + to_string(conn.backendpid()) + "\n"); + + work tx{conn, "test_021"}; + + // By now our connection should really have been created + conn.process_notice("Printing details on actual connection\n"); + conn.process_notice( + std::string{} + "database=" + conn.dbname() + + ", " + "username=" + + conn.username() + + ", " + "hostname=" + + HostName + + ", " + "port=" + + to_string(conn.port()) + + ", " + "backendpid=" + + to_string(conn.backendpid()) + "\n"); + + std::string P; + from_string(conn.port(), P); + PQXX_CHECK_EQUAL( + P, to_string(conn.port()), "Port string conversion is broken."); + PQXX_CHECK_EQUAL(to_string(P), P, "Port string conversion is broken."); + + result R(tx.exec("SELECT * FROM pg_tables")); + + tx.process_notice(pqxx::internal::concat( + to_string(std::size(R)), " result row in transaction ", tx.name(), "\n")); + tx.commit(); +} + + +PQXX_REGISTER_TEST(test_021); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test26.cxx b/ext/libpqxx-7.7.3/test/test26.cxx new file mode 100644 index 000000000..69b04d839 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test26.cxx @@ -0,0 +1,86 @@ +#include +#include + +#include +#include +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Example program for libpqxx. Modify the database, retaining transactional +// integrity using the transactor framework. +namespace +{ +// Convert year to 4-digit format. +int To4Digits(int Y) +{ + int Result{Y}; + + PQXX_CHECK(Y >= 0, "Negative year: " + to_string(Y)); + + if (Y < 70) + Result += 2000; + else if (Y < 100) + Result += 1900; + else + PQXX_CHECK(Y >= 1970, "Unexpected year: " + to_string(Y)); + return Result; +} + + +// Transaction definition for year-field update. Returns conversions done. 
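+// The function below reads the "year" column with transaction_base::stream(),
+// which converts each row into a tuple of the requested types; wrapping the
+// column type in std::optional makes SQL NULL come through as an empty
+// optional.  A minimal sketch of that pattern:
+//
+//   for (auto const &[year] :
+//        tx.stream<std::optional<int>>("SELECT year FROM pqxxevents"))
+//     if (year.has_value())
+//       std::cout << "year " << year.value() << '\n';
+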
+std::map update_years(connection &C) +{ + std::map conversions; + work tx{C}; + + // Note all different years currently occurring in the table, writing them + // and their correct mappings to m_conversions + for (auto const &[y] : + tx.stream>("SELECT year FROM pqxxevents")) + { + // Read year, and if it is non-null, note its converted value + if (bool(y)) + conversions[y.value()] = To4Digits(y.value()); + } + + // For each occurring year, write converted date back to whereever it may + // occur in the table. Since we're in a transaction, any changes made by + // others at the same time will not affect us. + for (auto const &c : conversions) + tx.exec0( + "UPDATE pqxxevents " + "SET year=" + + to_string(c.second) + + " " + "WHERE year=" + + to_string(c.first)); + + tx.commit(); + + return conversions; +} + + +void test_026() +{ + connection conn; + { + nontransaction tx{conn}; + test::create_pqxxevents(tx); + tx.commit(); + } + + // Perform (an instantiation of) the UpdateYears transactor we've defined + // in the code above. This is where the work gets done. + auto const conversions{perform([&conn] { return update_years(conn); })}; + + PQXX_CHECK(not std::empty(conversions), "No conversions done!"); +} + + +PQXX_REGISTER_TEST(test_026); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test29.cxx b/ext/libpqxx-7.7.3/test/test29.cxx new file mode 100644 index 000000000..4547523f1 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test29.cxx @@ -0,0 +1,108 @@ +#include +#include +#include + +#include +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Test program for libpqxx. Open connection to database, start a transaction, +// abort it, and verify that it "never happened." +// +// The program will attempt to add an entry to a table called "pqxxevents", +// with a key column called "year"--and then abort the change. +namespace +{ +// Let's take a boring year that is not going to be in the "pqxxevents" table +constexpr int BoringYear{1977}; + +std::string const Table{"pqxxevents"}; + + +// Count events, and boring events, in table +std::pair CountEvents(transaction_base &tx) +{ + std::string const events_query{"SELECT count(*) FROM " + Table}; + std::string const boring_query{ + events_query + " WHERE year=" + to_string(BoringYear)}; + + return std::make_pair( + tx.query_value(events_query), tx.query_value(boring_query)); +} + + +// Try adding a record, then aborting it, and check whether the abort was +// performed correctly. +void Test(connection &conn, bool ExplicitAbort) +{ + std::vector BoringRow{to_string(BoringYear), "yawn"}; + + std::pair EventCounts; + + // First run our doomed transaction. This will refuse to run if an event + // exists for our Boring Year. + { + // Begin a transaction acting on our current connection; we'll abort it + // later though. + work Doomed(conn, "Doomed"); + + // Verify that our Boring Year was not yet in the events table + EventCounts = CountEvents(Doomed); + + PQXX_CHECK_EQUAL( + EventCounts.second, 0, + "Can't run; " + to_string(BoringYear) + " is already in the table."); + + // Now let's try to introduce a row for our Boring Year + Doomed.exec0( + "INSERT INTO " + Table + + "(year, event) " + "VALUES (" + + to_string(BoringYear) + ", 'yawn')"); + + auto Recount{CountEvents(Doomed)}; + PQXX_CHECK_EQUAL(Recount.second, 1, "Unexpected number of events."); + PQXX_CHECK_EQUAL( + Recount.first, EventCounts.first + 1, "Number of events changed."); + + // Okay, we've added an entry but we don't really want to. 
Abort it + // explicitly if requested, or simply let the Transaction object "expire." + if (ExplicitAbort) + Doomed.abort(); + + // If now explicit abort requested, Doomed Transaction still ends here + } + + // Now check that we're back in the original state. Note that this may go + // wrong if somebody managed to change the table between our two + // transactions. + work Checkup(conn, "Checkup"); + + auto NewEvents{CountEvents(Checkup)}; + PQXX_CHECK_EQUAL( + NewEvents.first, EventCounts.first, "Wrong number of events."); + + PQXX_CHECK_EQUAL(NewEvents.second, 0, "Found unexpected events."); +} + + +void test_029() +{ + connection conn; + { + nontransaction tx{conn}; + test::create_pqxxevents(tx); + } + + // Test abort semantics, both with explicit and implicit abort + Test(conn, true); + Test(conn, false); +} + + +PQXX_REGISTER_TEST(test_029); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test30.cxx b/ext/libpqxx-7.7.3/test/test30.cxx new file mode 100644 index 000000000..fb6a05da0 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test30.cxx @@ -0,0 +1,73 @@ +#include +#include +#include + +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Test program for libpqxx. Query a table and report its metadata. +namespace +{ +void test_030() +{ + std::string const Table{"pg_tables"}; + + connection conn; + work tx{conn, "test30"}; + + result R(tx.exec(("SELECT * FROM " + Table).c_str())); + PQXX_CHECK(not std::empty(R), "Table " + Table + " is empty, cannot test."); + + // Print column names + for (pqxx::row::size_type c{0}; c < R.columns(); ++c) + { + std::string N{R.column_name(c)}; + + PQXX_CHECK_EQUAL( + R[0].column_number(N), R.column_number(N), + "row::column_number() is inconsistent with result::column_number()."); + + PQXX_CHECK_EQUAL(R[0].column_number(N), c, "Inconsistent column numbers."); + } + + // If there are rows in R, compare their metadata to R's. + if (std::empty(R)) + { + std::cout << "(Table is empty.)\n"; + return; + } + + PQXX_CHECK_EQUAL(R[0].rownumber(), 0, "Row 0 reports wrong number."); + + if (std::size(R) < 2) + std::cout << "(Only one row in table.)\n"; + else + PQXX_CHECK_EQUAL(R[1].rownumber(), 1, "Row 1 reports wrong number."); + + for (pqxx::row::size_type c{0}; c < std::size(R[0]); ++c) + { + std::string N{R.column_name(c)}; + + PQXX_CHECK_EQUAL( + std::string{R[0].at(c).c_str()}, R[0].at(N).c_str(), + "Different field values by name and by number."); + + PQXX_CHECK_EQUAL( + std::string{R[0][c].c_str()}, R[0][N].c_str(), + "at() is inconsistent with operator[]."); + + PQXX_CHECK_EQUAL(R[0][c].name(), N, "Inconsistent field names."); + + PQXX_CHECK_EQUAL( + std::size(R[0][c]), std::strlen(R[0][c].c_str()), + "Inconsistent field lengths."); + } +} + + +PQXX_REGISTER_TEST(test_030); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test32.cxx b/ext/libpqxx-7.7.3/test/test32.cxx new file mode 100644 index 000000000..90f0db68c --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test32.cxx @@ -0,0 +1,78 @@ +#include +#include +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Test program for libpqxx. Verify abort behaviour of transactor. +// +// The program will attempt to add an entry to a table called "pqxxevents", +// with a key column called "year"--and then abort the change. +// +// Note for the superstitious: the numbering for this test program is pure +// coincidence. 
+ +namespace +{ +// Let's take a boring year that is not going to be in the "pqxxevents" table +constexpr int BoringYear{1977}; + +std::pair count_events(connection &conn, std::string const &table) +{ + std::string const count_query{"SELECT count(*) FROM " + table}; + work tx{conn}; + return std::make_pair( + tx.query_value(count_query), + tx.query_value(count_query + " WHERE year=" + to_string(BoringYear))); +} + + +struct deliberate_error : std::exception +{}; + + +void test_032() +{ + connection conn; + { + nontransaction tx{conn}; + test::create_pqxxevents(tx); + } + + std::string const Table{"pqxxevents"}; + + std::pair const Before{ + perform([&conn, &Table] { return count_events(conn, Table); })}; + PQXX_CHECK_EQUAL( + Before.second, 0, + "Already have event for " + to_string(BoringYear) + ", cannot test."); + + { + quiet_errorhandler d(conn); + PQXX_CHECK_THROWS( + perform([&conn, &Table] { + work{conn}.exec0( + "INSERT INTO " + Table + " VALUES (" + to_string(BoringYear) + + ", " + "'yawn')"); + throw deliberate_error(); + }), + deliberate_error, + "Did not get expected exception from failing transactor."); + } + + std::pair const After{ + perform([&conn, &Table] { return count_events(conn, Table); })}; + + PQXX_CHECK_EQUAL(After.first, Before.first, "Event count changed."); + PQXX_CHECK_EQUAL( + After.second, Before.second, + "Event count for " + to_string(BoringYear) + " changed."); +} + + +PQXX_REGISTER_TEST(test_032); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test37.cxx b/ext/libpqxx-7.7.3/test/test37.cxx new file mode 100644 index 000000000..fe231d39b --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test37.cxx @@ -0,0 +1,78 @@ +#include +#include +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Test program for libpqxx. Verify abort behaviour of RobustTransaction. +// +// The program will attempt to add an entry to a table called "pqxxevents", +// with a key column called "year"--and then abort the change. +namespace +{ +// Let's take a boring year that is not going to be in the "pqxxevents" table +constexpr int BoringYear{1977}; + +// Count events and specifically events occurring in Boring Year, leaving the +// former count in the result pair's first member, and the latter in second. 
+std::pair count_events(connection &conn, std::string const &table) +{ + std::string const count_query{"SELECT count(*) FROM " + table}; + nontransaction tx{conn}; + return std::make_pair( + tx.query_value(count_query), + tx.query_value(count_query + " WHERE year=" + to_string(BoringYear))); +} + + +struct deliberate_error : std::exception +{}; + + +void test_037() +{ + connection conn; + { + nontransaction tx{conn}; + test::create_pqxxevents(tx); + } + + std::string const Table{"pqxxevents"}; + + auto const Before{ + perform([&conn, &Table] { return count_events(conn, Table); })}; + PQXX_CHECK_EQUAL( + Before.second, 0, + "Already have event for " + to_string(BoringYear) + ", cannot test."); + + { + quiet_errorhandler d(conn); + PQXX_CHECK_THROWS( + perform([&conn, &Table] { + robusttransaction<> tx{conn}; + tx.exec0( + "INSERT INTO " + Table + " VALUES (" + to_string(BoringYear) + + ", " + "'yawn')"); + + throw deliberate_error(); + }), + deliberate_error, + "Did not get expected exception from failing transactor."); + } + + auto const After{ + perform([&conn, &Table] { return count_events(conn, Table); })}; + + PQXX_CHECK_EQUAL(After.first, Before.first, "Number of events changed."); + PQXX_CHECK_EQUAL( + After.second, Before.second, + "Number of events for " + to_string(BoringYear) + " changed."); +} + + +PQXX_REGISTER_TEST(test_037); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test39.cxx b/ext/libpqxx-7.7.3/test/test39.cxx new file mode 100644 index 000000000..d26855811 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test39.cxx @@ -0,0 +1,89 @@ +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Test: nontransaction changes are committed immediately. +namespace +{ +int BoringYear{1977}; + + +void test_039() +{ + connection conn; + nontransaction tx1{conn}; + test::create_pqxxevents(tx1); + std::string const Table{"pqxxevents"}; + + // Verify our start condition before beginning: there must not be a 1977 + // record already. + result R(tx1.exec( + "SELECT * FROM " + Table + + " " + "WHERE year=" + + to_string(BoringYear))); + + PQXX_CHECK_EQUAL( + std::size(R), 0, + "Already have a row for " + to_string(BoringYear) + ", cannot test."); + + // (Not needed, but verify that clear() works on empty containers) + R.clear(); + PQXX_CHECK(std::empty(R), "Result is non-empty after clear()."); + + // OK. Having laid that worry to rest, add a record for 1977. + tx1.exec0( + "INSERT INTO " + Table + + " VALUES" + "(" + + to_string(BoringYear) + + "," + "'Yawn'" + ")"); + + // Abort tx1. Since tx1 is a nontransaction, which provides only the + // transaction class interface without providing any form of transactional + // integrity, this is not going to undo our work. 
+ tx1.abort(); + + // Verify that our record was added, despite the Abort() + nontransaction tx2(conn, "tx2"); + R = tx2.exec( + "SELECT * FROM " + Table + + " " + "WHERE year=" + + to_string(BoringYear)); + PQXX_CHECK_EQUAL(std::size(R), 1, "Unexpected result size."); + + PQXX_CHECK(R.capacity() >= std::size(R), "Result's capacity is too small."); + + R.clear(); + PQXX_CHECK(std::empty(R), "result::clear() is broken."); + + // Now remove our record again + tx2.exec0( + "DELETE FROM " + Table + + " " + "WHERE year=" + + to_string(BoringYear)); + + tx2.commit(); + + // And again, verify results + nontransaction tx3(conn, "tx3"); + + R = tx3.exec( + "SELECT * FROM " + Table + + " " + "WHERE year=" + + to_string(BoringYear)); + + PQXX_CHECK_EQUAL(std::size(R), 0, "Record is not gone as expected."); +} + + +PQXX_REGISTER_TEST(test_039); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test46.cxx b/ext/libpqxx-7.7.3/test/test46.cxx new file mode 100644 index 000000000..0fa88915a --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test46.cxx @@ -0,0 +1,74 @@ +#include +#include +#include + +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Streams test program for libpqxx. Insert a result field into various +// types of streams. +namespace +{ +void test_046() +{ + connection conn; + work tx{conn}; + + row R{tx.exec1("SELECT count(*) FROM pg_tables")}; + + // Read the value into a stringstream. + std::stringstream I; + I << R[0]; + + // Now convert the stringstream into a numeric type + long L{}, L2{}; + I >> L; + + R[0].to(L2); + PQXX_CHECK_EQUAL(L, L2, "Inconsistency between conversion methods."); + + float F{}, F2{}; + std::stringstream I2; + I2 << R[0]; + I2 >> F; + R[0].to(F2); + PQXX_CHECK_BOUNDS(F2, F - 0.01, F + 0.01, "Bad floating-point result."); + + auto F3{from_string(R[0].c_str())}; + PQXX_CHECK_BOUNDS(F3, F - 0.01, F + 0.01, "Bad float from from_string."); + + auto D{from_string(R[0].c_str())}; + PQXX_CHECK_BOUNDS(D, F - 0.01, F + 0.01, "Bad double from from_string."); + + auto LD{from_string(R[0].c_str())}; + PQXX_CHECK_BOUNDS( + LD, F - 0.01, F + 0.01, "Bad long double from from_string."); + + auto S{from_string(R[0].c_str())}, + S2{from_string(std::string{R[0].c_str()})}, + S3{from_string(R[0])}; + + PQXX_CHECK_EQUAL( + S2, S, + "from_string(char const[], std::string &) " + "is inconsistent with " + "from_string(std::string const &, std::string &)."); + + PQXX_CHECK_EQUAL( + S3, S2, + "from_string(result::field const &, std::string &) " + "is inconsistent with " + "from_string(std::string const &, std::string &)."); + + PQXX_CHECK(tx.query_value("SELECT 1=1"), "1=1 doesn't yield 'true.'"); + + PQXX_CHECK(not tx.query_value("SELECT 2+2=5"), "2+2=5 yields 'true.'"); +} + + +PQXX_REGISTER_TEST(test_046); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test56.cxx b/ext/libpqxx-7.7.3/test/test56.cxx new file mode 100644 index 000000000..3aef757bb --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test56.cxx @@ -0,0 +1,24 @@ +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Simple test program for libpqxx. Issue invalid query and handle error. 
+namespace +{ +void test_056() +{ + connection conn; + work tx{conn}; + quiet_errorhandler d(conn); + + PQXX_CHECK_THROWS( + tx.exec("DELIBERATELY INVALID TEST QUERY..."), sql_error, + "SQL syntax error did not raise expected exception."); +} + + +PQXX_REGISTER_TEST(test_056); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test60.cxx b/ext/libpqxx-7.7.3/test/test60.cxx new file mode 100644 index 000000000..a29ce9ff7 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test60.cxx @@ -0,0 +1,85 @@ +#include + +#include +#include +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Example program for libpqxx. Test session variable functionality. +namespace +{ +std::string GetDatestyle(connection &conn) +{ + return conn.get_var("DATESTYLE"); +} + + +std::string SetDatestyle(connection &conn, std::string style) +{ + conn.set_session_var("DATESTYLE", style); + std::string const fullname{GetDatestyle(conn)}; + PQXX_CHECK( + not std::empty(fullname), + "Setting datestyle to " + style + " makes it an empty string."); + + return fullname; +} + + +void CheckDatestyle(connection &conn, std::string expected) +{ + PQXX_CHECK_EQUAL(GetDatestyle(conn), expected, "Got wrong datestyle."); +} + + +void RedoDatestyle( + connection &conn, std::string const &style, std::string const &expected) +{ + PQXX_CHECK_EQUAL( + SetDatestyle(conn, style), expected, "Set wrong datestyle."); +} + + +void ActivationTest( + connection &conn, std::string const &style, std::string const &expected) +{ + RedoDatestyle(conn, style, expected); + CheckDatestyle(conn, expected); +} + + +void test_060() +{ + connection conn; + + PQXX_CHECK(not std::empty(GetDatestyle(conn)), "Initial datestyle not set."); + + std::string const ISOname{SetDatestyle(conn, "ISO")}; + std::string const SQLname{SetDatestyle(conn, "SQL")}; + + PQXX_CHECK_NOT_EQUAL(ISOname, SQLname, "Same datestyle in SQL and ISO."); + + RedoDatestyle(conn, "SQL", SQLname); + + ActivationTest(conn, "ISO", ISOname); + ActivationTest(conn, "SQL", SQLname); + + PQXX_CHECK_THROWS( + conn.set_session_var("bonjour_name", std::optional{}), + pqxx::variable_set_to_null, + "Setting a variable to null did not report the error correctly."); + + // Prove that setting an unknown variable causes an error, as expected + quiet_errorhandler d{conn}; + PQXX_CHECK_THROWS( + conn.set_session_var("NONEXISTENT_VARIABLE_I_HOPE", 1), sql_error, + "Setting unknown variable failed to fail."); +} + + +PQXX_REGISTER_TEST(test_060); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test61.cxx b/ext/libpqxx-7.7.3/test/test61.cxx new file mode 100644 index 000000000..a9c871bdb --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test61.cxx @@ -0,0 +1,61 @@ +#include + +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Example program for libpqxx. Test local variable functionality. 
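+// Like test60, this test drives two calls on the connection:
+// set_session_var() to change a server setting for the rest of the session,
+// and get_var() to read the current value back.  A minimal sketch of that
+// round trip, assuming a reachable database:
+//
+//   pqxx::connection conn;
+//   conn.set_session_var("DATESTYLE", "ISO");
+//   std::string const style{conn.get_var("DATESTYLE")};  // e.g. "ISO, MDY"
+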
+namespace +{ +std::string GetDatestyle(transaction_base &T) +{ + return T.conn().get_var("DATESTYLE"); +} + + +std::string SetDatestyle(transaction_base &T, std::string style) +{ + T.conn().set_session_var("DATESTYLE", style); + std::string const fullname{GetDatestyle(T)}; + PQXX_CHECK( + not std::empty(fullname), + "Setting datestyle to " + style + " makes it an empty string."); + + return fullname; +} + + +void RedoDatestyle( + transaction_base &T, std::string const &style, std::string const &expected) +{ + PQXX_CHECK_EQUAL(SetDatestyle(T, style), expected, "Set wrong datestyle."); +} + + +void test_061() +{ + connection conn; + work tx{conn}; + + PQXX_CHECK(not std::empty(GetDatestyle(tx)), "Initial datestyle not set."); + + std::string const ISOname{SetDatestyle(tx, "ISO")}; + std::string const SQLname{SetDatestyle(tx, "SQL")}; + + PQXX_CHECK_NOT_EQUAL(ISOname, SQLname, "Same datestyle in SQL and ISO."); + + RedoDatestyle(tx, "SQL", SQLname); + + // Prove that setting an unknown variable causes an error, as expected + quiet_errorhandler d(tx.conn()); + PQXX_CHECK_THROWS( + conn.set_session_var("NONEXISTENT_VARIABLE_I_HOPE", 1), sql_error, + "Setting unknown variable failed to fail."); +} + + +PQXX_REGISTER_TEST(test_061); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test62.cxx b/ext/libpqxx-7.7.3/test/test62.cxx new file mode 100644 index 000000000..d23e49e5a --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test62.cxx @@ -0,0 +1,61 @@ +#include +#include + +#include + +#include "test_helpers.hxx" + + +using namespace pqxx; + + +// Example program for libpqxx. Test binary string functionality. +namespace +{ +void test_062() +{ + connection conn; + work tx{conn}; + + std::string const TestStr{ + "Nasty\n\030Test\n\t String with \200\277 weird bytes " + "\r\0 and Trailer\\\\\0"}; + + tx.exec0("CREATE TEMP TABLE pqxxbin (binfield bytea)"); + + std::string const Esc{tx.esc_raw(std::basic_string{ + reinterpret_cast(std::data(TestStr)), + std::size(TestStr)})}; + + tx.exec0("INSERT INTO pqxxbin VALUES ('" + Esc + "')"); + + result R{tx.exec("SELECT * from pqxxbin")}; + tx.exec0("DELETE FROM pqxxbin"); + + auto const B{R.at(0).at(0).as>()}; + + PQXX_CHECK(not std::empty(B), "Binary string became empty in conversion."); + + PQXX_CHECK_EQUAL( + std::size(B), std::size(TestStr), "Binary string was mangled."); + + std::basic_string::const_iterator c; + std::basic_string::size_type i; + for (i = 0, c = std::begin(B); i < std::size(B); ++i, ++c) + { + PQXX_CHECK(c != std::end(B), "Premature end to binary string."); + + char const x{TestStr.at(i)}, y{char(B.at(i))}, z{char(std::data(B)[i])}; + + PQXX_CHECK_EQUAL( + std::string(&x, 1), std::string(&y, 1), "Binary string byte changed."); + + PQXX_CHECK_EQUAL( + std::string(&y, 1), std::string(&z, 1), + "Inconsistent byte at offset " + to_string(i) + "."); + } +} + + +PQXX_REGISTER_TEST(test_062); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test69.cxx b/ext/libpqxx-7.7.3/test/test69.cxx new file mode 100644 index 000000000..d15ff1489 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test69.cxx @@ -0,0 +1,55 @@ +#include + +#include +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Test program for libpqxx. Issue a query repeatedly through a pipeline, and +// compare results. 
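+// A pipeline queues queries on a transaction and lets the caller collect the
+// results later, so several statements can be in flight at once.  A minimal
+// sketch of the insert()/retrieve() cycle exercised below, assuming a
+// reachable database:
+//
+//   pqxx::work tx{conn};
+//   pqxx::pipeline pl{tx};
+//   auto const id{pl.insert("SELECT 99")};
+//   pqxx::result const r{pl.retrieve(id)};
+//   // r[0][0].as<int>() == 99
+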
+namespace +{ +void TestPipeline(pipeline &P, int numqueries) +{ + std::string const Q{"SELECT 99"}; + + for (int i{numqueries}; i > 0; --i) P.insert(Q); + + PQXX_CHECK( + (numqueries == 0) or not std::empty(P), "pipeline::empty() is broken."); + + int res{0}; + for (int i{numqueries}; i > 0; --i) + { + PQXX_CHECK( + not std::empty(P), "Got wrong number of queries from pipeline."); + + auto R{P.retrieve()}; + + if (res != 0) + PQXX_CHECK_EQUAL( + R.second[0][0].as(), res, + "Got unexpected result out of pipeline."); + + res = R.second[0][0].as(); + } + + PQXX_CHECK(std::empty(P), "Pipeline not empty after retrieval."); +} + + +void test_069() +{ + connection conn; + work tx{conn}; + pipeline P(tx); + PQXX_CHECK(std::empty(P), "Pipeline is not empty initially."); + for (int i{0}; i < 5; ++i) TestPipeline(P, i); +} + + +PQXX_REGISTER_TEST(test_069); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/test70.cxx b/ext/libpqxx-7.7.3/test/test70.cxx new file mode 100644 index 000000000..0f72089f1 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test70.cxx @@ -0,0 +1,101 @@ +#include +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +namespace +{ +void TestPipeline(pipeline &P, int numqueries) +{ + std::string const Q{"SELECT * FROM generate_series(1, 10)"}; + result const Empty; + PQXX_CHECK(std::empty(Empty), "Default-constructed result is not empty."); + PQXX_CHECK( + std::empty(Empty.query()), "Default-constructed result has query"); + + P.retain(); + for (int i{numqueries}; i > 0; --i) P.insert(Q); + P.resume(); + + PQXX_CHECK( + (numqueries == 0) || not std::empty(P), "pipeline::empty() is broken."); + + int res{0}; + result Prev; + PQXX_CHECK_EQUAL(Prev, Empty, "Default-constructed results are not equal."); + + for (int i{numqueries}; i > 0; --i) + { + PQXX_CHECK(not std::empty(P), "Got no results from pipeline."); + + auto R{P.retrieve()}; + + PQXX_CHECK_NOT_EQUAL(R.second, Empty, "Got empty result."); + if (Prev != Empty) + PQXX_CHECK_EQUAL(R.second, Prev, "Results to same query are different."); + + Prev = R.second; + PQXX_CHECK_EQUAL(Prev, R.second, "Assignment breaks result equality."); + PQXX_CHECK_EQUAL(R.second.query(), Q, "Result is for unexpected query."); + + if (res != 0) + PQXX_CHECK_EQUAL(Prev[0][0].as(), res, "Bad result from pipeline."); + + res = Prev[0][0].as(); + } + + PQXX_CHECK(std::empty(P), "Pipeline was not empty after retrieval."); +} + + +// Test program for libpqxx. Issue a query repeatedly through a pipeline, and +// compare results. Use retain() and resume() for performance. 
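+// retain() makes the pipeline hold back queries so that they can be shipped
+// to the server together; resume() sends whatever has been held back.  A
+// minimal sketch of that batching on an existing pipeline:
+//
+//   pl.retain();
+//   auto const a{pl.insert("SELECT 1")}, b{pl.insert("SELECT 2")};
+//   pl.resume();  // both queries go out in one batch
+//   int const one{pl.retrieve(a)[0][0].as<int>()};
+//   int const two{pl.retrieve(b)[0][0].as<int>()};
+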
+void test_070() +{ + connection conn; + work tx{conn}; + pipeline P(tx); + + PQXX_CHECK(std::empty(P), "Pipeline is not empty initially."); + + // Try to confuse the pipeline by feeding it a query and flushing + P.retain(); + std::string const Q{"SELECT * FROM pg_tables"}; + P.insert(Q); + P.flush(); + + PQXX_CHECK(std::empty(P), "Pipeline was not empty after flush()."); + + // See if complete() breaks retain() as it should + P.retain(); + P.insert(Q); + PQXX_CHECK(not std::empty(P), "Pipeline was empty after insert()."); + P.complete(); + PQXX_CHECK(not std::empty(P), "complete() emptied pipeline."); + + PQXX_CHECK_EQUAL( + P.retrieve().second.query(), Q, "Result is for wrong query."); + + PQXX_CHECK(std::empty(P), "Pipeline not empty after retrieve()."); + + // See if retrieve() breaks retain() when it needs to + P.retain(); + P.insert(Q); + PQXX_CHECK_EQUAL( + P.retrieve().second.query(), Q, "Got result for wrong query."); + + // See if regular retain()/resume() works + for (int i{0}; i < 5; ++i) TestPipeline(P, i); + + // See if retrieve() fails on an empty pipeline, as it should + quiet_errorhandler d(conn); + PQXX_CHECK_THROWS_EXCEPTION( + P.retrieve(), "Empty pipeline allows retrieve()."); +} +} // namespace + + +PQXX_REGISTER_TEST(test_070); diff --git a/ext/libpqxx-7.7.3/test/test71.cxx b/ext/libpqxx-7.7.3/test/test71.cxx new file mode 100644 index 000000000..e669d9ad5 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test71.cxx @@ -0,0 +1,74 @@ +#include +#include + +#include +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Test program for libpqxx. Issue queries through a pipeline, and retrieve +// results both in-order and out-of-order. +namespace +{ +using Exp = std::map; + +template void checkresult(pipeline &P, PAIR c) +{ + result const r{P.retrieve(c.first)}; + int const val{r.at(0).at(0).as(int(0))}; + PQXX_CHECK_EQUAL(val, c.second, "Wrong result from pipeline."); +} + + +void test_071() +{ + connection conn; + work tx{conn}; + pipeline P(tx); + + // Keep expected result for every query we issue + Exp values; + + // Insert queries returning various numbers. + for (int i{1}; i < 10; ++i) values[P.insert("SELECT " + to_string(i))] = i; + + // Retrieve results in query_id order, and compare to expected values + for (auto &c : values) checkresult(P, c); + + PQXX_CHECK(std::empty(P), "Pipeline was not empty retrieving all results."); + + values.clear(); + + // Insert more queries returning various numbers + P.retain(20); + for (int i{100}; i > 90; --i) values[P.insert("SELECT " + to_string(i))] = i; + + P.resume(); + + // Retrieve results in reverse order + for (auto c{std::rbegin(values)}; c != std::rend(values); ++c) + checkresult(P, *c); + + values.clear(); + P.retain(10); + for (int i{1010}; i > 1000; --i) + values[P.insert("SELECT " + to_string(i))] = i; + for (auto &c : values) + { + if (P.is_finished(c.first)) + std::cout << "Query #" << c.first << " completed despite retain()" + << std::endl; + } + + // See that all results are retrieved by complete() + P.complete(); + for (auto &c : values) + PQXX_CHECK(P.is_finished(c.first), "Query not finished after complete()."); +} +} // namespace + + +PQXX_REGISTER_TEST(test_071); diff --git a/ext/libpqxx-7.7.3/test/test72.cxx b/ext/libpqxx-7.7.3/test/test72.cxx new file mode 100644 index 000000000..716081ed7 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test72.cxx @@ -0,0 +1,53 @@ +#include + +#include +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Test program for libpqxx. 
Test error handling for pipeline. +namespace +{ +void test_072() +{ + connection conn; + work tx{conn}; + pipeline P{tx}; + + // Ensure all queries are issued at once to make the test more interesting + P.retain(); + + // The middle query should fail; the surrounding two should succeed + auto const id_1{P.insert("SELECT 1")}; + auto const id_f{P.insert("SELECT * FROM pg_nonexist")}; + auto const id_2{P.insert("SELECT 2")}; + + // See that we can process the queries without stumbling over the error + P.complete(); + + // We should be able to get the first result, which preceeds the error + auto const res_1{P.retrieve(id_1).at(0).at(0).as()}; + PQXX_CHECK_EQUAL(res_1, 1, "Got wrong result from pipeline."); + + // We should *not* get a result for the query behind the error + { + quiet_errorhandler d{conn}; + PQXX_CHECK_THROWS( + P.retrieve(id_2).at(0).at(0).as(), std::runtime_error, + "Pipeline wrongly resumed after SQL error."); + } + + // Now see that we get an exception when we touch the failed result + { + quiet_errorhandler d{conn}; + PQXX_CHECK_THROWS( + P.retrieve(id_f), sql_error, "Pipeline failed to register SQL error."); + } +} +} // namespace + + +PQXX_REGISTER_TEST(test_072); diff --git a/ext/libpqxx-7.7.3/test/test74.cxx b/ext/libpqxx-7.7.3/test/test74.cxx new file mode 100644 index 000000000..ff1abd993 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test74.cxx @@ -0,0 +1,72 @@ +#include + +#include + +#include "test_helpers.hxx" + +using namespace pqxx; + + +// Test program for libpqxx. Test fieldstream. +namespace +{ +void test_074() +{ + connection conn; + work tx{conn}; + + result R{tx.exec("SELECT * FROM pg_tables")}; + std::string const sval{R.at(0).at(1).c_str()}; + std::string sval2; + fieldstream fs1(R.front()[1]); + fs1 >> sval2; + PQXX_CHECK_EQUAL(sval2, sval, "fieldstream returned wrong value."); + + R = tx.exec("SELECT count(*) FROM pg_tables"); + int ival; + fieldstream fs2(R.at(0).at(0)); + fs2 >> ival; + PQXX_CHECK_EQUAL( + ival, R.front().front().as(), "fieldstream::front() is broken."); + + double dval; + (fieldstream(R.at(0).at(0))) >> dval; + PQXX_CHECK_BOUNDS( + dval, R[0][0].as() - 0.1, R[0][0].as() + 0.1, + "Got wrong double from fieldstream."); + + auto const roughpi{static_cast(3.1415926435)}; + R = tx.exec("SELECT " + to_string(roughpi)); + float pival; + (fieldstream(R.at(0).at(0))) >> pival; + PQXX_CHECK_BOUNDS( + pival, roughpi - 0.001, roughpi + 0.001, + "Pi approximation came back wrong from fieldstream."); + + PQXX_CHECK_EQUAL( + to_string(R[0][0]), R[0][0].c_str(), + "to_string(result::field) is inconsistent with c_str()."); + + float float_pi; + from_string(to_string(roughpi), float_pi); + PQXX_CHECK_BOUNDS( + float_pi, roughpi - 0.00001, roughpi + 0.00001, + "Float changed in conversion."); + + double double_pi; + pqxx::from_string(pqxx::to_string(static_cast(roughpi)), double_pi); + PQXX_CHECK_BOUNDS( + double_pi, roughpi - 0.00001, roughpi + 0.00001, + "Double changed in conversion."); + + long double const ld{roughpi}; + long double long_double_pi; + from_string(to_string(ld), long_double_pi); + PQXX_CHECK_BOUNDS( + long_double_pi, roughpi - 0.00001, roughpi + 0.00001, + "long double changed in conversion."); +} +} // namespace + + +PQXX_REGISTER_TEST(test_074); diff --git a/ext/libpqxx-7.7.3/test/test75.cxx b/ext/libpqxx-7.7.3/test/test75.cxx new file mode 100644 index 000000000..07f00141e --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test75.cxx @@ -0,0 +1,108 @@ +#include +#include + +#include + +#include "test_helpers.hxx" + + +// Test program for 
libpqxx. Compare const_reverse_iterator iteration of a +// result to a regular, const_iterator iteration. +namespace +{ +void test_075() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + + pqxx::test::create_pqxxevents(tx); + auto const R(tx.exec("SELECT year FROM pqxxevents")); + PQXX_CHECK(not std::empty(R), "No events found, cannot test."); + + PQXX_CHECK_EQUAL(R[0], R.at(0), "Inconsistent result indexing."); + PQXX_CHECK(not(R[0] != R.at(0)), "result::row::operator!=() is broken."); + + PQXX_CHECK_EQUAL(R[0][0], R[0].at(0), "Inconsistent row indexing."); + PQXX_CHECK( + not(R[0][0] != R[0].at(0)), "result::field::operator!=() is broken."); + + std::vector contents; + for (auto const &i : R) contents.push_back(i.at(0).as()); + + PQXX_CHECK_EQUAL( + std::size(contents), std::vector::size_type(std::size(R)), + "Number of values does not match result size."); + + for (pqxx::result::size_type i{0}; i < std::size(R); ++i) + PQXX_CHECK_EQUAL( + contents[static_cast(i)], R.at(i).at(0).c_str(), + "Inconsistent iteration."); + + // Thorough test for result::const_reverse_iterator + pqxx::result::const_reverse_iterator ri1(std::rbegin(R)), ri2(ri1), + ri3(std::end(R)); + ri2 = std::rbegin(R); + + PQXX_CHECK(ri2 == ri1, "reverse_iterator copy constructor is broken."); + PQXX_CHECK(ri3 == ri2, "result::end() does not generate rbegin()."); + PQXX_CHECK_EQUAL( + ri2 - ri3, 0, + "const_reverse_iterator is at nonzero distance from its own copy."); + + PQXX_CHECK(ri2 == ri3 + 0, "reverse_iterator+0 gives strange result."); + PQXX_CHECK(ri2 == ri3 - 0, "reverse_iterator-0 gives strange result."); + PQXX_CHECK(not(ri3 < ri2), "operator<() breaks on equal reverse_iterators."); + PQXX_CHECK(ri2 <= ri3, "operator<=() breaks on equal reverse_iterators."); + + PQXX_CHECK(ri3++ == ri2, "reverse_iterator post-increment is broken."); + + PQXX_CHECK_EQUAL(ri3 - ri2, 1, "Wrong nonzero reverse_iterator distance."); + PQXX_CHECK(ri3 > ri2, "reverse_iterator operator>() is broken."); + PQXX_CHECK(ri3 >= ri2, "reverse_iterator operator>=() is broken."); + PQXX_CHECK(ri2 < ri3, "reverse_iterator operator<() is broken."); + PQXX_CHECK(ri2 <= ri3, "reverse_iterator operator<=() is broken."); + PQXX_CHECK(ri3 == ri2 + 1, "Adding int to reverse_iterator is broken."); + PQXX_CHECK( + ri2 == ri3 - 1, "Subtracting int from reverse_iterator is broken."); + + PQXX_CHECK(ri3 == ++ri2, "reverse_iterator pre-increment is broken."); + PQXX_CHECK(ri3 >= ri2, "operator>=() breaks on equal reverse_iterators."); + PQXX_CHECK(ri3 >= ri2, "operator<=() breaks on equal reverse_iterators."); + + PQXX_CHECK( + *ri3.base() == R.back(), "reverse_iterator does not arrive at back()."); + + PQXX_CHECK( + ri1->at(0) == (*ri1).at(0), + "reverse_iterator operator->() is inconsistent with operator*()."); + + PQXX_CHECK(ri2-- == ri3, "reverse_iterator post-decrement is broken."); + PQXX_CHECK(ri2 == --ri3, "reverse_iterator pre-decrement is broken."); + PQXX_CHECK(ri2 == std::rbegin(R), "reverse_iterator decrement is broken."); + + ri2 += 1; + ri3 -= -1; + + PQXX_CHECK( + ri2 != std::rbegin(R), "Adding to reverse_iterator does not work."); + PQXX_CHECK( + ri3 == ri2, "reverse_iterator operator-=() breaks on negative distances."); + + ri2 -= 1; + PQXX_CHECK( + ri2 == std::rbegin(R), + "reverse_iterator operator+=() and operator-=() do not cancel out."); + + // Now verify that reverse iterator also sees the same results... 
+ auto l{std::rbegin(contents)}; + for (auto i{std::rbegin(R)}; i != std::rend(R); ++i, ++l) + PQXX_CHECK_EQUAL(*l, i->at(0).c_str(), "Inconsistent reverse iteration."); + + PQXX_CHECK(l == std::rend(contents), "Reverse iteration ended too soon."); + + PQXX_CHECK(not std::empty(R), "No events found in table, cannot test."); +} +} // namespace + + +PQXX_REGISTER_TEST(test_075); diff --git a/ext/libpqxx-7.7.3/test/test76.cxx b/ext/libpqxx-7.7.3/test/test76.cxx new file mode 100644 index 000000000..122c0445c --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test76.cxx @@ -0,0 +1,52 @@ +#include + +#include "test_helpers.hxx" + +// Simple test program for libpqxx. Test string conversion routines. +namespace +{ +void test_076() +{ + pqxx::connection conn; + pqxx::nontransaction tx{conn}; + + auto RFalse{tx.exec1("SELECT 1=0")}, RTrue{tx.exec1("SELECT 1=1")}; + auto False{pqxx::from_string(RFalse[0])}, + True{pqxx::from_string(RTrue[0])}; + PQXX_CHECK(not False, "False bool converted to true."); + PQXX_CHECK(True, "True bool converted to false."); + + RFalse = tx.exec1("SELECT " + pqxx::to_string(False)); + RTrue = tx.exec1("SELECT " + pqxx::to_string(True)); + False = pqxx::from_string(RFalse[0]); + True = pqxx::from_string(RTrue[0]); + PQXX_CHECK(not False, "False bool converted to true."); + PQXX_CHECK(True, "True bool converted to false."); + + short const svals[]{-1, 1, 999, -32767, -32768, 32767, 0}; + for (int i{0}; svals[i] != 0; ++i) + { + auto s{pqxx::from_string(pqxx::to_string(svals[i]))}; + PQXX_CHECK_EQUAL(s, svals[i], "short/string conversion not bijective."); + s = pqxx::from_string( + tx.exec1("SELECT " + pqxx::to_string(svals[i]))[0].c_str()); + PQXX_CHECK_EQUAL(s, svals[i], "Roundtrip through backend changed short."); + } + + unsigned short const uvals[]{1, 999, 32767, 32768, 65535, 0}; + for (int i{0}; uvals[i] != 0; ++i) + { + auto u{pqxx::from_string(pqxx::to_string(uvals[i]))}; + PQXX_CHECK_EQUAL( + u, uvals[i], "unsigned short/string conversion not bijective."); + + u = pqxx::from_string( + tx.exec1("SELECT " + pqxx::to_string(uvals[i]))[0].c_str()); + PQXX_CHECK_EQUAL( + u, uvals[i], "Roundtrip through backend changed unsigned short."); + } +} +} // namespace + + +PQXX_REGISTER_TEST(test_076); diff --git a/ext/libpqxx-7.7.3/test/test77.cxx b/ext/libpqxx-7.7.3/test/test77.cxx new file mode 100644 index 000000000..88e83c26d --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test77.cxx @@ -0,0 +1,28 @@ +#include + +#include "test_helpers.hxx" + + +// Test program for libpqxx. Test result::swap() +namespace +{ +void test_077() +{ + pqxx::connection conn; + pqxx::nontransaction tx{conn}; + + auto RFalse{tx.exec("SELECT 1=0")}, RTrue{tx.exec("SELECT 1=1")}; + auto f{pqxx::from_string(RFalse[0][0])}; + auto t{pqxx::from_string(RTrue[0][0])}; + PQXX_CHECK( + not f and t, "Booleans converted incorrectly; can't trust this test."); + + RFalse.swap(RTrue); + f = pqxx::from_string(RFalse[0][0]); + t = pqxx::from_string(RTrue[0][0]); + PQXX_CHECK(f and not t, "result::swap() is broken."); +} +} // namespace + + +PQXX_REGISTER_TEST(test_077); diff --git a/ext/libpqxx-7.7.3/test/test78.cxx b/ext/libpqxx-7.7.3/test/test78.cxx new file mode 100644 index 000000000..f970cc926 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test78.cxx @@ -0,0 +1,69 @@ +#include +#include +#include + +#include +#include +#include + +#include "test_helpers.hxx" + + +// Example program for libpqxx. Send notification to self, using a +// notification name with unusal characters, and without polling. 
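+// The test below registers a notification receiver on a channel whose name
+// contains a space, sends itself a NOTIFY for that channel from inside a
+// transaction, and then waits in connection::await_notification() until the
+// receiver's operator() fires.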
+namespace +{ +// Sample implementation of notification receiver. +class TestListener : public pqxx::notification_receiver +{ + bool m_done; + +public: + explicit TestListener(pqxx::connection &conn, std::string const &Name) : + pqxx::notification_receiver(conn, Name), m_done(false) + {} + + void operator()(std::string const &, int be_pid) override + { + m_done = true; + PQXX_CHECK_EQUAL( + be_pid, conn().backendpid(), + "Got notification from wrong backend process."); + + std::cout << "Received notification: " << channel() << " pid=" << be_pid + << std::endl; + } + + bool done() const { return m_done; } +}; + + +void test_078() +{ + pqxx::connection conn; + + std::string const NotifName{"my listener"}; + TestListener L{conn, NotifName}; + + pqxx::perform([&conn, &L] { + pqxx::work tx{conn}; + tx.exec0("NOTIFY " + tx.quote_name(L.channel())); + tx.commit(); + }); + + int notifs{0}; + for (int i{0}; (i < 20) and not L.done(); ++i) + { + PQXX_CHECK_EQUAL(notifs, 0, "Got unexpected notifications."); + std::cout << "."; + notifs = conn.await_notification(); + } + std::cout << std::endl; + + PQXX_CHECK(L.done(), "No notification received."); + PQXX_CHECK_EQUAL(notifs, 1, "Got unexpected number of notifications."); +} +} // namespace + + +PQXX_REGISTER_TEST(test_078); diff --git a/ext/libpqxx-7.7.3/test/test79.cxx b/ext/libpqxx-7.7.3/test/test79.cxx new file mode 100644 index 000000000..761e5f3c3 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test79.cxx @@ -0,0 +1,70 @@ +#include +#include +#include + +#include +#include +#include + +#include "test_helpers.hxx" + + +// Example program for libpqxx. Test waiting for notification with timeout. +namespace +{ +// Sample implementation of notification receiver. +class TestListener final : public pqxx::notification_receiver +{ + bool m_done; + +public: + explicit TestListener(pqxx::connection &conn, std::string const &Name) : + pqxx::notification_receiver(conn, Name), m_done(false) + {} + + void operator()(std::string const &, int be_pid) override + { + m_done = true; + PQXX_CHECK_EQUAL( + be_pid, conn().backendpid(), "Notification came from wrong backend."); + + std::cout << "Received notification: " << channel() << " pid=" << be_pid + << std::endl; + } + + bool done() const { return m_done; } +}; + + +void test_079() +{ + pqxx::connection conn; + + std::string const NotifName{"mylistener"}; + TestListener L(conn, NotifName); + + // First see if the timeout really works: we're not expecting any notifs + int notifs{conn.await_notification(0, 1)}; + PQXX_CHECK_EQUAL(notifs, 0, "Got unexpected notification."); + + pqxx::perform([&conn, &L] { + pqxx::work tx{conn}; + tx.exec0("NOTIFY " + L.channel()); + tx.commit(); + }); + + for (int i{0}; (i < 20) and not L.done(); ++i) + { + PQXX_CHECK_EQUAL(notifs, 0, "Got notifications, but no handler called."); + std::cout << "."; + notifs = conn.await_notification(1, 0); + } + std::cout << std::endl; + + PQXX_CHECK(L.done(), "No notifications received."); + PQXX_CHECK_EQUAL(notifs, 1, "Got unexpected notifications."); +} +} // namespace + + +PQXX_REGISTER_TEST(test_079); diff --git a/ext/libpqxx-7.7.3/test/test82.cxx b/ext/libpqxx-7.7.3/test/test82.cxx new file mode 100644 index 000000000..e7b2769c7 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test82.cxx @@ -0,0 +1,154 @@ +#include + +#include + +#include "test_helpers.hxx" + + +// Test program for libpqxx. Read and print table using row iterators. 
+namespace +{ +void test_082() +{ + pqxx::connection conn; + pqxx::nontransaction tx{conn}; + + pqxx::test::create_pqxxevents(tx); + std::string const Table{"pqxxevents"}; + pqxx::result R{tx.exec("SELECT * FROM " + Table)}; + + PQXX_CHECK(not std::empty(R), "Got empty result."); + + std::string const nullstr("[null]"); + + for (auto const &r : R) + { + pqxx::row::const_iterator f2(r[0]); + for (auto const &f : r) + { + PQXX_CHECK_EQUAL( + (*f2).as(nullstr), f.as(nullstr), "Inconsistent iteration result."); + ++f2; + } + + PQXX_CHECK( + std::begin(r) + pqxx::row::difference_type(std::size(r)) == std::end(r), + "Row end() appears to be in the wrong place."); + PQXX_CHECK( + pqxx::row::difference_type(std::size(r)) + std::begin(r) == std::end(r), + "Row iterator addition is not commutative."); + PQXX_CHECK_EQUAL( + std::begin(r)->num(), 0, "Wrong column number at begin()."); + + pqxx::row::const_iterator f3(r[std::size(r)]); + + PQXX_CHECK(f3 == std::end(r), "Did not get end() at end of row."); + + PQXX_CHECK( + f3 > std::begin(r), "Row end() appears to precede its begin()."); + + PQXX_CHECK( + f3 >= std::end(r) and std::begin(r) < f3, + "Row iterator operator<() is broken."); + + PQXX_CHECK(f3 > std::begin(r), "Row end() not greater than begin()."); + + pqxx::row::const_iterator f4{r, std::size(r)}; + PQXX_CHECK(f4 == f3, "Row iterator constructor with offset is broken."); + + --f3; + f4 -= 1; + + PQXX_CHECK(f3 < std::end(r), "Last field in row is not before end()."); + PQXX_CHECK(f3 >= std::begin(r), "Last field in row precedes begin()."); + PQXX_CHECK( + f3 == std::end(r) - 1, "Back from end() doese not yield end()-1."); + PQXX_CHECK_EQUAL( + std::end(r) - f3, 1, "Wrong distance from last row to end()."); + + PQXX_CHECK(f4 == f3, "Row iterator operator-=() is broken."); + f4 += 1; + PQXX_CHECK(f4 == std::end(r), "Row iterator operator+=() is broken."); + + for (auto fr = std::rbegin(r); fr != std::rend(r); ++fr, --f3) + PQXX_CHECK_EQUAL( + *fr, *f3, + "Reverse traversal is not consistent with forward traversal."); + } + + // Thorough test for row::const_reverse_iterator + pqxx::row::const_reverse_iterator ri1(std::rbegin(R.front())), ri2(ri1), + ri3(std::end(R.front())); + ri2 = std::rbegin(R.front()); + + PQXX_CHECK( + ri1 == ri2, "Copy-constructed reverse_iterator is not equal to original."); + + PQXX_CHECK(ri2 == ri3, "result::end() does not generate rbegin()."); + PQXX_CHECK_EQUAL( + ri2 - ri3, 0, + "Distance between identical const_reverse_iterators was nonzero."); + + PQXX_CHECK( + pqxx::row::const_reverse_iterator(ri1.base()) == ri1, + "Back-conversion of reverse_iterator base() fails."); + + PQXX_CHECK(ri2 == ri3 + 0, "reverse_iterator+0 gives strange result."); + PQXX_CHECK(ri2 == ri3 - 0, "reverse_iterator-0 gives strange result."); + + PQXX_CHECK( + not(ri3 < ri2), + "reverse_iterator operator<() breaks on identical iterators."); + PQXX_CHECK( + ri2 <= ri3, + "reverse_iterator operator<=() breaks on identical iterators."); + PQXX_CHECK(ri3++ == ri2, "reverse_iterator post-increment is broken."); + + PQXX_CHECK_EQUAL(ri3 - ri2, 1, "Wrong reverse_iterator distance."); + PQXX_CHECK(ri3 > ri2, "reverse_iterator operator>() is broken."); + PQXX_CHECK(ri3 >= ri2, "reverse_iterator operator>=() is broken."); + PQXX_CHECK(ri2 < ri3, "reverse_iterator operator<() is broken."); + PQXX_CHECK(ri2 <= ri3, "reverse_iterator operator<=() is broken."); + PQXX_CHECK(ri3 == ri2 + 1, "Adding number to reverse_iterator goes wrong."); + PQXX_CHECK(ri2 == ri3 - 1, "Subtracting from reverse_iterator 
goes wrong."); + + PQXX_CHECK( + ri3 == ++ri2, "reverse_iterator pre-incremen returns wrong result."); + + PQXX_CHECK( + ri3 >= ri2, "reverse_iterator operator>=() breaks on equal iterators."); + PQXX_CHECK( + ri3 >= ri2, "reverse_iterator operator<=() breaks on equal iterators."); + PQXX_CHECK( + *ri3.base() == R.front().back(), + "reverse_iterator does not arrive at back()."); + PQXX_CHECK( + ri1->c_str()[0] == (*ri1).c_str()[0], + "reverse_iterator operator->() is inconsistent with operator*()."); + PQXX_CHECK( + ri2-- == ri3, "reverse_iterator post-decrement returns wrong result."); + PQXX_CHECK( + ri2 == --ri3, "reverse_iterator pre-increment returns wrong result."); + PQXX_CHECK( + ri2 == std::rbegin(R.front()), + "Moving iterator back and forth doesn't get it back to origin."); + + ri2 += 1; + ri3 -= -1; + + PQXX_CHECK( + ri2 != std::rbegin(R.front()), "Adding to reverse_iterator doesn't work."); + PQXX_CHECK( + ri2 != std::rbegin(R.front()), "Adding to reverse_iterator doesn't work."); + PQXX_CHECK( + ri3 == ri2, "reverse_iterator operator-=() breaks on negative numbers."); + + ri2 -= 1; + PQXX_CHECK( + ri2 == std::rbegin(R.front()), + "reverse_iterator operator+=() and operator-=() do not cancel out"); +} +} // namespace + + +PQXX_REGISTER_TEST(test_082); diff --git a/ext/libpqxx-7.7.3/test/test84.cxx b/ext/libpqxx-7.7.3/test/test84.cxx new file mode 100644 index 000000000..d35dc521b --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test84.cxx @@ -0,0 +1,109 @@ +#include +#include +#include +#include +#include + +#include +#include + +#include "test_helpers.hxx" + + +// "Adopted SQL Cursor" test program for libpqxx. Create SQL cursor, wrap it +// in a cursor stream, then use it to fetch data and check for consistent +// results. Compare results against an icursor_iterator so that is tested as +// well. +namespace +{ +void test_084() +{ + pqxx::connection conn; + pqxx::transaction tx{conn}; + + std::string const Table{"pg_tables"}, Key{"tablename"}; + + // Count rows. + pqxx::result R(tx.exec("SELECT count(*) FROM " + Table)); + + PQXX_CHECK( + R.at(0).at(0).as() > 20, + "Not enough rows in " + Table + ", cannot test."); + + // Create an SQL cursor and, for good measure, muddle up its state a bit. + std::string const CurName{"MYCUR"}, + Query{"SELECT * FROM " + Table + " ORDER BY " + Key}; + constexpr int InitialSkip{2}, GetRows{3}; + + tx.exec0("DECLARE " + tx.quote_name(CurName) + " CURSOR FOR " + Query); + tx.exec0( + "MOVE " + pqxx::to_string(InitialSkip * GetRows) + + " " + "IN " + + tx.quote_name(CurName)); + + // Wrap cursor in cursor stream. Apply some trickery to get its name inside + // a result field for this purpose. This isn't easy because it's not + // supposed to be easy; normally we'd only construct streams around existing + // SQL cursors if they were being returned by functions. 
+ pqxx::icursorstream C{ + tx, tx.exec("SELECT '" + tx.esc(CurName) + "'")[0][0], GetRows}; + + // Create parallel cursor to check results + pqxx::icursorstream C2{tx, Query, "CHECKCUR", GetRows}; + pqxx::icursor_iterator i2{C2}; + + // Remember, our adopted cursor is at position (InitialSkip*GetRows) + pqxx::icursor_iterator i3(i2); + + PQXX_CHECK( + (i3 == i2) and not(i3 != i2), + "Equality on copy-constructed icursor_iterator is broken."); + PQXX_CHECK( + not(i3 > i2) and not(i3 < i2) and (i3 <= i2) and (i3 >= i2), + "Comparison on identical icursor_iterators is broken."); + + i3 += InitialSkip; + + PQXX_CHECK(not(i3 <= i2), "icursor_iterator operator<=() is broken."); + + pqxx::icursor_iterator iend, i4; + PQXX_CHECK(i3 != iend, "Early end to icursor_iterator iteration."); + i4 = iend; + PQXX_CHECK(i4 == iend, "Assigning empty icursor_iterator fails."); + + // Now start testing our new Cursor. + C >> R; + i2 = i3; + pqxx::result R2(*i2++); + + PQXX_CHECK_EQUAL( + std::size(R), static_cast(GetRows), + "Got unexpected number of rows."); + + PQXX_CHECK_EQUAL(R, R2, "Unexpected result at [1]"); + + C.get(R); + R2 = *i2; + PQXX_CHECK_EQUAL(R, R2, "Unexpected result at [2]"); + i2 += 1; + + C.ignore(GetRows); + C.get(R); + R2 = *++i2; + + PQXX_CHECK_EQUAL(R, R2, "Unexpected result at [3]"); + + ++i2; + R2 = *i2++; + for (int i{1}; C.get(R) and i2 != iend; R2 = *i2++, ++i) + PQXX_CHECK_EQUAL( + R, R2, "Unexpected result in iteration at " + pqxx::to_string(i)); + + PQXX_CHECK(i2 == iend, "Adopted cursor terminated early."); + PQXX_CHECK(not(C >> R), "icursor_iterator terminated early."); +} +} // namespace + + +PQXX_REGISTER_TEST(test_084); diff --git a/ext/libpqxx-7.7.3/test/test87.cxx b/ext/libpqxx-7.7.3/test/test87.cxx new file mode 100644 index 000000000..3a9a58b16 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test87.cxx @@ -0,0 +1,83 @@ +#include "pqxx/config-public-compiler.h" +#include +#include +#include +#include +#include + +#include + +#include + +#include + +#include +#include +#include + +#include "test_helpers.hxx" + + +// Test program for libpqxx. Send notification to self, and wait on the +// socket's connection for it to come in. In a simple situation you'd use +// connection::await_notification() for this, but that won't let you wait for +// multiple sockets. +namespace +{ +// Sample implementation of notification receiver. 
+class TestListener final : public pqxx::notification_receiver +{ + bool m_done; + +public: + explicit TestListener(pqxx::connection &conn, std::string Name) : + pqxx::notification_receiver(conn, Name), m_done(false) + {} + + void operator()(std::string const &, int be_pid) override + { + m_done = true; + PQXX_CHECK_EQUAL( + be_pid, conn().backendpid(), + "Notification came from wrong backend process."); + + std::cout << "Received notification: " << channel() << " pid=" << be_pid + << std::endl; + } + + bool done() const { return m_done; } +}; + + +void test_087() +{ + pqxx::connection conn; + + std::string const NotifName{"my notification"}; + TestListener L{conn, NotifName}; + + pqxx::perform([&conn, &L] { + pqxx::work tx{conn}; + tx.exec0("NOTIFY " + tx.quote_name(L.channel())); + tx.commit(); + }); + + int notifs{0}; + for (int i{0}; (i < 20) and not L.done(); ++i) + { + PQXX_CHECK_EQUAL(notifs, 0, "Got unexpected notifications."); + + std::cout << "."; + + pqxx::internal::wait_fd(conn.sock(), true, false); + notifs = conn.get_notifs(); + } + std::cout << std::endl; + + PQXX_CHECK(L.done(), "No notification received."); + PQXX_CHECK_EQUAL(notifs, 1, "Got unexpected number of notifications."); +} +} // namespace + + +PQXX_REGISTER_TEST(test_087); diff --git a/ext/libpqxx-7.7.3/test/test88.cxx b/ext/libpqxx-7.7.3/test/test88.cxx new file mode 100644 index 000000000..d5deaea75 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test88.cxx @@ -0,0 +1,91 @@ +#include + +#include +#include + +#include "test_helpers.hxx" + + +// Test program for libpqxx. Attempt to perform nested transactions. +namespace +{ +void test_088() +{ + pqxx::connection conn; + + pqxx::work tx0{conn}; + pqxx::test::create_pqxxevents(tx0); + + // Trivial test: create subtransactions, and commit/abort + std::cout << tx0.exec1("SELECT 'tx0 starts'")[0].c_str() << std::endl; + + pqxx::subtransaction T0a(static_cast(tx0), "T0a"); + T0a.commit(); + + pqxx::subtransaction T0b(static_cast(tx0), "T0b"); + T0b.abort(); + std::cout << tx0.exec1("SELECT 'tx0 ends'")[0].c_str() << std::endl; + tx0.commit(); + + // Basic functionality: perform query in subtransaction; abort, continue + pqxx::work tx1{conn, "tx1"}; + std::cout << tx1.exec1("SELECT 'tx1 starts'")[0].c_str() << std::endl; + pqxx::subtransaction tx1a{tx1, "tx1a"}; + std::cout << tx1a.exec1("SELECT ' a'")[0].c_str() << std::endl; + tx1a.commit(); + pqxx::subtransaction tx1b{tx1, "tx1b"}; + std::cout << tx1b.exec1("SELECT ' b'")[0].c_str() << std::endl; + tx1b.abort(); + pqxx::subtransaction tx1c{tx1, "tx1c"}; + std::cout << tx1c.exec1("SELECT ' c'")[0].c_str() << std::endl; + tx1c.commit(); + std::cout << tx1.exec1("SELECT 'tx1 ends'")[0].c_str() << std::endl; + tx1.commit(); + + // Commit/rollback functionality + pqxx::work tx2{conn, "tx2"}; + std::string const Table{"test088"}; + tx2.exec0("CREATE TEMP TABLE " + Table + "(no INTEGER, text VARCHAR)"); + + tx2.exec0("INSERT INTO " + Table + " VALUES(1,'tx2')"); + + pqxx::subtransaction tx2a{tx2, "tx2a"}; + tx2a.exec0("INSERT INTO " + Table + " VALUES(2,'tx2a')"); + tx2a.commit(); + pqxx::subtransaction tx2b{tx2, "tx2b"}; + tx2b.exec0("INSERT INTO " + Table + " VALUES(3,'tx2b')"); + tx2b.abort(); + pqxx::subtransaction tx2c{tx2, "tx2c"}; + tx2c.exec0("INSERT INTO " + Table + " VALUES(4,'tx2c')"); + tx2c.commit(); + auto const R{tx2.exec("SELECT * FROM " + Table + " ORDER BY no")}; + for (auto const &i : R) + std::cout << '\t' << i[0].c_str() << '\t' << i[1].c_str() << std::endl; + + PQXX_CHECK_EQUAL(std::size(R), 3, "Wrong 
number of results."); + + int expected[3]{1, 2, 4}; + for (pqxx::result::size_type n{0}; n < std::size(R); ++n) + PQXX_CHECK_EQUAL( + R[n][0].as(), expected[n], "Hit unexpected row number."); + + tx2.abort(); + + // Auto-abort should only roll back the subtransaction. + pqxx::work tx3{conn, "tx3"}; + pqxx::subtransaction tx3a(tx3, "tx3a"); + PQXX_CHECK_THROWS( + tx3a.exec("SELECT * FROM nonexistent_table WHERE nonattribute=0"), + pqxx::sql_error, "Bogus query did not fail."); + + // Subtransaction can only be aborted now, because there was an error. + tx3a.abort(); + // We're back in our top-level transaction. This did not abort. + tx3.exec1("SELECT count(*) FROM pqxxevents"); + // Make sure we can commit exactly one more level of transaction. + tx3.commit(); +} +} // namespace + + +PQXX_REGISTER_TEST(test_088); diff --git a/ext/libpqxx-7.7.3/test/test89.cxx b/ext/libpqxx-7.7.3/test/test89.cxx new file mode 100644 index 000000000..2b96dfcd2 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test89.cxx @@ -0,0 +1,44 @@ +#include + +#include +#include + +#include "test_helpers.hxx" + +// Test program for libpqxx. Attempt to perform nested queries on various +// types of connections. +namespace +{ +void test_089() +{ + pqxx::connection C; + + // Trivial test: create subtransactions, and commit/abort + pqxx::work T0(C, "T0"); + T0.exec1("SELECT 'T0 starts'"); + pqxx::subtransaction T0a(T0, "T0a"); + T0a.commit(); + pqxx::subtransaction T0b(T0, "T0b"); + T0b.abort(); + T0.exec1("SELECT 'T0 ends'"); + T0.commit(); + + // Basic functionality: perform query in subtransaction; abort, continue + pqxx::work T1(C, "T1"); + T1.exec1("SELECT 'T1 starts'"); + pqxx::subtransaction T1a(T1, "T1a"); + T1a.exec1("SELECT ' a'"); + T1a.commit(); + pqxx::subtransaction T1b(T1, "T1b"); + T1b.exec1("SELECT ' b'"); + T1b.abort(); + pqxx::subtransaction T1c(T1, "T1c"); + T1c.exec1("SELECT ' c'"); + T1c.commit(); + T1.exec1("SELECT 'T1 ends'"); + T1.commit(); +} +} // namespace + + +PQXX_REGISTER_TEST(test_089); diff --git a/ext/libpqxx-7.7.3/test/test90.cxx b/ext/libpqxx-7.7.3/test/test90.cxx new file mode 100644 index 000000000..d7d97f9f3 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test90.cxx @@ -0,0 +1,23 @@ +#include + +#include "test_helpers.hxx" + +// Test program for libpqxx. Test adorn_name. + +namespace +{ +void test_090() +{ + pqxx::connection conn; + + // Test connection's adorn_name() function for uniqueness + std::string const nametest{"basename"}; + + PQXX_CHECK_NOT_EQUAL( + conn.adorn_name(nametest), conn.adorn_name(nametest), + "\"Unique\" names are not unique."); +} +} // namespace + + +PQXX_REGISTER_TEST(test_090); diff --git a/ext/libpqxx-7.7.3/test/test_helpers.hxx b/ext/libpqxx-7.7.3/test/test_helpers.hxx new file mode 100644 index 000000000..6db3ab971 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test_helpers.hxx @@ -0,0 +1,305 @@ +#include +#include + +#include +#include + +namespace pqxx +{ +namespace test +{ +class test_failure : public std::logic_error +{ + std::string const m_file; + int m_line; + +public: + test_failure(std::string const &ffile, int fline, std::string const &desc); + + ~test_failure() noexcept override; + + std::string const &file() const noexcept { return m_file; } + int line() const noexcept { return m_line; } +}; + + +/// Drop a table, if it exists. +void drop_table(transaction_base &, std::string const &table); + + +using testfunc = void (*)(); + + +void register_test(char const name[], testfunc func); + + +/// Register a test while not inside a function. 
+struct registrar +{ + registrar(char const name[], testfunc func) + { + pqxx::test::register_test(name, func); + } +}; + + +// Register a test function, so the runner will run it. +#define PQXX_REGISTER_TEST(func) \ + pqxx::test::registrar tst_##func { #func, func } + + +// Unconditional test failure. +#define PQXX_CHECK_NOTREACHED(desc) \ + pqxx::test::check_notreached(__FILE__, __LINE__, (desc)) +[[noreturn]] void +check_notreached(char const file[], int line, std::string desc); + +// Verify that a condition is met, similar to assert() +#define PQXX_CHECK(condition, desc) \ + pqxx::test::check(__FILE__, __LINE__, (condition), #condition, (desc)) +void check( + char const file[], int line, bool condition, char const text[], + std::string const &desc); + +// Verify that variable has the expected value. +#define PQXX_CHECK_EQUAL(actual, expected, desc) \ + pqxx::test::check_equal( \ + __FILE__, __LINE__, (actual), #actual, (expected), #expected, (desc)) +template +inline void check_equal( + char const file[], int line, ACTUAL actual, char const actual_text[], + EXPECTED expected, char const expected_text[], std::string const &desc) +{ + if (expected == actual) + return; + std::string const fulldesc = desc + " (" + actual_text + " <> " + + expected_text + + ": " + "actual=" + + to_string(actual) + + ", " + "expected=" + + to_string(expected) + ")"; + throw test_failure(file, line, fulldesc); +} + +// Verify that two values are not equal. +#define PQXX_CHECK_NOT_EQUAL(value1, value2, desc) \ + pqxx::test::check_not_equal( \ + __FILE__, __LINE__, (value1), #value1, (value2), #value2, (desc)) +template +inline void check_not_equal( + char const file[], int line, VALUE1 value1, char const text1[], + VALUE2 value2, char const text2[], std::string const &desc) +{ + if (value1 != value2) + return; + std::string const fulldesc = desc + " (" + text1 + " == " + text2 + + ": " + "both are " + + to_string(value2) + ")"; + throw test_failure(file, line, fulldesc); +} + + +// Verify that value1 is less than value2. +#define PQXX_CHECK_LESS(value1, value2, desc) \ + pqxx::test::check_less( \ + __FILE__, __LINE__, (value1), #value1, (value2), #value2, (desc)) +// Verify that value1 is greater than value2. +#define PQXX_CHECK_GREATER(value2, value1, desc) \ + pqxx::test::check_less( \ + __FILE__, __LINE__, (value1), #value1, (value2), #value2, (desc)) +template +inline void check_less( + char const file[], int line, VALUE1 value1, char const text1[], + VALUE2 value2, char const text2[], std::string const &desc) +{ + if (value1 < value2) + return; + std::string const fulldesc = desc + " (" + text1 + " >= " + text2 + + ": " + "\"lower\"=" + + to_string(value1) + + ", " + "\"upper\"=" + + to_string(value2) + ")"; + throw test_failure(file, line, fulldesc); +} + + +// Verify that value1 is less than or equal to value2. +#define PQXX_CHECK_LESS_EQUAL(value1, value2, desc) \ + pqxx::test::check_less_equal( \ + __FILE__, __LINE__, (value1), #value1, (value2), #value2, (desc)) +// Verify that value1 is greater than or equal to value2. 
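+// Note: like PQXX_CHECK_GREATER above, this macro deliberately swaps its
+// first two parameters so it can reuse check_less_equal() with the arguments
+// reversed; no separate "greater" helper is needed.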
+#define PQXX_CHECK_GREATER_EQUAL(value2, value1, desc) \ + pqxx::test::check_less_equal( \ + __FILE__, __LINE__, (value1), #value1, (value2), #value2, (desc)) +template +inline void check_less_equal( + char const file[], int line, VALUE1 value1, char const text1[], + VALUE2 value2, char const text2[], std::string const &desc) +{ + if (value1 <= value2) + return; + std::string const fulldesc = desc + " (" + text1 + " > " + text2 + + ": " + "\"lower\"=" + + to_string(value1) + + ", " + "\"upper\"=" + + to_string(value2) + ")"; + throw test_failure(file, line, fulldesc); +} + + +struct failure_to_fail +{}; + + +namespace internal +{ +/// Syntactic placeholder: require (and accept) semicolon after block. +inline void end_of_statement() {} +} // namespace internal + + +// Verify that "action" does not throw an exception. +#define PQXX_CHECK_SUCCEEDS(action, desc) \ + { \ + try \ + { \ + action; \ + } \ + catch (std::exception const &e) \ + { \ + PQXX_CHECK_NOTREACHED( \ + std::string{desc} + " - \"" + \ + #action "\" threw exception: " + e.what()); \ + } \ + catch (...) \ + { \ + PQXX_CHECK_NOTREACHED( \ + std::string{desc} + " - \"" + #action "\" threw a non-exception!"); \ + } \ + } \ + pqxx::test::internal::end_of_statement() + +// Verify that "action" throws an exception, of any std::exception-based type. +#define PQXX_CHECK_THROWS_EXCEPTION(action, desc) \ + { \ + try \ + { \ + action; \ + throw pqxx::test::failure_to_fail(); \ + } \ + catch (pqxx::test::failure_to_fail const &) \ + { \ + PQXX_CHECK_NOTREACHED( \ + std::string{desc} + " (\"" #action "\" did not throw)"); \ + } \ + catch (std::exception const &) \ + {} \ + catch (...) \ + { \ + PQXX_CHECK_NOTREACHED( \ + std::string{desc} + " (\"" #action "\" threw non-exception type)"); \ + } \ + } \ + pqxx::test::internal::end_of_statement() + +// Verify that "action" throws "exception_type" (which is not std::exception). +#define PQXX_CHECK_THROWS(action, exception_type, desc) \ + { \ + try \ + { \ + action; \ + throw pqxx::test::failure_to_fail(); \ + } \ + catch (pqxx::test::failure_to_fail const &) \ + { \ + PQXX_CHECK_NOTREACHED( \ + std::string{desc} + " (\"" #action \ + "\" did not throw " #exception_type ")"); \ + } \ + catch (exception_type const &) \ + {} \ + catch (std::exception const &e) \ + { \ + PQXX_CHECK_NOTREACHED( \ + std::string{desc} + \ + " (\"" #action \ + "\" " \ + "threw exception other than " #exception_type ": " + \ + e.what() + ")"); \ + } \ + catch (...) 
\ + { \ + PQXX_CHECK_NOTREACHED( \ + std::string{desc} + " (\"" #action "\" threw non-exception type)"); \ + } \ + } \ + pqxx::test::internal::end_of_statement() + +#define PQXX_CHECK_BOUNDS(value, lower, upper, desc) \ + pqxx::test::check_bounds( \ + __FILE__, __LINE__, (value), #value, (lower), #lower, (upper), #upper, \ + (desc)) +template +inline void check_bounds( + char const file[], int line, VALUE value, char const text[], LOWER lower, + char const lower_text[], UPPER upper, char const upper_text[], + std::string const &desc) +{ + std::string const range_check = std::string{lower_text} + " < " + upper_text, + lower_check = + std::string{"!("} + text + " < " + lower_text + ")", + upper_check = std::string{text} + " < " + upper_text; + + pqxx::test::check( + file, line, lower < upper, range_check.c_str(), + desc + " (acceptable range is empty; value was " + text + ")"); + pqxx::test::check( + file, line, not(value < lower), lower_check.c_str(), + desc + " (" + text + " is below lower bound " + lower_text + ")"); + pqxx::test::check( + file, line, value < upper, upper_check.c_str(), + desc + " (" + text + " is not below upper bound " + upper_text + ")"); +} + + +// Report expected exception +void expected_exception(std::string const &); + + +// Represent result row as string. +std::string list_row(row); +// Represent result as string. +std::string list_result(result); +// Represent result iterator as string. +std::string list_result_iterator(result::const_iterator); + + +// @deprecated Set up test data for legacy tests. +void create_pqxxevents(transaction_base &); +} // namespace test + + +template<> inline std::string to_string(row const &value) +{ + return pqxx::test::list_row(value); +} + + +template<> inline std::string to_string(result const &value) +{ + return pqxx::test::list_result(value); +} + + +template<> inline std::string to_string(result::const_iterator const &value) +{ + return pqxx::test::list_result_iterator(value); +} +} // namespace pqxx diff --git a/ext/libpqxx-7.7.3/test/test_types.hxx b/ext/libpqxx-7.7.3/test/test_types.hxx new file mode 100644 index 000000000..38c045370 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/test_types.hxx @@ -0,0 +1,242 @@ +/* + * Custom types for testing & libpqxx support those types + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace pqxx +{ +template<> struct nullness : no_null +{}; + + +constexpr static auto hex_digit{"0123456789abcdef"}; + + +template<> struct string_traits +{ + static std::size_t size_buffer(std::byte const &) { return 3; } + + static zview to_buf(char *begin, char *end, std::byte const &value) + { + if (pqxx::internal::cmp_less(end - begin, size_buffer(value))) + throw pqxx::conversion_overrun{ + "Not enough buffer to convert std::byte."}; + auto uc{static_cast(value)}; + begin[0] = hex_digit[uc >> 4]; + begin[1] = hex_digit[uc & 0x0f]; + return zview{begin, 2u}; + } + + static char *into_buf(char *begin, char *end, std::byte const &value) + { + auto view{to_buf(begin, end, value)}; + return begin + std::size(view); + } +}; +} // namespace pqxx + + +class ipv4 +{ +public: + ipv4() : m_as_int{0u} {} + ipv4(ipv4 const &) = default; + ipv4(ipv4 &&) = default; + explicit ipv4(uint32_t i) : m_as_int{i} {} + ipv4( + unsigned char b1, unsigned char b2, unsigned char b3, unsigned char b4) : + ipv4() + { + set_byte(0, b1); + set_byte(1, b2); + set_byte(2, b3); + set_byte(3, b4); + } + + bool operator==(ipv4 const &o) const { return m_as_int == o.m_as_int; 
} + ipv4 &operator=(ipv4 const &) = default; + + /// Index bytes, from 0 to 3, in network (i.e. Big-Endian) byte order. + unsigned int operator[](int byte) const + { + if (byte < 0 or byte > 3) + throw pqxx::usage_error("Byte out of range."); + auto const shift = compute_shift(byte); + return static_cast((m_as_int >> shift) & 0xff); + } + + /// Set individual byte, in network byte order. + void set_byte(int byte, uint32_t value) + { + auto const shift = compute_shift(byte); + auto const blanked = (m_as_int & ~uint32_t(0xff << shift)); + m_as_int = (blanked | ((value & 0xff) << shift)); + } + +private: + static unsigned compute_shift(int byte) + { + if (byte < 0 or byte > 3) + throw pqxx::usage_error("Byte out of range."); + return static_cast((3 - byte) * 8); + } + + uint32_t m_as_int; +}; + + +using bytea = std::vector; + + +namespace pqxx +{ +template<> struct nullness : no_null +{}; + + +template<> struct string_traits +{ + static ipv4 from_string(std::string_view text) + { + ipv4 ts; + if (std::data(text) == nullptr) + internal::throw_null_conversion(type_name); + std::regex ipv4_regex{R"--((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}))--"}; + std::smatch match; + // Need non-temporary for `std::regex_match()` + std::string sstr{text}; + if (not std::regex_match(sstr, match, ipv4_regex) or std::size(match) != 5) + throw std::runtime_error{"Invalid ipv4 format: " + std::string{text}}; + try + { + for (std::size_t i{0}; i < 4; ++i) + ts.set_byte(int(i), uint32_t(std::stoi(match[i + 1]))); + } + catch (std::invalid_argument const &) + { + throw std::runtime_error{"Invalid ipv4 format: " + std::string{text}}; + } + catch (std::out_of_range const &) + { + throw std::runtime_error{"Invalid ipv4 format: " + std::string{text}}; + } + return ts; + } + + static char *into_buf(char *begin, char *end, ipv4 const &value) + { + if (pqxx::internal::cmp_less(end - begin, size_buffer(value))) + throw conversion_error{"Buffer too small for ipv4."}; + char *here = begin; + for (int i = 0; i < 4; ++i) + { + here = string_traits::into_buf(here, end, value[i]); + *(here - 1) = '.'; + } + *(here - 1) = '\0'; + return here; + } + + static zview to_buf(char *begin, char *end, ipv4 const &value) + { + return zview{ + begin, + static_cast(into_buf(begin, end, value) - begin - 1)}; + } + + static constexpr std::size_t size_buffer(ipv4 const &) noexcept + { + return 20; + } +}; + + +namespace +{ +inline char nibble_to_hex(unsigned nibble) +{ + if (nibble < 10) + return char('0' + nibble); + else if (nibble < 16) + return char('a' + (nibble - 10)); + else + throw std::runtime_error{"Invalid digit going into bytea."}; +} + + +inline unsigned hex_to_digit(char hex) +{ + auto x = static_cast(hex); + if (x >= '0' and x <= '9') + return x - '0'; + else if (x >= 'a' and x <= 'f') + return 10 + x - 'a'; + else if (x >= 'A' and x <= 'F') + return 10 + x - 'A'; + else + throw std::runtime_error{"Invalid hex in bytea."}; +} +} // namespace + + +template<> struct nullness : no_null +{}; + + +template<> struct string_traits +{ + static bytea from_string(std::string_view text) + { + if ((std::size(text) & 1) != 0) + throw std::runtime_error{"Odd hex size."}; + bytea value; + value.reserve((std::size(text) - 2) / 2); + for (std::size_t i = 2; i < std::size(text); i += 2) + { + auto hi = hex_to_digit(text[i]), lo = hex_to_digit(text[i + 1]); + value.push_back(static_cast((hi << 4) | lo)); + } + return value; + } + + static zview to_buf(char *begin, char *end, bytea const &value) + { + auto const need = size_buffer(value); + auto 
const have = end - begin; + if (std::size_t(have) < need) + throw pqxx::conversion_overrun{"Not enough space in buffer for bytea."}; + char *pos = begin; + *pos++ = '\\'; + *pos++ = 'x'; + for (unsigned char const u : value) + { + *pos++ = nibble_to_hex(unsigned(u) >> 4); + *pos++ = nibble_to_hex(unsigned(u) & 0x0f); + } + *pos++ = '\0'; + return {begin, pos - begin - 1}; + } + + static char *into_buf(char *begin, char *end, bytea const &value) + { + return begin + std::size(to_buf(begin, end, value)) + 1; + } + + static std::size_t size_buffer(bytea const &value) + { + return 2 + 2 * std::size(value) + 1; + } +}; +} // namespace pqxx diff --git a/ext/libpqxx-7.7.3/test/unit/CMakeLists.txt b/ext/libpqxx-7.7.3/test/unit/CMakeLists.txt new file mode 100644 index 000000000..f938a4181 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/CMakeLists.txt @@ -0,0 +1,25 @@ +if(NOT PostgreSQL_FOUND) + if(POLICY CMP0074) + cmake_policy(PUSH) + # CMP0074 is `OLD` by `cmake_minimum_required(VERSION 3.7)`, sets `NEW` + # to enable support CMake variable `PostgreSQL_ROOT`. + cmake_policy(SET CMP0074 NEW) + endif() + + find_package(PostgreSQL REQUIRED) + + if(POLICY CMP0074) + cmake_policy(POP) + endif() +endif() + +file(GLOB UNIT_TEST_SOURCES *.cxx) + +add_executable(unit_runner ${UNIT_TEST_SOURCES}) +target_link_libraries(unit_runner PUBLIC pqxx) +target_include_directories(unit_runner PRIVATE ${PostgreSQL_INCLUDE_DIRS}) +add_test( + NAME unit_runner + WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} + COMMAND unit_runner +) diff --git a/ext/libpqxx-7.7.3/test/unit/test_array.cxx b/ext/libpqxx-7.7.3/test/unit/test_array.cxx new file mode 100644 index 000000000..229ac5145 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_array.cxx @@ -0,0 +1,548 @@ +#include + +#include "../test_helpers.hxx" + +// Test program for libpqxx array parsing. + +namespace pqxx +{ +template<> +struct nullness : no_null +{}; + + +inline std::string to_string(pqxx::array_parser::juncture const &j) +{ + using junc = pqxx::array_parser::juncture; + switch (j) + { + case junc::row_start: return "row_start"; + case junc::row_end: return "row_end"; + case junc::null_value: return "null_value"; + case junc::string_value: return "string_value"; + case junc::done: return "done"; + default: return "UNKNOWN JUNCTURE: " + to_string(static_cast(j)); + } +} +} // namespace pqxx + + +namespace +{ +void test_empty_arrays() +{ + std::pair output; + + // Parsing a null pointer just immediately returns "done". + output = pqxx::array_parser(std::string_view()).get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::done, + "get_next on null array did not return done."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); + + // Parsing an empty array string immediately returns "done". + output = pqxx::array_parser("").get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::done, + "get_next on an empty array string did not return done."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); + + // Parsing an empty array returns "row_start", "row_end", "done". 
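+ // (get_next() yields a (juncture, value) pair; the value string only
+ // carries data at string_value junctures, so every step in this empty-array
+ // walkthrough expects it to be empty.)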
+ pqxx::array_parser empty_parser("{}"); + output = empty_parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_start, + "Empty array did not start with row_start."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); + + output = empty_parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_end, + "Empty array did not end with row_end."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); + + output = empty_parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::done, + "Empty array did not conclude with done."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); +} + + +void test_null_value() +{ + std::pair output; + pqxx::array_parser containing_null("{NULL}"); + + output = containing_null.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_start, + "Array containing null did not start with row_start."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); + + output = containing_null.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::null_value, + "Array containing null did not return null_value."); + PQXX_CHECK_EQUAL(output.second, "", "Null value was not empty."); + + output = containing_null.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_end, + "Array containing null did not end with row_end."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); + + output = containing_null.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::done, + "Array containing null did not conclude with done."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); +} + + +void test_single_quoted_string() +{ + std::pair output; + pqxx::array_parser parser("{'item'}"); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_start, + "Array did not start with row_start."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::string_value, + "Array did not return string_value."); + PQXX_CHECK_EQUAL(output.second, "item", "Unexpected string value."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_end, + "Array did not end with row_end."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::done, + "Array did not conclude with done."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); +} + + +void test_single_quoted_escaping() +{ + std::pair output; + pqxx::array_parser parser("{'don''t\\\\ care'}"); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_start, + "Array did not start with row_start."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::string_value, + "Array did not return string_value."); + PQXX_CHECK_EQUAL(output.second, "don't\\ care", "Unexpected string value."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_end, + "Array did not end with row_end."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::done, + "Array did not conclude with 
done."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); +} + + +void test_double_quoted_string() +{ + std::pair output; + pqxx::array_parser parser("{\"item\"}"); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_start, + "Array did not start with row_start."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::string_value, + "Array did not return string_value."); + PQXX_CHECK_EQUAL(output.second, "item", "Unexpected string value."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_end, + "Array did not end with row_end."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::done, + "Array did not conclude with done."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); +} + + +void test_double_quoted_escaping() +{ + std::pair output; + pqxx::array_parser parser(R"--({"don''t\\ care"})--"); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_start, + "Array did not start with row_start."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::string_value, + "Array did not return string_value."); + PQXX_CHECK_EQUAL(output.second, "don''t\\ care", "Unexpected string value."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_end, + "Array did not end with row_end."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::done, + "Array did not conclude with done."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); +} + + +// A pair of double quotes in a double-quoted string is an escaped quote. 
+void test_double_double_quoted_string() +{ + std::pair output; + pqxx::array_parser parser{R"--({"3"" steel"})--"}; + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_start, + "Array did not start with row_start."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::string_value, + "Array did not return string_value."); + + PQXX_CHECK_EQUAL(output.second, "3\" steel", "Unexpected string value."); +} + + +void test_unquoted_string() +{ + std::pair output; + pqxx::array_parser parser("{item}"); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_start, + "Array did not start with row_start."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::string_value, + "Array did not return string_value."); + PQXX_CHECK_EQUAL(output.second, "item", "Unexpected string value."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_end, + "Array did not end with row_end."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::done, + "Array did not conclude with done."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); +} + + +void test_multiple_values() +{ + std::pair output; + pqxx::array_parser parser("{1,2}"); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_start, + "Array did not start with row_start."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::string_value, + "Array did not return string_value."); + PQXX_CHECK_EQUAL(output.second, "1", "Unexpected string value."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::string_value, + "Array did not return string_value."); + PQXX_CHECK_EQUAL(output.second, "2", "Unexpected string value."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_end, + "Array did not end with row_end."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::done, + "Array did not conclude with done."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); +} + + +void test_nested_array() +{ + std::pair output; + pqxx::array_parser parser("{{item}}"); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_start, + "Array did not start with row_start."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_start, + "Nested array did not start 2nd dimension with row_start."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::string_value, + "Array did not return string_value."); + PQXX_CHECK_EQUAL(output.second, "item", "Unexpected string value."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_end, + "Nested array did not end 2nd dimension with row_end."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_end, + "Array did not end with row_end."); + 
PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::done, + "Array did not conclude with done."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); +} + + +void test_nested_array_with_multiple_entries() +{ + std::pair output; + pqxx::array_parser parser("{{1,2},{3,4}}"); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_start, + "Array did not start with row_start."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_start, + "Nested array did not start 2nd dimension with row_start."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::string_value, + "Array did not return string_value."); + PQXX_CHECK_EQUAL(output.second, "1", "Unexpected string value."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::string_value, + "Array did not return string_value."); + PQXX_CHECK_EQUAL(output.second, "2", "Unexpected string value."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_end, + "Nested array did not end 2nd dimension with row_end."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_start, + "Nested array did not descend to 2nd dimension with row_start."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::string_value, + "Array did not return string_value."); + PQXX_CHECK_EQUAL(output.second, "3", "Unexpected string value."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::string_value, + "Array did not return string_value."); + PQXX_CHECK_EQUAL(output.second, "4", "Unexpected string value."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_end, + "Nested array did not leave 2nd dimension with row_end."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::row_end, + "Array did not end with row_end."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); + + output = parser.get_next(); + PQXX_CHECK_EQUAL( + output.first, pqxx::array_parser::juncture::done, + "Array did not conclude with done."); + PQXX_CHECK_EQUAL(output.second, "", "Unexpected nonempty output."); +} + + +void test_array_parse() +{ + test_empty_arrays(); + test_null_value(); + test_single_quoted_string(); + test_single_quoted_escaping(); + test_double_quoted_string(); + test_double_quoted_escaping(); + test_double_double_quoted_string(); + test_unquoted_string(); + test_multiple_values(); + test_nested_array(); + test_nested_array_with_multiple_entries(); +} + + +void test_generate_empty_array() +{ + PQXX_CHECK_EQUAL( + pqxx::to_string(std::vector{}), "{}", + "Basic array output is not as expected."); + PQXX_CHECK_EQUAL( + pqxx::to_string(std::vector{}), "{}", + "String array comes out different."); +} + + +void test_generate_null_value() +{ + PQXX_CHECK_EQUAL( + pqxx::to_string(std::vector{nullptr}), "{NULL}", + "Null array value did not come out as expected."); +} + + +void test_generate_single_item() +{ + PQXX_CHECK_EQUAL( + 
pqxx::to_string(std::vector{42}), "{42}", + "Numeric conversion came out wrong."); + + PQXX_CHECK_EQUAL( + pqxx::to_string(std::vector{"foo"}), "{\"foo\"}", + "String array conversion came out wrong."); +} + + +void test_generate_multiple_items() +{ + PQXX_CHECK_EQUAL( + pqxx::to_string(std::vector{5, 4, 3, 2}), "{5,4,3,2}", + "Array with multiple values is not correct."); + PQXX_CHECK_EQUAL( + pqxx::to_string(std::vector{"foo", "bar"}), + "{\"foo\",\"bar\"}", "Array with multiple strings came out wrong."); +} + + +void test_generate_nested_array() +{ + PQXX_CHECK_EQUAL( + pqxx::to_string(std::vector>{{1, 2}, {3, 4}}), + "{{1,2},{3,4}}", "Nested arrays don't work right."); +} + + +void test_generate_escaped_strings() +{ + PQXX_CHECK_EQUAL( + pqxx::to_string(std::vector{"a\\b"}), "{\"a\\\\b\"}", + "Backslashes are not escaped properly."); + PQXX_CHECK_EQUAL( + pqxx::to_string(std::vector{"x\"y\""}), "{\"x\\\"y\\\"\"}", + "Double quotes are not escaped properly."); +} + + +void test_array_generate() +{ + test_generate_empty_array(); + test_generate_null_value(); + test_generate_single_item(); + test_generate_multiple_items(); + test_generate_nested_array(); + test_generate_escaped_strings(); +} + + +void test_array_roundtrip() +{ + pqxx::connection c; + pqxx::work w{c}; + + std::vector const in{0, 1, 2, 3, 5}; + auto const r1{w.exec1("SELECT " + c.quote(in) + "::integer[]")}; + pqxx::array_parser parser{r1[0].view()}; + auto item{parser.get_next()}; + PQXX_CHECK_EQUAL( + item.first, pqxx::array_parser::juncture::row_start, + "Array did not start with row_start."); + + std::vector out; + for (item = parser.get_next(); + item.first == pqxx::array_parser::juncture::string_value; + item = parser.get_next()) + { + out.push_back(pqxx::from_string(item.second)); + } + + PQXX_CHECK_EQUAL( + item.first, pqxx::array_parser::juncture::row_end, + "Array values did not end in row_end."); + PQXX_CHECK_EQUAL( + std::size(out), std::size(in), "Array came back with different length."); + + for (std::size_t i{0}; i < std::size(in); ++i) + PQXX_CHECK_EQUAL(out[i], in[i], "Array element has changed."); + + item = parser.get_next(); + PQXX_CHECK_EQUAL( + item.first, pqxx::array_parser::juncture::done, + "Array did not end in done."); +} + + +PQXX_REGISTER_TEST(test_array_parse); +PQXX_REGISTER_TEST(test_array_generate); +PQXX_REGISTER_TEST(test_array_roundtrip); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_binarystring.cxx b/ext/libpqxx-7.7.3/test/unit/test_binarystring.cxx new file mode 100644 index 000000000..e6097d039 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_binarystring.cxx @@ -0,0 +1,211 @@ +#include +#include +#include + +#include "../test_helpers.hxx" +#include "../test_types.hxx" + + +namespace +{ +pqxx::binarystring +make_binarystring(pqxx::transaction_base &T, std::string content) +{ +#include "pqxx/internal/ignore-deprecated-pre.hxx" + return pqxx::binarystring(T.exec1("SELECT " + T.quote_raw(content))[0]); +#include "pqxx/internal/ignore-deprecated-post.hxx" +} + + +void test_binarystring() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + auto b{make_binarystring(tx, "")}; + PQXX_CHECK(std::empty(b), "Empty binarystring is not empty."); + PQXX_CHECK_EQUAL(b.str(), "", "Empty binarystring doesn't work."); + PQXX_CHECK_EQUAL(std::size(b), 0u, "Empty binarystring has nonzero size."); + PQXX_CHECK_EQUAL(b.length(), 0u, "Length/size mismatch."); + PQXX_CHECK(std::begin(b) == std::end(b), "Empty binarystring iterates."); + PQXX_CHECK( + std::cbegin(b) == std::begin(b), 
"Wrong cbegin for empty binarystring."); + PQXX_CHECK( + std::rbegin(b) == std::rend(b), "Empty binarystring reverse-iterates."); + PQXX_CHECK( + std::crbegin(b) == std::rbegin(b), + "Wrong crbegin for empty binarystring."); + PQXX_CHECK_THROWS( + b.at(0), std::out_of_range, "Empty binarystring accepts at()."); + + b = make_binarystring(tx, "z"); + PQXX_CHECK_EQUAL(b.str(), "z", "Basic nonempty binarystring is broken."); + PQXX_CHECK(not std::empty(b), "Nonempty binarystring is empty."); + PQXX_CHECK_EQUAL(std::size(b), 1u, "Bad binarystring size."); + PQXX_CHECK_EQUAL(b.length(), 1u, "Length/size mismatch."); + PQXX_CHECK( + std::begin(b) != std::end(b), "Nonempty binarystring does not iterate."); + PQXX_CHECK( + std::rbegin(b) != std::rend(b), + "Nonempty binarystring does not reverse-iterate."); + PQXX_CHECK(std::begin(b) + 1 == std::end(b), "Bad iteration."); + PQXX_CHECK(std::rbegin(b) + 1 == std::rend(b), "Bad reverse iteration."); + PQXX_CHECK(std::cbegin(b) == std::begin(b), "Wrong cbegin."); + PQXX_CHECK(std::cend(b) == std::end(b), "Wrong cend."); + PQXX_CHECK(std::crbegin(b) == std::rbegin(b), "Wrong crbegin."); + PQXX_CHECK(std::crend(b) == std::rend(b), "Wrong crend."); + PQXX_CHECK(b.front() == 'z', "Unexpected front()."); + PQXX_CHECK(b.back() == 'z', "Unexpected back()."); + PQXX_CHECK(b.at(0) == 'z', "Unexpected data at index 0."); + PQXX_CHECK_THROWS( + b.at(1), std::out_of_range, "Failed to catch range error."); + + std::string const simple{"ab"}; + b = make_binarystring(tx, simple); + PQXX_CHECK_EQUAL( + b.str(), simple, "Binary (un)escaping went wrong somewhere."); + PQXX_CHECK_EQUAL( + std::size(b), std::size(simple), "Escaping confuses length."); + + std::string const simple_escaped{ + tx.esc_raw(std::basic_string_view{ + reinterpret_cast(std::data(simple)), + std::size(simple)})}; + for (auto c : simple_escaped) + { + auto const uc{static_cast(c)}; + PQXX_CHECK(uc <= 127, "Non-ASCII byte in escaped string."); + } + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + PQXX_CHECK_EQUAL( + tx.quote_raw( + reinterpret_cast(simple.c_str()), + std::size(simple)), + tx.quote(b), "quote_raw is broken"); + PQXX_CHECK_EQUAL( + tx.quote(b), tx.quote_raw(simple), "Binary quoting is broken."); + PQXX_CHECK_EQUAL( + pqxx::binarystring(tx.exec1("SELECT " + tx.quote(b))[0]).str(), simple, + "Binary string is not idempotent."); +#include "pqxx/internal/ignore-deprecated-post.hxx" + + std::string const bytes("\x01\x23\x23\xa1\x2b\x0c\xff"); + b = make_binarystring(tx, bytes); + PQXX_CHECK_EQUAL(b.str(), bytes, "Binary data breaks (un)escaping."); + + std::string const nully("a\0b", 3); + b = make_binarystring(tx, nully); + PQXX_CHECK_EQUAL(b.str(), nully, "Nul byte broke binary (un)escaping."); + PQXX_CHECK_EQUAL(std::size(b), 3u, "Nul byte broke binarystring size."); + + b = make_binarystring(tx, "foo"); + PQXX_CHECK_EQUAL(std::string(b.get(), 3), "foo", "get() appears broken."); + + auto b1{make_binarystring(tx, "1")}, b2{make_binarystring(tx, "2")}; + PQXX_CHECK_NOT_EQUAL(b1.get(), b2.get(), "Madness rules."); + PQXX_CHECK_NOT_EQUAL(b1.str(), b2.str(), "Logic has no more meaning."); + b1.swap(b2); + PQXX_CHECK_NOT_EQUAL(b1.str(), b2.str(), "swap() equalized binarystrings."); + PQXX_CHECK_NOT_EQUAL(b1.str(), "1", "swap() did not happen."); + PQXX_CHECK_EQUAL(b1.str(), "2", "swap() is broken."); + PQXX_CHECK_EQUAL(b2.str(), "1", "swap() went insane."); + + b = make_binarystring(tx, "bar"); + b.swap(b); + PQXX_CHECK_EQUAL(b.str(), "bar", "Self-swap confuses binarystring."); + + b = 
make_binarystring(tx, "\\x"); + PQXX_CHECK_EQUAL(b.str(), "\\x", "Hex-escape header confused (un)escaping."); +} + + +void test_binarystring_conversion() +{ + constexpr char bytes[]{"f\to\0o\n\0"}; + std::string_view const data{bytes, std::size(bytes) - 1}; +#include "pqxx/internal/ignore-deprecated-pre.hxx" + pqxx::binarystring bin{data}; +#include "pqxx/internal/ignore-deprecated-post.hxx" + auto const escaped{pqxx::to_string(bin)}; + PQXX_CHECK_EQUAL( + escaped, std::string_view{"\\x66096f006f0a00"}, "Unexpected hex escape."); + auto const restored{pqxx::from_string(escaped)}; + PQXX_CHECK_EQUAL( + std::size(restored), std::size(data), "Unescaping produced wrong length."); +} + + +void test_binarystring_stream() +{ + constexpr char bytes[]{"a\tb\0c"}; + std::string_view const data{bytes, std::size(bytes) - 1}; +#include "pqxx/internal/ignore-deprecated-pre.hxx" + pqxx::binarystring bin{data}; +#include "pqxx/internal/ignore-deprecated-post.hxx" + + pqxx::connection conn; + pqxx::transaction tx{conn}; + tx.exec0("CREATE TEMP TABLE pqxxbinstream(id integer, bin bytea)"); + + auto to{pqxx::stream_to::table(tx, {"pqxxbinstream"})}; + to.write_values(0, bin); + to.complete(); + + auto ptr{reinterpret_cast(std::data(data))}; + auto expect{ + tx.quote(std::basic_string_view{ptr, std::size(data)})}; + PQXX_CHECK( + tx.query_value("SELECT bin = " + expect + " FROM pqxxbinstream"), + "binarystring did not stream_to properly."); + PQXX_CHECK_EQUAL( + tx.query_value("SELECT octet_length(bin) FROM pqxxbinstream"), + std::size(data), "Did the terminating zero break the bytea?"); +} + + +void test_binarystring_array_stream() +{ + pqxx::connection conn; + pqxx::transaction tx{conn}; + tx.exec0("CREATE TEMP TABLE pqxxbinstream(id integer, vec bytea[])"); + + constexpr char bytes1[]{"a\tb\0c"}, bytes2[]{"1\0.2"}; + std::string_view const data1{bytes1}, data2{bytes2}; +#include "pqxx/internal/ignore-deprecated-pre.hxx" + pqxx::binarystring bin1{data1}, bin2{data2}; + std::vector const vec{bin1, bin2}; +#include "pqxx/internal/ignore-deprecated-post.hxx" + + auto to{pqxx::stream_to::table(tx, {"pqxxbinstream"})}; + to.write_values(0, vec); + to.complete(); + + PQXX_CHECK_EQUAL( + tx.query_value( + "SELECT array_length(vec, 1) FROM pqxxbinstream"), + std::size(vec), "Array came out with wrong length."); + + auto ptr1{reinterpret_cast(std::data(data1))}, + ptr2{reinterpret_cast(std::data(data2))}; + auto expect1{ + tx.quote(std::basic_string_view{ptr1, std::size(data1)})}, + expect2{ + tx.quote(std::basic_string_view{ptr2, std::size(data2)})}; + PQXX_CHECK( + tx.query_value("SELECT vec[1] = " + expect1 + " FROM pqxxbinstream"), + "Bytea in array came out wrong."); + PQXX_CHECK( + tx.query_value("SELECT vec[2] = " + expect2 + " FROM pqxxbinstream"), + "First bytea in array worked, but second did not."); + PQXX_CHECK_EQUAL( + tx.query_value( + "SELECT octet_length(vec[1]) FROM pqxxbinstream"), + std::size(data1), "Bytea length broke inside array."); +} + + +PQXX_REGISTER_TEST(test_binarystring); +PQXX_REGISTER_TEST(test_binarystring_conversion); +PQXX_REGISTER_TEST(test_binarystring_stream); +PQXX_REGISTER_TEST(test_binarystring_array_stream); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_blob.cxx b/ext/libpqxx-7.7.3/test/unit/test_blob.cxx new file mode 100644 index 000000000..709c1f489 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_blob.cxx @@ -0,0 +1,644 @@ +#include + +#include +#include + +#include "../test_helpers.hxx" +#include "../test_types.hxx" + + +namespace +{ +void 
test_blob_is_useless_by_default() +{ + pqxx::blob b{}; + std::basic_string buf; + PQXX_CHECK_THROWS( + b.read(buf, 1), pqxx::usage_error, + "Read on default-constructed blob did not throw failure."); + PQXX_CHECK_THROWS( + b.write(buf), pqxx::usage_error, + "Write on default-constructed blob did not throw failure."); +} + + +void test_blob_create_makes_empty_blob() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + pqxx::oid id{pqxx::blob::create(tx)}; + auto b{pqxx::blob::open_r(tx, id)}; + b.seek_end(0); + PQXX_CHECK_EQUAL(b.tell(), 0, "New blob is not empty."); +} + + +void test_blob_create_with_oid_requires_oid_be_free() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + auto id{pqxx::blob::create(tx)}; + + PQXX_CHECK_THROWS( + pqxx::ignore_unused(pqxx::blob::create(tx, id)), pqxx::failure, + "Not getting expected error when oid not free."); +} + + +void test_blob_create_with_oid_obeys_oid() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + auto id{pqxx::blob::create(tx)}; + pqxx::blob::remove(tx, id); + + auto actual_id{pqxx::blob::create(tx, id)}; + PQXX_CHECK_EQUAL(actual_id, id, "Create with oid returned different oid."); +} + + +void test_blobs_are_transactional() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + pqxx::oid id{pqxx::blob::create(tx)}; + tx.abort(); + pqxx::work tx2{conn}; + PQXX_CHECK_THROWS( + pqxx::ignore_unused(pqxx::blob::open_r(tx2, id)), pqxx::failure, + "Blob from aborted transaction still exists."); +} + + +void test_blob_remove_removes_blob() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + pqxx::oid id{pqxx::blob::create(tx)}; + pqxx::blob::remove(tx, id); + PQXX_CHECK_THROWS( + pqxx::ignore_unused(pqxx::blob::open_r(tx, id)), pqxx::failure, + "Attempt to open blob after removing should have failed."); +} + + +void test_blob_remove_is_not_idempotent() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + pqxx::oid id{pqxx::blob::create(tx)}; + pqxx::blob::remove(tx, id); + PQXX_CHECK_THROWS( + pqxx::blob::remove(tx, id), pqxx::failure, + "Redundant remove() did not throw failure."); +} + + +void test_blob_checks_open_mode() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + pqxx::oid id{pqxx::blob::create(tx)}; + pqxx::blob b_r{pqxx::blob::open_r(tx, id)}; + pqxx::blob b_w{pqxx::blob::open_w(tx, id)}; + pqxx::blob b_rw{pqxx::blob::open_rw(tx, id)}; + + std::basic_string buf{std::byte{3}, std::byte{2}, std::byte{1}}; + + // These are all allowed: + b_w.write(buf); + b_r.read(buf, 3); + b_rw.seek_end(0); + b_rw.write(buf); + b_rw.seek_abs(0); + b_rw.read(buf, 6); + + // These are not: + PQXX_CHECK_THROWS( + b_r.write(buf), pqxx::failure, "Read-only blob did not stop write."); + PQXX_CHECK_THROWS( + b_w.read(buf, 10), pqxx::failure, "Write-only blob did not stop read."); +} + + +void test_blob_supports_move() +{ + std::basic_string buf; + buf.push_back(std::byte{'x'}); + + pqxx::connection conn; + pqxx::work tx{conn}; + pqxx::oid id{pqxx::blob::create(tx)}; + pqxx::blob b1{pqxx::blob::open_rw(tx, id)}; + b1.write(buf); + + pqxx::blob b2{std::move(b1)}; + b2.seek_abs(0); + b2.read(buf, 1u); + + PQXX_CHECK_THROWS( + b1.read(buf, 1u), pqxx::usage_error, + "Blob still works after move construction."); + + b1 = std::move(b2); + b1.read(buf, 1u); + + PQXX_CHECK_THROWS( + b2.read(buf, 1u), pqxx::usage_error, + "Blob still works after move assignment."); +} + + +void test_blob_read_reads_data() +{ + std::basic_string const data{ + std::byte{'a'}, std::byte{'b'}, std::byte{'c'}}; + + pqxx::connection conn; + pqxx::work tx{conn}; + pqxx::oid 
id{pqxx::blob::from_buf(tx, data)}; + + std::basic_string buf; + auto b{pqxx::blob::open_rw(tx, id)}; + PQXX_CHECK_EQUAL( + b.read(buf, 2), 2u, "Full read() returned an unexpected value."); + PQXX_CHECK_EQUAL( + buf, (std::basic_string{std::byte{'a'}, std::byte{'b'}}), + "Read back the wrong data."); + PQXX_CHECK_EQUAL( + b.read(buf, 2), 1u, "Partial read() returned an unexpected value."); + PQXX_CHECK_EQUAL( + buf, (std::basic_string{std::byte{'c'}}), + "Continued read produced wrong data."); + PQXX_CHECK_EQUAL( + b.read(buf, 2), 0u, "read at end returned an unexpected value."); + PQXX_CHECK_EQUAL( + buf, (std::basic_string{}), "Read past end produced data."); +} + + +void test_blob_read_span() +{ +#if defined(PQXX_HAVE_SPAN) + std::basic_string const data{std::byte{'u'}, std::byte{'v'}, + std::byte{'w'}, std::byte{'x'}, + std::byte{'y'}, std::byte{'z'}}; + + pqxx::connection conn; + pqxx::work tx{conn}; + pqxx::oid id{pqxx::blob::from_buf(tx, data)}; + + auto b{pqxx::blob::open_r(tx, id)}; + std::basic_string string_buf; + string_buf.resize(2); + + std::span output; + + output = b.read(std::span{}); + PQXX_CHECK_EQUAL( + std::size(output), 0u, "Empty read produced nonempty buffer."); + output = b.read(string_buf); + PQXX_CHECK_EQUAL( + std::size(output), 2u, "Got unexpected buf size from blob::read()."); + PQXX_CHECK_EQUAL( + output[0], std::byte{'u'}, "Unexpected byte from blob::read()."); + PQXX_CHECK_EQUAL( + output[1], std::byte{'v'}, "Unexpected byte from blob::read()."); + + string_buf.resize(100); + output = b.read(std::span{string_buf.data(), 1}); + PQXX_CHECK_EQUAL( + std::size(output), 1u, + "Did blob::read() follow string size instead of span size?"); + PQXX_CHECK_EQUAL( + output[0], std::byte{'w'}, "Unexpected byte from blob::read()."); + + std::vector vec_buf; + vec_buf.resize(2); + auto output2{b.read(vec_buf)}; + PQXX_CHECK_EQUAL( + std::size(output2), 2u, "Got unexpected buf size from blob::read()."); + PQXX_CHECK_EQUAL( + output2[0], std::byte{'x'}, "Unexpected byte from blob::read()."); + PQXX_CHECK_EQUAL( + output2[1], std::byte{'y'}, "Unexpected byte from blob::read()."); + + vec_buf.resize(100); + output2 = b.read(vec_buf); + PQXX_CHECK_EQUAL(std::size(output2), 1u, "Weird things happened at EOF."); + PQXX_CHECK_EQUAL(output2[0], std::byte{'z'}, "Bad data at EOF."); +#endif // PQXX_HAVE_SPAN +} + + +void test_blob_reads_vector() +{ + char const content[]{"abcd"}; + pqxx::connection conn; + pqxx::work tx{conn}; + auto id{pqxx::blob::from_buf( + tx, std::basic_string_view{ + reinterpret_cast(content), std::size(content)})}; + std::vector buf; + buf.resize(10); + auto out{pqxx::blob::open_r(tx, id).read(buf)}; + PQXX_CHECK_EQUAL( + std::size(out), std::size(content), + "Got wrong length back when reading as vector."); + PQXX_CHECK_EQUAL( + out[0], std::byte{'a'}, "Got bad data when reading as vector."); +} + + +void test_blob_write_appends_at_insertion_point() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + auto id{pqxx::blob::create(tx)}; + + auto b{pqxx::blob::open_rw(tx, id)}; + b.write(std::basic_string{std::byte{'z'}}); + b.write(std::basic_string{std::byte{'a'}}); + + std::basic_string buf; + b.read(buf, 5); + PQXX_CHECK_EQUAL( + buf, (std::basic_string{}), "Found data at the end."); + b.seek_abs(0); + b.read(buf, 5); + PQXX_CHECK_EQUAL( + buf, (std::basic_string{std::byte{'z'}, std::byte{'a'}}), + "Consecutive writes did not append correctly."); + + b.write(std::basic_string{std::byte{'x'}}); + // Blob now contains "zax". That's not we wanted... 
Rewind and rewrite. + b.seek_abs(1); + b.write(std::basic_string{std::byte{'y'}}); + b.seek_abs(0); + b.read(buf, 5); + PQXX_CHECK_EQUAL( + buf, + (std::basic_string{ + std::byte{'z'}, std::byte{'y'}, std::byte{'x'}}), + "Rewriting in the middle did not work right."); +} + + +void test_blob_writes_span() +{ +#if defined(PQXX_HAVE_SPAN) + pqxx::connection conn; + pqxx::work tx{conn}; + constexpr char content[]{"gfbltk"}; + std::basic_string data{ + reinterpret_cast(content), std::size(content)}; + + auto id{pqxx::blob::create(tx)}; + auto b{pqxx::blob::open_rw(tx, id)}; + b.write(std::span{data.data() + 1, 3u}); + b.seek_abs(0); + + std::vector buf; + buf.resize(4); + auto out{b.read(std::span{buf.data(), 4u})}; + PQXX_CHECK_EQUAL( + std::size(out), 3u, "Did not get expected number of bytes back."); + PQXX_CHECK_EQUAL(out[0], std::byte{'f'}, "Data did not come back right."); + PQXX_CHECK_EQUAL(out[2], std::byte{'l'}, "Data started right, ended wrong!"); +#endif // PQXX_HAVE_SPAN +} + + +void test_blob_resize_shortens_to_desired_length() +{ + std::basic_string const data{ + std::byte{'w'}, std::byte{'o'}, std::byte{'r'}, std::byte{'k'}}; + + pqxx::connection conn; + pqxx::work tx{conn}; + auto id{pqxx::blob::from_buf(tx, data)}; + + pqxx::blob::open_w(tx, id).resize(2); + std::basic_string buf; + pqxx::blob::to_buf(tx, id, buf, 10); + PQXX_CHECK_EQUAL( + buf, (std::basic_string{std::byte{'w'}, std::byte{'o'}}), + "Truncate did not shorten correctly."); +} + + +void test_blob_resize_extends_to_desired_length() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + auto id{ + pqxx::blob::from_buf(tx, std::basic_string{std::byte{100}})}; + pqxx::blob::open_w(tx, id).resize(3); + std::basic_string buf; + pqxx::blob::to_buf(tx, id, buf, 10); + PQXX_CHECK_EQUAL( + buf, + (std::basic_string{std::byte{100}, std::byte{0}, std::byte{0}}), + "Resize did not zero-extend correctly."); +} + + +void test_blob_tell_tracks_position() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + auto id{pqxx::blob::create(tx)}; + auto b{pqxx::blob::open_rw(tx, id)}; + + PQXX_CHECK_EQUAL( + b.tell(), 0, "Empty blob started out in non-zero position."); + b.write(std::basic_string{std::byte{'e'}, std::byte{'f'}}); + PQXX_CHECK_EQUAL( + b.tell(), 2, "Empty blob started out in non-zero position."); + b.seek_abs(1); + PQXX_CHECK_EQUAL(b.tell(), 1, "tell() did not track seek."); +} + + +void test_blob_seek_sets_positions() +{ + std::basic_string data{ + std::byte{0}, std::byte{1}, std::byte{2}, std::byte{3}, std::byte{4}, + std::byte{5}, std::byte{6}, std::byte{7}, std::byte{8}, std::byte{9}}; + pqxx::connection conn; + pqxx::work tx{conn}; + auto id{pqxx::blob::from_buf(tx, data)}; + auto b{pqxx::blob::open_r(tx, id)}; + + std::basic_string buf; + b.seek_rel(3); + b.read(buf, 1u); + PQXX_CHECK_EQUAL( + buf[0], std::byte{3}, + "seek_rel() from beginning did not take us to the right position."); + + b.seek_abs(2); + b.read(buf, 1u); + PQXX_CHECK_EQUAL( + buf[0], std::byte{2}, "seek_abs() did not take us to the right position."); + + b.seek_end(-2); + b.read(buf, 1u); + PQXX_CHECK_EQUAL( + buf[0], std::byte{8}, "seek_end() did not take us to the right position."); +} + + +void test_blob_from_buf_interoperates_with_to_buf() +{ + std::basic_string const data{std::byte{'h'}, std::byte{'i'}}; + std::basic_string buf; + pqxx::connection conn; + pqxx::work tx{conn}; + pqxx::blob::to_buf(tx, pqxx::blob::from_buf(tx, data), buf, 10); + PQXX_CHECK_EQUAL(buf, data, "from_buf()/to_buf() roundtrip did not work."); +} + + +void 
test_blob_append_from_buf_appends() +{ + std::basic_string const data{std::byte{'h'}, std::byte{'o'}}; + pqxx::connection conn; + pqxx::work tx{conn}; + auto id{pqxx::blob::create(tx)}; + pqxx::blob::append_from_buf(tx, data, id); + pqxx::blob::append_from_buf(tx, data, id); + std::basic_string buf; + pqxx::blob::to_buf(tx, id, buf, 10); + PQXX_CHECK_EQUAL(buf, data + data, "append_from_buf() wrote wrong data?"); +} + + +namespace +{ +/// Wrap `std::fopen`. +/** This is just here to stop Visual Studio from advertising its own + * alternative. + */ +std::unique_ptr> +my_fopen(char const *path, char const *mode) +{ +#if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable : 4996) +#endif + return {std::fopen(path, mode), std::fclose}; +#if defined(_MSC_VER) +# pragma warning(pop) +#endif +} + + +void read_file( + char const path[], std::size_t len, std::basic_string &buf) +{ + buf.resize(len); + auto f{my_fopen(path, "rb")}; + auto bytes{ + std::fread(reinterpret_cast(buf.data()), 1, len, f.get())}; + if (bytes == 0) + throw std::runtime_error{"Error reading test file."}; + buf.resize(bytes); +} + + +void write_file(char const path[], std::basic_string_view data) +{ + try + { + auto f{my_fopen(path, "wb")}; + if ( + std::fwrite( + reinterpret_cast(data.data()), 1, std::size(data), + f.get()) < std::size(data)) + std::runtime_error{"File write failed."}; + } + catch (const std::exception &) + { + std::remove(path); + throw; + } +} + + +/// Temporary file. +class TempFile +{ +public: + /// Create (and later clean up) a file at path containing data. + TempFile(char const path[], std::basic_string_view data) : + m_path(path) + { + write_file(path, data); + } + + ~TempFile() { std::remove(m_path.c_str()); } + +private: + std::string m_path; +}; +} // namespace + + +void test_blob_from_file_creates_blob_from_file_contents() +{ + char const temp_file[] = "blob-test-from_file.tmp"; + std::basic_string const data{std::byte{'4'}, std::byte{'2'}}; + + pqxx::connection conn; + pqxx::work tx{conn}; + std::basic_string buf; + + pqxx::oid id; + { + TempFile f{temp_file, data}; + id = pqxx::blob::from_file(tx, temp_file); + } + pqxx::blob::to_buf(tx, id, buf, 10); + PQXX_CHECK_EQUAL(buf, data, "Wrong data from blob::from_file()."); +} + + +void test_blob_from_file_with_oid_writes_blob() +{ + std::basic_string const data{std::byte{'6'}, std::byte{'9'}}; + char const temp_file[] = "blob-test-from_file-oid.tmp"; + std::basic_string buf; + + pqxx::connection conn; + pqxx::work tx{conn}; + + // Guarantee (more or less) that id is not in use. 
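+  // (Create a blob to obtain an oid the server just handed out, then remove
+  // the blob again so that oid is known to be free for from_file() to reuse
+  // below.)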
+ auto id{pqxx::blob::create(tx)}; + pqxx::blob::remove(tx, id); + + { + TempFile f{temp_file, data}; + pqxx::blob::from_file(tx, temp_file, id); + } + pqxx::blob::to_buf(tx, id, buf, 10); + PQXX_CHECK_EQUAL(buf, data, "Wrong data from blob::from_file()."); +} + + +void test_blob_append_to_buf_appends() +{ + std::basic_string const data{ + std::byte{'b'}, std::byte{'l'}, std::byte{'u'}, std::byte{'b'}}; + + pqxx::connection conn; + pqxx::work tx{conn}; + auto id{pqxx::blob::from_buf(tx, data)}; + + std::basic_string buf; + PQXX_CHECK_EQUAL( + pqxx::blob::append_to_buf(tx, id, 0u, buf, 1u), 1u, + "append_to_buf() returned unexpected value."); + PQXX_CHECK_EQUAL(std::size(buf), 1u, "Appended the wrong number of bytes."); + PQXX_CHECK_EQUAL( + pqxx::blob::append_to_buf(tx, id, 1u, buf, 5u), 3u, + "append_to_buf() returned unexpected value."); + PQXX_CHECK_EQUAL(std::size(buf), 4u, "Appended the wrong number of bytes."); + + PQXX_CHECK_EQUAL( + buf, data, "Reading using append_to_buf gave us wrong data."); +} + + +void test_blob_to_file_writes_file() +{ + std::basic_string const data{ + std::byte{'C'}, std::byte{'+'}, std::byte{'+'}}; + + char const temp_file[] = "blob-test-to_file.tmp"; + pqxx::connection conn; + pqxx::work tx{conn}; + auto id{pqxx::blob::from_buf(tx, data)}; + std::basic_string buf; + + try + { + pqxx::blob::to_file(tx, id, temp_file); + read_file(temp_file, 10u, buf); + std::remove(temp_file); + } + catch (std::exception const &) + { + std::remove(temp_file); + throw; + } + PQXX_CHECK_EQUAL(buf, data, "Got wrong data from to_file()."); +} + + +void test_blob_close_leaves_blob_unusable() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + auto id{ + pqxx::blob::from_buf(tx, std::basic_string{std::byte{1}})}; + auto b{pqxx::blob::open_rw(tx, id)}; + b.close(); + std::basic_string buf; + PQXX_CHECK_THROWS( + b.read(buf, 1), pqxx::usage_error, + "Reading from closed blob did not fail right."); +} + + +void test_blob_accepts_std_filesystem_path() +{ +#if defined(PQXX_HAVE_PATH) && !defined(_WIN32) + // A bug in gcc 8's ~std::filesystem::path() causes a run-time crash. 
+# if !defined(__GNUC__) || (__GNUC__ > 8) + + char const temp_file[] = "blob-test-filesystem-path.tmp"; + std::basic_string const data{std::byte{'4'}, std::byte{'2'}}; + + pqxx::connection conn; + pqxx::work tx{conn}; + std::basic_string buf; + + TempFile f{temp_file, data}; + std::filesystem::path const path{temp_file}; + auto id{pqxx::blob::from_file(tx, path)}; + pqxx::blob::to_buf(tx, id, buf, 10); + PQXX_CHECK_EQUAL(buf, data, "Wrong data from blob::from_file()."); + +# endif +#endif +} + + +PQXX_REGISTER_TEST(test_blob_is_useless_by_default); +PQXX_REGISTER_TEST(test_blob_create_makes_empty_blob); +PQXX_REGISTER_TEST(test_blob_create_with_oid_requires_oid_be_free); +PQXX_REGISTER_TEST(test_blob_create_with_oid_obeys_oid); +PQXX_REGISTER_TEST(test_blobs_are_transactional); +PQXX_REGISTER_TEST(test_blob_remove_removes_blob); +PQXX_REGISTER_TEST(test_blob_remove_is_not_idempotent); +PQXX_REGISTER_TEST(test_blob_checks_open_mode); +PQXX_REGISTER_TEST(test_blob_supports_move); +PQXX_REGISTER_TEST(test_blob_read_reads_data); +PQXX_REGISTER_TEST(test_blob_reads_vector); +PQXX_REGISTER_TEST(test_blob_read_span); +PQXX_REGISTER_TEST(test_blob_write_appends_at_insertion_point); +PQXX_REGISTER_TEST(test_blob_writes_span); +PQXX_REGISTER_TEST(test_blob_resize_shortens_to_desired_length); +PQXX_REGISTER_TEST(test_blob_resize_extends_to_desired_length); +PQXX_REGISTER_TEST(test_blob_tell_tracks_position); +PQXX_REGISTER_TEST(test_blob_seek_sets_positions); +PQXX_REGISTER_TEST(test_blob_from_buf_interoperates_with_to_buf); +PQXX_REGISTER_TEST(test_blob_append_from_buf_appends); +PQXX_REGISTER_TEST(test_blob_from_file_creates_blob_from_file_contents); +PQXX_REGISTER_TEST(test_blob_from_file_with_oid_writes_blob); +PQXX_REGISTER_TEST(test_blob_append_to_buf_appends); +PQXX_REGISTER_TEST(test_blob_to_file_writes_file); +PQXX_REGISTER_TEST(test_blob_close_leaves_blob_unusable); +PQXX_REGISTER_TEST(test_blob_accepts_std_filesystem_path); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_cancel_query.cxx b/ext/libpqxx-7.7.3/test/unit/test_cancel_query.cxx new file mode 100644 index 000000000..5329d195f --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_cancel_query.cxx @@ -0,0 +1,25 @@ +#include +#include + +#include "../test_helpers.hxx" + +namespace +{ +void test_cancel_query() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + // Calling cancel_query() while none is in progress has no effect. + conn.cancel_query(); + + // Nothing much is guaranteed about cancel_query, except that it doesn't make + // the process die in flames. 
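+  // The pipeline below just gets a slow statement under way, so that there is
+  // something in flight for cancel_query() to act on.
+  //
+  // As a rough, purely illustrative sketch (hypothetical names, not exercised
+  // by this test), one thread might give up on another thread's long-running
+  // query along these lines:
+  //
+  //   std::thread watchdog{[&conn] {
+  //     std::this_thread::sleep_for(std::chrono::seconds(1));
+  //     conn.cancel_query();
+  //   }};
+  //   try { tx.exec("SELECT pg_sleep(60)"); }
+  //   catch (pqxx::sql_error const &) { /* the query was cancelled */ }
+  //   watchdog.join();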
+ pqxx::pipeline p{tx, "test_cancel_query"}; + p.retain(0); + p.insert("SELECT pg_sleep(1)"); + conn.cancel_query(); +} + + +PQXX_REGISTER_TEST(test_cancel_query); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_column.cxx b/ext/libpqxx-7.7.3/test/unit/test_column.cxx new file mode 100644 index 000000000..9c50faff4 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_column.cxx @@ -0,0 +1,61 @@ +#include + +#include "../test_helpers.hxx" + +namespace +{ +void test_table_column() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + + tx.exec0("CREATE TEMP TABLE pqxxfoo (x varchar, y integer, z integer)"); + tx.exec0("INSERT INTO pqxxfoo VALUES ('xx', 1, 2)"); + auto R{tx.exec("SELECT z,y,x FROM pqxxfoo")}; + auto X{tx.exec("SELECT x,y,z,99 FROM pqxxfoo")}; + + pqxx::row::size_type x{R.table_column(2)}, y{R.table_column(1)}, + z{R.table_column(static_cast(0))}; + + PQXX_CHECK_EQUAL(x, 0, "Wrong column number."); + PQXX_CHECK_EQUAL(y, 1, "Wrong column number."); + PQXX_CHECK_EQUAL(z, 2, "Wrong column number."); + + x = R.table_column("x"); + y = R.table_column("y"); + z = R.table_column("z"); + + PQXX_CHECK_EQUAL(x, 0, "Wrong number for named column."); + PQXX_CHECK_EQUAL(y, 1, "Wrong number for named column."); + PQXX_CHECK_EQUAL(z, 2, "Wrong number for named column."); + + pqxx::row::size_type xx{X[0].table_column(static_cast(0))}, + yx{X[0].table_column(pqxx::row::size_type(1))}, zx{X[0].table_column("z")}; + + PQXX_CHECK_EQUAL(xx, 0, "Bad result from table_column(int)."); + PQXX_CHECK_EQUAL(yx, 1, "Bad result from table_column(size_type)."); + PQXX_CHECK_EQUAL(zx, 2, "Bad result from table_column(string)."); + + for (pqxx::row::size_type i{0}; i < std::size(R[0]); ++i) + PQXX_CHECK_EQUAL( + R[0][i].table_column(), R.table_column(i), + "Bad result from column_table()."); + + int col; + PQXX_CHECK_THROWS_EXCEPTION( + col = R.table_column(3), "table_column() with invalid index didn't fail."); + pqxx::ignore_unused(col); + + PQXX_CHECK_THROWS_EXCEPTION( + col = R.table_column("nonexistent"), + "table_column() with invalid column name didn't fail."); + pqxx::ignore_unused(col); + + PQXX_CHECK_THROWS_EXCEPTION( + col = X.table_column(3), "table_column() on non-table didn't fail."); + pqxx::ignore_unused(col); +} +} // namespace + + +PQXX_REGISTER_TEST(test_table_column); diff --git a/ext/libpqxx-7.7.3/test/unit/test_composite.cxx b/ext/libpqxx-7.7.3/test/unit/test_composite.cxx new file mode 100644 index 000000000..dcd65360c --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_composite.cxx @@ -0,0 +1,98 @@ +#include "../test_helpers.hxx" + +#include "pqxx/composite" +#include "pqxx/transaction" + +namespace +{ +void test_composite() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + tx.exec0("CREATE TYPE pqxxfoo AS (a integer, b text)"); + auto const r{tx.exec1("SELECT '(5,hello)'::pqxxfoo")}; + + int a; + std::string b; + pqxx::parse_composite(r[0].view(), a, b); + + PQXX_CHECK_EQUAL(a, 5, "Integer composite field came back wrong."); + PQXX_CHECK_EQUAL(b, "hello", "String composite field came back wrong."); +} + + +void test_composite_escapes() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + pqxx::row r; + tx.exec0("CREATE TYPE pqxxsingle AS (x text)"); + std::string s; + + r = tx.exec1(R"--(SELECT '("a""b")'::pqxxsingle)--"); + pqxx::parse_composite(r[0].view(), s); + PQXX_CHECK_EQUAL( + s, "a\"b", "Double-double-quotes escaping did not parse correctly."); + + r = tx.exec1(R"--(SELECT '("a\"b")'::pqxxsingle)--"); + pqxx::parse_composite(r[0].view(), s); + 
PQXX_CHECK_EQUAL(s, "a\"b", "Backslash escaping did not parse correctly."); +} + + +void test_composite_handles_nulls() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + pqxx::row r; + + tx.exec0("CREATE TYPE pqxxnull AS (a integer)"); + int nonnull; + r = tx.exec1("SELECT '()'::pqxxnull"); + PQXX_CHECK_THROWS( + pqxx::parse_composite(r[0].view(), nonnull), pqxx::conversion_error, + "No conversion error when reading a null into a nulless variable."); + std::optional nullable{5}; + pqxx::parse_composite(r[0].view(), nullable); + PQXX_CHECK( + not nullable.has_value(), "Null integer came out as having a value."); + + tx.exec0("CREATE TYPE pqxxnulls AS (a integer, b integer)"); + std::optional a{2}, b{4}; + r = tx.exec1("SELECT '(,)'::pqxxnulls"); + pqxx::parse_composite(r[0].view(), a, b); + PQXX_CHECK(not a.has_value(), "Null first integer stored as value."); + PQXX_CHECK(not b.has_value(), "Null second integer stored as value."); +} + + +void test_composite_renders_to_string() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + char buf[1000]; + + pqxx::composite_into_buf( + std::begin(buf), std::end(buf), 355, "foo", "b\na\\r"); + PQXX_CHECK_EQUAL( + std::string{buf}, "(355,\"foo\",\"b\na\\\\r\")", + "Composite was not rendered as expected."); + + tx.exec0("CREATE TYPE pqxxcomp AS (a integer, b text, c text)"); + auto const r{tx.exec1("SELECT '" + std::string{buf} + "'::pqxxcomp")}; + + int a; + std::string b, c; + bool const nonnull{r[0].composite_to(a, b, c)}; + PQXX_CHECK(nonnull, "Mistaken nullness."); + PQXX_CHECK_EQUAL(a, 355, "Int came back wrong."); + PQXX_CHECK_EQUAL(b, "foo", "Simple string came back wrong."); + PQXX_CHECK_EQUAL(c, "b\na\\r", "Escaping went wrong."); +} + + +PQXX_REGISTER_TEST(test_composite); +PQXX_REGISTER_TEST(test_composite_escapes); +PQXX_REGISTER_TEST(test_composite_handles_nulls); +PQXX_REGISTER_TEST(test_composite_renders_to_string); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_connection.cxx b/ext/libpqxx-7.7.3/test/unit/test_connection.cxx new file mode 100644 index 000000000..bbb780854 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_connection.cxx @@ -0,0 +1,212 @@ +#include + +#include + +#include "../test_helpers.hxx" + +namespace +{ +void test_connection_string_constructor() +{ + pqxx::connection c1{""}; + pqxx::connection c2{std::string{}}; +} + + +void test_move_constructor() +{ + pqxx::connection c1; + PQXX_CHECK(c1.is_open(), "New connection is not open."); + + pqxx::connection c2{std::move(c1)}; + + PQXX_CHECK(not c1.is_open(), "Moving did not close original connection."); + PQXX_CHECK(c2.is_open(), "Moved constructor is not open."); + + pqxx::work tx{c2}; + PQXX_CHECK_EQUAL(tx.query_value("SELECT 5"), 5, "Weird result!"); + + PQXX_CHECK_THROWS( + pqxx::connection c3{std::move(c2)}, pqxx::usage_error, + "Moving a connection with a transaction open should be an error."); +} + + +void test_move_assign() +{ + pqxx::connection c1; + pqxx::connection c2; + + c2.close(); + + c2 = std::move(c1); + + PQXX_CHECK(not c1.is_open(), "Connection still open after being moved out."); + PQXX_CHECK(c2.is_open(), "Moved constructor is not open."); + + { + pqxx::work tx1{c2}; + PQXX_CHECK_EQUAL(tx1.query_value("SELECT 8"), 8, "What!?"); + + pqxx::connection c3; + PQXX_CHECK_THROWS( + c3 = std::move(c2), pqxx::usage_error, + "Moving a connection with a transaction open should be an error."); + + PQXX_CHECK_THROWS( + c2 = std::move(c3), pqxx::usage_error, + "Moving a connection onto one with a transaction open should be " + "an error."); 
+ } + + // After failed move attempts, the connection is still usable. + pqxx::work tx2{c2}; + PQXX_CHECK_EQUAL(tx2.query_value("SELECT 6"), 6, "Huh!?"); +} + + +void test_encrypt_password() +{ + pqxx::connection c; + auto pw{c.encrypt_password("user", "password")}; + PQXX_CHECK(not std::empty(pw), "Encrypted password was empty."); + PQXX_CHECK_EQUAL( + std::strlen(pw.c_str()), std::size(pw), + "Encrypted password contains a null byte."); +} + + +void test_connection_string() +{ + pqxx::connection c; + std::string const connstr{c.connection_string()}; + +#if defined(_MSC_VER) +# pragma warning(push) +# pragma warning(disable : 4996) +#endif + if (std::getenv("PGUSER") == nullptr) +#if defined(_MSC_VER) +# pragma warning(pop) +#endif + { + PQXX_CHECK( + connstr.find("user=" + std::string{c.username()}) != std::string::npos, + "Connection string did not specify user name: " + connstr); + } + else + { + PQXX_CHECK( + connstr.find("user=" + std::string{c.username()}) == std::string::npos, + "Connection string specified user name, even when using default: " + + connstr); + } +} + + +#if defined(PQXX_HAVE_CONCEPTS) +template std::size_t length(STR const &str) +{ + return std::size(str); +} + + +std::size_t length(char const str[]) +{ + return std::strlen(str); +} +#endif // PQXX_HAVE_CONCEPTS + + +template void test_params_type() +{ +#if defined(PQXX_HAVE_CONCEPTS) + using item_t = std::remove_reference_t< + decltype(*std::declval>())>; + using key_t = decltype(std::get<0>(std::declval())); + using value_t = decltype(std::get<1>(std::declval())); + + // Set some parameters that are relatively safe to change arbitrarily. + MAP const params{{ + {key_t{"application_name"}, value_t{"pqxx-test"}}, + {key_t{"connect_timeout"}, value_t{"96"}}, + {key_t{"keepalives_idle"}, value_t{"771"}}, + }}; + + // Can we create a connection from these parameters? + pqxx::connection c{params}; + + // Check that the parameters came through in the connection string. + // We don't know the exact format, but the parameters have to be in there. + auto const min_size{std::accumulate( + std::cbegin(params), std::cend(params), std::size(params) - 1, + [](auto count, auto item) { + return count + length(std::get<0>(item)) + 1 + length(std::get<1>(item)); + })}; + + auto const connstr{c.connection_string()}; + PQXX_CHECK_GREATER_EQUAL( + std::size(connstr), min_size, + "Connection string can't possibly contain the options we gave."); + for (auto const &[key, value] : params) + { + PQXX_CHECK_NOT_EQUAL( + connstr.find(key), std::string::npos, + "Could not find param name '" + std::string{key} + + "' in connection string: " + connstr); + PQXX_CHECK_NOT_EQUAL( + connstr.find(value), std::string::npos, + "Could not find value for '" + std::string{value} + + "' in connection string: " + connstr); + } +#endif // PQXX_HAVE_CONCEPTS +} + + +void test_connection_params() +{ + // Connecting in this way supports a wide variety of formats for the + // parameters. 
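+  // Each call below repeats the test with a different container type for the
+  // keys and values.  Purely as an illustration (hypothetical option values),
+  // a plain map works in builds where PQXX_HAVE_CONCEPTS is defined:
+  //
+  //   std::map<std::string, std::string> const opts{
+  //     {"application_name", "my_app"}, {"connect_timeout", "10"}};
+  //   pqxx::connection conn{opts};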
+ test_params_type>(); + test_params_type>(); + test_params_type>(); + test_params_type>(); + test_params_type>(); + test_params_type>>(); + test_params_type>>(); + test_params_type>>(); + test_params_type>>(); +} + + +void test_raw_connection() +{ + pqxx::connection conn1; + PQXX_CHECK(conn1.is_open(), "Fresh connection is not open!"); + pqxx::work tx1{conn1}; + PQXX_CHECK_EQUAL( + tx1.query_value("SELECT 8"), 8, "Something weird happened."); + pqxx::internal::pq::PGconn *raw{std::move(conn1).release_raw_connection()}; + PQXX_CHECK(raw != nullptr, "Raw connection is null."); + PQXX_CHECK( + not conn1.is_open(), + "Releasing raw connection did not close pqxx::connection."); + + pqxx::connection conn2{pqxx::connection::seize_raw_connection(raw)}; + PQXX_CHECK( + conn2.is_open(), "Can't produce open connection from raw connection."); + pqxx::work tx2{conn2}; + PQXX_CHECK_EQUAL( + tx2.query_value("SELECT 9"), 9, + "Raw connection did not produce a working new connection."); +} + + +PQXX_REGISTER_TEST(test_connection_string_constructor); +PQXX_REGISTER_TEST(test_move_constructor); +PQXX_REGISTER_TEST(test_move_assign); +PQXX_REGISTER_TEST(test_encrypt_password); +PQXX_REGISTER_TEST(test_connection_string); +PQXX_REGISTER_TEST(test_connection_params); +PQXX_REGISTER_TEST(test_raw_connection); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_cursor.cxx b/ext/libpqxx-7.7.3/test/unit/test_cursor.cxx new file mode 100644 index 000000000..8761f6946 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_cursor.cxx @@ -0,0 +1,50 @@ +#include +#include + +#include "../test_helpers.hxx" + +namespace +{ +void test_stateless_cursor_provides_random_access(pqxx::connection &conn) +{ + pqxx::work tx{conn}; + pqxx::stateless_cursor< + pqxx::cursor_base::read_only, pqxx::cursor_base::owned> + c{tx, "SELECT * FROM generate_series(0, 3)", "count", false}; + + auto r{c.retrieve(1, 2)}; + PQXX_CHECK_EQUAL(std::size(r), 1, "Wrong number of rows from retrieve()."); + PQXX_CHECK_EQUAL(r[0][0].as(), 1, "Cursor retrieved wrong data."); + + r = c.retrieve(3, 10); + PQXX_CHECK_EQUAL(std::size(r), 1, "Expected 1 row retrieving past end."); + PQXX_CHECK_EQUAL(r[0][0].as(), 3, "Wrong data retrieved at end."); + + r = c.retrieve(0, 1); + PQXX_CHECK_EQUAL(std::size(r), 1, "Wrong number of rows back at beginning."); + PQXX_CHECK_EQUAL(r[0][0].as(), 0, "Wrong data back at beginning."); +} + + +void test_stateless_cursor_ignores_trailing_semicolon(pqxx::connection &conn) +{ + pqxx::work tx{conn}; + pqxx::stateless_cursor< + pqxx::cursor_base::read_only, pqxx::cursor_base::owned> + c{tx, "SELECT * FROM generate_series(0, 3) ;; ; \n \t ", "count", false}; + + auto r{c.retrieve(1, 2)}; + PQXX_CHECK_EQUAL(std::size(r), 1, "Trailing semicolon confused retrieve()."); +} + + +void test_cursor() +{ + pqxx::connection conn; + test_stateless_cursor_provides_random_access(conn); + test_stateless_cursor_ignores_trailing_semicolon(conn); +} + + +PQXX_REGISTER_TEST(test_cursor); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_encodings.cxx b/ext/libpqxx-7.7.3/test/unit/test_encodings.cxx new file mode 100644 index 000000000..4374671c6 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_encodings.cxx @@ -0,0 +1,114 @@ +#include "../test_helpers.hxx" + +#include "pqxx/internal/encodings.hxx" + + +namespace +{ +void test_scan_ascii() +{ + auto const scan{pqxx::internal::get_glyph_scanner( + pqxx::internal::encoding_group::MONOBYTE)}; + std::string const text{"hello"}; + + PQXX_CHECK_EQUAL( + scan(text.c_str(), 
std::size(text), 0), 1ul, + "Monobyte scanner acting up."); + PQXX_CHECK_EQUAL( + scan(text.c_str(), std::size(text), 1), 2ul, + "Monobyte scanner is inconsistent."); +} + + +void test_scan_utf8() +{ + auto const scan{ + pqxx::internal::get_glyph_scanner(pqxx::internal::encoding_group::UTF8)}; + + // Thai: "Khrab". + std::string const text{"\xe0\xb8\x95\xe0\xb8\xa3\xe0\xb8\xb1\xe0\xb8\x9a"}; + PQXX_CHECK_EQUAL( + scan(text.c_str(), std::size(text), 0), 3ul, + "UTF-8 scanner mis-scanned Thai khor khwai."); + PQXX_CHECK_EQUAL( + scan(text.c_str(), std::size(text), 3), 6ul, + "UTF-8 scanner mis-scanned Thai ror reua."); +} + + +void test_for_glyphs_empty() +{ + bool iterated{false}; + pqxx::internal::for_glyphs( + pqxx::internal::encoding_group::MONOBYTE, + [&iterated](char const *, char const *) { iterated = true; }, "", 0); + PQXX_CHECK(!iterated, "Empty string went through an iteration."); +} + + +void test_for_glyphs_ascii() +{ + std::string const text{"hi"}; + std::vector points; + + pqxx::internal::for_glyphs( + pqxx::internal::encoding_group::UTF8, + [&points](char const *gbegin, char const *gend) { + points.push_back(gend - gbegin); + }, + text.c_str(), std::size(text)); + + PQXX_CHECK_EQUAL(std::size(points), 2u, "Wrong number of ASCII iterations."); + PQXX_CHECK_EQUAL(points[0], 1u, "ASCII iteration started off wrong."); + PQXX_CHECK_EQUAL(points[1], 1u, "ASCII iteration was inconsistent."); +} + + +void test_for_glyphs_utf8() +{ + // Greek: alpha omega. + std::string const text{"\xce\x91\xce\xa9"}; + std::vector points; + + pqxx::internal::for_glyphs( + pqxx::internal::encoding_group::UTF8, + [&points](char const *gbegin, char const *gend) { + points.push_back(gend - gbegin); + }, + text.c_str(), std::size(text)); + + PQXX_CHECK_EQUAL(std::size(points), 2u, "Wrong number of UTF-8 iterations."); + PQXX_CHECK_EQUAL(points[0], 2u, "UTF-8 iteration started off wrong."); + PQXX_CHECK_EQUAL(points[1], 2u, "ASCII iteration was inconsistent."); + + // Greek lambda, ASCII plus sign, Old Persian Gu. 
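+  // (Two bytes, one byte, and four bytes of UTF-8 respectively; the checks
+  // below expect exactly these glyph widths.)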
+ std::string const mix{"\xce\xbb+\xf0\x90\x8e\xa6"}; + points.clear(); + + pqxx::internal::for_glyphs( + pqxx::internal::encoding_group::UTF8, + [&points](char const *gbegin, char const *gend) { + points.push_back(gend - gbegin); + }, + mix.c_str(), std::size(mix)); + + PQXX_CHECK_EQUAL(std::size(points), 3u, "Mixed UTF-8 iteration is broken."); + PQXX_CHECK_EQUAL(points[0], 2u, "Mixed UTF-8 iteration started off wrong."); + PQXX_CHECK_EQUAL(points[1], 1u, "Mixed UTF-8 iteration got ASCII wrong."); + PQXX_CHECK_EQUAL( + points[2], 4u, "Mixed UTF-8 iteration got long char wrong."); +} + + +void test_encodings() +{ + test_scan_ascii(); + test_scan_utf8(); + test_for_glyphs_empty(); + test_for_glyphs_ascii(); + test_for_glyphs_utf8(); +} + + +PQXX_REGISTER_TEST(test_encodings); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_error_verbosity.cxx b/ext/libpqxx-7.7.3/test/unit/test_error_verbosity.cxx new file mode 100644 index 000000000..67dc88763 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_error_verbosity.cxx @@ -0,0 +1,37 @@ +#include + +#include "../test_helpers.hxx" + +extern "C" +{ +#include +} + +namespace +{ +void test_error_verbosity() +{ + PQXX_CHECK_EQUAL( + static_cast(pqxx::error_verbosity::terse), + static_cast(PQERRORS_TERSE), + "error_verbosity enum should match PGVerbosity."); + PQXX_CHECK_EQUAL( + static_cast(pqxx::error_verbosity::normal), + static_cast(PQERRORS_DEFAULT), + "error_verbosity enum should match PGVerbosity."); + PQXX_CHECK_EQUAL( + static_cast(pqxx::error_verbosity::verbose), + static_cast(PQERRORS_VERBOSE), + "error_verbosity enum should match PGVerbosity."); + + pqxx::connection conn; + pqxx::work tx{conn}; + conn.set_verbosity(pqxx::error_verbosity::terse); + tx.exec1("SELECT 1"); + conn.set_verbosity(pqxx::error_verbosity::verbose); + tx.exec1("SELECT 2"); +} + + +PQXX_REGISTER_TEST(test_error_verbosity); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_errorhandler.cxx b/ext/libpqxx-7.7.3/test/unit/test_errorhandler.cxx new file mode 100644 index 000000000..c2cd4a797 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_errorhandler.cxx @@ -0,0 +1,223 @@ +#include + +#include +#include + +#include "../test_helpers.hxx" + +namespace +{ +class TestErrorHandler final : public pqxx::errorhandler +{ +public: + TestErrorHandler( + pqxx::connection &c, std::vector &activated_handlers, + bool retval = true) : + pqxx::errorhandler(c), + return_value(retval), + message(), + handler_list(activated_handlers) + {} + + bool operator()(char const msg[]) noexcept override + { + message = std::string{msg}; + handler_list.push_back(this); + return return_value; + } + + bool return_value; + std::string message; + std::vector &handler_list; +}; +} // namespace + + +namespace pqxx +{ +template<> struct nullness +{ + // clang warns about these being unused. And clang 6 won't accept a + // [[maybe_unused]] attribute on them either! 
+ + // static inline constexpr bool has_null{true}; + // static inline constexpr bool always_null{false}; + + static constexpr bool is_null(TestErrorHandler *e) noexcept + { + return e == nullptr; + } + static constexpr TestErrorHandler *null() noexcept { return nullptr; } +}; + + +template<> struct string_traits +{ + static constexpr std::size_t size_buffer(TestErrorHandler *const &) noexcept + { + return 100; + } + + static char *into_buf(char *begin, char *end, TestErrorHandler *const &value) + { + std::string text{"TestErrorHandler at " + pqxx::to_string(value)}; + if (pqxx::internal::cmp_greater_equal(std::size(text), end - begin)) + throw conversion_overrun{"Not enough buffer for TestErrorHandler."}; + std::memcpy(begin, text.c_str(), std::size(text) + 1); + return begin + std::size(text) + 1; + } +}; +} // namespace pqxx + + +namespace +{ +void test_process_notice_calls_errorhandler(pqxx::connection &c) +{ + std::vector dummy; + TestErrorHandler handler(c, dummy); + c.process_notice("Error!\n"); + PQXX_CHECK_EQUAL(handler.message, "Error!\n", "Error not handled."); +} + + +void test_error_handlers_get_called_newest_to_oldest(pqxx::connection &c) +{ + std::vector handlers; + TestErrorHandler h1(c, handlers); + TestErrorHandler h2(c, handlers); + TestErrorHandler h3(c, handlers); + c.process_notice("Warning.\n"); + PQXX_CHECK_EQUAL(h3.message, "Warning.\n", "Message not handled."); + PQXX_CHECK_EQUAL(h2.message, "Warning.\n", "Broken handling chain."); + PQXX_CHECK_EQUAL(h1.message, "Warning.\n", "Insane handling chain."); + PQXX_CHECK_EQUAL(std::size(handlers), 3u, "Wrong number of handler calls."); + PQXX_CHECK_EQUAL(&h3, handlers[0], "Unexpected handling order."); + PQXX_CHECK_EQUAL(&h2, handlers[1], "Insane handling order."); + PQXX_CHECK_EQUAL(&h1, handlers[2], "Impossible handling order."); +} + +void test_returning_false_stops_error_handling(pqxx::connection &c) +{ + std::vector handlers; + TestErrorHandler starved(c, handlers); + TestErrorHandler blocker(c, handlers, false); + c.process_notice("Error output.\n"); + PQXX_CHECK_EQUAL(std::size(handlers), 1u, "Handling chain was not stopped."); + PQXX_CHECK_EQUAL(handlers[0], &blocker, "Wrong handler got message."); + PQXX_CHECK_EQUAL(blocker.message, "Error output.\n", "Didn't get message."); + PQXX_CHECK_EQUAL(starved.message, "", "Message received; it shouldn't be."); +} + +void test_destroyed_error_handlers_are_not_called(pqxx::connection &c) +{ + std::vector handlers; + { + TestErrorHandler doomed(c, handlers); + } + c.process_notice("Unheard output."); + PQXX_CHECK( + std::empty(handlers), "Message was received on dead errorhandler."); +} + +void test_destroying_connection_unregisters_handlers() +{ + TestErrorHandler *survivor; + std::vector handlers; + { + pqxx::connection c; + survivor = new TestErrorHandler(c, handlers); + } + // Make some pointless use of survivor just to prove that this doesn't crash. 
+ (*survivor)("Hi"); + PQXX_CHECK_EQUAL( + std::size(handlers), 1u, "Ghost of dead ex-connection haunts handler."); + delete survivor; +} + + +class MinimalErrorHandler final : public pqxx::errorhandler +{ +public: + explicit MinimalErrorHandler(pqxx::connection &c) : pqxx::errorhandler(c) {} + bool operator()(char const[]) noexcept override { return true; } +}; + + +void test_get_errorhandlers(pqxx::connection &c) +{ + std::unique_ptr eh3; + auto const handlers_before{c.get_errorhandlers()}; + std::size_t const base_handlers{std::size(handlers_before)}; + + { + MinimalErrorHandler eh1(c); + auto const handlers_with_eh1{c.get_errorhandlers()}; + PQXX_CHECK_EQUAL( + std::size(handlers_with_eh1), base_handlers + 1, + "Registering a handler didn't create exactly one handler."); + PQXX_CHECK_EQUAL( + std::size_t(*std::rbegin(handlers_with_eh1)), std::size_t(&eh1), + "Wrong handler or wrong order."); + + { + MinimalErrorHandler eh2(c); + auto const handlers_with_eh2{c.get_errorhandlers()}; + PQXX_CHECK_EQUAL( + std::size(handlers_with_eh2), base_handlers + 2, + "Adding second handler didn't work."); + PQXX_CHECK_EQUAL( + std::size_t(*(std::rbegin(handlers_with_eh2) + 1)), std::size_t(&eh1), + "Second handler upset order."); + PQXX_CHECK_EQUAL( + std::size_t(*std::rbegin(handlers_with_eh2)), std::size_t(&eh2), + "Second handler isn't right."); + } + auto const handlers_without_eh2{c.get_errorhandlers()}; + PQXX_CHECK_EQUAL( + std::size(handlers_without_eh2), base_handlers + 1, + "Handler destruction produced wrong-sized handlers list."); + PQXX_CHECK_EQUAL( + std::size_t(*std::rbegin(handlers_without_eh2)), std::size_t(&eh1), + "Destroyed wrong handler."); + + eh3 = std::make_unique(c); + auto const handlers_with_eh3{c.get_errorhandlers()}; + PQXX_CHECK_EQUAL( + std::size(handlers_with_eh3), base_handlers + 2, + "Remove-and-add breaks."); + PQXX_CHECK_EQUAL( + std::size_t(*std::rbegin(handlers_with_eh3)), std::size_t(eh3.get()), + "Added wrong third handler."); + } + auto const handlers_without_eh1{c.get_errorhandlers()}; + PQXX_CHECK_EQUAL( + std::size(handlers_without_eh1), base_handlers + 1, + "Destroying oldest handler didn't work as expected."); + PQXX_CHECK_EQUAL( + std::size_t(*std::rbegin(handlers_without_eh1)), std::size_t(eh3.get()), + "Destroyed wrong handler."); + + eh3.reset(); + + auto const handlers_without_all{c.get_errorhandlers()}; + PQXX_CHECK_EQUAL( + std::size(handlers_without_all), base_handlers, + "Destroying all custom handlers didn't work as expected."); +} + + +void test_errorhandler() +{ + pqxx::connection conn; + test_process_notice_calls_errorhandler(conn); + test_error_handlers_get_called_newest_to_oldest(conn); + test_returning_false_stops_error_handling(conn); + test_destroyed_error_handlers_are_not_called(conn); + test_destroying_connection_unregisters_handlers(); + test_get_errorhandlers(conn); +} + + +PQXX_REGISTER_TEST(test_errorhandler); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_escape.cxx b/ext/libpqxx-7.7.3/test/unit/test_escape.cxx new file mode 100644 index 000000000..baff4d62c --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_escape.cxx @@ -0,0 +1,228 @@ +#include + +#include + +#include "../test_helpers.hxx" + +namespace +{ +using namespace std::literals; + + +void compare_esc( + pqxx::connection &c, pqxx::transaction_base &t, char const text[]) +{ + std::size_t const len{std::size(std::string{text})}; + PQXX_CHECK_EQUAL( + c.esc(std::string_view{text, len}), t.esc(std::string_view{text, len}), + "Connection & transaction escape 
differently."); + + PQXX_CHECK_EQUAL( + t.esc(std::string_view{text, len}), t.esc(text), + "Length argument to esc() changes result."); + + PQXX_CHECK_EQUAL( + t.esc(std::string{text}), t.esc(text), + "esc(std::string()) differs from esc(char const[])."); + + PQXX_CHECK_EQUAL( + text, + t.query_value( + "SELECT '" + t.esc(std::string_view{text, len}) + "'"), + "esc() is not idempotent."); + + PQXX_CHECK_EQUAL( + t.esc(std::string_view{text, len}), t.esc(text), + "Oversized buffer affects esc()."); +} + + +void test_esc(pqxx::connection &c, pqxx::transaction_base &t) +{ + PQXX_CHECK_EQUAL( + t.esc(std::string_view{"", 0}), "", + "Empty string doesn't escape properly."); + PQXX_CHECK_EQUAL( + t.esc(std::string_view{"'", 1}), "''", + "Single quote escaped incorrectly."); + PQXX_CHECK_EQUAL( + t.esc(std::string_view{"hello"}), "hello", "Trivial escape went wrong."); + char const *const escstrings[]{"x", " ", "", nullptr}; + for (std::size_t i{0}; escstrings[i] != nullptr; ++i) + compare_esc(c, t, escstrings[i]); +} + + +void test_quote(pqxx::connection &c, pqxx::transaction_base &t) +{ + PQXX_CHECK_EQUAL(t.quote("x"), "'x'", "Basic quote() fails."); + PQXX_CHECK_EQUAL( + t.quote(1), "'1'", "quote() not dealing with int properly."); + PQXX_CHECK_EQUAL(t.quote(0), "'0'", "Quoting zero is a problem."); + char const *const null_ptr{nullptr}; + PQXX_CHECK_EQUAL(t.quote(null_ptr), "NULL", "Not quoting NULL correctly."); + PQXX_CHECK_EQUAL( + t.quote(std::string{"'"}), "''''", "Escaping quotes goes wrong."); + + PQXX_CHECK_EQUAL( + t.quote("x"), c.quote("x"), + "Connection and transaction quote differently."); + + char const *test_strings[]{"", "x", "\\", "\\\\", "'", + "''", "\\'", "\t", "\n", nullptr}; + + for (std::size_t i{0}; test_strings[i] != nullptr; ++i) + { + auto r{t.query_value("SELECT " + t.quote(test_strings[i]))}; + PQXX_CHECK_EQUAL( + r, test_strings[i], "Selecting quoted string does not come back equal."); + } +} + + +void test_quote_name(pqxx::transaction_base &t) +{ + PQXX_CHECK_EQUAL( + "\"A b\"", t.quote_name("A b"), "Escaped identifier is not as expected."); + PQXX_CHECK_EQUAL( + std::string{"A b"}, + t.exec("SELECT 1 AS " + t.quote_name("A b")).column_name(0), + "Escaped identifier does not work in SQL."); +} + + +void test_esc_raw_unesc_raw(pqxx::transaction_base &t) +{ + constexpr char binary[]{"1\0023\\4x5"}; + std::basic_string const data( + reinterpret_cast(binary), std::size(binary)); + std::string const escaped{t.esc_raw( + std::basic_string_view{std::data(data), std::size(binary)})}; + + for (auto const i : escaped) + { + PQXX_CHECK_GREATER( + static_cast(static_cast(i)), 7u, + "Non-ASCII character in escaped data: " + escaped); + PQXX_CHECK_LESS( + static_cast(static_cast(i)), 127u, + "Non-ASCII character in escaped data: " + escaped); + } + + for (auto const i : escaped) + PQXX_CHECK( + isprint(i), "Unprintable character in escaped data: " + escaped); + + PQXX_CHECK_EQUAL( + escaped, "\\x3102335c34783500", "Binary data escaped wrong."); + PQXX_CHECK_EQUAL( + std::size(t.unesc_bin(escaped)), std::size(data), + "Wrong size after unescaping."); + auto unescaped{t.unesc_bin(escaped)}; + PQXX_CHECK_EQUAL( + std::size(unescaped), std::size(data), + "Unescaping did not restore original size."); + for (std::size_t i{0}; i < std::size(unescaped); ++i) + PQXX_CHECK_EQUAL( + int(unescaped[i]), int(data[i]), + "Unescaping binary data did not restore byte #" + pqxx::to_string(i) + + "."); +} + + +void test_esc_like(pqxx::transaction_base &tx) +{ + PQXX_CHECK_EQUAL(tx.esc_like(""), 
"", "esc_like breaks on empty string."); + PQXX_CHECK_EQUAL(tx.esc_like("abc"), "abc", "esc_like is broken."); + PQXX_CHECK_EQUAL(tx.esc_like("_"), "\\_", "esc_like fails on underscore."); + PQXX_CHECK_EQUAL(tx.esc_like("%"), "\\%", "esc_like fails on %."); + PQXX_CHECK_EQUAL( + tx.esc_like("a%b_c"), "a\\%b\\_c", "esc_like breaks on mix."); + PQXX_CHECK_EQUAL( + tx.esc_like("_", '+'), "+_", "esc_like ignores escape character."); +} + + +void test_escaping() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + test_esc(conn, tx); + test_quote(conn, tx); + test_quote_name(tx); + test_esc_raw_unesc_raw(tx); + test_esc_like(tx); +} + + +void test_esc_escapes_into_buffer() +{ +#if defined(PQXX_HAVE_CONCEPTS) + pqxx::connection conn; + pqxx::work tx{conn}; + + std::string buffer; + buffer.resize(20); + + auto const text{"Ain't"sv}; + auto escaped_text{tx.esc(text, buffer)}; + PQXX_CHECK_EQUAL(escaped_text, "Ain''t", "Escaping into buffer went wrong."); + + std::basic_string const data{std::byte{0x22}, std::byte{0x43}}; + auto escaped_data(tx.esc(data, buffer)); + PQXX_CHECK_EQUAL(escaped_data, "\\x2243", "Binary data escaped wrong."); +#endif +} + + +void test_esc_accepts_various_types() +{ +#if defined(PQXX_HAVE_CONCEPTS) && defined(PQXX_HAVE_SPAN) + pqxx::connection conn; + pqxx::work tx{conn}; + + std::string buffer; + buffer.resize(20); + + std::string const text{"it's"}; + auto escaped_text{tx.esc(text, buffer)}; + PQXX_CHECK_EQUAL(escaped_text, "it''s", "Escaping into buffer went wrong."); + + std::vector const data{std::byte{0x23}, std::byte{0x44}}; + auto escaped_data(tx.esc(data, buffer)); + PQXX_CHECK_EQUAL(escaped_data, "\\x2344", "Binary data escaped wrong."); +#endif +} + + +void test_binary_esc_checks_buffer_length() +{ +#if defined(PQXX_HAVE_CONCEPTS) && defined(PQXX_HAVE_SPAN) + pqxx::connection conn; + pqxx::work tx{conn}; + + std::string buf; + std::basic_string bin{ + std::byte{'b'}, std::byte{'o'}, std::byte{'o'}}; + + buf.resize(2 * std::size(bin) + 3); + pqxx::ignore_unused(tx.esc(bin, buf)); + PQXX_CHECK_EQUAL(int{buf[0]}, int{'\\'}, "Unexpected binary escape format."); + PQXX_CHECK_NOT_EQUAL( + int(buf[std::size(buf) - 2]), int('\0'), "Escaped binary ends too soon."); + PQXX_CHECK_EQUAL( + int(buf[std::size(buf) - 1]), int('\0'), "Terminating zero is missing."); + + buf.resize(2 * std::size(bin) + 2); + PQXX_CHECK_THROWS( + pqxx::ignore_unused(tx.esc(bin, buf)), pqxx::range_error, + "Didn't get expected exception from escape overrun."); +#endif +} + + +PQXX_REGISTER_TEST(test_escaping); +PQXX_REGISTER_TEST(test_esc_escapes_into_buffer); +PQXX_REGISTER_TEST(test_esc_accepts_various_types); +PQXX_REGISTER_TEST(test_binary_esc_checks_buffer_length); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_exceptions.cxx b/ext/libpqxx-7.7.3/test/unit/test_exceptions.cxx new file mode 100644 index 000000000..4b84b7fba --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_exceptions.cxx @@ -0,0 +1,45 @@ +#include +#include + +#include "../test_helpers.hxx" + + +namespace +{ +void test_exceptions() +{ + std::string const broken_query{"SELECT HORRIBLE ERROR"}, + err{"Error message"}; + + try + { + throw pqxx::sql_error{err, broken_query}; + } + catch (std::exception const &e) + { + PQXX_CHECK_EQUAL(e.what(), err, "Exception contains wrong message."); + auto downcast{dynamic_cast(&e)}; + PQXX_CHECK( + downcast != nullptr, "exception-to-sql_error downcast is broken."); + PQXX_CHECK_EQUAL( + downcast->query(), broken_query, + "Getting query from pqxx exception is broken."); + } 
+ + pqxx::connection conn; + pqxx::work tx{conn}; + try + { + tx.exec("INVALID QUERY HERE"); + } + catch (pqxx::syntax_error const &e) + { + // SQL syntax error has sqlstate error 42601. + PQXX_CHECK_EQUAL( + e.sqlstate(), "42601", "Unexpected sqlstate on syntax error."); + } +} + + +PQXX_REGISTER_TEST(test_exceptions); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_field.cxx b/ext/libpqxx-7.7.3/test/unit/test_field.cxx new file mode 100644 index 000000000..013ea1a82 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_field.cxx @@ -0,0 +1,57 @@ +#include + +#include "../test_helpers.hxx" + +namespace +{ +void test_field() +{ + pqxx::connection c; + pqxx::work tx{c}; + auto const r1{tx.exec1("SELECT 9")}; + auto const &f1{r1[0]}; + + PQXX_CHECK_EQUAL(f1.as(), "9", "as() is broken."); + PQXX_CHECK_EQUAL( + f1.as("z"), "9", "as(string) is broken."); + + PQXX_CHECK_EQUAL(f1.as(), 9, "as() is broken."); + PQXX_CHECK_EQUAL(f1.as(10), 9, "as(int) is broken."); + + std::string s; + PQXX_CHECK(f1.to(s), "to(string) failed."); + PQXX_CHECK_EQUAL(s, "9", "to(string) is broken."); + s = "x"; + PQXX_CHECK(f1.to(s, std::string{"7"}), "to(string, string) failed."); + PQXX_CHECK_EQUAL(s, "9", "to(string, string) is broken."); + + int i{}; + PQXX_CHECK(f1.to(i), "to(int) failed."); + PQXX_CHECK_EQUAL(i, 9, "to(int) is broken."); + i = 8; + PQXX_CHECK(f1.to(i, 12), "to(int, int) failed."); + PQXX_CHECK_EQUAL(i, 9, "to(int, int) is broken."); + + auto const r2{tx.exec1("SELECT NULL")}; + auto const f2{r2[0]}; + i = 100; + PQXX_CHECK_THROWS( + f2.as(), pqxx::conversion_error, "Null conversion failed to throw."); + PQXX_CHECK_EQUAL(i, 100, "Null conversion touched its output."); + + PQXX_CHECK_EQUAL(f2.as(66), 66, "as default is broken."); + + PQXX_CHECK(!(f2.to(i)), "to(int) failed to report a null."); + PQXX_CHECK(!(f2.to(i, 54)), "to(int, int) failed to report a null."); + PQXX_CHECK_EQUAL(i, 54, "to(int, int) failed to default."); + + auto const r3{tx.exec("SELECT generate_series(1, 5)")}; + PQXX_CHECK_EQUAL(r3.at(3, 0).as(), 4, "Two-argument at() went wrong."); +#if defined(PQXX_HAVE_MULTIDIMENSIONAL_SUBSCRIPT) + PQXX_CHECK_EQUAL((r3[3, 0].as()), 4, "Two-argument [] went wrong."); +#endif +} + + +PQXX_REGISTER_TEST(test_field); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_float.cxx b/ext/libpqxx-7.7.3/test/unit/test_float.cxx new file mode 100644 index 000000000..a0463e4d0 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_float.cxx @@ -0,0 +1,175 @@ +#include + +#include + +#include "../test_helpers.hxx" + +namespace +{ +/// Test conversions for some floating-point type. +template void infinity_test() +{ + T inf{std::numeric_limits::infinity()}; + std::string inf_string; + T back_conversion; + + inf_string = pqxx::to_string(inf); + pqxx::from_string(inf_string, back_conversion); + PQXX_CHECK_LESS( + T(999999999), back_conversion, + "Infinity doesn't convert back to something huge."); + + inf_string = pqxx::to_string(-inf); + pqxx::from_string(inf_string, back_conversion); + PQXX_CHECK_LESS( + back_conversion, -T(999999999), "Negative infinity is broken"); +} + +void test_infinities() +{ + infinity_test(); + infinity_test(); + infinity_test(); +} + + +/// Reproduce bug #262: repeated float conversions break without charconv. 
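+/// (Background note, not in the original comment: "without charconv" means
+/// the build has no std::to_chars/std::from_chars, so float conversions fall
+/// back on the stringstream machinery described inside the function below;
+/// the bug is that its stale eof state leaked into the next conversion.)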
+template void bug_262() +{ + pqxx::connection conn; + conn.prepare("stmt", "select cast($1 as float)"); + pqxx::work tr{conn}; + + // We must use the same float type both for passing the value to the + // statement and for retrieving result of the statement execution. This is + // due to an internal stringstream being instantiated as a a parameterized + // thread-local singleton. So, there are separate stream, + // stream, stream, but every such instance is a + // singleton. We should use only one of them for this test. + + pqxx::row row; + + // Nothing bad here, select a float value. + // The stream is clear, so just fill it with the value and extract str(). + row = tr.exec1("SELECT 1.0"); + + // This works properly, but as we parse the value from the stream, the + // seeking cursor moves towards the EOF. When the inevitable EOF happens + // 'eof' flag is set in the stream and 'good' flag is unset. + row[0].as(); + + // The second try. Select a float value again. The stream is not clean, so + // we need to put an empty string into its buffer {stream.str("");}. This + // resets the seeking cursor to 0. Then we will put the value using + // operator<<(). + // ... + // ... + // OOPS. stream.str("") does not reset 'eof' flag and 'good' flag! We are + // trying to read from EOF! This is no good. + // Throws on unpatched pqxx v6.4.5 + row = tr.exec1("SELECT 2.0"); + + // We won't get here without patch. The following statements are just for + // demonstration of how are intended to work. If we + // simply just reset the stream flags properly, this would work fine. + // The most obvious patch is just explicitly stream.seekg(0). + row[0].as(); + row = tr.exec1("SELECT 3.0"); + row[0].as(); +} + + +/// Test for bug #262. +void test_bug_262() +{ + bug_262(); + bug_262(); + bug_262(); +} + + +/// Test conversion of malformed floating-point values. +void test_bad_float() +{ + float x [[maybe_unused]]; + PQXX_CHECK_THROWS( + x = pqxx::from_string(""), pqxx::conversion_error, + "Conversion of empty string to float was not caught."); + + PQXX_CHECK_THROWS( + x = pqxx::from_string("Infancy"), pqxx::conversion_error, + "Misleading infinity was not caught."); + PQXX_CHECK_THROWS( + x = pqxx::from_string("-Infighting"), pqxx::conversion_error, + "Misleading negative infinity was not caught."); + + PQXX_CHECK_THROWS( + x = pqxx::from_string("Nanny"), pqxx::conversion_error, + "Conversion of misleading NaN was not caught."); +} + + +template void test_float_length(T value) +{ + auto const text{pqxx::to_string(value)}; + PQXX_CHECK_GREATER_EQUAL( + pqxx::size_buffer(value), std::size(text) + 1, + "Not enough buffer space for " + text + "."); +} + + +/// Test conversion of long float values to strings. 
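+/// (In other words: for each value we check that pqxx::size_buffer() promises
+/// at least as many bytes as pqxx::to_string() actually produces, plus the
+/// terminating zero.)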
+void test_long_float() +{ + test_float_length(0.1f); + test_float_length(0.1); + + test_float_length(std::numeric_limits::denorm_min()); + test_float_length(-std::numeric_limits::denorm_min()); + test_float_length(std::numeric_limits::min()); + test_float_length(-std::numeric_limits::min()); + test_float_length(std::numeric_limits::max()); + test_float_length(-std::numeric_limits::max()); + test_float_length(-std::nextafter(1.0f, 2.0f)); + + test_float_length(std::numeric_limits::denorm_min()); + test_float_length(-std::numeric_limits::denorm_min()); + test_float_length(std::numeric_limits::min()); + test_float_length(-std::numeric_limits::min()); + test_float_length(std::numeric_limits::max()); + test_float_length(-std::numeric_limits::max()); + test_float_length(-std::nextafter(1.0, 2.0)); + + test_float_length(std::numeric_limits::denorm_min()); + test_float_length(-std::numeric_limits::denorm_min()); + test_float_length(std::numeric_limits::min()); + test_float_length(-std::numeric_limits::min()); + test_float_length(std::numeric_limits::max()); + test_float_length(-std::numeric_limits::max()); + test_float_length(-std::nextafter(1.0L, 2.0L)); + + // Ahem. I'm not proud of this. We really can't assume much about the + // floating-point types, but I'd really like to try a few things to see that + // buffer sizes are in the right ballpark. So, if "double" is at least 64 + // bits, check for some examples of long conversions. + if constexpr (sizeof(double) >= 8) + { + auto constexpr awkward{-2.2250738585072014e-308}; + auto const text{pqxx::to_string(awkward)}; + PQXX_CHECK_LESS_EQUAL( + std::size(text), 25u, text + " converted to too long a string."); + } + if constexpr (sizeof(double) <= 8) + { + auto const text{pqxx::to_string(0.99)}; + PQXX_CHECK_LESS_EQUAL( + pqxx::size_buffer(0.99), 25u, text + " converted to too long a string."); + } +} + + +PQXX_REGISTER_TEST(test_infinities); +PQXX_REGISTER_TEST(test_bug_262); +PQXX_REGISTER_TEST(test_bad_float); +PQXX_REGISTER_TEST(test_long_float); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_largeobject.cxx b/ext/libpqxx-7.7.3/test/unit/test_largeobject.cxx new file mode 100644 index 000000000..8184fa2f8 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_largeobject.cxx @@ -0,0 +1,58 @@ +#include +#include + +#include +#include + +#include "../test_helpers.hxx" + +namespace +{ +void test_stream_large_object() +{ + pqxx::connection conn; + + // Construct a really nasty string. (Don't just construct a std::string from + // a char[] constant, because it'll terminate at the embedded zero.) + // + // The crucial thing is the "ff" byte at the beginning. It tests for + // possible conflation between "eof" (-1) and a char which just happens to + // have the same bit pattern as an 8-bit value of -1. This conflation can be + // a problem when it occurs at buffer boundaries. 
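+  //
+  // Illustrative aside (not part of the original test): the conflation shows
+  // up when a char holding 0xff is narrowed and sign-extended before being
+  // compared against std::char_traits<char>::eof() (that is, EOF, a negative
+  // value).  The portable comparison goes through to_int_type(), which maps
+  // 0xff to 255 instead:
+  //
+  //   char const ff{'\xff'};
+  //   int(ff) == std::char_traits<char>::eof();        // may be true where char is signed
+  //   std::char_traits<char>::to_int_type(ff)
+  //       == std::char_traits<char>::eof();            // always false
+  //
+  // The string constructed below starts with exactly such a byte.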
+ constexpr char bytes[]{"\xff\0end"}; + std::string const contents{bytes, std::size(bytes)}; + + pqxx::work tx{conn}; +#include "pqxx/internal/ignore-deprecated-pre.hxx" + pqxx::largeobject new_obj{tx}; + + pqxx::olostream write{tx, new_obj}; + write << contents; + write.flush(); + + pqxx::largeobjectaccess check{tx, new_obj, std::ios::in | std::ios::binary}; + std::array buf; + std::size_t const len{ + static_cast(check.read(std::data(buf), std::size(buf)))}; + PQXX_CHECK_EQUAL(len, std::size(contents), "olostream truncated data."); + std::string const check_str{std::data(buf), len}; + PQXX_CHECK_EQUAL(check_str, contents, "olostream mangled data."); + + pqxx::ilostream read{tx, new_obj}; + std::string read_back; + std::string chunk; + while (read >> chunk) read_back += chunk; + + new_obj.remove(tx); + + PQXX_CHECK_EQUAL(read_back, contents, "Got wrong data from ilostream."); + PQXX_CHECK_EQUAL( + std::size(read_back), std::size(contents), "ilostream truncated data."); + PQXX_CHECK_EQUAL( + std::size(read_back), std::size(bytes), "ilostream truncated data."); +#include "pqxx/internal/ignore-deprecated-post.hxx" +} + + +PQXX_REGISTER_TEST(test_stream_large_object); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_nonblocking_connect.cxx b/ext/libpqxx-7.7.3/test/unit/test_nonblocking_connect.cxx new file mode 100644 index 000000000..d1a32ac64 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_nonblocking_connect.cxx @@ -0,0 +1,27 @@ +#include + +#include + +#include "../test_helpers.hxx" + + +namespace +{ +void test_nonblocking_connect() +{ + pqxx::connecting nbc; + while (not nbc.done()) + { + pqxx::internal::wait_fd( + nbc.sock(), nbc.wait_to_read(), nbc.wait_to_write()); + nbc.process(); + } + + pqxx::connection conn{std::move(nbc).produce()}; + pqxx::work tx{conn}; + PQXX_CHECK_EQUAL(tx.query_value("SELECT 10"), 10, "Bad value!?"); +} + + +PQXX_REGISTER_TEST(test_nonblocking_connect); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_notification.cxx b/ext/libpqxx-7.7.3/test/unit/test_notification.cxx new file mode 100644 index 000000000..6487169b4 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_notification.cxx @@ -0,0 +1,86 @@ +#include + +#include + +#include + +#include + +#include +#include + +#include "../test_helpers.hxx" + +namespace +{ +class TestReceiver final : public pqxx::notification_receiver +{ +public: + std::string payload; + int backend_pid; + + TestReceiver(pqxx::connection &c, std::string const &channel_name) : + pqxx::notification_receiver(c, channel_name), + payload(), + backend_pid(0) + {} + + virtual void + operator()(std::string const &payload_string, int backend) override + { + this->payload = payload_string; + this->backend_pid = backend; + } +}; + + +void test_receive( + pqxx::transaction_base &t, std::string const &channel, + char const payload[] = nullptr) +{ + pqxx::connection &conn(t.conn()); + + std::string SQL{"NOTIFY \"" + channel + "\""}; + if (payload != nullptr) + SQL += ", " + t.quote(payload); + + TestReceiver receiver{t.conn(), channel}; + + // Clear out any previously pending notifications that might otherwise + // confuse the test. + conn.get_notifs(); + + // Notify, and receive. 
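+  // (Descriptive note, not in the original: PostgreSQL only delivers a NOTIFY
+  // issued inside a transaction once that transaction commits, which is why
+  // the commit() below precedes the polling loop.  And since delivery over
+  // the connection is asynchronous, the loop retries get_notifs() up to ten
+  // times with one-second waits in between.)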
+ t.exec(SQL); + t.commit(); + + int notifs{0}; + for (int i{0}; (i < 10) and (notifs == 0); + ++i, pqxx::internal::wait_for(1'000'000u)) + notifs = conn.get_notifs(); + + PQXX_CHECK_EQUAL(notifs, 1, "Got wrong number of notifications."); + PQXX_CHECK_EQUAL(receiver.backend_pid, conn.backendpid(), "Bad pid."); + if (payload == nullptr) + PQXX_CHECK(std::empty(receiver.payload), "Unexpected payload."); + else + PQXX_CHECK_EQUAL(receiver.payload, payload, "Bad payload."); +} + + +void test_notification() +{ + pqxx::connection conn; + TestReceiver receiver(conn, "mychannel"); + PQXX_CHECK_EQUAL(receiver.channel(), "mychannel", "Bad channel."); + + pqxx::work tx{conn}; + test_receive(tx, "channel1"); + + pqxx::nontransaction u(conn); + test_receive(u, "channel2", "payload"); +} + + +PQXX_REGISTER_TEST(test_notification); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_pipeline.cxx b/ext/libpqxx-7.7.3/test/unit/test_pipeline.cxx new file mode 100644 index 000000000..63b97662b --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_pipeline.cxx @@ -0,0 +1,64 @@ +#include + +#include +#include + +#include "../test_helpers.hxx" + +namespace +{ +void test_pipeline() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + + // A pipeline grabs transaction focus, blocking regular queries and such. + pqxx::pipeline pipe(tx, "test_pipeline_detach"); + PQXX_CHECK_THROWS( + tx.exec("SELECT 1"), std::logic_error, + "Pipeline does not block regular queries"); + + // Flushing a pipeline relinquishes transaction focus. + pipe.flush(); + auto r{tx.exec("SELECT 2")}; + PQXX_CHECK_EQUAL( + std::size(r), 1, "Wrong query result after flushing pipeline."); + PQXX_CHECK_EQUAL( + r[0][0].as(), 2, "Query returns wrong data after flushing pipeline."); + + // Inserting a query makes the pipeline grab transaction focus back. + auto q{pipe.insert("SELECT 2")}; + PQXX_CHECK_THROWS( + tx.exec("SELECT 3"), std::logic_error, + "Pipeline does not block regular queries"); + + // Invoking complete() also detaches the pipeline from the transaction. + pipe.complete(); + r = tx.exec("SELECT 4"); + PQXX_CHECK_EQUAL(std::size(r), 1, "Wrong query result after complete()."); + PQXX_CHECK_EQUAL( + r[0][0].as(), 4, "Query returns wrong data after complete()."); + + // The complete() also received any pending query results from the backend. + r = pipe.retrieve(q); + PQXX_CHECK_EQUAL(std::size(r), 1, "Wrong result from pipeline."); + PQXX_CHECK_EQUAL(r[0][0].as(), 2, "Pipeline returned wrong data."); + + // We can cancel while the pipe is empty, and things will still work. + pipe.cancel(); + + // Issue a query and cancel it. Measure time to see that we don't really + // wait. + using clock = std::chrono::steady_clock; + auto const start{clock::now()}; + pipe.retain(0); + pipe.insert("pg_sleep(10)"); + pipe.cancel(); + auto const finish{clock::now()}; + auto const seconds{ + std::chrono::duration_cast(finish - start).count()}; + PQXX_CHECK_LESS(seconds, 5, "Canceling a sleep took suspiciously long."); +} +} // namespace + +PQXX_REGISTER_TEST(test_pipeline); diff --git a/ext/libpqxx-7.7.3/test/unit/test_prepared_statement.cxx b/ext/libpqxx-7.7.3/test/unit/test_prepared_statement.cxx new file mode 100644 index 000000000..c9eb4ee84 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_prepared_statement.cxx @@ -0,0 +1,334 @@ +#include +#include +#include +#include + +#include + +#include "../test_helpers.hxx" + +// Test program for libpqxx. Define and use prepared statements. 
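+
+// Illustrative sketch, not part of the original test suite: the minimal
+// prepare-then-execute round trip that the tests below exercise in much more
+// detail.  The statement name "add_one" is made up for this example; like the
+// real tests, it assumes a reachable database.
+[[maybe_unused]] static void example_prepared_roundtrip()
+{
+  pqxx::connection conn;
+  conn.prepare("add_one", "SELECT $1::int + 1");
+  pqxx::work tx{conn};
+  // exec_prepared1() runs the named statement and insists on exactly one row.
+  pqxx::row const row{tx.exec_prepared1("add_one", 41)};
+  PQXX_CHECK_EQUAL(row[0].as<int>(), 42, "Prepared-statement sketch broke.");
+}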
+ +#define COMPARE_RESULTS(name, lhs, rhs) \ + PQXX_CHECK_EQUAL( \ + rhs, lhs, \ + "Executing " name " as prepared statement yields different results."); + +namespace +{ +using namespace std::literals; + + +template std::string stringize(pqxx::transaction_base &t, T i) +{ + return stringize(t, pqxx::to_string(i)); +} + + +// Substitute variables in raw query. This is not likely to be very robust, +// but it should do for just this test. The main shortcomings are escaping, +// and not knowing when to quote the variables. +// Note we do the replacement backwards (meaning forward_only iterators won't +// do!) to avoid substituting e.g. "$12" as "$1" first. +template +std::string +subst(pqxx::transaction_base &t, std::string q, ITER patbegin, ITER patend) +{ + ptrdiff_t i{distance(patbegin, patend)}; + for (ITER arg{patend}; i > 0; --i) + { + --arg; + std::string const marker{"$" + pqxx::to_string(i)}, + var{stringize(t, *arg)}; + std::string::size_type const msz{std::size(marker)}; + while (q.find(marker) != std::string::npos) + q.replace(q.find(marker), msz, var); + } + return q; +} + + +template +std::string +subst(pqxx::transaction_base &t, std::string const &q, CNTNR const &patterns) +{ + return subst(t, q, std::begin(patterns), std::end(patterns)); +} + + +void test_registration_and_invocation() +{ + constexpr auto count_to_5{"SELECT * FROM generate_series(1, 5)"}; + + pqxx::connection c; + pqxx::work tx1{c}; + + // Prepare a simple statement. + tx1.conn().prepare("CountToFive", count_to_5); + + // The statement returns exactly what you'd expect. + COMPARE_RESULTS( + "CountToFive", tx1.exec_prepared("CountToFive"), tx1.exec(count_to_5)); + + // Re-preparing it is an error. + PQXX_CHECK_THROWS( + tx1.conn().prepare("CountToFive", count_to_5), pqxx::sql_error, + "Did not report re-definition of prepared statement."); + + tx1.abort(); + pqxx::work tx2{c}; + + // Executing a nonexistent prepared statement is also an error. 
+ PQXX_CHECK_THROWS( + tx2.exec_prepared("NonexistentStatement"), pqxx::sql_error, + "Did not report invocation of nonexistent prepared statement."); +} + + +void test_basic_args() +{ + pqxx::connection c; + c.prepare("EchoNum", "SELECT $1::int"); + pqxx::work tx{c}; + auto r{tx.exec_prepared("EchoNum", 7)}; + PQXX_CHECK_EQUAL( + std::size(r), 1, "Did not get 1 row from prepared statement."); + PQXX_CHECK_EQUAL(std::size(r.front()), 1, "Did not get exactly one column."); + PQXX_CHECK_EQUAL(r[0][0].as(), 7, "Got wrong result."); + + auto rw{tx.exec_prepared1("EchoNum", 8)}; + PQXX_CHECK_EQUAL( + std::size(rw), 1, "Did not get 1 column from exec_prepared1."); + PQXX_CHECK_EQUAL(rw[0].as(), 8, "Got wrong result."); +} + + +void test_multiple_params() +{ + pqxx::connection c; + c.prepare("CountSeries", "SELECT * FROM generate_series($1::int, $2::int)"); + pqxx::work tx{c}; + auto r{tx.exec_prepared_n(4, "CountSeries", 7, 10)}; + PQXX_CHECK_EQUAL( + std::size(r), 4, "Wrong number of rows, but no error raised."); + PQXX_CHECK_EQUAL(r.front().front().as(), 7, "Wrong $1."); + PQXX_CHECK_EQUAL(r.back().front().as(), 10, "Wrong $2."); + + c.prepare("Reversed", "SELECT * FROM generate_series($2::int, $1::int)"); + r = tx.exec_prepared_n(3, "Reversed", 8, 6); + PQXX_CHECK_EQUAL( + r.front().front().as(), 6, "Did parameters get reordered?"); + PQXX_CHECK_EQUAL( + r.back().front().as(), 8, "$2 did not come through properly."); +} + + +void test_nulls() +{ + pqxx::connection c; + pqxx::work tx{c}; + c.prepare("EchoStr", "SELECT $1::varchar"); + auto rw{tx.exec_prepared1("EchoStr", nullptr)}; + PQXX_CHECK(rw.front().is_null(), "nullptr did not translate to null."); + + char const *n{nullptr}; + rw = tx.exec_prepared1("EchoStr", n); + PQXX_CHECK(rw.front().is_null(), "Null pointer did not translate to null."); +} + + +void test_strings() +{ + pqxx::connection c; + pqxx::work tx{c}; + c.prepare("EchoStr", "SELECT $1::varchar"); + auto rw{tx.exec_prepared1("EchoStr", "foo")}; + PQXX_CHECK_EQUAL( + rw.front().as(), "foo", "Wrong string result."); + + char const nasty_string[]{R"--('\"\)--"}; + rw = tx.exec_prepared1("EchoStr", nasty_string); + PQXX_CHECK_EQUAL( + rw.front().as(), std::string(nasty_string), + "Prepared statement did not quote/escape correctly."); + + rw = tx.exec_prepared1("EchoStr", std::string{nasty_string}); + PQXX_CHECK_EQUAL( + rw.front().as(), std::string(nasty_string), + "Quoting/escaping went wrong in std::string."); + + char nonconst[]{"non-const C string"}; + rw = tx.exec_prepared1("EchoStr", nonconst); + PQXX_CHECK_EQUAL( + rw.front().as(), std::string(nonconst), + "Non-const C string passed incorrectly."); +} + + +void test_binary() +{ + pqxx::connection c; + pqxx::work tx{c}; + c.prepare("EchoBin", "SELECT $1::bytea"); + constexpr char raw_bytes[]{"Binary\0bytes'\"with\tweird\xff bytes"}; + std::string const input{raw_bytes, std::size(raw_bytes)}; + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + { + pqxx::binarystring const bin{input}; + auto rw{tx.exec_prepared1("EchoBin", bin)}; + PQXX_CHECK_EQUAL( + pqxx::binarystring(rw[0]).str(), input, + "Binary string came out damaged."); + } +#include "pqxx/internal/ignore-deprecated-post.hxx" + + { + std::basic_string bytes{ + reinterpret_cast(raw_bytes), std::size(raw_bytes)}; + auto bp{tx.exec_prepared1("EchoBin", bytes)}; + auto bval{bp[0].as>()}; + PQXX_CHECK_EQUAL( + (std::string_view{ + reinterpret_cast(bval.c_str()), std::size(bval)}), + input, "Binary string parameter went wrong."); + } + + // Now try it with a complex type 
that ultimately uses the conversions of + // std::basic_string, but complex enough that the call may + // convert the data to a text string on the libpqxx side. Which would be + // okay, except of course it's likely to be slower. + + { + auto ptr{std::make_shared>( + reinterpret_cast(raw_bytes), std::size(raw_bytes))}; + auto rp{tx.exec_prepared1("EchoBin", ptr)}; + auto pval{rp[0].as>()}; + PQXX_CHECK_EQUAL( + (std::string_view{ + reinterpret_cast(pval.c_str()), std::size(pval)}), + input, "Binary string as shared_ptr-to-optional went wrong."); + } + + { + auto opt{std::optional>{ + std::in_place, reinterpret_cast(raw_bytes), + std::size(raw_bytes)}}; + auto op{tx.exec_prepared1("EchoBin", opt)}; + auto oval{op[0].as>()}; + PQXX_CHECK_EQUAL( + (std::string_view{ + reinterpret_cast(oval.c_str()), std::size(oval)}), + input, "Binary string as shared_ptr-to-optional went wrong."); + } + +#if defined(PQXX_HAVE_CONCEPTS) + // By the way, it doesn't have to be a std::basic_string. Any contiguous + // range will do. + { + std::vector data{std::byte{'x'}, std::byte{'v'}}; + auto op{tx.exec_prepared1("EchoBin", data)}; + auto oval{op[0].as>()}; + PQXX_CHECK_EQUAL( + std::size(oval), 2u, "Binary data came back as wrong length."); + PQXX_CHECK_EQUAL(static_cast(oval[0]), int('x'), "Wrong data."); + PQXX_CHECK_EQUAL(static_cast(oval[1]), int('v'), "Wrong data."); + } +#endif +} + + +void test_params() +{ + pqxx::connection c; + pqxx::work tx{c}; + c.prepare("Concat2Numbers", "SELECT 10 * $1 + $2"); + std::vector values{3, 9}; + pqxx::params params; + params.reserve(std::size(values)); + params.append_multi(values); + + auto const rw39{tx.exec_prepared1("Concat2Numbers", params)}; + PQXX_CHECK_EQUAL( + rw39.front().as(), 39, + "Dynamic prepared-statement parameters went wrong."); + + c.prepare("Concat4Numbers", "SELECT 1000*$1 + 100*$2 + 10*$3 + $4"); + auto const rw1396{tx.exec_prepared1("Concat4Numbers", 1, params, 6)}; + PQXX_CHECK_EQUAL( + rw1396.front().as(), 1396, + "Dynamic params did not interleave with static ones properly."); +} + + +void test_optional() +{ + pqxx::connection c; + pqxx::work tx{c}; + c.prepare("EchoNum", "SELECT $1::int"); + pqxx::row rw{ + tx.exec_prepared1("EchoNum", std::optional{std::in_place, 10})}; + PQXX_CHECK_EQUAL( + rw.front().as(), 10, + "optional (with value) did not return the right value."); + + rw = tx.exec_prepared1("EchoNum", std::optional{}); + PQXX_CHECK( + rw.front().is_null(), "optional without value did not come out as null."); +} + + +void test_prepared_statements() +{ + test_registration_and_invocation(); + test_basic_args(); + test_multiple_params(); + test_nulls(); + test_strings(); + test_binary(); + test_params(); + + test_optional(); +} + + +void test_placeholders_generates_names() +{ + using pqxx::operator""_zv; + pqxx::placeholders name; + PQXX_CHECK_EQUAL(name.view(), "$1"_zv, "Bad placeholders initial zview."); + PQXX_CHECK_EQUAL(name.view(), "$1"sv, "Bad placeholders string_view."); + PQXX_CHECK_EQUAL(name.get(), "$1", "Bad placeholders::get()."); + + name.next(); + PQXX_CHECK_EQUAL(name.view(), "$2"_zv, "Incorrect placeholders::next()."); + + name.next(); + PQXX_CHECK_EQUAL(name.view(), "$3"_zv, "Incorrect placeholders::next()."); + name.next(); + PQXX_CHECK_EQUAL(name.view(), "$4"_zv, "Incorrect placeholders::next()."); + name.next(); + PQXX_CHECK_EQUAL(name.view(), "$5"_zv, "Incorrect placeholders::next()."); + name.next(); + PQXX_CHECK_EQUAL(name.view(), "$6"_zv, "Incorrect placeholders::next()."); + name.next(); + 
PQXX_CHECK_EQUAL(name.view(), "$7"_zv, "Incorrect placeholders::next()."); + name.next(); + PQXX_CHECK_EQUAL(name.view(), "$8"_zv, "Incorrect placeholders::next()."); + name.next(); + PQXX_CHECK_EQUAL(name.view(), "$9"_zv, "Incorrect placeholders::next()."); + name.next(); + PQXX_CHECK_EQUAL(name.view(), "$10"_zv, "Incorrect placeholders carry."); + name.next(); + PQXX_CHECK_EQUAL(name.view(), "$11"_zv, "Incorrect placeholders 11."); + + while (name.count() < 999) name.next(); + PQXX_CHECK_EQUAL(name.view(), "$999"_zv, "Incorrect placeholders 999."); + name.next(); + PQXX_CHECK_EQUAL(name.view(), "$1000"_zv, "Incorrect large placeholder."); +} + + +PQXX_REGISTER_TEST(test_prepared_statements); +PQXX_REGISTER_TEST(test_placeholders_generates_names); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_range.cxx b/ext/libpqxx-7.7.3/test/unit/test_range.cxx new file mode 100644 index 000000000..b40d9b043 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_range.cxx @@ -0,0 +1,555 @@ +#include +#include + +#include "../test_helpers.hxx" + + +namespace +{ +void test_range_construct() +{ + using optint = std::optional; + using oibound = pqxx::inclusive_bound>; + using oxbound = pqxx::inclusive_bound>; + PQXX_CHECK_THROWS( + (pqxx::range{oibound{optint{}}, oibound{optint{}}}), + pqxx::argument_error, "Inclusive bound accepted a null."); + PQXX_CHECK_THROWS( + (pqxx::range{oxbound{optint{}}, oxbound{optint{}}}), + pqxx::argument_error, "Exclusive bound accepted a null."); + + using ibound = pqxx::inclusive_bound; + PQXX_CHECK_THROWS( + (pqxx::range{ibound{1}, ibound{0}}), pqxx::range_error, + "Range constructor accepted backwards range."); + + PQXX_CHECK_THROWS( + (pqxx::range{ + pqxx::inclusive_bound{-1000.0}, + pqxx::inclusive_bound{-std::numeric_limits::infinity()}}), + pqxx::range_error, + "Was able to construct range with infinity bound at the wrong end."); +} + + +void test_range_equality() +{ + using range = pqxx::range; + using ibound = pqxx::inclusive_bound; + using xbound = pqxx::exclusive_bound; + using ubound = pqxx::no_bound; + + PQXX_CHECK_EQUAL( + range{}, range{}, "Default-constructed range is not consistent."); + PQXX_CHECK_EQUAL( + (range{xbound{0}, xbound{0}}), (range{xbound{5}, xbound{5}}), + "Empty ranges at different values are not equal."); + + PQXX_CHECK_EQUAL( + (range{ubound{}, ubound{}}), (range{ubound{}, ubound{}}), + "Universal range is inconsistent."); + PQXX_CHECK_EQUAL( + (range{ibound{5}, ibound{8}}), (range{ibound{5}, ibound{8}}), + "Inclusive range is inconsistent."); + PQXX_CHECK_EQUAL( + (range{xbound{5}, xbound{8}}), (range{xbound{5}, xbound{8}}), + "Exclusive range is inconsistent."); + PQXX_CHECK_EQUAL( + (range{xbound{5}, ibound{8}}), (range{xbound{5}, ibound{8}}), + "Left-exclusive interval is not equal to itself."); + PQXX_CHECK_EQUAL( + (range{ibound{5}, xbound{8}}), (range{ibound{5}, xbound{8}}), + "Right-exclusive interval is not equal to itself."); + PQXX_CHECK_EQUAL( + (range{ubound{}, ibound{8}}), (range{ubound{}, ibound{8}}), + "Unlimited lower bound does not compare equal to same."); + PQXX_CHECK_EQUAL( + (range{ibound{8}, ubound{}}), (range{ibound{8}, ubound{}}), + "Unlimited upper bound does not compare equal to same."); + + PQXX_CHECK_NOT_EQUAL( + (range{ibound{5}, ibound{8}}), (range{xbound{5}, ibound{8}}), + "Equality does not detect inclusive vs. exclusive lower bound."); + PQXX_CHECK_NOT_EQUAL( + (range{ibound{5}, ibound{8}}), (range{ubound{}, ibound{8}}), + "Equality does not detect inclusive vs. 
unlimited lower bound."); + PQXX_CHECK_NOT_EQUAL( + (range{xbound{5}, ibound{8}}), (range{ubound{}, ibound{8}}), + "Equality does not detect exclusive vs. unlimited lower bound."); + PQXX_CHECK_NOT_EQUAL( + (range{ibound{5}, ibound{8}}), (range{ibound{5}, xbound{8}}), + "Equality does not detect inclusive vs. exclusive upper bound."); + PQXX_CHECK_NOT_EQUAL( + (range{ibound{5}, ibound{8}}), (range{ibound{5}, ubound{}}), + "Equality does not detect inclusive vs. unlimited upper bound."); + PQXX_CHECK_NOT_EQUAL( + (range{ibound{5}, xbound{8}}), (range{ibound{5}, ubound{}}), + "Equality does not detect exclusive vs. unlimited upper bound."); + + PQXX_CHECK_NOT_EQUAL( + (range{ibound{5}, ibound{8}}), (range{ibound{4}, ibound{8}}), + "Equality does not compare lower inclusive bound value."); + PQXX_CHECK_NOT_EQUAL( + (range{xbound{5}, ibound{8}}), (range{xbound{4}, ibound{8}}), + "Equality does not compare lower exclusive bound value."); + PQXX_CHECK_NOT_EQUAL( + (range{xbound{5}, ibound{8}}), (range{xbound{5}, ibound{7}}), + "Equality does not compare upper inclusive bound value."); + PQXX_CHECK_NOT_EQUAL( + (range{xbound{5}, xbound{8}}), (range{xbound{5}, xbound{7}}), + "Equality does not compare lower exclusive bound value."); +} + + +void test_range_empty() +{ + using range = pqxx::range; + using ibound = pqxx::inclusive_bound; + using xbound = pqxx::exclusive_bound; + using ubound = pqxx::no_bound; + PQXX_CHECK((range{}.empty()), "Default-constructed range is not empty."); + PQXX_CHECK( + (range{ibound{10}, xbound{10}}).empty(), + "Right-exclusive zero-length interval is not empty."); + PQXX_CHECK( + (range{xbound{10}, ibound{10}}).empty(), + "Left-exclusive zero-length interval is not empty."); + PQXX_CHECK( + (range{xbound{10}, xbound{10}}).empty(), + "Exclusive zero-length interval is not empty."); + + PQXX_CHECK( + not(range{ibound{10}, ibound{10}}).empty(), + "Inclusive zero-length interval is empty."); + PQXX_CHECK( + not(range{xbound{10}, ibound{11}}.empty()), + "Interval is incorrectly empty."); + PQXX_CHECK( + not(range{ubound{}, ubound{}}.empty()), + "Double-unlimited interval is empty."); + PQXX_CHECK( + not(range{ubound{}, xbound{0}}.empty()), + "Left-unlimited interval is empty."); + PQXX_CHECK( + not(range{xbound{0}, ubound{}}.empty()), + "Right-unlimited interval is empty."); +} + + +void test_range_contains() +{ + using range = pqxx::range; + using ibound = pqxx::inclusive_bound; + using xbound = pqxx::exclusive_bound; + using ubound = pqxx::no_bound; + + PQXX_CHECK(not(range{}.contains(-1)), "Empty range contains a value."); + PQXX_CHECK(not(range{}.contains(0)), "Empty range contains a value."); + PQXX_CHECK(not(range{}.contains(1)), "Empty range contains a value."); + + PQXX_CHECK( + not(range{ibound{5}, ibound{8}}.contains(4)), + "Inclusive range contains value outside its left bound."); + PQXX_CHECK( + (range{ibound{5}, ibound{8}}.contains(5)), + "Inclusive range does not contain value on its left bound."); + PQXX_CHECK( + (range{ibound{5}, ibound{8}}.contains(6)), + "Inclusive range does not contain value inside it."); + PQXX_CHECK( + (range{ibound{5}, ibound{8}}.contains(8)), + "Inclusive range does not contain value on its right bound."); + PQXX_CHECK( + not(range{ibound{5}, ibound{8}}.contains(9)), + "Inclusive range contains value outside its right bound."); + + PQXX_CHECK( + not(range{ibound{5}, xbound{8}}.contains(4)), + "Left-inclusive range contains value outside its left bound."); + PQXX_CHECK( + (range{ibound{5}, xbound{8}}.contains(5)), + "Left-inclusive 
range does not contain value on its left bound."); + PQXX_CHECK( + (range{ibound{5}, xbound{8}}.contains(6)), + "Left-inclusive range does not contain value inside it."); + PQXX_CHECK( + not(range{ibound{5}, xbound{8}}.contains(8)), + "Left-inclusive range contains value on its right bound."); + PQXX_CHECK( + not(range{ibound{5}, xbound{8}}.contains(9)), + "Left-inclusive range contains value outside its right bound."); + + PQXX_CHECK( + not(range{xbound{5}, ibound{8}}.contains(4)), + "Right-inclusive range contains value outside its left bound."); + PQXX_CHECK( + not(range{xbound{5}, ibound{8}}.contains(5)), + "Right-inclusive range does contains value on its left bound."); + PQXX_CHECK( + (range{xbound{5}, ibound{8}}.contains(6)), + "Right-inclusive range does not contain value inside it."); + PQXX_CHECK( + (range{xbound{5}, ibound{8}}.contains(8)), + "Right-inclusive range does not contain value on its right bound."); + PQXX_CHECK( + not(range{xbound{5}, ibound{8}}.contains(9)), + "Right-inclusive range contains value outside its right bound."); + + PQXX_CHECK( + not(range{xbound{5}, xbound{8}}.contains(4)), + "Exclusive range contains value outside its left bound."); + PQXX_CHECK( + not(range{xbound{5}, xbound{8}}.contains(5)), + "Exclusive range contains value on its left bound."); + PQXX_CHECK( + (range{xbound{5}, xbound{8}}.contains(6)), + "Exclusive range does not contain value inside it."); + PQXX_CHECK( + not(range{xbound{5}, xbound{8}}.contains(8)), + "Exclusive range does contains value on its right bound."); + PQXX_CHECK( + not(range{xbound{5}, xbound{8}}.contains(9)), + "Exclusive range contains value outside its right bound."); + + PQXX_CHECK( + (range{ubound{}, ibound{8}}.contains(7)), + "Right-inclusive range does not contain value inside it."); + PQXX_CHECK( + (range{ubound{}, ibound{8}}.contains(8)), + "Right-inclusive range does not contain value on its right bound."); + PQXX_CHECK( + not(range{ubound{}, ibound{8}}.contains(9)), + "Right-inclusive range contains value outside its right bound."); + + PQXX_CHECK( + (range{ubound{}, xbound{8}}.contains(7)), + "Right-exclusive range does not contain value inside it."); + PQXX_CHECK( + not(range{ubound{}, xbound{8}}.contains(8)), + "Right-exclusive range contains value on its right bound."); + PQXX_CHECK( + not(range{ubound{}, xbound{8}}.contains(9)), + "Right-exclusive range contains value outside its right bound."); + + PQXX_CHECK( + not(range{ibound{5}, ubound{}}.contains(4)), + "Left-inclusive range contains value outside its left bound."); + PQXX_CHECK( + (range{ibound{5}, ubound{}}.contains(5)), + "Left-inclusive range does not contain value on its left bound."); + PQXX_CHECK( + (range{ibound{5}, ubound{}}.contains(6)), + "Left-inclusive range does not contain value inside it."); + + PQXX_CHECK( + not(range{xbound{5}, ubound{}}.contains(4)), + "Left-exclusive range contains value outside its left bound."); + PQXX_CHECK( + not(range{xbound{5}, ubound{}}.contains(5)), + "Left-exclusive range contains value on its left bound."); + PQXX_CHECK( + (range{xbound{5}, ubound{}}.contains(6)), + "Left-exclusive range does not contain value inside it."); + + PQXX_CHECK( + (range{ubound{}, ubound{}}.contains(-1)), "Value not in universal range."); + PQXX_CHECK( + (range{ubound{}, ubound{}}.contains(0)), "Value not in universal range."); + PQXX_CHECK( + (range{ubound{}, ubound{}}.contains(1)), "Value not in universal range."); +} + + +void test_float_range_contains() +{ + using range = pqxx::range; + using ibound = 
pqxx::inclusive_bound; + using xbound = pqxx::exclusive_bound; + using ubound = pqxx::no_bound; + using limits = std::numeric_limits; + constexpr auto inf{limits::infinity()}; + + PQXX_CHECK( + not(range{ibound{4.0}, ibound{8.0}}.contains(3.9)), + "Float inclusive range contains value beyond its lower bound."); + PQXX_CHECK( + (range{ibound{4.0}, ibound{8.0}}.contains(4.0)), + "Float inclusive range does not contain its lower bound value."); + PQXX_CHECK( + (range{ibound{4.0}, ibound{8.0}}.contains(5.0)), + "Float inclusive range does not contain value inside it."); + + PQXX_CHECK( + (range{ibound{0}, ibound{inf}}).contains(9999.0), + "Range to infinity did not include large number."); + PQXX_CHECK( + not(range{ibound{0}, ibound{inf}}.contains(-0.1)), + "Range to infinity includes number outside it."); + PQXX_CHECK( + (range{ibound{0}, xbound{inf}}.contains(9999.0)), + "Range to exclusive infinity did not include large number."); + PQXX_CHECK( + (range{ibound{0}, ibound{inf}}).contains(inf), + "Range to inclusive infinity does not include infinity."); + PQXX_CHECK( + not(range{ibound{0}, xbound{inf}}.contains(inf)), + "Range to exclusive infinity includes infinity."); + PQXX_CHECK( + (range{ibound{0}, ubound{}}).contains(inf), + "Right-unlimited range does not include infinity."); + + PQXX_CHECK( + (range{ibound{-inf}, ibound{0}}).contains(-9999.0), + "Range from infinity did not include large negative number."); + PQXX_CHECK( + not(range{ibound{-inf}, ibound{0}}.contains(0.1)), + "Range from infinity includes number outside it."); + PQXX_CHECK( + (range{xbound{-inf}, ibound{0}}).contains(-9999.0), + "Range from exclusive infinity did not include large negative number."); + PQXX_CHECK( + (range{ibound{-inf}, ibound{0}}).contains(-inf), + "Range from inclusive infinity does not include negative infinity."); + PQXX_CHECK( + not(range{xbound{-inf}, ibound{0}}).contains(-inf), + "Range to infinity exclusive includes negative infinity."); + PQXX_CHECK( + (range{ubound{}, ibound{0}}).contains(-inf), + "Left-unlimited range does not include negative infinity."); +} + + +void test_range_subset() +{ + using range = pqxx::range; + using traits = pqxx::string_traits; + + std::string_view subsets[][2]{ + {"empty", "empty"}, {"(,)", "empty"}, {"(0,1)", "empty"}, + {"(,)", "[-10,10]"}, {"(,)", "(-10,10)"}, {"(,)", "(,)"}, + {"(,10)", "(,10)"}, {"(,10)", "(,9)"}, {"(,10]", "(,10)"}, + {"(,10]", "(,10]"}, {"(1,)", "(10,)"}, {"(1,)", "(9,)"}, + {"[1,)", "(10,)"}, {"[1,)", "[10,)"}, {"[0,5]", "[1,4]"}, + {"(0,5)", "[1,4]"}, + }; + for (auto const [super, sub] : subsets) + PQXX_CHECK( + traits::from_string(super).contains(traits::from_string(sub)), + pqxx::internal::concat( + "Range '", super, "' did not contain '", sub, "'.")); + + std::string_view non_subsets[][2]{ + {"empty", "[0,0]"}, {"empty", "(,)"}, {"[-10,10]", "(,)"}, + {"(-10,10)", "(,)"}, {"(,9)", "(,10)"}, {"(,10)", "(,10]"}, + {"[1,4]", "[0,4]"}, {"[1,4]", "[1,5]"}, {"(0,10)", "[0,10]"}, + {"(0,10)", "(0,10]"}, {"(0,10)", "[0,10)"}, + }; + for (auto const [super, sub] : non_subsets) + PQXX_CHECK( + not traits::from_string(super).contains(traits::from_string(sub)), + pqxx::internal::concat("Range '", super, "' contained '", sub, "'.")); +} + + +void test_range_to_string() +{ + using range = pqxx::range; + using ibound = pqxx::inclusive_bound; + using xbound = pqxx::exclusive_bound; + using ubound = pqxx::no_bound; + + PQXX_CHECK_EQUAL( + pqxx::to_string(range{}), "empty", "Empty range came out wrong."); + + PQXX_CHECK_EQUAL( + 
pqxx::to_string(range{ibound{5}, ibound{8}}), "[5,8]", + "Inclusive range came out wrong."); + PQXX_CHECK_EQUAL( + pqxx::to_string(range{xbound{5}, ibound{8}}), "(5,8]", + "Left-exclusive range came out wrong."); + PQXX_CHECK_EQUAL( + pqxx::to_string(range{ibound{5}, xbound{8}}), "[5,8)", + "Right-exclusive range came out wrong."); + PQXX_CHECK_EQUAL( + pqxx::to_string(range{xbound{5}, xbound{8}}), "(5,8)", + "Exclusive range came out wrong."); + + // Unlimited boundaries can use brackets or parentheses. Doesn't matter. + // We cheat and use some white-box knowledge of our implementation here. + PQXX_CHECK_EQUAL( + pqxx::to_string(range{ubound{}, ubound{}}), "(,)", + "Universal range came out unexpected."); + PQXX_CHECK_EQUAL( + pqxx::to_string(range{ubound{}, ibound{8}}), "(,8]", + "Left-unlimited range came out unexpected."); + PQXX_CHECK_EQUAL( + pqxx::to_string(range{ubound{}, xbound{8}}), "(,8)", + "Left-unlimited range came out unexpected."); + PQXX_CHECK_EQUAL( + pqxx::to_string(range{ibound{5}, ubound{}}), "[5,)", + "Right-unlimited range came out unexpected."); + PQXX_CHECK_EQUAL( + pqxx::to_string(range{xbound{5}, ubound{}}), "(5,)", + "Right-unlimited range came out unexpected."); +} + + +void test_parse_range() +{ + using range = pqxx::range; + using ubound = pqxx::no_bound; + using traits = pqxx::string_traits; + + constexpr std::string_view empties[]{"empty", "EMPTY", "eMpTy"}; + for (auto empty : empties) + PQXX_CHECK( + traits::from_string(empty).empty(), + pqxx::internal::concat( + "This was supposed to produce an empty range: '", empty, "'")); + + constexpr std::string_view universals[]{"(,)", "[,)", "(,]", "[,]"}; + for (auto univ : universals) + PQXX_CHECK_EQUAL( + traits::from_string(univ), (range{ubound{}, ubound{}}), + pqxx::internal::concat( + "This was supposed to produce a universal range: '", univ, "'")); + + PQXX_CHECK( + traits::from_string("(0,10]").lower_bound().is_exclusive(), + "Exclusive lower bound did not parse right."); + PQXX_CHECK( + traits::from_string("[0,10]").lower_bound().is_inclusive(), + "Inclusive lower bound did not parse right."); + PQXX_CHECK( + traits::from_string("(0,10)").upper_bound().is_exclusive(), + "Exclusive upper bound did not parse right."); + PQXX_CHECK( + traits::from_string("[0,10]").upper_bound().is_inclusive(), + "Inclusive upper bound did not parse right."); + + PQXX_CHECK_EQUAL( + *traits::from_string("(\"0\",\"10\")").lower_bound().value(), 0, + "Quoted range boundary did not parse right."); + PQXX_CHECK_EQUAL( + *traits::from_string("(\"0\",\"10\")").upper_bound().value(), 10, + "Quoted upper boundary did not parse right."); + + auto floats{ + pqxx::string_traits>::from_string("(0,1.0)")}; + PQXX_CHECK_GREATER( + *floats.lower_bound().value(), -0.001, + "Float lower bound is out of range."); + PQXX_CHECK_LESS( + *floats.lower_bound().value(), 0.001, + "Float lower bound is out of range."); + PQXX_CHECK_GREATER( + *floats.upper_bound().value(), 0.999, + "Float upper bound is out of range."); + PQXX_CHECK_LESS( + *floats.upper_bound().value(), 1.001, + "Float upper bound is out of range."); +} + + +void test_parse_bad_range() +{ + using range = pqxx::range; + using conv_err = pqxx::conversion_error; + using traits = pqxx::string_traits; + constexpr std::string_view bad_ranges[]{ + "", "x", "e", "empt", "emptyy", "()", + "[]", "(empty)", "(empty, 0)", "(0, empty)", ",", "(,", + ",)", "(1,2,3)", "(4,5x)", "(null, 0)", "[0, 1.0]", "[1.0, 0]", + }; + + for (auto bad : bad_ranges) + PQXX_CHECK_THROWS( + 
pqxx::ignore_unused(traits::from_string(bad)), conv_err, + pqxx::internal::concat( + "This range wasn't supposed to parse: '", bad, "'")); +} + + +/// Parse ranges lhs and rhs, return their intersection as a string. +template +std::string intersect(std::string_view lhs, std::string_view rhs) +{ + using traits = pqxx::string_traits>; + return pqxx::to_string(traits::from_string(lhs) & traits::from_string(rhs)); +} + + +void test_range_intersection() +{ + // Intersections and their expected results, in text form. + // Each row contains two ranges, and their intersection. + std::string_view intersections[][3]{ + {"empty", "empty", "empty"}, + {"(,)", "empty", "empty"}, + {"[,]", "empty", "empty"}, + {"empty", "[0,10]", "empty"}, + {"(,)", "(,)", "(,)"}, + {"(,)", "(5,8)", "(5,8)"}, + {"(,)", "[5,8)", "[5,8)"}, + {"(,)", "(5,8]", "(5,8]"}, + {"(,)", "[5,8]", "[5,8]"}, + {"(-1000,10)", "(0,1000)", "(0,10)"}, + {"[-1000,10)", "(0,1000)", "(0,10)"}, + {"(-1000,10]", "(0,1000)", "(0,10]"}, + {"[-1000,10]", "(0,1000)", "(0,10]"}, + {"[0,100]", "[0,100]", "[0,100]"}, + {"[0,100]", "[0,100)", "[0,100)"}, + {"[0,100]", "(0,100]", "(0,100]"}, + {"[0,100]", "(0,100)", "(0,100)"}, + {"[0,10]", "[11,20]", "empty"}, + {"[0,10]", "(11,20]", "empty"}, + {"[0,10]", "[11,20)", "empty"}, + {"[0,10]", "(11,20)", "empty"}, + {"[0,10]", "[10,11]", "[10,10]"}, + {"[0,10)", "[10,11]", "empty"}, + {"[0,10]", "(10,11]", "empty"}, + {"[0,10)", "(10,11]", "empty"}, + }; + for (auto [left, right, expected] : intersections) + { + PQXX_CHECK_EQUAL( + intersect(left, right), expected, + pqxx::internal::concat( + "Intersection of '", left, "' and '", right, + " produced unexpected result.")); + PQXX_CHECK_EQUAL( + intersect(right, left), expected, + pqxx::internal::concat( + "Intersection of '", left, "' and '", right, " was asymmetric.")); + } +} + + +void test_range_conversion() +{ + std::string_view const ranges[]{ + "empty", "(,)", "(,10)", "(0,)", "[0,10]", "[0,10)", "(0,10]", "(0,10)", + }; + + for (auto r : ranges) + { + auto const shortr{pqxx::from_string>(r)}; + pqxx::range intr{shortr}; + PQXX_CHECK_EQUAL( + pqxx::to_string(intr), r, "Converted range looks different."); + } +} + + +PQXX_REGISTER_TEST(test_range_construct); +PQXX_REGISTER_TEST(test_range_equality); +PQXX_REGISTER_TEST(test_range_empty); +PQXX_REGISTER_TEST(test_range_contains); +PQXX_REGISTER_TEST(test_float_range_contains); +PQXX_REGISTER_TEST(test_range_subset); +PQXX_REGISTER_TEST(test_range_to_string); +PQXX_REGISTER_TEST(test_parse_range); +PQXX_REGISTER_TEST(test_parse_bad_range); +PQXX_REGISTER_TEST(test_range_intersection); +PQXX_REGISTER_TEST(test_range_conversion); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_read_transaction.cxx b/ext/libpqxx-7.7.3/test/unit/test_read_transaction.cxx new file mode 100644 index 000000000..402f2a06f --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_read_transaction.cxx @@ -0,0 +1,22 @@ +#include + +#include "../test_helpers.hxx" + +namespace +{ +void test_read_transaction() +{ + pqxx::connection conn; + pqxx::read_transaction tx{conn}; + PQXX_CHECK_EQUAL( + tx.exec("SELECT 1")[0][0].as(), 1, + "Bad result from read transaction."); + + PQXX_CHECK_THROWS( + tx.exec("CREATE TABLE should_not_exist(x integer)"), pqxx::sql_error, + "Read-only transaction allows database to be modified."); +} + + +PQXX_REGISTER_TEST(test_read_transaction); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_result_iteration.cxx b/ext/libpqxx-7.7.3/test/unit/test_result_iteration.cxx new file mode 
100644 index 000000000..3d26e3c3e --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_result_iteration.cxx @@ -0,0 +1,137 @@ +#include +#include + +#include "../test_helpers.hxx" + +namespace +{ +void test_result_iteration() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + pqxx::result r{tx.exec("SELECT generate_series(1, 3)")}; + + PQXX_CHECK(std::end(r) != std::begin(r), "Broken begin/end."); + PQXX_CHECK(std::rend(r) != std::rbegin(r), "Broken rbegin/rend."); + + PQXX_CHECK(std::cbegin(r) == std::begin(r), "Wrong cbegin."); + PQXX_CHECK(std::cend(r) == std::end(r), "Wrong cend."); + PQXX_CHECK(std::crbegin(r) == std::rbegin(r), "Wrong crbegin."); + PQXX_CHECK(std::crend(r) == std::rend(r), "Wrong crend."); + + PQXX_CHECK_EQUAL(r.front().front().as(), 1, "Unexpected front()."); + PQXX_CHECK_EQUAL(r.back().front().as(), 3, "Unexpected back()."); +} + + +void test_result_iter() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + pqxx::result r{tx.exec("SELECT generate_series(1, 3)")}; + + int total{0}; + for (auto const &[i] : r.iter()) total += i; + PQXX_CHECK_EQUAL(total, 6, "iter() loop did not get the right values."); +} + + +void test_result_iterator_swap() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + pqxx::result r{tx.exec("SELECT generate_series(1, 3)")}; + + auto head{std::begin(r)}, next{head + 1}; + head.swap(next); + PQXX_CHECK_EQUAL(head[0].as(), 2, "Result iterator swap is wrong."); + PQXX_CHECK_EQUAL(next[0].as(), 1, "Result iterator swap is crazy."); + + auto tail{std::rbegin(r)}, prev{tail + 1}; + tail.swap(prev); + PQXX_CHECK_EQUAL(tail[0].as(), 2, "Reverse iterator swap is wrong."); + PQXX_CHECK_EQUAL(prev[0].as(), 3, "Reverse iterator swap is crazy."); +} + + +void test_result_iterator_assignment() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + pqxx::result r{tx.exec("SELECT generate_series(1, 3)")}; + + pqxx::result::const_iterator fwd; + pqxx::result::const_reverse_iterator rev; + + fwd = std::begin(r); + PQXX_CHECK_EQUAL( + fwd[0].as(), std::begin(r)[0].as(), + "Result iterator assignment is wrong."); + + rev = std::rbegin(r); + PQXX_CHECK_EQUAL( + rev[0].as(), std::rbegin(r)[0].as(), + "Reverse iterator assignment is wrong."); +} + + +void check_employee(std::string name, int salary) +{ + PQXX_CHECK(name == "x" or name == "y" or name == "z", "Unknown name."); + PQXX_CHECK( + salary == 1000 or salary == 1200 or salary == 1500, "Unknown salary."); +} + + +void test_result_for_each() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + tx.exec0("CREATE TEMP TABLE employee(name varchar, salary int)"); + auto fill{pqxx::stream_to::table(tx, {"employee"}, {"name", "salary"})}; + fill.write_values("x", 1000); + fill.write_values("y", 1200); + fill.write_values("z", 1500); + fill.complete(); + + auto const res{tx.exec("SELECT name, salary FROM employee ORDER BY name")}; + + // Use for_each with a function. + res.for_each(check_employee); + + // Use for_each with a simple lambda. + res.for_each( + [](std::string name, int salary) { check_employee(name, salary); }); + + // Use for_each with a lambda closure. + std::string names{}; + int total{0}; + + res.for_each([&names, &total](std::string name, int salary) { + names.append(name); + total += salary; + }); + PQXX_CHECK_EQUAL( + names, "xyz", "result::for_each did not accumulate names correctly."); + PQXX_CHECK_EQUAL(total, 1000 + 1200 + 1500, "Salaries added up wrong."); + + // In addition to regular conversions, you can receive arguments as + // string_view, or as references. 
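+  // (Descriptive note, not in the original: for_each() deduces the callable's
+  // parameter types and converts each field of every row to the matching
+  // type, so the lambda signature below doubles as the conversion spec.)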
+ names.clear(); + total = 0; + res.for_each([&names, &total](std::string_view &&name, int const &salary) { + names.append(name); + total += salary; + }); + PQXX_CHECK_EQUAL( + names, "xyz", "result::for_each did not accumulate names correctly."); + PQXX_CHECK_EQUAL(total, 1000 + 1200 + 1500, "Salaries added up wrong."); +} + + +PQXX_REGISTER_TEST(test_result_iteration); +PQXX_REGISTER_TEST(test_result_iter); +PQXX_REGISTER_TEST(test_result_iterator_swap); +PQXX_REGISTER_TEST(test_result_iterator_assignment); +PQXX_REGISTER_TEST(test_result_for_each); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_result_slicing.cxx b/ext/libpqxx-7.7.3/test/unit/test_result_slicing.cxx new file mode 100644 index 000000000..be2055ee3 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_result_slicing.cxx @@ -0,0 +1,157 @@ +#include + +#include "../test_helpers.hxx" + +#include "pqxx/internal/ignore-deprecated-pre.hxx" + +namespace pqxx +{ +template<> struct nullness : no_null +{}; + +template<> +struct nullness + : no_null +{}; + + +template<> struct string_traits +{ + static constexpr zview text{"[row::const_iterator]"}; + static zview to_buf(char *, char *, row::const_iterator const &) + { + return text; + } + static char *into_buf(char *begin, char *end, row::const_iterator const &) + { + if ((end - begin) <= 30) + throw conversion_overrun{"Not enough buffer for const row iterator."}; + std::memcpy(begin, text.c_str(), std::size(text) + 1); + return begin + std::size(text); + } + static constexpr std::size_t + size_buffer(row::const_iterator const &) noexcept + { + return std::size(text) + 1; + } +}; + + +template<> struct string_traits +{ + static constexpr zview text{"[row::const_reverse_iterator]"}; + static pqxx::zview + to_buf(char *, char *, row::const_reverse_iterator const &) + { + return text; + } + static char * + into_buf(char *begin, char *end, row::const_reverse_iterator const &) + { + if ((end - begin) <= 30) + throw conversion_overrun{"Not enough buffer for const row iterator."}; + std::memcpy(begin, text.c_str(), std::size(text) + 1); + return begin + std::size(text); + } + static constexpr std::size_t + size_buffer(row::const_reverse_iterator const &) noexcept + { + return 100; + } +}; +} // namespace pqxx + +namespace +{ +void test_result_slicing() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + auto r{tx.exec("SELECT 1")}; + + PQXX_CHECK(not std::empty(r[0]), "A plain row shows up as empty."); + + // Empty slice at beginning of row. + pqxx::row s{r[0].slice(0, 0)}; + PQXX_CHECK(std::empty(s), "Empty slice does not show up as empty."); + PQXX_CHECK_EQUAL(std::size(s), 0, "Slicing produces wrong row size."); + PQXX_CHECK_EQUAL( + std::begin(s), std::end(s), "Slice begin()/end() are broken."); + PQXX_CHECK_EQUAL( + std::rbegin(s), std::rend(s), "Slice rbegin()/rend() are broken."); + + PQXX_CHECK_THROWS(s.at(0), pqxx::range_error, "at() does not throw."); + pqxx::row slice; + PQXX_CHECK_THROWS( + slice = r[0].slice(0, 2), pqxx::range_error, "No range check."); + pqxx::ignore_unused(slice); + PQXX_CHECK_THROWS( + slice = r[0].slice(1, 0), pqxx::range_error, "Can reverse-slice."); + pqxx::ignore_unused(slice); + + // Empty slice at end of row. 
+ s = r[0].slice(1, 1); + PQXX_CHECK(std::empty(s), "empty() is broken."); + PQXX_CHECK_EQUAL(std::size(s), 0, "size() is broken."); + PQXX_CHECK_EQUAL(std::begin(s), std::end(s), "begin()/end() are broken."); + PQXX_CHECK_EQUAL( + std::rbegin(s), std::rend(s), "rbegin()/rend() are broken."); + + PQXX_CHECK_THROWS(s.at(0), pqxx::range_error, "at() is inconsistent."); + + // Slice that matches the entire row. + s = r[0].slice(0, 1); + PQXX_CHECK(not std::empty(s), "Nonempty slice shows up as empty."); + PQXX_CHECK_EQUAL(std::size(s), 1, "size() breaks for non-empty slice."); + PQXX_CHECK_EQUAL(std::begin(s) + 1, std::end(s), "Iteration is broken."); + PQXX_CHECK_EQUAL( + std::rbegin(s) + 1, std::rend(s), "Reverse iteration is broken."); + PQXX_CHECK_EQUAL(s.at(0).as(), 1, "Accessing a slice is broken."); + PQXX_CHECK_EQUAL(s[0].as(), 1, "operator[] is broken."); + PQXX_CHECK_THROWS(s.at(1).as(), pqxx::range_error, "at() is off."); + + // Meaningful slice at beginning of row. + r = tx.exec("SELECT 1, 2, 3"); + s = r[0].slice(0, 1); + PQXX_CHECK(not std::empty(s), "Slicing confuses empty()."); + PQXX_CHECK_THROWS( + s.at(1).as(), pqxx::range_error, "at() does not enforce slice."); + + // Meaningful slice that skips an initial column. + s = r[0].slice(1, 2); + PQXX_CHECK( + not std::empty(s), "Slicing away leading columns confuses empty()."); + PQXX_CHECK_EQUAL(s[0].as(), 2, "Slicing offset is broken."); + PQXX_CHECK_EQUAL( + std::begin(s)->as(), 2, "Iteration uses wrong offset."); + PQXX_CHECK_EQUAL( + std::begin(s) + 1, std::end(s), "Iteration has wrong range."); + PQXX_CHECK_EQUAL( + std::rbegin(s) + 1, std::rend(s), "Reverse iteration has wrong range."); + PQXX_CHECK_THROWS( + s.at(1).as(), pqxx::range_error, "Offset slicing is broken."); + + // Column names in a slice. + r = tx.exec("SELECT 1 AS one, 2 AS two, 3 AS three"); + s = r[0].slice(1, 2); + PQXX_CHECK_EQUAL(s["two"].as(), 2, "Column addressing breaks."); + PQXX_CHECK_THROWS( + pqxx::ignore_unused(s.column_number("one")), pqxx::argument_error, + "Can access column name before slice."); + PQXX_CHECK_THROWS( + pqxx::ignore_unused(s.column_number("three")), pqxx::argument_error, + "Can access column name after slice."); + PQXX_CHECK_EQUAL( + s.column_number("Two"), 0, "Column name is case sensitive."); + + // Identical column names. 
+ r = tx.exec("SELECT 1 AS x, 2 AS x"); + s = r[0].slice(1, 2); + PQXX_CHECK_EQUAL(s["x"].as(), 2, "Identical column names break slice."); +} + + +PQXX_REGISTER_TEST(test_result_slicing); +} // namespace + +#include "pqxx/internal/ignore-deprecated-post.hxx" diff --git a/ext/libpqxx-7.7.3/test/unit/test_row.cxx b/ext/libpqxx-7.7.3/test/unit/test_row.cxx new file mode 100644 index 000000000..41770e3b1 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_row.cxx @@ -0,0 +1,84 @@ +#include + +#include "../test_helpers.hxx" + +namespace +{ +void test_row() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + pqxx::result rows{tx.exec("SELECT 1, 2, 3")}; + pqxx::row r{rows[0]}; + PQXX_CHECK_EQUAL(std::size(r), 3, "Unexpected row size."); + PQXX_CHECK_EQUAL(r.at(0).as(), 1, "Wrong value at index 0."); + PQXX_CHECK(std::begin(r) != std::end(r), "Broken row iteration."); + PQXX_CHECK(std::begin(r) < std::end(r), "Row begin does not precede end."); + PQXX_CHECK(std::cbegin(r) == std::begin(r), "Wrong cbegin."); + PQXX_CHECK(std::cend(r) == std::end(r), "Wrong cend."); + PQXX_CHECK(std::rbegin(r) != std::rend(r), "Broken reverse row iteration."); + PQXX_CHECK(std::crbegin(r) == std::rbegin(r), "Wrong crbegin."); + PQXX_CHECK(std::crend(r) == std::rend(r), "Wrong crend."); + PQXX_CHECK_EQUAL(r.front().as(), 1, "Wrong row front()."); + PQXX_CHECK_EQUAL(r.back().as(), 3, "Wrong row back()."); +} + + +void test_row_iterator() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + pqxx::result rows{tx.exec("SELECT 1, 2, 3")}; + + auto i{std::begin(rows[0])}; + PQXX_CHECK_EQUAL(i->as(), 1, "Row iterator is wrong."); + auto i2{i}; + PQXX_CHECK_EQUAL(i2->as(), 1, "Row iterator copy is wrong."); + i2++; + PQXX_CHECK_EQUAL(i2->as(), 2, "Row iterator increment is wrong."); + pqxx::row::const_iterator i3; + i3 = i2; + PQXX_CHECK_EQUAL(i3->as(), 2, "Row iterator assignment is wrong."); + + auto r{std::rbegin(rows[0])}; + PQXX_CHECK_EQUAL(r->as(), 3, "Row reverse iterator is wrong."); + auto r2{r}; + PQXX_CHECK_EQUAL(r2->as(), 3, "Row reverse iterator copy is wrong."); + r2++; + PQXX_CHECK_EQUAL( + r2->as(), 2, "Row reverse iterator increment is wrong."); + pqxx::row::const_reverse_iterator r3; + r3 = r2; + PQXX_CHECK_EQUAL( + i3->as(), 2, "Row reverse iterator assignment is wrong."); +} + + +void test_row_as() +{ + using pqxx::operator"" _zv; + + pqxx::connection conn; + pqxx::work tx{conn}; + + pqxx::row const r{tx.exec1("SELECT 1, 2, 3")}; + auto [one, two, three]{r.as()}; + static_assert(std::is_same_v); + static_assert(std::is_same_v); + static_assert(std::is_same_v); + PQXX_CHECK_EQUAL(one, 1, "row::as() did not produce the right int."); + PQXX_CHECK_GREATER(two, 1.9, "row::as() did not produce the right float."); + PQXX_CHECK_LESS(two, 2.1, "row::as() did not produce the right float."); + PQXX_CHECK_EQUAL( + three, "3"_zv, "row::as() did not produce the right zview."); + + PQXX_CHECK_EQUAL( + std::get<0>(tx.exec1("SELECT 999").as()), 999, + "Unary tuple did not extract right."); +} + + +PQXX_REGISTER_TEST(test_row); +PQXX_REGISTER_TEST(test_row_iterator); +PQXX_REGISTER_TEST(test_row_as); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_separated_list.cxx b/ext/libpqxx-7.7.3/test/unit/test_separated_list.cxx new file mode 100644 index 000000000..748379b47 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_separated_list.cxx @@ -0,0 +1,33 @@ +#include + +#include "../test_helpers.hxx" + +// Test program for separated_list. 
+ +namespace +{ +void test_separated_list() +{ + PQXX_CHECK_EQUAL( + pqxx::separated_list(",", std::vector{}), "", + "Empty list came out wrong."); + + PQXX_CHECK_EQUAL( + pqxx::separated_list(",", std::vector{5}), "5", + "Single-element list came out wrong."); + + PQXX_CHECK_EQUAL( + pqxx::separated_list(",", std::vector{3, 6}), "3,6", + "Things go wrong once separators come in."); + + std::vector const nums{1, 2, 3}; + PQXX_CHECK_EQUAL( + pqxx::separated_list( + "+", std::begin(nums), std::end(nums), + [](auto elt) { return *elt * 2; }), + "2+4+6", "Accessors don't seem to work."); +} + + +PQXX_REGISTER_TEST(test_separated_list); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_simultaneous_transactions.cxx b/ext/libpqxx-7.7.3/test/unit/test_simultaneous_transactions.cxx new file mode 100644 index 000000000..9c1e2fe9c --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_simultaneous_transactions.cxx @@ -0,0 +1,20 @@ +#include +#include + +#include "../test_helpers.hxx" + +namespace +{ +void test_simultaneous_transactions() +{ + pqxx::connection conn; + + pqxx::nontransaction n1{conn}; + PQXX_CHECK_THROWS( + pqxx::nontransaction n2{conn}, std::logic_error, + "Allowed to open simultaneous nontransactions."); +} + + +PQXX_REGISTER_TEST(test_simultaneous_transactions); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_sql_cursor.cxx b/ext/libpqxx-7.7.3/test/unit/test_sql_cursor.cxx new file mode 100644 index 000000000..a0109a00b --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_sql_cursor.cxx @@ -0,0 +1,270 @@ +#include +#include + +#include "../test_helpers.hxx" + +namespace +{ +void test_forward_sql_cursor() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + + // Plain owned, scoped, forward-only read-only cursor. + pqxx::internal::sql_cursor forward( + tx, "SELECT generate_series(1, 4)", "forward", + pqxx::cursor_base::forward_only, pqxx::cursor_base::read_only, + pqxx::cursor_base::owned, false); + + PQXX_CHECK_EQUAL(forward.pos(), 0, "Wrong initial position"); + PQXX_CHECK_EQUAL(forward.endpos(), -1, "Wrong initial endpos()"); + + auto empty_result{forward.empty_result()}; + PQXX_CHECK_EQUAL(std::size(empty_result), 0, "Empty result not empty"); + + auto displacement{0}; + auto one{forward.fetch(1, displacement)}; + PQXX_CHECK_EQUAL(std::size(one), 1, "Fetched wrong number of rows"); + PQXX_CHECK_EQUAL(one[0][0].as(), "1", "Unexpected result"); + PQXX_CHECK_EQUAL(displacement, 1, "Wrong displacement"); + PQXX_CHECK_EQUAL(forward.pos(), 1, "In wrong position"); + + auto offset{forward.move(1, displacement)}; + PQXX_CHECK_EQUAL(offset, 1, "Unexpected offset from move()"); + PQXX_CHECK_EQUAL(displacement, 1, "Unexpected displacement after move()"); + PQXX_CHECK_EQUAL(forward.pos(), 2, "Wrong position after move()"); + PQXX_CHECK_EQUAL(forward.endpos(), -1, "endpos() unexpectedly set"); + + auto row{forward.fetch(0, displacement)}; + PQXX_CHECK_EQUAL(std::size(row), 0, "fetch(0, displacement) returns rows"); + PQXX_CHECK_EQUAL(displacement, 0, "Unexpected displacement after fetch(0)"); + PQXX_CHECK_EQUAL(forward.pos(), 2, "fetch(0, displacement) affected pos()"); + + row = forward.fetch(0); + PQXX_CHECK_EQUAL(std::size(row), 0, "fetch(0) fetched wrong number of rows"); + PQXX_CHECK_EQUAL(forward.pos(), 2, "fetch(0) moved cursor"); + PQXX_CHECK_EQUAL(forward.pos(), 2, "fetch(0) affected pos()"); + + offset = forward.move(1); + PQXX_CHECK_EQUAL(offset, 1, "move(1) returned unexpected value"); + PQXX_CHECK_EQUAL(forward.pos(), 3, "move(1) after fetch(0) broke"); + 
+ row = forward.fetch(1); + PQXX_CHECK_EQUAL( + std::size(row), 1, "fetch(1) returned wrong number of rows"); + PQXX_CHECK_EQUAL(forward.pos(), 4, "fetch(1) results in bad pos()"); + PQXX_CHECK_EQUAL(row[0][0].as(), "4", "pos() is lying"); + + empty_result = forward.fetch(1, displacement); + PQXX_CHECK_EQUAL(std::size(empty_result), 0, "Got rows at end of cursor"); + PQXX_CHECK_EQUAL(forward.pos(), 5, "Not at one-past-end position"); + PQXX_CHECK_EQUAL(forward.endpos(), 5, "Failed to notice end position"); + PQXX_CHECK_EQUAL(displacement, 1, "Wrong displacement at end position"); + + offset = forward.move(5, displacement); + PQXX_CHECK_EQUAL(offset, 0, "move() lied at end of result set"); + PQXX_CHECK_EQUAL(forward.pos(), 5, "pos() is beyond end"); + PQXX_CHECK_EQUAL(forward.endpos(), 5, "endpos() changed after end position"); + PQXX_CHECK_EQUAL(displacement, 0, "Wrong displacement after end position"); + + // Move through entire result set at once. + pqxx::internal::sql_cursor forward2( + tx, "SELECT generate_series(1, 4)", "forward", + pqxx::cursor_base::forward_only, pqxx::cursor_base::read_only, + pqxx::cursor_base::owned, false); + + // Move through entire result set at once. + offset = forward2.move(pqxx::cursor_base::all(), displacement); + PQXX_CHECK_EQUAL(offset, 4, "Unexpected number of rows in result set"); + PQXX_CHECK_EQUAL(displacement, 5, "displacement != rows+1"); + PQXX_CHECK_EQUAL(forward2.pos(), 5, "Bad pos() after skipping all rows"); + PQXX_CHECK_EQUAL(forward2.endpos(), 5, "Bad endpos() after skipping"); + + pqxx::internal::sql_cursor forward3( + tx, "SELECT generate_series(1, 4)", "forward", + pqxx::cursor_base::forward_only, pqxx::cursor_base::read_only, + pqxx::cursor_base::owned, false); + + // Fetch entire result set at once. + auto rows{forward3.fetch(pqxx::cursor_base::all(), displacement)}; + PQXX_CHECK_EQUAL( + std::size(rows), 4, "Unexpected number of rows in result set"); + PQXX_CHECK_EQUAL(displacement, 5, "displacement != rows+1"); + PQXX_CHECK_EQUAL(forward3.pos(), 5, "Bad pos() after fetching all rows"); + PQXX_CHECK_EQUAL(forward3.endpos(), 5, "Bad endpos() after fetching"); + + pqxx::internal::sql_cursor forward_empty( + tx, "SELECT generate_series(0, -1)", "forward_empty", + pqxx::cursor_base::forward_only, pqxx::cursor_base::read_only, + pqxx::cursor_base::owned, false); + + offset = forward_empty.move(3, displacement); + PQXX_CHECK_EQUAL(forward_empty.pos(), 1, "Bad pos() at end of result"); + PQXX_CHECK_EQUAL(forward_empty.endpos(), 1, "Bad endpos() in empty result"); + PQXX_CHECK_EQUAL(displacement, 1, "Bad displacement in empty result"); + PQXX_CHECK_EQUAL(offset, 0, "move() in empty result counted rows"); +} + +void test_scroll_sql_cursor() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + pqxx::internal::sql_cursor scroll( + tx, "SELECT generate_series(1, 10)", "scroll", + pqxx::cursor_base::random_access, pqxx::cursor_base::read_only, + pqxx::cursor_base::owned, false); + + PQXX_CHECK_EQUAL(scroll.pos(), 0, "Scroll cursor's initial pos() is wrong"); + PQXX_CHECK_EQUAL(scroll.endpos(), -1, "New scroll cursor has endpos() set"); + + auto rows{scroll.fetch(pqxx::cursor_base::next())}; + PQXX_CHECK_EQUAL(std::size(rows), 1, "Scroll cursor is broken"); + PQXX_CHECK_EQUAL(scroll.pos(), 1, "Scroll cursor's pos() is broken"); + PQXX_CHECK_EQUAL(scroll.endpos(), -1, "endpos() set prematurely"); + + // Turn cursor around. 
This is where we begin to feel SQL cursors' + // semantics: we pre-decrement, ending up on the position in front of the + // first row and returning no rows. + rows = scroll.fetch(pqxx::cursor_base::prior()); + PQXX_CHECK_EQUAL(std::empty(rows), true, "Turning around on fetch() broke"); + PQXX_CHECK_EQUAL(scroll.pos(), 0, "pos() is not back at zero"); + PQXX_CHECK_EQUAL( + scroll.endpos(), -1, "endpos() set on wrong side of result"); + + // Bounce off the left-hand side of the result set. Can't move before the + // starting position. + auto offset{0}, displacement{0}; + offset = scroll.move(-3, displacement); + PQXX_CHECK_EQUAL(offset, 0, "Rows found before beginning"); + PQXX_CHECK_EQUAL(displacement, 0, "Failed to bounce off beginning"); + PQXX_CHECK_EQUAL(scroll.pos(), 0, "pos() moved back from zero"); + PQXX_CHECK_EQUAL(scroll.endpos(), -1, "endpos() set on left-side bounce"); + + // Try bouncing off the left-hand side a little harder. Take 4 paces away + // from the boundary and run into it. + offset = scroll.move(4, displacement); + PQXX_CHECK_EQUAL(offset, 4, "Offset mismatch"); + PQXX_CHECK_EQUAL(displacement, 4, "Displacement mismatch"); + PQXX_CHECK_EQUAL(scroll.pos(), 4, "Position mismatch"); + PQXX_CHECK_EQUAL(scroll.endpos(), -1, "endpos() set at weird time"); + + offset = scroll.move(-10, displacement); + PQXX_CHECK_EQUAL(offset, 3, "Offset mismatch"); + PQXX_CHECK_EQUAL(displacement, -4, "Displacement mismatch"); + PQXX_CHECK_EQUAL(scroll.pos(), 0, "Hard bounce failed"); + PQXX_CHECK_EQUAL(scroll.endpos(), -1, "endpos() set during hard bounce"); + + rows = scroll.fetch(3); + PQXX_CHECK_EQUAL(scroll.pos(), 3, "Bad pos()"); + PQXX_CHECK_EQUAL(std::size(rows), 3, "Wrong number of rows"); + PQXX_CHECK_EQUAL(rows[2][0].as(), 3, "pos() does not match data"); + rows = scroll.fetch(-1); + PQXX_CHECK_EQUAL(scroll.pos(), 2, "Bad pos()"); + PQXX_CHECK_EQUAL(rows[0][0].as(), 2, "pos() does not match data"); + + rows = scroll.fetch(1); + PQXX_CHECK_EQUAL(scroll.pos(), 3, "Bad pos() after inverse turnaround"); + PQXX_CHECK_EQUAL(rows[0][0].as(), 3, "Data position mismatch"); +} + + +void test_adopted_sql_cursor() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + + tx.exec0( + "DECLARE adopted SCROLL CURSOR FOR " + "SELECT generate_series(1, 3)"); + pqxx::internal::sql_cursor adopted(tx, "adopted", pqxx::cursor_base::owned); + PQXX_CHECK_EQUAL(adopted.pos(), -1, "Adopted cursor has known pos()"); + PQXX_CHECK_EQUAL(adopted.endpos(), -1, "Adopted cursor has known endpos()"); + + auto displacement{0}; + auto rows{adopted.fetch(pqxx::cursor_base::all(), displacement)}; + PQXX_CHECK_EQUAL(std::size(rows), 3, "Wrong number of rows in result"); + PQXX_CHECK_EQUAL(rows[0][0].as(), 1, "Wrong result data"); + PQXX_CHECK_EQUAL(rows[2][0].as(), 3, "Wrong result data"); + PQXX_CHECK_EQUAL(displacement, 4, "Wrong displacement"); + PQXX_CHECK_EQUAL( + adopted.pos(), -1, "End-of-result set pos() on adopted cur"); + PQXX_CHECK_EQUAL(adopted.endpos(), -1, "endpos() set too early"); + + rows = adopted.fetch(pqxx::cursor_base::backward_all(), displacement); + PQXX_CHECK_EQUAL(std::size(rows), 3, "Wrong number of rows in result"); + PQXX_CHECK_EQUAL(rows[0][0].as(), 3, "Wrong result data"); + PQXX_CHECK_EQUAL(rows[2][0].as(), 1, "Wrong result data"); + PQXX_CHECK_EQUAL(displacement, -4, "Wrong displacement"); + PQXX_CHECK_EQUAL(adopted.pos(), 0, "Failed to recognize starting position"); + PQXX_CHECK_EQUAL(adopted.endpos(), -1, "endpos() set too early"); + + auto 
offset{adopted.move(pqxx::cursor_base::all())}; + PQXX_CHECK_EQUAL(offset, 3, "Unexpected move() offset"); + PQXX_CHECK_EQUAL(adopted.pos(), 4, "Bad position on adopted cursor"); + PQXX_CHECK_EQUAL(adopted.endpos(), 4, "endpos() not set properly"); + + // Owned adopted cursors are cleaned up on destruction. + pqxx::connection conn2; + pqxx::work tx2(conn2, "tx2"); + tx2.exec0( + "DECLARE adopted2 CURSOR FOR " + "SELECT generate_series(1, 3)"); + { + pqxx::internal::sql_cursor(tx2, "adopted2", pqxx::cursor_base::owned); + } + // Modern backends: accessing the cursor now is an error, as you'd expect. + PQXX_CHECK_THROWS( + tx2.exec("FETCH 1 IN adopted2"), pqxx::sql_error, + "Owned adopted cursor not cleaned up"); + + tx2.abort(); + + pqxx::work tx3(conn2, "tx3"); + tx3.exec( + "DECLARE adopted3 CURSOR FOR " + "SELECT generate_series(1, 3)"); + { + pqxx::internal::sql_cursor(tx3, "adopted3", pqxx::cursor_base::loose); + } + tx3.exec("MOVE 1 IN adopted3"); +} + +void test_hold_cursor() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + + // "With hold" cursor is kept after commit. + pqxx::internal::sql_cursor with_hold( + tx, "SELECT generate_series(1, 3)", "hold_cursor", + pqxx::cursor_base::forward_only, pqxx::cursor_base::read_only, + pqxx::cursor_base::owned, true); + tx.commit(); + pqxx::work tx2(conn, "tx2"); + auto rows{with_hold.fetch(1)}; + PQXX_CHECK_EQUAL( + std::size(rows), 1, "Did not get 1 row from with-hold cursor"); + + // Cursor without hold is closed on commit. + pqxx::internal::sql_cursor no_hold( + tx2, "SELECT generate_series(1, 3)", "no_hold_cursor", + pqxx::cursor_base::forward_only, pqxx::cursor_base::read_only, + pqxx::cursor_base::owned, false); + tx2.commit(); + pqxx::work tx3(conn, "tx3"); + PQXX_CHECK_THROWS( + no_hold.fetch(1), pqxx::sql_error, "Cursor not closed on commit"); +} + + +void cursor_tests() +{ + test_forward_sql_cursor(); + test_scroll_sql_cursor(); + test_adopted_sql_cursor(); + test_hold_cursor(); +} + + +PQXX_REGISTER_TEST(cursor_tests); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_stateless_cursor.cxx b/ext/libpqxx-7.7.3/test/unit/test_stateless_cursor.cxx new file mode 100644 index 000000000..79307c19e --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_stateless_cursor.cxx @@ -0,0 +1,85 @@ +#include +#include + +#include "../test_helpers.hxx" + +namespace +{ +void test_stateless_cursor() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + + pqxx::stateless_cursor< + pqxx::cursor_base::read_only, pqxx::cursor_base::owned> + empty(tx, "SELECT generate_series(0, -1)", "empty", false); + + auto rows{empty.retrieve(0, 0)}; + PQXX_CHECK_EQUAL(std::empty(rows), true, "Empty result not empty"); + rows = empty.retrieve(0, 1); + PQXX_CHECK_EQUAL(std::size(rows), 0, "Empty result returned rows"); + + PQXX_CHECK_EQUAL(empty.size(), 0, "Empty cursor not empty"); + + PQXX_CHECK_THROWS( + empty.retrieve(1, 0), std::out_of_range, "Empty cursor tries to retrieve"); + + pqxx::stateless_cursor< + pqxx::cursor_base::read_only, pqxx::cursor_base::owned> + stateless(tx, "SELECT generate_series(0, 9)", "stateless", false); + + PQXX_CHECK_EQUAL(stateless.size(), 10, "stateless_cursor::size() mismatch"); + + // Retrieve nothing. + rows = stateless.retrieve(1, 1); + PQXX_CHECK_EQUAL(std::size(rows), 0, "1-to-1 retrieval not empty"); + + // Retrieve two rows. 
+ rows = stateless.retrieve(1, 3); + PQXX_CHECK_EQUAL(std::size(rows), 2, "Retrieved wrong number of rows"); + PQXX_CHECK_EQUAL(rows[0][0].as(), 1, "Data/position mismatch"); + PQXX_CHECK_EQUAL(rows[1][0].as(), 2, "Data/position mismatch"); + + // Retrieve same rows in reverse. + rows = stateless.retrieve(2, 0); + PQXX_CHECK_EQUAL(std::size(rows), 2, "Retrieved wrong number of rows"); + PQXX_CHECK_EQUAL(rows[0][0].as(), 2, "Data/position mismatch"); + PQXX_CHECK_EQUAL(rows[1][0].as(), 1, "Data/position mismatch"); + + // Retrieve beyond end. + rows = stateless.retrieve(9, 13); + PQXX_CHECK_EQUAL(std::size(rows), 1, "Row count wrong at end"); + PQXX_CHECK_EQUAL(rows[0][0].as(), 9, "Data/pos mismatch at end"); + + // Retrieve beyond beginning. + rows = stateless.retrieve(0, -4); + PQXX_CHECK_EQUAL(std::size(rows), 1, "Row count wrong at beginning"); + PQXX_CHECK_EQUAL(rows[0][0].as(), 0, "Data/pos mismatch at beginning"); + + // Retrieve entire result set backwards. + rows = stateless.retrieve(10, -15); + PQXX_CHECK_EQUAL( + std::size(rows), 10, "Reverse complete retrieval is broken"); + PQXX_CHECK_EQUAL(rows[0][0].as(), 9, "Data mismatch"); + PQXX_CHECK_EQUAL(rows[9][0].as(), 0, "Data mismatch"); + + // Normal usage pattern: step through result set, 4 rows at a time. + rows = stateless.retrieve(0, 4); + PQXX_CHECK_EQUAL(std::size(rows), 4, "Wrong batch size"); + PQXX_CHECK_EQUAL(rows[0][0].as(), 0, "Batch in wrong place"); + PQXX_CHECK_EQUAL(rows[3][0].as(), 3, "Batch in wrong place"); + + rows = stateless.retrieve(4, 8); + PQXX_CHECK_EQUAL(std::size(rows), 4, "Wrong batch size"); + PQXX_CHECK_EQUAL(rows[0][0].as(), 4, "Batch in wrong place"); + PQXX_CHECK_EQUAL(rows[3][0].as(), 7, "Batch in wrong place"); + + rows = stateless.retrieve(8, 12); + PQXX_CHECK_EQUAL(std::size(rows), 2, "Wrong batch size"); + PQXX_CHECK_EQUAL(rows[0][0].as(), 8, "Batch in wrong place"); + PQXX_CHECK_EQUAL(rows[1][0].as(), 9, "Batch in wrong place"); +} + + +PQXX_REGISTER_TEST(test_stateless_cursor); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_strconv.cxx b/ext/libpqxx-7.7.3/test/unit/test_strconv.cxx new file mode 100644 index 000000000..602084e18 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_strconv.cxx @@ -0,0 +1,143 @@ +#include +#include + +#include "../test_helpers.hxx" + +namespace +{ +enum colour +{ + red, + green, + blue +}; +enum class weather : short +{ + hot, + cold, + wet +}; +enum class many : unsigned long long +{ + bottom = 0, + top = std::numeric_limits::max(), +}; +} // namespace + +namespace pqxx +{ +PQXX_DECLARE_ENUM_CONVERSION(colour); +PQXX_DECLARE_ENUM_CONVERSION(weather); +PQXX_DECLARE_ENUM_CONVERSION(many); +} // namespace pqxx + + +namespace +{ +void test_strconv_bool() +{ + PQXX_CHECK_EQUAL(pqxx::to_string(false), "false", "Wrong to_string(false)."); + PQXX_CHECK_EQUAL(pqxx::to_string(true), "true", "Wrong to_string(true)."); + + bool result; + pqxx::from_string("false", result); + PQXX_CHECK_EQUAL(result, false, "Wrong from_string('false')."); + pqxx::from_string("FALSE", result); + PQXX_CHECK_EQUAL(result, false, "Wrong from_string('FALSE')."); + pqxx::from_string("f", result); + PQXX_CHECK_EQUAL(result, false, "Wrong from_string('f')."); + pqxx::from_string("F", result); + PQXX_CHECK_EQUAL(result, false, "Wrong from_string('F')."); + pqxx::from_string("0", result); + PQXX_CHECK_EQUAL(result, false, "Wrong from_string('0')."); + pqxx::from_string("true", result); + PQXX_CHECK_EQUAL(result, true, "Wrong from_string('true')."); + pqxx::from_string("TRUE", 
result); + PQXX_CHECK_EQUAL(result, true, "Wrong from_string('TRUE')."); + pqxx::from_string("t", result); + PQXX_CHECK_EQUAL(result, true, "Wrong from_string('t')."); + pqxx::from_string("T", result); + PQXX_CHECK_EQUAL(result, true, "Wrong from_string('T')."); + pqxx::from_string("1", result); + PQXX_CHECK_EQUAL(result, true, "Wrong from_string('1')."); +} + + +void test_strconv_enum() +{ + PQXX_CHECK_EQUAL(pqxx::to_string(red), "0", "Enum value did not convert."); + PQXX_CHECK_EQUAL(pqxx::to_string(green), "1", "Enum value did not convert."); + PQXX_CHECK_EQUAL(pqxx::to_string(blue), "2", "Enum value did not convert."); + + colour col; + pqxx::from_string("2", col); + PQXX_CHECK_EQUAL(col, blue, "Could not recover enum value from string."); +} + + +void test_strconv_class_enum() +{ + PQXX_CHECK_EQUAL( + pqxx::to_string(weather::hot), "0", "Class enum value did not convert."); + PQXX_CHECK_EQUAL( + pqxx::to_string(weather::wet), "2", "Enum value did not convert."); + + weather w; + pqxx::from_string("2", w); + PQXX_CHECK_EQUAL( + w, weather::wet, "Could not recover class enum value from string."); + + PQXX_CHECK_EQUAL( + pqxx::to_string(many::bottom), "0", + "Small wide enum did not convert right."); + PQXX_CHECK_EQUAL( + pqxx::to_string(many::top), + pqxx::to_string(std::numeric_limits::max()), + "Large wide enum did not convert right."); +} + + +void test_strconv_optional() +{ + PQXX_CHECK_THROWS( + pqxx::to_string(std::optional{}), pqxx::conversion_error, + "Converting an empty optional did not throw conversion error."); + PQXX_CHECK_EQUAL( + pqxx::to_string(std::optional{std::in_place, 10}), "10", + "std::optional does not convert right."); + PQXX_CHECK_EQUAL( + pqxx::to_string(std::optional{std::in_place, -10000}), "-10000", + "std::optional does not convert right."); +} + + +void test_strconv_smart_pointer() +{ + PQXX_CHECK_THROWS( + pqxx::to_string(std::unique_ptr{}), pqxx::conversion_error, + "Converting an empty unique_ptr did not throw conversion error."); + PQXX_CHECK_EQUAL( + pqxx::to_string(std::make_unique(10)), "10", + "std::unique_ptr does not convert right."); + PQXX_CHECK_EQUAL( + pqxx::to_string(std::make_unique(-10000)), "-10000", + "std::unique_ptr does not convert right."); + + PQXX_CHECK_THROWS( + pqxx::to_string(std::shared_ptr{}), pqxx::conversion_error, + "Converting an empty shared_ptr did not throw conversion error."); + PQXX_CHECK_EQUAL( + pqxx::to_string(std::make_shared(10)), "10", + "std::shared_ptr does not convert right."); + PQXX_CHECK_EQUAL( + pqxx::to_string(std::make_shared(-10000)), "-10000", + "std::shared_ptr does not convert right."); +} + + +PQXX_REGISTER_TEST(test_strconv_bool); +PQXX_REGISTER_TEST(test_strconv_enum); +PQXX_REGISTER_TEST(test_strconv_class_enum); +PQXX_REGISTER_TEST(test_strconv_optional); +PQXX_REGISTER_TEST(test_strconv_smart_pointer); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_stream_from.cxx b/ext/libpqxx-7.7.3/test/unit/test_stream_from.cxx new file mode 100644 index 000000000..d8adb8bd1 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_stream_from.cxx @@ -0,0 +1,344 @@ +#include +#include + +#include "../test_helpers.hxx" +#include "../test_types.hxx" + +#include +#include +#include +#include +#include +#include + +#include + + +namespace +{ +void test_nonoptionals(pqxx::connection &connection) +{ + pqxx::work tx{connection}; + auto extractor{pqxx::stream_from::query( + tx, "SELECT * FROM stream_from_test ORDER BY number0")}; + PQXX_CHECK(extractor, "stream_from failed to initialize."); + + 
std::tuple got_tuple; + + try + { + // We can't read the "910" row -- it contains nulls, which our tuple does + // not accept. + extractor >> got_tuple; + PQXX_CHECK_NOTREACHED( + "Failed to fail to stream null values into null-less fields."); + } + catch (pqxx::conversion_error const &e) + { + std::string const what{e.what()}; + if (what.find("null") == std::string::npos) + throw; + pqxx::test::expected_exception( + "Could not stream nulls into null-less fields: " + what); + } + + // The stream is still good though. + // The second tuple is fine. + extractor >> got_tuple; + PQXX_CHECK(extractor, "Stream ended prematurely."); + + PQXX_CHECK_EQUAL(std::get<0>(got_tuple), 1234, "Bad value."); + // Don't know much about the timestamp, but let's assume it starts with a + // year in the second millennium. + PQXX_CHECK( + std::get<1>(got_tuple).at(0) == '2', "Bad value. Expected timestamp."); + PQXX_CHECK_LESS( + std::size(std::get<1>(got_tuple)), 40u, "Unexpected length."); + PQXX_CHECK_GREATER( + std::size(std::get<1>(got_tuple)), 20u, "Unexpected length."); + PQXX_CHECK_EQUAL(std::get<2>(got_tuple), 4321, "Bad value."); + PQXX_CHECK_EQUAL(std::get<3>(got_tuple), (ipv4{8, 8, 8, 8}), "Bad value."); + PQXX_CHECK_EQUAL(std::get<4>(got_tuple), "hello\n \tworld", "Bad value."); + PQXX_CHECK_EQUAL( + std::get<5>(got_tuple), (bytea{'\x00', '\x01', '\x02'}), "Bad value."); + + // The third tuple contains some nulls. For what it's worth, when we *know* + // that we're getting nulls, we can stream them into nullptr_t fields. + std::tuple< + int, std::string, std::nullptr_t, std::nullptr_t, std::string, bytea> + tup_w_nulls; + + extractor >> tup_w_nulls; + PQXX_CHECK(extractor, "Stream ended prematurely."); + + PQXX_CHECK_EQUAL(std::get<0>(tup_w_nulls), 5678, "Bad value."); + PQXX_CHECK(std::get<2>(tup_w_nulls) == nullptr, "Bad null."); + PQXX_CHECK(std::get<3>(tup_w_nulls) == nullptr, "Bad null."); + + // We're at the end of the stream. + extractor >> tup_w_nulls; + PQXX_CHECK(not extractor, "Stream did not end."); + + // Of course we can't stream a non-null value into a nullptr field. 
+ auto ex2{pqxx::stream_from::query(tx, "SELECT 1")}; + std::tuple null_tup; + try + { + ex2 >> null_tup; + PQXX_CHECK_NOTREACHED( + "stream_from should have refused to convert non-null value to " + "nullptr_t."); + } + catch (pqxx::conversion_error const &e) + { + std::string const what{e.what()}; + if (what.find("null") == std::string::npos) + throw; + pqxx::test::expected_exception( + std::string{"Could not extract row: "} + what); + } + ex2 >> null_tup; + PQXX_CHECK(not ex2, "Stream did not end."); + + PQXX_CHECK_SUCCEEDS( + tx.exec1("SELECT 1"), "Could not use transaction after stream_from."); +} + + +void test_bad_tuples(pqxx::connection &conn) +{ + pqxx::work tx{conn}; + auto extractor{pqxx::stream_from::table(tx, {"stream_from_test"})}; + PQXX_CHECK(extractor, "stream_from failed to initialize"); + + std::tuple got_tuple_too_short; + try + { + extractor >> got_tuple_too_short; + PQXX_CHECK_NOTREACHED("stream_from improperly read first row"); + } + catch (pqxx::usage_error const &e) + { + std::string what{e.what()}; + if ( + what.find("1") == std::string::npos or + what.find("6") == std::string::npos) + throw; + pqxx::test::expected_exception("Tuple is wrong size: " + what); + } + + std::tuple + got_tuple_too_long; + try + { + extractor >> got_tuple_too_long; + PQXX_CHECK_NOTREACHED("stream_from improperly read first row"); + } + catch (pqxx::usage_error const &e) + { + std::string what{e.what()}; + if ( + what.find("6") == std::string::npos or + what.find("7") == std::string::npos) + throw; + pqxx::test::expected_exception("Could not extract row: " + what); + } + + extractor.complete(); +} + + +#define ASSERT_FIELD_EQUAL(OPT, VAL) \ + PQXX_CHECK(static_cast(OPT), "unexpected null field"); \ + PQXX_CHECK_EQUAL(*OPT, VAL, "field value mismatch") +#define ASSERT_FIELD_NULL(OPT) \ + PQXX_CHECK(not static_cast(OPT), "expected null field") + + +template class O> +void test_optional(pqxx::connection &connection) +{ + pqxx::work tx{connection}; + auto extractor{pqxx::stream_from::query( + tx, "SELECT * FROM stream_from_test ORDER BY number0")}; + PQXX_CHECK(extractor, "stream_from failed to initialize"); + + std::tuple, O, O, O, O> + got_tuple; + + extractor >> got_tuple; + PQXX_CHECK(extractor, "stream_from failed to read third row"); + PQXX_CHECK_EQUAL(std::get<0>(got_tuple), 910, "field value mismatch"); + ASSERT_FIELD_NULL(std::get<1>(got_tuple)); + ASSERT_FIELD_NULL(std::get<2>(got_tuple)); + ASSERT_FIELD_NULL(std::get<3>(got_tuple)); + ASSERT_FIELD_EQUAL(std::get<4>(got_tuple), "\\N"); + ASSERT_FIELD_EQUAL(std::get<5>(got_tuple), bytea{}); + + extractor >> got_tuple; + PQXX_CHECK(extractor, "stream_from failed to read first row."); + PQXX_CHECK_EQUAL(std::get<0>(got_tuple), 1234, "Field value mismatch."); + PQXX_CHECK( + static_cast(std::get<1>(got_tuple)), "Unexpected null field."); + // PQXX_CHECK_EQUAL(*std::get<1>(got_tuple), , "field value mismatch"); + ASSERT_FIELD_EQUAL(std::get<2>(got_tuple), 4321); + ASSERT_FIELD_EQUAL(std::get<3>(got_tuple), (ipv4{8, 8, 8, 8})); + ASSERT_FIELD_EQUAL(std::get<4>(got_tuple), "hello\n \tworld"); + ASSERT_FIELD_EQUAL(std::get<5>(got_tuple), (bytea{'\x00', '\x01', '\x02'})); + + extractor >> got_tuple; + PQXX_CHECK(extractor, "stream_from failed to read second row"); + PQXX_CHECK_EQUAL(std::get<0>(got_tuple), 5678, "field value mismatch"); + ASSERT_FIELD_EQUAL(std::get<1>(got_tuple), "2018-11-17 21:23:00"); + ASSERT_FIELD_NULL(std::get<2>(got_tuple)); + ASSERT_FIELD_NULL(std::get<3>(got_tuple)); + ASSERT_FIELD_EQUAL(std::get<4>(got_tuple), 
"\u3053\u3093\u306b\u3061\u308f"); + ASSERT_FIELD_EQUAL( + std::get<5>(got_tuple), (bytea{'f', 'o', 'o', ' ', 'b', 'a', 'r', '\0'})); + + extractor >> got_tuple; + PQXX_CHECK(not extractor, "stream_from failed to detect end of stream"); + + extractor.complete(); +} + + +void test_stream_from() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + tx.exec0( + "CREATE TEMP TABLE stream_from_test (" + "number0 INT NOT NULL," + "ts1 TIMESTAMP NULL," + "number2 INT NULL," + "addr3 INET NULL," + "txt4 TEXT NULL," + "bin5 BYTEA NOT NULL" + ")"); + tx.exec_params( + "INSERT INTO stream_from_test VALUES ($1,$2,$3,$4,$5,$6)", 910, nullptr, + nullptr, nullptr, "\\N", bytea{}); + tx.exec_params( + "INSERT INTO stream_from_test VALUES ($1,$2,$3,$4,$5,$6)", 1234, "now", + 4321, ipv4{8, 8, 8, 8}, "hello\n \tworld", bytea{'\x00', '\x01', '\x02'}); + tx.exec_params( + "INSERT INTO stream_from_test VALUES ($1,$2,$3,$4,$5,$6)", 5678, + "2018-11-17 21:23:00", nullptr, nullptr, "\u3053\u3093\u306b\u3061\u308f", + bytea{'f', 'o', 'o', ' ', 'b', 'a', 'r', '\0'}); + tx.commit(); + + test_nonoptionals(conn); + test_bad_tuples(conn); + std::cout << "testing `std::unique_ptr` as optional...\n"; + test_optional(conn); + std::cout << "testing `std::optional` as optional...\n"; + test_optional(conn); +} + + +void test_stream_from_does_escaping() +{ + std::string const input{"a\t\n\n\n \\b\nc"}; + pqxx::connection conn; + pqxx::work tx{conn}; + tx.exec0("CREATE TEMP TABLE badstr (str text)"); + tx.exec0("INSERT INTO badstr (str) VALUES (" + tx.quote(input) + ")"); + auto reader{pqxx::stream_from::table(tx, {"badstr"})}; + std::tuple out; + reader >> out; + PQXX_CHECK_EQUAL( + std::get<0>(out), input, "stream_from got weird characters wrong."); +} + + +void test_stream_from_does_iteration() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + tx.exec0("CREATE TEMP TABLE str (s text)"); + tx.exec0("INSERT INTO str (s) VALUES ('foo')"); + auto reader{pqxx::stream_from::table(tx, {"str"})}; + + int i{0}; + std::string out; + for (std::tuple t : reader.iter()) + { + i++; + out = std::get<0>(t); + } + PQXX_CHECK_EQUAL(i, 1, "Wrong number of iterations."); + PQXX_CHECK_EQUAL(out, "foo", "Got wrong string."); + + tx.exec0("INSERT INTO str (s) VALUES ('bar')"); + i = 0; + std::set strings; + auto reader2{pqxx::stream_from::table(tx, {"str"})}; + for (std::tuple t : reader2.iter()) + { + i++; + strings.insert(std::get<0>(t)); + } + PQXX_CHECK_EQUAL(i, 2, "Wrong number of iterations."); + PQXX_CHECK_EQUAL( + std::size(strings), 2u, "Wrong number of strings retrieved."); + PQXX_CHECK(strings.find("foo") != std::end(strings), "Missing key."); + PQXX_CHECK(strings.find("bar") != std::end(strings), "Missing key."); +} + + +void test_transaction_stream_from() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + tx.exec0("CREATE TEMP TABLE sample (id integer, name varchar)"); + tx.exec0("INSERT INTO sample (id, name) VALUES (321, 'something')"); + + int items{0}; + int id{0}; + std::string name; + + for (auto [iid, iname] : + tx.stream("SELECT id, name FROM sample")) + { + items++; + id = iid; + name = iname; + } + PQXX_CHECK_EQUAL(items, 1, "Wrong number of iterations."); + PQXX_CHECK_EQUAL(id, 321, "Got wrong int."); + PQXX_CHECK_EQUAL(name, std::string{"something"}, "Got wrong string."); + + PQXX_CHECK_EQUAL( + tx.query_value("SELECT 4"), 4, + "Loop did not relinquish transaction."); +} + + +void test_stream_from_read_row() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + tx.exec0("CREATE TEMP TABLE sample (id integer, name 
varchar, opt integer)"); + tx.exec0("INSERT INTO sample (id, name) VALUES (321, 'something')"); + + auto stream{pqxx::stream_from::table(tx, {"sample"})}; + auto fields{stream.read_row()}; + PQXX_CHECK_EQUAL(fields->size(), 3ul, "Wrong number of fields."); + PQXX_CHECK_EQUAL( + std::string((*fields)[0]), "321", "Integer field came out wrong."); + PQXX_CHECK_EQUAL( + std::string((*fields)[1]), "something", "Text field came out wrong."); + PQXX_CHECK(std::data((*fields)[2]) == nullptr, "Null field came out wrong."); + + auto last{stream.read_row()}; + PQXX_CHECK(last == nullptr, "No null pointer at end of stream."); +} + + +PQXX_REGISTER_TEST(test_stream_from); +PQXX_REGISTER_TEST(test_stream_from_does_escaping); +PQXX_REGISTER_TEST(test_stream_from_does_iteration); +PQXX_REGISTER_TEST(test_transaction_stream_from); +PQXX_REGISTER_TEST(test_stream_from_read_row); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_stream_to.cxx b/ext/libpqxx-7.7.3/test/unit/test_stream_to.cxx new file mode 100644 index 000000000..ae4eb3c65 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_stream_to.cxx @@ -0,0 +1,445 @@ +#include +#include + +#include +#include + +#include "../test_helpers.hxx" +#include "../test_types.hxx" + +namespace +{ +std::string truncate_sql_error(std::string const &what) +{ + auto trunc{what.substr(0, what.find('\n'))}; + if (std::size(trunc) > 64) + trunc = trunc.substr(0, 61) + "..."; + return trunc; +} + + +void test_nonoptionals(pqxx::connection &connection) +{ + pqxx::work tx{connection}; + auto inserter{pqxx::stream_to::table(tx, {"stream_to_test"})}; + PQXX_CHECK(inserter, "stream_to failed to initialize"); + + auto const nonascii{"\u3053\u3093\u306b\u3061\u308f"}; + bytea const binary{'\x00', '\x01', '\x02'}, + text{'f', 'o', 'o', ' ', 'b', 'a', 'r', '\0'}; + + inserter << std::make_tuple( + 1234, "now", 4321, ipv4{8, 8, 4, 4}, "hello nonoptional world", binary); + inserter << std::make_tuple( + 5678, "2018-11-17 21:23:00", nullptr, nullptr, nonascii, text); + inserter << std::make_tuple(910, nullptr, nullptr, nullptr, "\\N", bytea{}); + + inserter.complete(); + + auto r1{tx.exec1("SELECT * FROM stream_to_test WHERE number0 = 1234")}; + PQXX_CHECK_EQUAL(r1[0].as(), 1234, "Read back wrong first int."); + PQXX_CHECK_EQUAL( + r1[4].as(), "hello nonoptional world", + "Read back wrong string."); + PQXX_CHECK_EQUAL(r1[3].as(), ipv4(8, 8, 4, 4), "Read back wrong ip."); + PQXX_CHECK_EQUAL(r1[5].as(), binary, "Read back wrong bytea."); + + auto r2{tx.exec1("SELECT * FROM stream_to_test WHERE number0 = 5678")}; + PQXX_CHECK_EQUAL(r2[0].as(), 5678, "Wrong int on second row."); + PQXX_CHECK(r2[2].is_null(), "Field 2 was meant to be null."); + PQXX_CHECK(r2[3].is_null(), "Field 3 was meant to be null."); + PQXX_CHECK_EQUAL(r2[4].as(), nonascii, "Wrong non-ascii text."); + tx.commit(); +} + +void test_nonoptionals_fold(pqxx::connection &connection) +{ + pqxx::work tx{connection}; + auto inserter{pqxx::stream_to::table(tx, {"stream_to_test"})}; + PQXX_CHECK(inserter, "stream_to failed to initialize"); + + auto const nonascii{"\u3053\u3093\u306b\u3061\u308f"}; + bytea const binary{'\x00', '\x01', '\x02'}, + text{'f', 'o', 'o', ' ', 'b', 'a', 'r', '\0'}; + + inserter.write_values( + 1234, "now", 4321, ipv4{8, 8, 4, 4}, "hello nonoptional world", binary); + inserter.write_values( + 5678, "2018-11-17 21:23:00", nullptr, nullptr, nonascii, text); + inserter.write_values(910, nullptr, nullptr, nullptr, "\\N", bytea{}); + + inserter.complete(); + + auto r1{tx.exec1("SELECT * FROM 
stream_to_test WHERE number0 = 1234")};
+  PQXX_CHECK_EQUAL(r1[0].as<int>(), 1234, "Read back wrong first int.");
+  PQXX_CHECK_EQUAL(
+    r1[4].as<std::string>(), "hello nonoptional world",
+    "Read back wrong string.");
+  PQXX_CHECK_EQUAL(r1[3].as<ipv4>(), ipv4(8, 8, 4, 4), "Read back wrong ip.");
+  PQXX_CHECK_EQUAL(r1[5].as<bytea>(), binary, "Read back wrong bytea.");
+
+  auto r2{tx.exec1("SELECT * FROM stream_to_test WHERE number0 = 5678")};
+  PQXX_CHECK_EQUAL(r2[0].as<int>(), 5678, "Wrong int on second row.");
+  PQXX_CHECK(r2[2].is_null(), "Field 2 was meant to be null.");
+  PQXX_CHECK(r2[3].is_null(), "Field 3 was meant to be null.");
+  PQXX_CHECK_EQUAL(r2[4].as<std::string>(), nonascii, "Wrong non-ascii text.");
+  tx.commit();
+}
+
+
+/// Try to violate stream_to_test's not-null constraint using a stream_to.
+void insert_bad_null_tuple(pqxx::stream_to &inserter)
+{
+  inserter << std::make_tuple(
+    nullptr, "now", 4321, ipv4{8, 8, 8, 8}, "hello world",
+    bytea{'\x00', '\x01', '\x02'});
+  inserter.complete();
+}
+
+
+void test_bad_null(pqxx::connection &connection)
+{
+  pqxx::work tx{connection};
+  auto inserter{pqxx::stream_to::table(tx, {"stream_to_test"})};
+  PQXX_CHECK(inserter, "stream_to failed to initialize");
+  PQXX_CHECK_THROWS(
+    insert_bad_null_tuple(inserter), pqxx::not_null_violation,
+    "Did not get expected not_null_violation when stream_to inserts a bad null.");
+}
+
+
+/// Try to violate stream_to_test's not-null constraint using a stream_to.
+void insert_bad_null_write(pqxx::stream_to &inserter)
+{
+  inserter.write_values(
+    nullptr, "now", 4321, ipv4{8, 8, 8, 8}, "hello world",
+    bytea{'\x00', '\x01', '\x02'});
+  inserter.complete();
+}
+
+
+void test_bad_null_fold(pqxx::connection &connection)
+{
+  pqxx::work tx{connection};
+  auto inserter{pqxx::stream_to::table(tx, {"stream_to_test"})};
+  PQXX_CHECK(inserter, "stream_to failed to initialize");
+  PQXX_CHECK_THROWS(
+    insert_bad_null_write(inserter), pqxx::not_null_violation,
+    "Did not get expected not_null_violation when stream_to inserts a bad null.");
+}
+
+
+void test_too_few_fields(pqxx::connection &connection)
+{
+  pqxx::work tx{connection};
+  auto inserter{pqxx::stream_to::table(tx, {"stream_to_test"})};
+  PQXX_CHECK(inserter, "stream_to failed to initialize");
+
+  try
+  {
+    inserter << std::make_tuple(1234, "now", 4321, ipv4{8, 8, 8, 8});
+    inserter.complete();
+    tx.commit();
+    PQXX_CHECK_NOTREACHED("stream_to improperly inserted row");
+  }
+  catch (pqxx::sql_error const &e)
+  {
+    std::string what{e.what()};
+    if (what.find("missing data for column") == std::string::npos)
+      throw;
+    pqxx::test::expected_exception(
+      "Could not insert row: " + truncate_sql_error(what));
+  }
+}
+
+void test_too_few_fields_fold(pqxx::connection &connection)
+{
+  pqxx::work tx{connection};
+  auto inserter{pqxx::stream_to::table(tx, {"stream_to_test"})};
+  PQXX_CHECK(inserter, "stream_to failed to initialize");
+
+  try
+  {
+    inserter.write_values(1234, "now", 4321, ipv4{8, 8, 8, 8});
+    inserter.complete();
+    tx.commit();
+    PQXX_CHECK_NOTREACHED("stream_to_fold improperly inserted row");
+  }
+  catch (pqxx::sql_error const &e)
+  {
+    std::string what{e.what()};
+    if (what.find("missing data for column") == std::string::npos)
+      throw;
+    pqxx::test::expected_exception(
+      "Fold - Could not insert row: " + truncate_sql_error(what));
+  }
+}
+
+
+void test_too_many_fields(pqxx::connection &connection)
+{
+  pqxx::work tx{connection};
+  auto inserter{pqxx::stream_to::table(tx, {"stream_to_test"})};
+  PQXX_CHECK(inserter, "stream_to failed to initialize");
+
+  try
+  {
+    inserter <<
std::make_tuple( + 1234, "now", 4321, ipv4{8, 8, 8, 8}, "hello world", + bytea{'\x00', '\x01', '\x02'}, 5678); + inserter.complete(); + tx.commit(); + PQXX_CHECK_NOTREACHED("stream_from improperly inserted row"); + } + catch (pqxx::sql_error const &e) + { + std::string what{e.what()}; + if (what.find("extra data") == std::string::npos) + throw; + pqxx::test::expected_exception( + "Could not insert row: " + truncate_sql_error(what)); + } +} + +void test_too_many_fields_fold(pqxx::connection &connection) +{ + pqxx::work tx{connection}; + auto inserter{pqxx::stream_to::table(tx, {"stream_to_test"})}; + PQXX_CHECK(inserter, "stream_to failed to initialize"); + + try + { + inserter.write_values( + 1234, "now", 4321, ipv4{8, 8, 8, 8}, "hello world", + bytea{'\x00', '\x01', '\x02'}, 5678); + inserter.complete(); + tx.commit(); + PQXX_CHECK_NOTREACHED("stream_from_fold improperly inserted row"); + } + catch (pqxx::sql_error const &e) + { + std::string what{e.what()}; + if (what.find("extra data") == std::string::npos) + throw; + pqxx::test::expected_exception( + "Fold - Could not insert row: " + truncate_sql_error(what)); + } +} + + +void test_stream_to_does_nonnull_optional() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + tx.exec0("CREATE TEMP TABLE foo(x integer, y text)"); + auto inserter{pqxx::stream_to::table(tx, {"foo"})}; + inserter.write_values( + std::optional{368}, std::optional{"Text"}); + inserter.complete(); + auto const row{tx.exec1("SELECT x, y FROM foo")}; + PQXX_CHECK_EQUAL( + row[0].as(), "368", "Non-null int optional came out wrong."); + PQXX_CHECK_EQUAL( + row[1].as(), "Text", + "Non-null string optional came out wrong."); +} + + +template class O> +void test_optional(pqxx::connection &connection) +{ + pqxx::work tx{connection}; + auto inserter{pqxx::stream_to::table(tx, {"stream_to_test"})}; + PQXX_CHECK(inserter, "stream_to failed to initialize"); + + inserter << std::make_tuple( + 910, O{pqxx::nullness>::null()}, + O{pqxx::nullness>::null()}, + O{pqxx::nullness>::null()}, "\\N", bytea{}); + + inserter.complete(); + tx.commit(); +} + +template class O> +void test_optional_fold(pqxx::connection &connection) +{ + pqxx::work tx{connection}; + auto inserter{pqxx::stream_to::table(tx, {"stream_to_test"})}; + PQXX_CHECK(inserter, "stream_to failed to initialize"); + + inserter.write_values( + 910, O{pqxx::nullness>::null()}, + O{pqxx::nullness>::null()}, + O{pqxx::nullness>::null()}, "\\N", bytea{}); + + inserter.complete(); + tx.commit(); +} + + +// As an alternative to a tuple, you can also insert a container. 
+void test_container_stream_to() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + tx.exec0("CREATE TEMP TABLE test_container(a integer, b integer)"); + + auto inserter{pqxx::stream_to::table(tx, {"test_container"})}; + + inserter << std::vector{112, 244}; + inserter.complete(); + + auto read{tx.exec1("SELECT * FROM test_container")}; + PQXX_CHECK_EQUAL( + read[0].as(), 112, "stream_to on container went wrong."); + PQXX_CHECK_EQUAL( + read[1].as(), 244, "Second container field went wrong."); + tx.commit(); +} + +void test_variant_fold(pqxx::connection_base &connection) +{ + pqxx::work tx{connection}; + auto inserter{pqxx::stream_to::table(tx, {"stream_to_test"})}; + PQXX_CHECK(inserter, "stream_to failed to initialize"); + + inserter.write_values( + std::variant{1234}, + std::variant{"now"}, 4321, ipv4{8, 8, 8, 8}, + "hello world", bytea{'\x00', '\x01', '\x02'}); + inserter.write_values( + 5678, "2018-11-17 21:23:00", nullptr, nullptr, + "\u3053\u3093\u306b\u3061\u308f", + bytea{'f', 'o', 'o', ' ', 'b', 'a', 'r', '\0'}); + inserter.write_values(910, nullptr, nullptr, nullptr, "\\N", bytea{}); + + inserter.complete(); + tx.commit(); +} + +void clear_table(pqxx::connection &conn) +{ + pqxx::work tx{conn}; + tx.exec0("DELETE FROM stream_to_test"); + tx.commit(); +} + + +void test_stream_to() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + + tx.exec0( + "CREATE TEMP TABLE stream_to_test (" + "number0 INT NOT NULL," + "ts1 TIMESTAMP NULL," + "number2 INT NULL," + "addr3 INET NULL," + "txt4 TEXT NULL," + "bin5 BYTEA NOT NULL" + ")"); + tx.commit(); + + test_nonoptionals(conn); + clear_table(conn); + test_nonoptionals_fold(conn); + clear_table(conn); + test_bad_null(conn); + clear_table(conn); + test_bad_null_fold(conn); + clear_table(conn); + test_too_few_fields(conn); + clear_table(conn); + test_too_few_fields_fold(conn); + clear_table(conn); + test_too_many_fields(conn); + clear_table(conn); + test_too_many_fields_fold(conn); + clear_table(conn); + test_optional(conn); + clear_table(conn); + test_optional_fold(conn); + clear_table(conn); + test_optional(conn); + clear_table(conn); + test_optional_fold(conn); + clear_table(conn); + test_variant_fold(conn); +} + + +void test_stream_to_factory_with_static_columns() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + + tx.exec0("CREATE TEMP TABLE pqxx_stream_to(a integer, b varchar)"); + + auto stream{pqxx::stream_to::table(tx, {"pqxx_stream_to"}, {"a", "b"})}; + stream.write_values(3, "three"); + stream.complete(); + + auto r{tx.exec1("SELECT a, b FROM pqxx_stream_to")}; + PQXX_CHECK_EQUAL(r[0].as(), 3, "Failed to stream_to a table."); + PQXX_CHECK_EQUAL( + r[1].as(), "three", + "Failed to stream_to a string to a table."); +} + + +void test_stream_to_factory_with_dynamic_columns() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + + tx.exec0("CREATE TEMP TABLE pqxx_stream_to(a integer, b varchar)"); + + std::vector columns{"a", "b"}; +#if defined(PQXX_HAVE_CONCEPTS) + auto stream{pqxx::stream_to::table(tx, {"pqxx_stream_to"}, columns)}; +#else + auto stream{pqxx::stream_to::raw_table( + tx, conn.quote_table({"pqxx_stream_to"}), conn.quote_columns(columns))}; +#endif + stream.write_values(4, "four"); + stream.complete(); + + auto r{tx.exec1("SELECT a, b FROM pqxx_stream_to")}; + PQXX_CHECK_EQUAL( + r[0].as(), 4, "Failed to stream_to a table with dynamic columns."); + PQXX_CHECK_EQUAL( + r[1].as(), "four", + "Failed to stream_to a string to a table with dynamic columns."); +} + + +void test_stream_to_quotes_arguments() +{ + 
pqxx::connection conn; + pqxx::work tx{conn}; + + std::string const table{R"--(pqxx_Stream"'x)--"}, column{R"--(a'"b)--"}; + + tx.exec0( + "CREATE TEMP TABLE " + tx.quote_name(table) + "(" + tx.quote_name(column) + + " integer)"); + auto write{pqxx::stream_to::table(tx, {table}, {column})}; + write.write_values(12); + write.complete(); + + PQXX_CHECK_EQUAL( + tx.query_value( + "SELECT " + tx.quote_name(column) + " FROM " + tx.quote_name(table)), + 12, "Stream wrote wrong value."); +} + + +PQXX_REGISTER_TEST(test_stream_to); +PQXX_REGISTER_TEST(test_container_stream_to); +PQXX_REGISTER_TEST(test_stream_to_does_nonnull_optional); +PQXX_REGISTER_TEST(test_stream_to_factory_with_static_columns); +PQXX_REGISTER_TEST(test_stream_to_factory_with_dynamic_columns); +PQXX_REGISTER_TEST(test_stream_to_quotes_arguments); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_string_conversion.cxx b/ext/libpqxx-7.7.3/test/unit/test_string_conversion.cxx new file mode 100644 index 000000000..30bc084f5 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_string_conversion.cxx @@ -0,0 +1,178 @@ +#include +#include + +#include +#include + +#include "../test_helpers.hxx" + +// Some enums with string conversions. +enum EnumA +{ + ea0, + ea1, + ea2 +}; +enum EnumB +{ + eb0, + eb1, + eb2 +}; +namespace pqxx +{ +PQXX_DECLARE_ENUM_CONVERSION(EnumA); +PQXX_DECLARE_ENUM_CONVERSION(EnumB); +} // namespace pqxx + + +namespace +{ +// "A minimal difference." +constexpr double thres{0.00001}; + + +void test_string_conversion() +{ + PQXX_CHECK_EQUAL( + "C string array", pqxx::to_string("C string array"), + "C-style string constant does not convert to string properly."); + + char text_array[]{"C char array"}; + PQXX_CHECK_EQUAL( + "C char array", pqxx::to_string(text_array), + "C-style non-const char array does not convert to string properly."); + + char const *text_ptr{"C string pointer"}; + PQXX_CHECK_EQUAL( + "C string pointer", pqxx::to_string(text_ptr), + "C-style string pointer does not convert to string properly."); + + std::string const cxx_string{"C++ string"}; + PQXX_CHECK_EQUAL( + "C++ string", pqxx::to_string(cxx_string), + "C++-style string object does not convert to string properly."); + + PQXX_CHECK_EQUAL("0", pqxx::to_string(0), "Zero does not convert right."); + PQXX_CHECK_EQUAL( + "1", pqxx::to_string(1), "Basic integer does not convert right."); + PQXX_CHECK_EQUAL("-1", pqxx::to_string(-1), "Negative numbers don't work."); + PQXX_CHECK_EQUAL( + "9999", pqxx::to_string(9999), "Larger numbers don't work."); + PQXX_CHECK_EQUAL( + "-9999", pqxx::to_string(-9999), "Larger negative numbers don't work."); + + int x; + pqxx::from_string("0", x); + PQXX_CHECK_EQUAL(0, x, "Zero does not parse right."); + pqxx::from_string("1", x); + PQXX_CHECK_EQUAL(1, x, "Basic integer does not parse right."); + pqxx::from_string("-1", x); + PQXX_CHECK_EQUAL(-1, x, "Negative numbers don't work."); + pqxx::from_string("9999", x); + PQXX_CHECK_EQUAL(9999, x, "Larger numbers don't work."); + pqxx::from_string("-9999", x); + PQXX_CHECK_EQUAL(-9999, x, "Larger negative numbers don't work."); + + // Bug #263 describes a case where this kind of overflow went undetected. + if (sizeof(unsigned int) == 4) + { + std::uint32_t u; + PQXX_CHECK_THROWS( + pqxx::from_string("4772185884", u), pqxx::conversion_error, + "Overflow not detected."); + } + + // We can convert to and from long double. The implementation may fall + // back on a thread-local std::stringstream. 
Each call does its own + // cleanup, so the conversion works multiple times. + constexpr long double ld1{123456789.25}, ld2{9876543210.5}; + constexpr char lds1[]{"123456789.25"}, lds2[]{"9876543210.5"}; + PQXX_CHECK_EQUAL( + pqxx::to_string(ld1).substr(0, 12), lds1, + "Wrong conversion from long double."); + PQXX_CHECK_EQUAL( + pqxx::to_string(ld2).substr(0, 12), lds2, + "Wrong value on repeated conversion from long double."); + long double ldi1, ldi2; + pqxx::from_string(lds1, ldi1); + PQXX_CHECK_BOUNDS( + ldi1, ld1 - thres, ld1 + thres, "Wrong conversion to long double."); + pqxx::from_string(lds2, ldi2); + PQXX_CHECK_BOUNDS( + ldi2, ld2 - thres, ld2 + thres, + "Wrong repeated conversion to long double."); + + // We can define string conversions for enums. + PQXX_CHECK_EQUAL( + pqxx::to_string(ea0), "0", "Enum-to-string conversion is broken."); + PQXX_CHECK_EQUAL( + pqxx::to_string(eb0), "0", + "Enum-to-string conversion is inconsistent between enum types."); + PQXX_CHECK_EQUAL( + pqxx::to_string(ea1), "1", + "Enum-to-string conversion breaks for nonzero value."); + + EnumA ea; + pqxx::from_string("2", ea); + PQXX_CHECK_EQUAL(ea, ea2, "String-to-enum conversion is broken."); +} + + +void test_convert_variant_to_string() +{ + PQXX_CHECK_EQUAL( + pqxx::to_string(std::variant{99}), "99", + "First variant field did not convert right."); + + PQXX_CHECK_EQUAL( + pqxx::to_string(std::variant{"Text"}), "Text", + "Second variant field did not convert right."); +} + + +void test_integer_conversion() +{ + PQXX_CHECK_EQUAL( + pqxx::from_string("12"), 12, "Basic integer conversion failed."); + PQXX_CHECK_EQUAL( + pqxx::from_string(" 12"), 12, + "Leading whitespace confused integer conversion."); + PQXX_CHECK_THROWS( + pqxx::ignore_unused(pqxx::from_string("")), pqxx::conversion_error, + "Converting empty string to integer did not throw conversion error."); + PQXX_CHECK_THROWS( + pqxx::ignore_unused(pqxx::from_string(" ")), pqxx::conversion_error, + "Converting whitespace to integer did not throw conversion error."); + PQXX_CHECK_EQUAL( + pqxx::from_string("-6"), -6, + "Leading whitespace did not work with negative number."); + PQXX_CHECK_THROWS( + pqxx::ignore_unused(pqxx::from_string("- 3")), pqxx::conversion_error, + "A space between negation and number was not properly flagged."); + PQXX_CHECK_THROWS( + pqxx::ignore_unused(pqxx::from_string("-")), pqxx::conversion_error, + "Just a minus sign should not parse as an integer."); +} + + +void test_convert_null() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + PQXX_CHECK_EQUAL( + tx.quote(nullptr), "NULL", "Null pointer did not come out as SQL 'null'."); + PQXX_CHECK_EQUAL( + tx.quote(std::nullopt), "NULL", + "std::nullopt did not come out as SQL 'null'."); + PQXX_CHECK_EQUAL( + tx.quote(std::monostate{}), "NULL", + "std::monostate did not come out as SQL 'null'."); +} + + +PQXX_REGISTER_TEST(test_string_conversion); +PQXX_REGISTER_TEST(test_convert_variant_to_string); +PQXX_REGISTER_TEST(test_integer_conversion); +PQXX_REGISTER_TEST(test_convert_null); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_subtransaction.cxx b/ext/libpqxx-7.7.3/test/unit/test_subtransaction.cxx new file mode 100644 index 000000000..4ba909bec --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_subtransaction.cxx @@ -0,0 +1,78 @@ +#include +#include + +#include "../test_helpers.hxx" + +namespace +{ +void make_table(pqxx::transaction_base &trans) +{ + trans.exec0("CREATE TEMP TABLE foo (x INTEGER)"); +} + + +void insert_row(pqxx::transaction_base &trans) +{ 
+ trans.exec0("INSERT INTO foo(x) VALUES (1)"); +} + + +int count_rows(pqxx::transaction_base &trans) +{ + return trans.query_value("SELECT count(*) FROM foo"); +} + + +void test_subtransaction_commits_if_commit_called(pqxx::connection &conn) +{ + pqxx::work trans(conn); + make_table(trans); + { + pqxx::subtransaction sub(trans); + insert_row(sub); + sub.commit(); + } + PQXX_CHECK_EQUAL( + count_rows(trans), 1, "Work done in committed subtransaction was lost."); +} + + +void test_subtransaction_aborts_if_abort_called(pqxx::connection &conn) +{ + pqxx::work trans(conn); + make_table(trans); + { + pqxx::subtransaction sub(trans); + insert_row(sub); + sub.abort(); + } + PQXX_CHECK_EQUAL( + count_rows(trans), 0, "Aborted subtransaction was not rolled back."); +} + + +void test_subtransaction_aborts_implicitly(pqxx::connection &conn) +{ + pqxx::work trans(conn); + make_table(trans); + { + pqxx::subtransaction sub(trans); + insert_row(sub); + } + PQXX_CHECK_EQUAL( + count_rows(trans), 0, + "Uncommitted subtransaction was not rolled back uring destruction."); +} + + +void test_subtransaction() +{ + pqxx::connection conn; + test_subtransaction_commits_if_commit_called(conn); + test_subtransaction_aborts_if_abort_called(conn); + test_subtransaction_aborts_implicitly(conn); +} + + +PQXX_REGISTER_TEST(test_subtransaction); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_test_helpers.cxx b/ext/libpqxx-7.7.3/test/unit/test_test_helpers.cxx new file mode 100644 index 000000000..89cde627e --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_test_helpers.cxx @@ -0,0 +1,214 @@ +#include "../test_helpers.hxx" + +namespace +{ +void empty() {} + + +void test_check_notreached() +{ + // At a minimum, PQXX_CHECK_NOTREACHED must work. + bool failed{true}; + try + { + PQXX_CHECK_NOTREACHED("(expected)"); + failed = false; + } + catch (pqxx::test::test_failure const &) + { + // This is what we expect. + } + if (not failed) + throw pqxx::test::test_failure( + __FILE__, __LINE__, "PQXX_CHECK_NOTREACHED is broken."); +} + + +// Test PQXX_CHECK. +void test_check() +{ + PQXX_CHECK(true, "PQXX_CHECK is broken."); + + bool failed{true}; + try + { + PQXX_CHECK(false, "(expected)"); + failed = false; + } + catch (pqxx::test::test_failure const &) + {} + if (not failed) + PQXX_CHECK_NOTREACHED("PQXX_CHECK failed to notice failure."); +} + + +// Test PQXX_CHECK_THROWS_EXCEPTION. +void test_check_throws_exception() +{ + // PQXX_CHECK_THROWS_EXCEPTION expects std::exception... + PQXX_CHECK_THROWS_EXCEPTION( + throw std::exception(), + "PQXX_CHECK_THROWS_EXCEPTION did not catch std::exception."); + + // ...or any exception type derived from it. + PQXX_CHECK_THROWS_EXCEPTION( + throw pqxx::test::test_failure(__FILE__, __LINE__, "(expected)"), + "PQXX_CHECK_THROWS_EXCEPTION() failed to catch expected exception."); + + // Any other type is an error. + bool failed{true}; + try + { + PQXX_CHECK_THROWS_EXCEPTION(throw 1, "(expected)"); + failed = false; + } + catch (pqxx::test::test_failure const &) + {} + PQXX_CHECK( + failed, + "PQXX_CHECK_THROWS_EXCEPTION did not complain about non-exception."); + + // But there _must_ be an exception. + failed = true; + try + { + // If the test fails to throw, this throws a failure. + PQXX_CHECK_THROWS_EXCEPTION(empty(), "(expected)"); + // So we shouldn't get to this point. + failed = false; + } + catch (pqxx::test::test_failure const &) + { + // Instead, we go straight here. 
+ } + PQXX_CHECK( + failed, "PQXX_CHECK_THROWS_EXCEPTION did not notice missing exception."); + + // PQXX_CHECK_THROWS_EXCEPTION can test itself... + PQXX_CHECK_THROWS_EXCEPTION( + PQXX_CHECK_THROWS_EXCEPTION(empty(), "(expected)"), + "PQXX_CHECK_THROWS_EXCEPTION failed to throw for missing exception."); + + PQXX_CHECK_THROWS_EXCEPTION( + PQXX_CHECK_THROWS_EXCEPTION(throw 1, "(expected)"), + "PQXX_CHECK_THROWS_EXCEPTION ignored wrong exception type."); +} + + +// Test PQXX_CHECK_THROWS. +void test_check_throws() +{ + PQXX_CHECK_THROWS( + throw pqxx::test::test_failure(__FILE__, __LINE__, "(expected)"), + pqxx::test::test_failure, + "PQXX_CHECK_THROWS() failed to catch expected exception."); + + // Even if it's not std::exception-derived. + PQXX_CHECK_THROWS(throw 1, int, "(expected)"); + + // PQXX_CHECK_THROWS means there _must_ be an exception. + bool failed{true}; + try + { + // If the test fails to throw, PQXX_CHECK_THROWS throws a failure. + PQXX_CHECK_THROWS(empty(), std::runtime_error, "(expected)"); + // So we shouldn't get to this point. + failed = false; + } + catch (pqxx::test::test_failure const &) + { + // Instead, we go straight here. + } + PQXX_CHECK(failed, "PQXX_CHECK_THROWS did not notice missing exception."); + + // The exception must be of the right type (or a subclass of the right type). + failed = true; + try + { + // If the test throws the wrong type, PQXX_CHECK_THROWS throws a failure. + PQXX_CHECK_THROWS( + throw std::exception(), pqxx::test::test_failure, "(expected)"); + failed = false; + } + catch (pqxx::test::test_failure const &) + { + // Instead, we go straight here. + } + PQXX_CHECK(failed, "PQXX_CHECK_THROWS did not notice wrong exception type."); + + // PQXX_CHECK_THROWS can test itself... + PQXX_CHECK_THROWS( + PQXX_CHECK_THROWS(empty(), pqxx::test::test_failure, "(expected)"), + pqxx::test::test_failure, + "PQXX_CHECK_THROWS failed to throw for missing exception."); + + PQXX_CHECK_THROWS( + PQXX_CHECK_THROWS(throw 1, std::runtime_error, "(expected)"), + pqxx::test::test_failure, + "PQXX_CHECK_THROWS failed to throw for wrong exception type."); +} + + +void test_test_helpers() +{ + test_check_notreached(); + test_check(); + test_check_throws_exception(); + test_check_throws(); + + // Test other helpers against PQXX_CHECK_THROWS. + PQXX_CHECK_THROWS( + PQXX_CHECK_NOTREACHED("(expected)"), pqxx::test::test_failure, + "PQXX_CHECK_THROWS did not catch PQXX_CHECK_NOTREACHED."); + + PQXX_CHECK_THROWS( + PQXX_CHECK(false, "(expected)"), pqxx::test::test_failure, + "PQXX_CHECK_THROWS did not catch failing PQXX_CHECK."); + + PQXX_CHECK_THROWS( + PQXX_CHECK_THROWS( + PQXX_CHECK(true, "(shouldn't happen)"), pqxx::test::test_failure, + "(expected)"), + pqxx::test::test_failure, + "PQXX_CHECK_THROWS on successful PQXX_CHECK failed to throw."); + + // PQXX_CHECK_EQUAL tests for equality. Its arguments need not be of the + // same type, as long as equality between them is defined. + PQXX_CHECK_EQUAL(1, 1, "PQXX_CHECK_EQUAL is broken."); + PQXX_CHECK_EQUAL(1, 1L, "PQXX_CHECK_EQUAL breaks on type mismatch."); + + PQXX_CHECK_THROWS( + PQXX_CHECK_EQUAL(1, 2, "(expected)"), pqxx::test::test_failure, + "PQXX_CHECK_EQUAL fails to spot inequality."); + + // PQXX_CHECK_NOT_EQUAL is like PQXX_CHECK_EQUAL, but tests for inequality. 
+ PQXX_CHECK_NOT_EQUAL(1, 2, "PQXX_CHECK_NOT_EQUAL is broken."); + PQXX_CHECK_THROWS( + PQXX_CHECK_NOT_EQUAL(1, 1, "(expected)"), pqxx::test::test_failure, + "PQXX_CHECK_NOT_EQUAL fails to fail when arguments are equal."); + PQXX_CHECK_THROWS( + PQXX_CHECK_NOT_EQUAL(1, 1L, "(expected)"), pqxx::test::test_failure, + "PQXX_CHECK_NOT_EQUAL breaks on type mismatch."); + + // PQXX_CHECK_BOUNDS checks a value against a range. + PQXX_CHECK_BOUNDS(2, 1, 3, "PQXX_CHECK_BOUNDS wrongly finds fault."); + + PQXX_CHECK_THROWS( + PQXX_CHECK_BOUNDS(1, 2, 3, "(Expected)"), pqxx::test::test_failure, + "PQXX_CHECK_BOUNDS did not detect value below permitted range."); + + // PQXX_CHECK_BOUNDS tests against a half-open interval. + PQXX_CHECK_BOUNDS(1, 1, 3, "PQXX_CHECK_BOUNDS goes wrong on lower bound."); + PQXX_CHECK_THROWS( + PQXX_CHECK_BOUNDS(3, 1, 3, "(Expected)"), pqxx::test::test_failure, + "PQXX_CHECK_BOUNDS interval is not half-open."); + + // PQXX_CHECK_BOUNDS deals well with empty intervals. + PQXX_CHECK_THROWS( + PQXX_CHECK_BOUNDS(1, 2, 1, "(Expected)"), pqxx::test::test_failure, + "PQXX_CHECK_BOUNDS did not detect empty interval."); +} + + +PQXX_REGISTER_TEST(test_test_helpers); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_thread_safety_model.cxx b/ext/libpqxx-7.7.3/test/unit/test_thread_safety_model.cxx new file mode 100644 index 000000000..cf7627cb3 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_thread_safety_model.cxx @@ -0,0 +1,23 @@ +#include "../test_helpers.hxx" + +#include + +namespace +{ +void test_thread_safety_model() +{ + auto const model{pqxx::describe_thread_safety()}; + + if (model.safe_libpq and model.safe_kerberos) + PQXX_CHECK_EQUAL( + model.description, "", + "Thread-safety looks okay but model description is nonempty."); + else + PQXX_CHECK_NOT_EQUAL( + model.description, "", + "Thread-safety model is imperfect but lacks description."); +} + + +PQXX_REGISTER_TEST(test_thread_safety_model); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_time.cxx b/ext/libpqxx-7.7.3/test/unit/test_time.cxx new file mode 100644 index 000000000..2fb79d472 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_time.cxx @@ -0,0 +1,86 @@ +#include +#include + +#include "../test_helpers.hxx" + +namespace +{ +#if defined(PQXX_HAVE_YEAR_MONTH_DAY) +using namespace std::literals; + + +void test_date_string_conversion() +{ + pqxx::connection conn; + pqxx::work tx{conn}; + std::tuple const conversions[]{ + {-542, 1, 1, "0543-01-01 BC"sv}, + {-1, 2, 3, "0002-02-03 BC"sv}, + {0, 9, 14, "0001-09-14 BC"sv}, + {1, 12, 8, "0001-12-08"sv}, + {2021, 10, 24, "2021-10-24"sv}, + {10191, 8, 30, "10191-08-30"sv}, + {-4712, 1, 1, "4713-01-01 BC"sv}, + {32767, 12, 31, "32767-12-31"sv}, + {2000, 2, 29, "2000-02-29"sv}, + {2004, 2, 29, "2004-02-29"sv}, + // This one won't work in postgres, but we can test the conversions. + {-32767, 11, 3, "32768-11-03 BC"sv}, + }; + for (auto const &[y, m, d, text] : conversions) + { + std::chrono::year_month_day const date{ + std::chrono::year{y}, std::chrono::month{m}, std::chrono::day{d}}; + PQXX_CHECK_EQUAL( + pqxx::to_string(date), text, "Date did not convert right."); + PQXX_CHECK_EQUAL( + pqxx::from_string(text), date, + "Date did not parse right."); + if (int{date.year()} > -4712) + { + // We can't test this for years before 4713 BC (4712 BCE), because + // postgres doesn't handle earlier years. 
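The date round-trips in test_time.cxx above boil down to pqxx::to_string and pqxx::from_string on std::chrono::year_month_day. A standalone sketch using one of the sample dates from the conversion table, assuming a build where PQXX_HAVE_YEAR_MONTH_DAY is defined and the <pqxx/pqxx> convenience header pulls in the calendar conversions:

    #include <chrono>
    #include <pqxx/pqxx>

    #if defined(PQXX_HAVE_YEAR_MONTH_DAY)
    // Round-trip a date through its PostgreSQL text form.
    bool roundtrips()
    {
      std::chrono::year_month_day const date{
        std::chrono::year{2021}, std::chrono::month{10}, std::chrono::day{24}};
      std::string const text{pqxx::to_string(date)};  // "2021-10-24"
      return pqxx::from_string<std::chrono::year_month_day>(text) == date;
    }
    #endif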
+ PQXX_CHECK_EQUAL( + tx.query_value( + "SELECT '" + pqxx::to_string(date) + "'::date"), + text, "Backend interpreted date differently."); + } + } + + std::string_view const invalid[]{ + ""sv, + "yesterday"sv, + "1981-01"sv, + "2010"sv, + "2010-8-9"sv, + "1900-02-29"sv, + "2021-02-29"sv, + "2000-11-29-3"sv, + "1900-02-29"sv, + "2003-02-29"sv, + "12-12-12"sv, + "0000-09-16"sv, + "-01-01"sv, + "-1000-01-01"sv, + "1000-00-01"sv, + "1000-01-00"sv, + "2001y-01-01"sv, + "10-09-08"sv, + "0-01-01"sv, + "0000-01-01"sv, + "2021-13-01"sv, + "2021-+02-01"sv, + "2021-12-32"sv, + }; + for (auto const text : invalid) + PQXX_CHECK_THROWS( + pqxx::ignore_unused( + pqxx::from_string(text)), + pqxx::conversion_error, + pqxx::internal::concat("Invalid date '", text, "' parsed as if valid.")); +} + + +PQXX_REGISTER_TEST(test_date_string_conversion); +#endif // PQXX_HAVE_YEAR_MONTH_DAY +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_transaction.cxx b/ext/libpqxx-7.7.3/test/unit/test_transaction.cxx new file mode 100644 index 000000000..2ae016a26 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_transaction.cxx @@ -0,0 +1,113 @@ +#include +#include + +#include "../test_helpers.hxx" + + +namespace +{ +void test_nontransaction_continues_after_error() +{ + pqxx::connection c; + pqxx::nontransaction tx{c}; + + PQXX_CHECK_EQUAL( + tx.query_value("SELECT 9"), 9, "Simple query went wrong."); + PQXX_CHECK_THROWS( + tx.exec("SELECT 1/0"), pqxx::sql_error, "Expected error did not happen."); + + PQXX_CHECK_EQUAL( + tx.query_value("SELECT 5"), 5, "Wrong result after error."); +} + + +std::string const table{"pqxx_test_transaction"}; + + +void delete_temp_table(pqxx::transaction_base &tx) +{ + tx.exec0(std::string{"DROP TABLE IF EXISTS "} + table); +} + + +void create_temp_table(pqxx::transaction_base &tx) +{ + tx.exec0("CREATE TEMP TABLE " + table + " (x integer)"); +} + + +void insert_temp_table(pqxx::transaction_base &tx, int value) +{ + tx.exec0( + "INSERT INTO " + table + " (x) VALUES (" + pqxx::to_string(value) + ")"); +} + +int count_temp_table(pqxx::transaction_base &tx) +{ + return tx.query_value("SELECT count(*) FROM " + table); +} + + +void test_nontransaction_autocommits() +{ + pqxx::connection c; + + pqxx::nontransaction tx1{c}; + delete_temp_table(tx1); + create_temp_table(tx1); + tx1.commit(); + + pqxx::nontransaction tx2{c}; + insert_temp_table(tx2, 4); + tx2.abort(); + + pqxx::nontransaction tx3{c}; + PQXX_CHECK_EQUAL( + count_temp_table(tx3), 1, + "Did not keep effect of aborted nontransaction."); + delete_temp_table(tx3); +} + + +template void test_double_close() +{ + pqxx::connection c; + + TX tx1{c}; + tx1.exec1("SELECT 1"); + tx1.commit(); + tx1.commit(); + + TX tx2{c}; + tx2.exec1("SELECT 2"); + tx2.abort(); + tx2.abort(); + + TX tx3{c}; + tx3.exec1("SELECT 3"); + tx3.commit(); + PQXX_CHECK_THROWS( + tx3.abort(), pqxx::usage_error, "Abort after commit not caught."); + ; + + TX tx4{c}; + tx4.exec1("SELECT 4"); + tx4.abort(); + PQXX_CHECK_THROWS( + tx4.commit(), pqxx::usage_error, "Commit after abort not caught."); +} + + +void test_transaction() +{ + test_nontransaction_continues_after_error(); + test_nontransaction_autocommits(); + test_double_close>(); + test_double_close(); + test_double_close(); + test_double_close>(); +} + + +PQXX_REGISTER_TEST(test_transaction); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_transaction_base.cxx b/ext/libpqxx-7.7.3/test/unit/test_transaction_base.cxx new file mode 100644 index 000000000..bea15b190 --- /dev/null +++ 
b/ext/libpqxx-7.7.3/test/unit/test_transaction_base.cxx @@ -0,0 +1,106 @@ +#include +#include + +#include "../test_helpers.hxx" + +namespace +{ +void test_exec0(pqxx::transaction_base &trans) +{ + pqxx::result E{trans.exec0("SELECT * FROM pg_tables WHERE 0 = 1")}; + PQXX_CHECK(std::empty(E), "Nonempty result from exec0."); + + PQXX_CHECK_THROWS( + trans.exec0("SELECT 99"), pqxx::unexpected_rows, + "Nonempty exec0 result did not throw unexpected_rows."); +} + + +void test_exec1(pqxx::transaction_base &trans) +{ + pqxx::row R{trans.exec1("SELECT 99")}; + PQXX_CHECK_EQUAL(std::size(R), 1, "Wrong size result from exec1."); + PQXX_CHECK_EQUAL(R.front().as(), 99, "Wrong result from exec1."); + + PQXX_CHECK_THROWS( + trans.exec1("SELECT * FROM pg_tables WHERE 0 = 1"), pqxx::unexpected_rows, + "Empty exec1 result did not throw unexpected_rows."); + PQXX_CHECK_THROWS( + trans.exec1("SELECT * FROM generate_series(1, 2)"), pqxx::unexpected_rows, + "Two-row exec1 result did not throw unexpected_rows."); +} + + +void test_exec_n(pqxx::transaction_base &trans) +{ + pqxx::result R{trans.exec_n(3, "SELECT * FROM generate_series(1, 3)")}; + PQXX_CHECK_EQUAL(std::size(R), 3, "Wrong result size from exec_n."); + + PQXX_CHECK_THROWS( + trans.exec_n(2, "SELECT * FROM generate_series(1, 3)"), + pqxx::unexpected_rows, + "exec_n did not throw unexpected_rows for an undersized result."); + PQXX_CHECK_THROWS( + trans.exec_n(4, "SELECT * FROM generate_series(1, 3)"), + pqxx::unexpected_rows, + "exec_n did not throw unexpected_rows for an oversized result."); +} + + +void test_query_value(pqxx::connection &conn) +{ + pqxx::work tx{conn}; + + PQXX_CHECK_EQUAL( + tx.query_value("SELECT 84 / 2"), 42, + "Got wrong value from query_value."); + PQXX_CHECK_THROWS( + tx.query_value("SAVEPOINT dummy"), pqxx::unexpected_rows, + "Got field when none expected."); + PQXX_CHECK_THROWS( + tx.query_value("SELECT generate_series(1, 2)"), pqxx::unexpected_rows, + "Failed to fail for multiple rows."); + PQXX_CHECK_THROWS( + tx.query_value("SELECT 1, 2"), pqxx::usage_error, + "No error for too many fields."); + PQXX_CHECK_THROWS( + tx.query_value("SELECT 3.141"), pqxx::conversion_error, + "Got int field from float string."); +} + + +void test_transaction_base() +{ + pqxx::connection conn; + { + pqxx::work tx{conn}; + test_exec_n(tx); + test_exec0(tx); + test_exec1(tx); + } + test_query_value(conn); +} + + +void test_transaction_for_each() +{ + constexpr auto query{ + "SELECT i, concat('x', (2*i)::text) " + "FROM generate_series(1, 3) AS i " + "ORDER BY i"}; + pqxx::connection conn; + pqxx::work tx{conn}; + std::string ints; + std::string strings; + tx.for_each(query, [&ints, &strings](int i, std::string const &s) { + ints += pqxx::to_string(i) + " "; + strings += s + " "; + }); + PQXX_CHECK_EQUAL(ints, "1 2 3 ", "Unexpected int sequence."); + PQXX_CHECK_EQUAL(strings, "x2 x4 x6 ", "Unexpected string sequence."); +} + + +PQXX_REGISTER_TEST(test_transaction_base); +PQXX_REGISTER_TEST(test_transaction_for_each); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_transaction_focus.cxx b/ext/libpqxx-7.7.3/test/unit/test_transaction_focus.cxx new file mode 100644 index 000000000..48fdfdd3f --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_transaction_focus.cxx @@ -0,0 +1,53 @@ +#include +#include + +#include "../test_helpers.hxx" + +namespace +{ +auto make_focus(pqxx::dbtransaction &tx) +{ + return pqxx::stream_from::query(tx, "SELECT * from generate_series(1, 10)"); +} + + +void test_cannot_run_statement_during_focus() +{ + 
pqxx::connection conn; + pqxx::transaction tx{conn}; + tx.exec("SELECT 1"); + auto focus{make_focus(tx)}; + PQXX_CHECK_THROWS( + tx.exec("SELECT 1"), pqxx::usage_error, + "Command during focus did not throw expected error."); +} + + +void test_cannot_run_prepared_statement_during_focus() +{ + pqxx::connection conn; + conn.prepare("foo", "SELECT 1"); + pqxx::transaction tx{conn}; + tx.exec_prepared("foo"); + auto focus{make_focus(tx)}; + PQXX_CHECK_THROWS( + tx.exec_prepared("foo"), pqxx::usage_error, + "Prepared statement during focus did not throw expected error."); +} + +void test_cannot_run_params_statement_during_focus() +{ + pqxx::connection conn; + pqxx::transaction tx{conn}; + tx.exec_params("select $1", 10); + auto focus{make_focus(tx)}; + PQXX_CHECK_THROWS( + tx.exec_params("select $1", 10), pqxx::usage_error, + "Parameterized statement during focus did not throw expected error."); +} + + +PQXX_REGISTER_TEST(test_cannot_run_statement_during_focus); +PQXX_REGISTER_TEST(test_cannot_run_prepared_statement_during_focus); +PQXX_REGISTER_TEST(test_cannot_run_params_statement_during_focus); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_transactor.cxx b/ext/libpqxx-7.7.3/test/unit/test_transactor.cxx new file mode 100644 index 000000000..43034807e --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_transactor.cxx @@ -0,0 +1,130 @@ +#include +#include + +#include "../test_helpers.hxx" + +namespace +{ +void test_transactor_newstyle_executes_simple_query() +{ + pqxx::connection conn; + auto const r{pqxx::perform([&conn] { + return pqxx::work{conn}.exec("SELECT generate_series(1, 4)"); + })}; + + PQXX_CHECK_EQUAL(std::size(r), 4, "Unexpected result size."); + PQXX_CHECK_EQUAL(r.columns(), 1, "Unexpected number of columns."); + PQXX_CHECK_EQUAL(r[0][0].as(), 1, "Unexpected first row."); + PQXX_CHECK_EQUAL(r[3][0].as(), 4, "Unexpected last row."); +} + + +void test_transactor_newstyle_can_return_void() +{ + bool done{false}; + pqxx::perform([&done]() noexcept { done = true; }); + PQXX_CHECK(done, "Callback was not executed."); +} + + +void test_transactor_newstyle_completes_upon_success() +{ + int attempts{0}; + pqxx::perform([&attempts]() noexcept { attempts++; }); + PQXX_CHECK_EQUAL(attempts, 1, "Successful transactor didn't run 1 time."); +} + + +void test_transactor_newstyle_retries_broken_connection() +{ + int counter{0}; + auto const &callback{[&counter] { + ++counter; + if (counter == 1) + throw pqxx::broken_connection(); + return counter; + }}; + + int const result{pqxx::perform(callback)}; + PQXX_CHECK_EQUAL(result, 2, "Transactor run returned wrong result."); + PQXX_CHECK_EQUAL(counter, result, "Number of retries does not match."); +} + + +void test_transactor_newstyle_retries_rollback() +{ + int counter{0}; + auto const &callback{[&counter] { + ++counter; + if (counter == 1) + throw pqxx::transaction_rollback("Simulated error"); + return counter; + }}; + + int const result{pqxx::perform(callback)}; + PQXX_CHECK_EQUAL(result, 2, "Transactor run returned wrong result."); + PQXX_CHECK_EQUAL(counter, result, "Number of retries does not match."); +} + + +void test_transactor_newstyle_does_not_retry_in_doubt_error() +{ + int counter{0}; + auto const &callback{[&counter] { + ++counter; + throw pqxx::in_doubt_error("Simulated error"); + }}; + + PQXX_CHECK_THROWS( + pqxx::perform(callback), pqxx::in_doubt_error, + "Transactor did not propagate in_doubt_error."); + PQXX_CHECK_EQUAL(counter, 1, "Transactor retried after in_doubt_error."); +} + + +void 
test_transactor_newstyle_does_not_retry_other_error() +{ + int counter{0}; + auto const &callback{[&counter] { + ++counter; + throw std::runtime_error("Simulated error"); + }}; + + PQXX_CHECK_THROWS( + pqxx::perform(callback), std::runtime_error, + "Transactor did not propagate std exception."); + PQXX_CHECK_EQUAL(counter, 1, "Transactor retried after std exception."); +} + + +void test_transactor_newstyle_repeats_up_to_given_number_of_attempts() +{ + int const attempts{5}; + int counter{0}; + auto const &callback{[&counter] { + ++counter; + throw pqxx::transaction_rollback("Simulated error"); + }}; + + PQXX_CHECK_THROWS( + pqxx::perform(callback, attempts), pqxx::transaction_rollback, + "Not propagating original exception."); + PQXX_CHECK_EQUAL(counter, attempts, "Number of retries does not match."); +} + + +void test_transactor() +{ + test_transactor_newstyle_executes_simple_query(); + test_transactor_newstyle_can_return_void(); + test_transactor_newstyle_completes_upon_success(); + test_transactor_newstyle_retries_broken_connection(); + test_transactor_newstyle_retries_rollback(); + test_transactor_newstyle_does_not_retry_in_doubt_error(); + test_transactor_newstyle_does_not_retry_other_error(); + test_transactor_newstyle_repeats_up_to_given_number_of_attempts(); +} + + +PQXX_REGISTER_TEST(test_transactor); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_type_name.cxx b/ext/libpqxx-7.7.3/test/unit/test_type_name.cxx new file mode 100644 index 000000000..af1c93eb5 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_type_name.cxx @@ -0,0 +1,19 @@ +#include "../test_helpers.hxx" + +namespace +{ +void test_type_name() +{ + // It's hard to test in more detail, because spellings may differ. + // For instance, one compiler might call "const unsigned int*" what another + // might call "unsigned const *". And Visual Studio prefixes "class" to + // class types. + std::string const i{pqxx::type_name}; + PQXX_CHECK_LESS(std::size(i), 5u, "type_name is suspiciously long."); + PQXX_CHECK_EQUAL( + i.substr(0, 1), "i", "type_name does not start with 'i'."); +} + + +PQXX_REGISTER_TEST(test_type_name); +} // namespace diff --git a/ext/libpqxx-7.7.3/test/unit/test_zview.cxx b/ext/libpqxx-7.7.3/test/unit/test_zview.cxx new file mode 100644 index 000000000..f8ce5b9e1 --- /dev/null +++ b/ext/libpqxx-7.7.3/test/unit/test_zview.cxx @@ -0,0 +1,16 @@ +#include + +#include "../test_helpers.hxx" + + +namespace +{ +void test_zview_literal() +{ + using pqxx::operator"" _zv; + + PQXX_CHECK_EQUAL(("foo"_zv), pqxx::zview{"foo"}, "zview literal is broken."); +} + +PQXX_REGISTER_TEST(test_zview_literal); +} // namespace diff --git a/ext/libpqxx-7.7.3/tools/Makefile.am b/ext/libpqxx-7.7.3/tools/Makefile.am new file mode 100644 index 000000000..9f918cd5a --- /dev/null +++ b/ext/libpqxx-7.7.3/tools/Makefile.am @@ -0,0 +1,20 @@ +EXTRA_DIST = \ + extract_version \ + lint \ + rmlo.cxx \ + splitconfig \ + template2mak.py \ + pqxxthreadsafety.cxx + +AM_CPPFLAGS=-I$(top_builddir)/include -I$(top_srcdir)/include ${POSTGRES_INCLUDE} +# Override automatically generated list of default includes. It contains only +# unnecessary entries, and incorrectly mentions include/pqxx directly. 
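Stepping back to test_transactor.cxx above: the pattern under test is pqxx::perform, which re-runs a callback when it throws broken_connection or transaction_rollback, up to a given number of attempts. A minimal sketch of how calling code typically uses it; the "users" table is hypothetical.

    #include <pqxx/pqxx>

    // Sketch: wrap one unit of work in a retriable callback.
    int count_users(pqxx::connection &conn)
    {
      return pqxx::perform(
        [&conn] {
          pqxx::work tx{conn};
          auto const n{tx.query_value<int>("SELECT count(*) FROM users")};
          tx.commit();
          return n;
        },
        3);  // At most three attempts, mirroring the tests above.
    }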
+DEFAULT_INCLUDES= + +noinst_PROGRAMS = rmlo pqxxthreadsafety + +rmlo_SOURCES = rmlo.cxx +rmlo_LDADD = $(top_builddir)/src/libpqxx.la ${POSTGRES_LIB} + +pqxxthreadsafety_SOURCES = pqxxthreadsafety.cxx +pqxxthreadsafety_LDADD = $(top_builddir)/src/libpqxx.la ${POSTGRES_LIB} diff --git a/ext/libpqxx-7.7.3/tools/Makefile.in b/ext/libpqxx-7.7.3/tools/Makefile.in new file mode 100644 index 000000000..4e21cb265 --- /dev/null +++ b/ext/libpqxx-7.7.3/tools/Makefile.in @@ -0,0 +1,638 @@ +# Makefile.in generated by automake 1.16.4 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994-2021 Free Software Foundation, Inc. + +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +VPATH = @srcdir@ +am__is_gnu_make = { \ + if test -z '$(MAKELEVEL)'; then \ + false; \ + elif test -n '$(MAKE_HOST)'; then \ + true; \ + elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \ + true; \ + else \ + false; \ + fi; \ +} +am__make_running_with_option = \ + case $${target_option-} in \ + ?) ;; \ + *) echo "am__make_running_with_option: internal error: invalid" \ + "target option '$${target_option-}' specified" >&2; \ + exit 1;; \ + esac; \ + has_opt=no; \ + sane_makeflags=$$MAKEFLAGS; \ + if $(am__is_gnu_make); then \ + sane_makeflags=$$MFLAGS; \ + else \ + case $$MAKEFLAGS in \ + *\\[\ \ ]*) \ + bs=\\; \ + sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \ + | sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \ + esac; \ + fi; \ + skip_next=no; \ + strip_trailopt () \ + { \ + flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \ + }; \ + for flg in $$sane_makeflags; do \ + test $$skip_next = yes && { skip_next=no; continue; }; \ + case $$flg in \ + *=*|--*) continue;; \ + -*I) strip_trailopt 'I'; skip_next=yes;; \ + -*I?*) strip_trailopt 'I';; \ + -*O) strip_trailopt 'O'; skip_next=yes;; \ + -*O?*) strip_trailopt 'O';; \ + -*l) strip_trailopt 'l'; skip_next=yes;; \ + -*l?*) strip_trailopt 'l';; \ + -[dEDm]) skip_next=yes;; \ + -[JT]) skip_next=yes;; \ + esac; \ + case $$flg in \ + *$$target_option*) has_opt=yes; break;; \ + esac; \ + done; \ + test $$has_opt = yes +am__make_dryrun = (target_option=n; $(am__make_running_with_option)) +am__make_keepgoing = (target_option=k; $(am__make_running_with_option)) +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +noinst_PROGRAMS = rmlo$(EXEEXT) pqxxthreadsafety$(EXEEXT) +subdir = tools +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/config/m4/libtool.m4 \ + $(top_srcdir)/config/m4/ltoptions.m4 \ + $(top_srcdir)/config/m4/ltsugar.m4 \ + $(top_srcdir)/config/m4/ltversion.m4 \ + $(top_srcdir)/config/m4/lt~obsolete.m4 \ + $(top_srcdir)/configure.ac +am__configure_deps = 
$(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON) +mkinstalldirs = $(SHELL) $(top_srcdir)/config/mkinstalldirs +CONFIG_HEADER = $(top_builddir)/include/pqxx/config.h +CONFIG_CLEAN_FILES = +CONFIG_CLEAN_VPATH_FILES = +PROGRAMS = $(noinst_PROGRAMS) +am_pqxxthreadsafety_OBJECTS = pqxxthreadsafety.$(OBJEXT) +pqxxthreadsafety_OBJECTS = $(am_pqxxthreadsafety_OBJECTS) +pqxxthreadsafety_DEPENDENCIES = $(top_builddir)/src/libpqxx.la +AM_V_lt = $(am__v_lt_@AM_V@) +am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@) +am__v_lt_0 = --silent +am__v_lt_1 = +am_rmlo_OBJECTS = rmlo.$(OBJEXT) +rmlo_OBJECTS = $(am_rmlo_OBJECTS) +rmlo_DEPENDENCIES = $(top_builddir)/src/libpqxx.la +AM_V_P = $(am__v_P_@AM_V@) +am__v_P_ = $(am__v_P_@AM_DEFAULT_V@) +am__v_P_0 = false +am__v_P_1 = : +AM_V_GEN = $(am__v_GEN_@AM_V@) +am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@) +am__v_GEN_0 = @echo " GEN " $@; +am__v_GEN_1 = +AM_V_at = $(am__v_at_@AM_V@) +am__v_at_ = $(am__v_at_@AM_DEFAULT_V@) +am__v_at_0 = @ +am__v_at_1 = +depcomp = $(SHELL) $(top_srcdir)/config/depcomp +am__maybe_remake_depfiles = depfiles +am__depfiles_remade = ./$(DEPDIR)/pqxxthreadsafety.Po \ + ./$(DEPDIR)/rmlo.Po +am__mv = mv -f +CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) +LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \ + $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \ + $(AM_CXXFLAGS) $(CXXFLAGS) +AM_V_CXX = $(am__v_CXX_@AM_V@) +am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@) +am__v_CXX_0 = @echo " CXX " $@; +am__v_CXX_1 = +CXXLD = $(CXX) +CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \ + $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ + $(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@ +AM_V_CXXLD = $(am__v_CXXLD_@AM_V@) +am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@) +am__v_CXXLD_0 = @echo " CXXLD " $@; +am__v_CXXLD_1 = +SOURCES = $(pqxxthreadsafety_SOURCES) $(rmlo_SOURCES) +DIST_SOURCES = $(pqxxthreadsafety_SOURCES) $(rmlo_SOURCES) +am__can_run_installinfo = \ + case $$AM_UPDATE_INFO_DIR in \ + n|no|NO) false;; \ + *) (install-info --version) >/dev/null 2>&1;; \ + esac +am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) +# Read a list of newline-separated strings from the standard input, +# and print each of them once, without duplicates. Input order is +# *not* preserved. +am__uniquify_input = $(AWK) '\ + BEGIN { nonempty = 0; } \ + { items[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in items) print i; }; } \ +' +# Make sure the list of sources is unique. This is necessary because, +# e.g., the same source file might be shared among _SOURCES variables +# for different programs/libraries. 
+am__define_uniq_tagged_files = \ + list='$(am__tagged_files)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | $(am__uniquify_input)` +am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/config/depcomp \ + $(top_srcdir)/config/mkinstalldirs +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CSCOPE = @CSCOPE@ +CTAGS = @CTAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DOXYGEN = @DOXYGEN@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +ETAGS = @ETAGS@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GREP = @GREP@ +HAVE_DOT = @HAVE_DOT@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@ +MAINT = @MAINT@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MKDIR = @MKDIR@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PG_CONFIG = @PG_CONFIG@ +PKG_CONFIG = @PKG_CONFIG@ +POSTGRES_INCLUDE = @POSTGRES_INCLUDE@ +PQXXVERSION = @PQXXVERSION@ +PQXX_ABI = @PQXX_ABI@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ +includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = 
@top_builddir@ +top_srcdir = @top_srcdir@ +with_postgres_lib = @with_postgres_lib@ +EXTRA_DIST = \ + extract_version \ + lint \ + rmlo.cxx \ + splitconfig \ + template2mak.py \ + pqxxthreadsafety.cxx + +AM_CPPFLAGS = -I$(top_builddir)/include -I$(top_srcdir)/include ${POSTGRES_INCLUDE} +# Override automatically generated list of default includes. It contains only +# unnecessary entries, and incorrectly mentions include/pqxx directly. +DEFAULT_INCLUDES = +rmlo_SOURCES = rmlo.cxx +rmlo_LDADD = $(top_builddir)/src/libpqxx.la ${POSTGRES_LIB} +pqxxthreadsafety_SOURCES = pqxxthreadsafety.cxx +pqxxthreadsafety_LDADD = $(top_builddir)/src/libpqxx.la ${POSTGRES_LIB} +all: all-am + +.SUFFIXES: +.SUFFIXES: .cxx .lo .o .obj +$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \ + && { if test -f $@; then exit 0; else break; fi; }; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu tools/Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --gnu tools/Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__maybe_remake_depfiles);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh + +$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) + cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh +$(am__aclocal_m4_deps): + +clean-noinstPROGRAMS: + @list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \ + echo " rm -f" $$list; \ + rm -f $$list || exit $$?; \ + test -n "$(EXEEXT)" || exit 0; \ + list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ + echo " rm -f" $$list; \ + rm -f $$list + +pqxxthreadsafety$(EXEEXT): $(pqxxthreadsafety_OBJECTS) $(pqxxthreadsafety_DEPENDENCIES) $(EXTRA_pqxxthreadsafety_DEPENDENCIES) + @rm -f pqxxthreadsafety$(EXEEXT) + $(AM_V_CXXLD)$(CXXLINK) $(pqxxthreadsafety_OBJECTS) $(pqxxthreadsafety_LDADD) $(LIBS) + +rmlo$(EXEEXT): $(rmlo_OBJECTS) $(rmlo_DEPENDENCIES) $(EXTRA_rmlo_DEPENDENCIES) + @rm -f rmlo$(EXEEXT) + $(AM_V_CXXLD)$(CXXLINK) $(rmlo_OBJECTS) $(rmlo_LDADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pqxxthreadsafety.Po@am__quote@ # am--include-marker +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rmlo.Po@am__quote@ # am--include-marker + +$(am__depfiles_remade): + @$(MKDIR_P) $(@D) + @echo '# dummy' >$@-t && $(am__mv) $@-t $@ + +am--depfiles: $(am__depfiles_remade) + +.cxx.o: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ +@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ 
+@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ $< + +.cxx.obj: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ +@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ +@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.cxx.lo: +@am__fastdepCXX_TRUE@ $(AM_V_CXX)depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ +@am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ $(AM_V_CXX)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(AM_V_CXX@am__nodep@)$(LTCXXCOMPILE) -c -o $@ $< + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + +ID: $(am__tagged_files) + $(am__define_uniq_tagged_files); mkid -fID $$unique +tags: tags-am +TAGS: tags + +tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + set x; \ + here=`pwd`; \ + $(am__define_uniq_tagged_files); \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: ctags-am + +CTAGS: ctags +ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files) + $(am__define_uniq_tagged_files); \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" +cscopelist: cscopelist-am + +cscopelist-am: $(am__tagged_files) + list='$(am__tagged_files)'; \ + case "$(srcdir)" in \ + [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \ + *) sdir=$(subdir)/$(srcdir) ;; \ + esac; \ + for i in $$list; do \ + if test -f "$$i"; then \ + echo "$(subdir)/$$i"; \ + else \ + echo "$$sdir/$$i"; \ + fi; \ + done >> $(top_builddir)/cscope.files + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags +distdir: $(BUILT_SOURCES) + $(MAKE) $(AM_MAKEFLAGS) distdir-am + +distdir-am: $(DISTFILES) + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! 
-perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done +check-am: all-am +check: check-am +all-am: Makefile $(PROGRAMS) +installdirs: +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." +clean: clean-am + +clean-am: clean-generic clean-libtool clean-noinstPROGRAMS \ + mostlyclean-am + +distclean: distclean-am + -rm -f ./$(DEPDIR)/pqxxthreadsafety.Po + -rm -f ./$(DEPDIR)/rmlo.Po + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f ./$(DEPDIR)/pqxxthreadsafety.Po + -rm -f ./$(DEPDIR)/rmlo.Po + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: + +.MAKE: install-am install-strip + +.PHONY: CTAGS GTAGS TAGS all all-am am--depfiles check check-am clean \ + clean-generic clean-libtool clean-noinstPROGRAMS cscopelist-am \ + ctags ctags-am distclean distclean-compile distclean-generic \ + distclean-libtool distclean-tags distdir dvi dvi-am html \ + html-am info info-am install install-am install-data \ + install-data-am install-dvi install-dvi-am install-exec \ + install-exec-am install-html install-html-am install-info \ + install-info-am install-man install-pdf install-pdf-am \ + install-ps install-ps-am install-strip installcheck \ + installcheck-am installdirs maintainer-clean \ + maintainer-clean-generic mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags tags-am uninstall uninstall-am + +.PRECIOUS: Makefile + + +# Tell versions [3.59,3.63) of GNU make to not export all variables. 
+# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/ext/libpqxx-7.7.3/tools/deprecations b/ext/libpqxx-7.7.3/tools/deprecations new file mode 100755 index 000000000..f3970838b --- /dev/null +++ b/ext/libpqxx-7.7.3/tools/deprecations @@ -0,0 +1,6 @@ +#! /bin/sh +set -eu + +MARKER='include.*ignore-deprecated-pre' +FILES="src include tools/*.cxx test config-tests" +grep -Ircl $MARKER $FILES | sort diff --git a/ext/libpqxx-7.7.3/tools/extract_version b/ext/libpqxx-7.7.3/tools/extract_version new file mode 100755 index 000000000..920ed9aad --- /dev/null +++ b/ext/libpqxx-7.7.3/tools/extract_version @@ -0,0 +1,73 @@ +#! /bin/sh +set -eu + +ARG="${1:-}" + +# Source directory. In out-of-tree builds, Automake sets this for us. +srcdir=${srcdir:-.} + + +# Print usage information. +usage() { + cat <... + +Acceptable option values are: + -h, --help Print this message, and exit. + -a, --abi Show libpqxx ABI version; leave out revision number. + -f, --full Show full libpqxx version string (the default). + -M, --major Show major libpqxx version. + -m, --minor Show minor libpqxx version (between major and revision). +EOF +} + + +# Print "unknown argument" error. +unknown_arg() { + cat <&2 +Unknown argument: $1. +Try + + $0 --help + +for usage information. +EOF +} + + +case "$ARG" in +''|-f|--full) + # Default: Print full version. + cat $srcdir/VERSION + ;; + +-h|--help) + # Print usage information, and exit. + usage + exit + ;; + +-a|--abi) + # Print just the ABI version (major & minor). + sed -e 's/^\([^.]*\.[^.]*\)\..*/\1/' $srcdir/VERSION + ;; + +-M|--major) + # Print the major version number. + sed -e 's/^\([^.]*\)\..*/\1/' $srcdir/VERSION + ;; + +-m|--minor) + # Print the minor version number. + sed -e 's/^[^.]*\.\([^.]*\)\..*/\1/' $srcdir/VERSION + ;; + +*) + unknown_arg $ARG + exit 1 + ;; +esac diff --git a/ext/libpqxx-7.7.3/tools/format b/ext/libpqxx-7.7.3/tools/format new file mode 100755 index 000000000..2645ccdf6 --- /dev/null +++ b/ext/libpqxx-7.7.3/tools/format @@ -0,0 +1,20 @@ +#! /bin/bash +# +# Reformat source code using clang-format. +# +# This script is not portable: as of Ubuntu 21.04, virtualenv's "activate" +# seems to rely on a non-POSIX variable, $OSTYPE. + +set -C -u -e + +# Reformat C++ files. +find -name \*.cxx -o -name \*.hxx | xargs clang-format -i + + +# Reformat CMake files. +WORKDIR=$(mktemp -d) +virtualenv -q --python=$(which python3) "$WORKDIR/venv" +. "$WORKDIR/venv/bin/activate" +pip install -q six pyaml cmake-format +(find -name CMakeLists.txt | xargs cmake-format -i) || /bin/true +rm -rf "$WORKDIR" diff --git a/ext/libpqxx-7.7.3/tools/lint b/ext/libpqxx-7.7.3/tools/lint new file mode 100755 index 000000000..7beadbb27 --- /dev/null +++ b/ext/libpqxx-7.7.3/tools/lint @@ -0,0 +1,197 @@ +#! /bin/bash +# +# Routine sanity checks for libpqxx source tree. +# +# Optionally, set environment variable "srcdir" to the source directory. It +# defaults to the parent directory of the one where this script is. This trick +# requires bash (or a close equivalent) as the shell. + +set -eu -o pipefail + +SRCDIR="${srcdir:-$(dirname "${BASH_SOURCE[0]}")/..}" +PQXXVERSION="$(cd "$SRCDIR" && "$SRCDIR/tools/extract_version")" + +ARGS="${1:-}" + + +# Check that all source code is ASCII. +# +# I'd love to have rich Unicode, but I can live without it. But we don't want +# any surprises in contributions. 
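As a quick worked example for the extract_version script above, assuming the VERSION file reads 7.7.3 (as the vendored directory name suggests): the default and --full output is 7.7.3, --abi prints 7.7, --major prints 7, and --minor prints 7.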
+check_ascii() { + local exotics=$( + find -name \*.cxx -o -name \*.hxx | + xargs cat | + tr -d '\011-\176' | + wc -c + ) + if [ $exotics != 0 ] + then + echo >&2 "There's a non-ASCII character somewhere." + exit 1 + fi +} + + +# This version must be at the top of the NEWS file. +check_news_version() { + if ! head -n1 $SRCDIR/NEWS | grep -q "^$PQXXVERSION\$" + then + cat <&2 +Version $PQXXVERSION is not at the top of NEWS. +EOF + exit 1 + fi +} + + +# Count number of times header $1 is included from each of given input files. +# Output is lines of :, one line per file, sorted. +count_includes() { + local HEADER_NAME WS PAT + HEADER_NAME="$1" + shift + WS="[[:space:]]*" + PAT="^${WS}#${WS}include${WS}[<\"]$HEADER_NAME[>\"]" + # It's OK for the grep to fail. + (grep -c "$PAT" $* || /bin/true) | sort +} + + +# Check that any includes of $1-pre.hxx are matched by $1-post.hxx ones. +match_pre_post_headers() { + local NAME TEMPDIR PRE POST HEADERS + NAME="$1" + TEMPDIR="$(mktemp -d)" + if test -z "$TEMPDIR" + then + echo >&2 "Could not create temporary directory." + exit 1 + fi + PRE="$TEMPDIR/pre" + POST="$TEMPDIR/post" + HEADERS=$(find include/pqxx/* -type f | grep -v '\.swp$') + count_includes \ + $SRCDIR/NAME-pre.hxx $HEADERS >"$PRE" + count_includes \ + $SRCDIR/NAME-post.hxx $HEADERS >"$POST" + DIFF="$(diff "$PRE" "$POST")" || /bin/true + rm -r -- "$TEMPDIR" + if test -n "$DIFF" + then + cat <&2 +Mismatched pre/post header pairs: + +$DIFF +EOF + exit 1 + fi +} + + +# Any file that includes header-pre.hxx must also include header-post.hxx, and +# vice versa. Similar for ignore-deprecated-{pre|post}.hxx. +check_compiler_internal_headers() { + match_pre_post_headers "pqxx/internal/header" + match_pre_post_headers "pqxx/internal/ignore-deprecated" +} + + +cpplint() { + local cxxflags dialect includes + + if which clang-tidy >/dev/null + then + if [ -e compile_flags ] + then + # Pick out relevant flags, but leave out the rest. + # If we're not compiling with clang, compile_flags may contain + # options that clang-tidy doesn't recognise. + dialect="$(grep -o -- '-std=[^[:space:]]*' compile_flags || true)" + includes="$( + grep -o -- '-I[[:space:]]*[^[:space:]]*' compile_flags || + true)" + else + dialect="" + includes="" + fi + + cxxflags="$dialect $includes" + +# TODO: Please, is there any way we can parallelise this? +# TODO: I'd like cppcoreguidelines-*, but it's a tsunami of false positives. +# TODO: Some useful checks in abseil-*, but it recommends "use our library." +# TODO: Check test/, but tolerate some of the dubious stuff tests do. + clang-tidy \ + $(find $SRCDIR/src $SRCDIR/tools -name \*.cxx) \ + --checks=boost-*, \ + -- \ + -I$SRCDIR/include -Iinclude $cxxflags + fi + + # Run Facebook's "infer" static analyser, if available. + # Instructions here: https://fbinfer.com/docs/getting-started/ + if which infer >/dev/null + then + # This will work in an out-of-tree build, but either way it does + # require a successful "configure", or a cmake with the "make" + # generator. + infer capture -- make -j$(nproc) + infer run + fi +} + + +pylint() { + local PYFILES="$SRCDIR/tools/*.py $SRCDIR/tools/splitconfig" + echo "Skipping pocketlint; it's not up to date with Python3." 
+ # if which pocketlint >/dev/null + # then + # pocketlint $PYFILES + # fi + + if which pyflakes3 >/dev/null + then + pyflakes3 $PYFILES + fi +} + + +main() { + local full="no" + for arg in $ARGS + do + case $arg in + -h|--help) + cat <&2 "Unknown argument: '$arg'" + exit 1 + ;; + esac + done + + check_ascii + pylint + check_news_version + check_compiler_internal_headers + if [ $full == "yes" ] + then + cpplint + fi +} + + +main diff --git a/ext/libpqxx-7.7.3/tools/m4esc.py b/ext/libpqxx-7.7.3/tools/m4esc.py new file mode 100755 index 000000000..355169db2 --- /dev/null +++ b/ext/libpqxx-7.7.3/tools/m4esc.py @@ -0,0 +1,70 @@ +#! /usr/bin/env python3 + +"""M4-quote text, for use as a literal in configure.ac. + +Produces M4 "code" which evaluates to the input text. + +It's not easy to read plain text from an input file in M4, without having it +expanded as M4. Sometimes all we want is literal text! +""" +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +from argparse import ArgumentParser +from sys import ( + stdin, + stdout, + ) + + +def parse_args(): + parser = ArgumentParser(description=__doc__) + parser.add_argument( + '--open', '-a', default='[[', help="Current open-quote symbol.") + parser.add_argument( + '--close', '-b', default=']]', help="Current close-quote symbol.") + parser.add_argument( + '--input', '-i', default='-', help="Input file, or '-' for stdin.") + parser.add_argument( + '--output', '-o', default='-', help="Output file, or '-' for stdout.") + return parser.parse_args() + + +def open_input(in_file): + if in_file == '-': + return stdin + else: + return open(in_file) + + +def open_output(out_file): + if out_file == '-': + return stdout + else: + return open(out_file, 'w') + + +def escape(line): + return ( + line + .replace('[', '@<:@') + .replace(']', '@:>@') + .replace('#', '@%:@') + .replace('$', '@S|@') + ) + + +def main(args): + with open_input(args.input) as istr, open_output(args.output) as ostr: + ostr.write(args.open) + for line in istr: + ostr.write(escape(line)) + ostr.write('\n') + ostr.write(args.close) + + +if __name__ == '__main__': + main(parse_args()) diff --git a/ext/libpqxx-7.7.3/tools/pqxxthreadsafety.cxx b/ext/libpqxx-7.7.3/tools/pqxxthreadsafety.cxx new file mode 100644 index 000000000..afa8fb01f --- /dev/null +++ b/ext/libpqxx-7.7.3/tools/pqxxthreadsafety.cxx @@ -0,0 +1,10 @@ +// Print thread-safety information for present libpqxx build. +#include + +#include "pqxx/util" + + +int main() +{ + std::cout << pqxx::describe_thread_safety().description << std::endl; +} diff --git a/ext/libpqxx-7.7.3/tools/rmlo.cxx b/ext/libpqxx-7.7.3/tools/rmlo.cxx new file mode 100644 index 000000000..5db9605f1 --- /dev/null +++ b/ext/libpqxx-7.7.3/tools/rmlo.cxx @@ -0,0 +1,39 @@ +// Remove large objects given on the command line from the default database. 
+#include + +#include "pqxx/pqxx" + + +int main(int, char *argv[]) +{ + pqxx::connection conn; + bool failures = false; + + try + { + for (int i{1}; argv[i]; ++i) + { + auto o{pqxx::from_string(argv[i])}; + try + { + pqxx::perform([o, &conn] { + pqxx::work tx{conn}; + pqxx::blob::remove(tx, o); + tx.commit(); + }); + } + catch (std::exception const &e) + { + std::cerr << e.what() << std::endl; + failures = true; + } + } + } + catch (std::exception const &e) + { + std::cerr << e.what() << std::endl; + return 2; + } + + return failures; +} diff --git a/ext/libpqxx-7.7.3/tools/splitconfig b/ext/libpqxx-7.7.3/tools/splitconfig new file mode 100755 index 000000000..2f4be957f --- /dev/null +++ b/ext/libpqxx-7.7.3/tools/splitconfig @@ -0,0 +1,244 @@ +#! /usr/bin/env python3 + +"""Extract configuration items into various configuration headers. + +This uses the configitems file, a database consisting of text lines with the +following single-tab-separated fields: + - Name of the configuration item, e.g. PQXX_HAVE_PTRDIFF_T. + - Publication marker: public or internal. + - A single environmental factor determining the item, e.g. libpq or compiler. +""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +from argparse import ArgumentParser +import codecs +from errno import ENOENT +import os.path +from os import getcwd +import re +from sys import ( + getdefaultencoding, + getfilesystemencoding, + stdout, + ) + +__metaclass__ = type + + +def guess_fs_encoding(): + """Try to establish the filesystem encoding. + + It's a sad thing: some guesswork is involved. The encoding often seems to + be conservatively, and incorrectly, set to ascii. + """ + candidates = [ + getfilesystemencoding(), + getdefaultencoding(), + 'utf-8', + ] + for encoding in candidates: + lower = encoding.lower() + if lower != 'ascii' and lower != 'ansi_x3.4-1968': + return encoding + raise AssertionError("unreachable code reached.") + + +def guess_output_encoding(): + """Return the encoding of standard output.""" + # Apparently builds in Docker containers may have None as an encoding. + # Fall back to ASCII. If this ever happens in a non-ASCII path, well, + # there may be a more difficult decision to be made. We'll burn that + # bridge when we get to it, as they almost say. + return stdout.encoding or 'ascii' + + +def decode_path(path): + """Decode a path element from bytes to unicode string.""" + return path.decode(guess_fs_encoding()) + + +def encode_path(path): + """Encode a path element from unicode string to bytes.""" + # Nasty detail: unicode strings are stored as UTF-16. Which can contain + # surrogate pairs. And those break in encoding, unless you use this + # special error handler. + return path.encode(guess_fs_encoding(), 'surrogateescape') + + +def read_text_file(path, encoding='utf-8'): + """Read text file, return as string, or `None` if file is not there.""" + assert isinstance(path, type('')) + try: + with codecs.open(encode_path(path), encoding=encoding) as stream: + return stream.read() + except IOError as error: + if error.errno == ENOENT: + return None + else: + raise + + +def read_lines(path, encoding='utf-8'): + """Read text file, return as list of lines.""" + assert isinstance(path, type('')) + with codecs.open(encode_path(path), encoding=encoding) as stream: + return list(stream) + + +def read_configitems(filename): + """Read the configuration-items database. + + :param filename: Path to the configitems file. + :return: Sequence of text lines from configitems file. 
+ """ + return [line.split() for line in read_lines(filename)] + + +def map_configitems(items): + """Map each config item to publication/factor. + + :param items: Sequence of config items: (name, publication, factor). + :return: Dict mapping each item name to a tuple (publication, factor). + """ + return { + item: (publication, factor) + for item, publication, factor in items + } + + +def read_header(source_tree, filename): + """Read the original config.h generated by autoconf. + + :param source_tree: Path to libpqxx source tree. + :param filename: Path to the config.h file. + :return: Sequence of text lines from config.h. + """ + assert isinstance(source_tree, type('')) + assert isinstance(filename, type('')) + return read_lines(os.path.join(source_tree, filename)) + + +def extract_macro_name(config_line): + """Extract a cpp macro name from a configuration line. + + :param config_line: Text line from config.h which may define a macro. + :return: Name of macro defined in `config_line` if it is a `#define` + statement, or None. + """ + config_line = config_line.strip() + match = re.match('\s*#\s*define\s+([^\s]+)', config_line) + if match is None: + return None + else: + return match.group(1) + + +def extract_section(header_lines, items, publication, factor): + """Extract config items for given publication/factor from header lines. + + :param header_lines: Sequence of header lines from config.h. + :param items: Dict mapping macro names to (publication, factor). + :param publication: Extract only macros for this publication tag. + :param factor: Extract only macros for this environmental factor. + :return: Sequence of `#define` lines from `header_lines` insofar they + fall within the requested section. + """ + return sorted( + line.strip() + for line in header_lines + if items.get(extract_macro_name(line)) == (publication, factor) + ) + + +def compose_header(lines, publication, factor): + """Generate header text containing given lines.""" + intro = ( + "/* Automatically generated from config.h: %s/%s config. */" + % (publication, factor) + ) + return '\n'.join([intro, ''] + lines + ['']) + + +def generate_config(source_tree, header_lines, items, publication, factor): + """Generate config file for a given section, if appropriate. + + Writes nothing if the configuration file ends up identical to one that's + already there. + + :param source_tree: Location of the libpqxx source tree. + :param header_lines: Sequence of header lines from config.h. + :param items: Dict mapping macro names to (publication, factor). + :param publication: Extract only macros for this publication tag. + :param factor: Extract only macros for this environmental factor. + """ + assert isinstance(source_tree, type('')) + config_file = os.path.join( + source_tree, 'include', 'pqxx', + 'config-%s-%s.h' % (publication, factor)) + unicode_path = config_file.encode(guess_output_encoding(), 'replace') + section = extract_section(header_lines, items, publication, factor) + contents = compose_header(section, publication, factor) + if read_text_file(config_file) == contents: + print("Generating %s: no changes--skipping." % unicode_path) + return + + print("Generating %s: %d item(s)." 
% (unicode_path, len(section))) + path = encode_path(config_file) + with codecs.open(path, 'wb', encoding='ascii') as header: + header.write(contents) + + +def parse_args(): + """Parse command-line arguments.""" + default_source_tree = os.path.dirname( + os.path.dirname(os.path.normpath(os.path.abspath(__file__)))) + parser = ArgumentParser(description=__doc__) + parser.add_argument( + 'sourcetree', metavar='PATH', default=default_source_tree, + help="Location of libpqxx source tree. Defaults to '%(default)s'.") + return parser.parse_args() + + +def check_args(args): + """Validate command-line arguments.""" + if not os.path.isdir(args.sourcetree): + raise Exception("Not a directory: '%s'." % args.sourcetree) + + +def get_current_dir(): + cwd = getcwd() + if isinstance(cwd, bytes): + return decode_path(cwd) + else: + return cwd + + +def main(): + """Main program entry point.""" + args = parse_args() + check_args(args) + # The configitems file is under revision control; it's in sourcetree. + items = read_configitems(os.path.join(args.sourcetree, 'configitems')) + publications = sorted(set(item[1] for item in items)) + factors = sorted(set(item[2] for item in items)) + # The config.h header is generated; it's in the build tree, which should + # be where we are. + directory = get_current_dir() + original_header = read_header( + directory, + os.path.join('include', 'pqxx', 'config.h')) + items_map = map_configitems(items) + + for publication in publications: + for factor in factors: + generate_config( + directory, original_header, items_map, publication, factor) + + +if __name__ == '__main__': + main() diff --git a/ext/libpqxx-7.7.3/tools/template2mak.py b/ext/libpqxx-7.7.3/tools/template2mak.py new file mode 100755 index 000000000..9a5286d95 --- /dev/null +++ b/ext/libpqxx-7.7.3/tools/template2mak.py @@ -0,0 +1,194 @@ +#! /usr/bin/env python3 +"""Minimal macro processor. Used for generating VC++ makefiles. + +The available template commands are: + + Expand a template section for each file in a list of file patterns:: + ###MAKTEMPLATE:FOREACH my/path*/*.cxx,other*.cxx + ... + ###MAKTEMPLATE:ENDFOREACH + + In the template section, you can use `###BASENAME###` to get the base name + of the file being processed (e.g. "base" for "../base.cxx"), and you can + use `###FILENAME###` to get the full filename. + + +Copyright (c) 2000-2022, Bart Samwel and Jeroen T. Vermeulen. +""" + +from __future__ import ( + absolute_import, + print_function, + unicode_literals, + ) + +from argparse import ( + ArgumentError, + ArgumentParser, + RawDescriptionHelpFormatter, + ) +from contextlib import contextmanager +from glob import glob +import os +from sys import ( + argv, + stdin, + stderr, + stdout, + ) +import sys +from textwrap import dedent + + +def expand_foreach_file(path, block, outfile): + """Expand a "foreach" block for a single file path. + + Write the results to outfile. + """ + basepath, _ = os.path.splitext(os.path.basename(path)) + for line in block: + line = line.replace("###FILENAME###", path) + line = line.replace("###BASENAME###", basepath) + outfile.write(line) + + +def match_globs(globs): + """List all files matching any item in globs. + + Eliminates duplicates. + """ + return sorted({ + path + for pattern in globs + for path in glob(pattern) + }) + + +def expand_foreach(globs, block, outfile): + """Expand a foreach block for each file matching one of globs. + + Write the results to outfile. + """ + # We'll be iterating over block a variable number of times. 
Turn it + # from a generic iterable into an immutable array. + block = tuple(block) + for path in match_globs(globs): + expand_foreach_file(path, block, outfile) + + +# Header to be prefixed to the generated file. +OUTPUT_HEADER = dedent("""\ + # AUTOMATICALLY GENERATED FILE -- DO NOT EDIT. + # + # This file is generated automatically by libpqxx's {script} script, and + # will be rewritten from time to time. + # + # If you modify this file, chances are your modifications will be lost. + # + # The {script} script should be available in the tools directory of the + # libpqxx source archive. + """) + + +foreach_marker = r"###MAKTEMPLATE:FOREACH " +end_foreach_marker = r"###MAKTEMPLATE:ENDFOREACH" + + +def parse_foreach(line): + """Parse FOREACH directive, if line contains one. + + :param line: One line of template input. + :return: A list of FOREACH globs, or None if this was not a FOREACH line. + """ + line = line.strip() + if line.startswith(foreach_marker): + return line[len(foreach_marker):].split(',') + else: + return None + + +def read_foreach_block(infile): + """Read a FOREACH block from infile (not including the FOREACH directive). + + Assumes that the FOREACH directive was in the preceding line. Consumes + the line with the ENDFOREACH directive, but does not yield it. + + :return: Iterable of lines. + """ + for line in infile: + if line.strip().startswith(end_foreach_marker): + return + yield line + + +def expand_template(infile, outfile): + """Expand the template in infile, and write the results to outfile.""" + for line in infile: + globs = parse_foreach(line) + if globs is None: + # Not a FOREACH line. Copy to output. + outfile.write(line) + else: + block = read_foreach_block(infile) + expand_foreach(globs, block, outfile) + + +@contextmanager +def open_stream(path=None, default=None, mode='r'): + """Open file at given path, or yield default. Close as appropriate. + + The default should be a stream, not a path; closing the context will not + close it. + """ + if path is None: + yield default + else: + with open(path, mode) as stream: + yield stream + + +def parse_args(): + """Parse command-line arguments. + + :return: Tuple of: input path (or None for stdin), output path (or None + for stdout). + """ + parser = ArgumentParser( + description=__doc__, formatter_class=RawDescriptionHelpFormatter) + + parser.add_argument( + 'template', nargs='?', + help="Input template. Defaults to standard input.") + parser.add_argument( + 'output', nargs='?', + help="Output file. 
Defaults to standard output.")
+
+    args = parser.parse_args()
+    return args.template, args.output
+
+
+def write_header(outstream, template_path=None):
+    """Write header to outstream."""
+    hr = ('# ' + '#' * 78) + "\n"
+    script = os.path.basename(argv[0])
+
+    outstream.write(hr)
+    outstream.write(OUTPUT_HEADER.format(script=script))
+    if template_path is not None:
+        outstream.write("#\n")
+        outstream.write("# Generated from template '%s'.\n" % template_path)
+    outstream.write(hr)
+
+
+if __name__ == '__main__':
+    try:
+        template_path, output_path = parse_args()
+    except ArgumentError as error:
+        stderr.write('%s\n' % error)
+        sys.exit(2)
+
+    input_stream = open_stream(template_path, stdin, 'r')
+    output_stream = open_stream(output_path, stdout, 'w')
+    with input_stream as instream, output_stream as outstream:
+        write_header(outstream, template_path)
+        expand_template(instream, outstream)
diff --git a/ext/libpqxx-7.7.3/tools/test_all.py b/ext/libpqxx-7.7.3/tools/test_all.py
new file mode 100755
index 000000000..29f33050c
--- /dev/null
+++ b/ext/libpqxx-7.7.3/tools/test_all.py
@@ -0,0 +1,630 @@
+#! /usr/bin/env python3
+"""Brute-force test script: test libpqxx against many compilers etc.
+
+This script makes no changes in the source tree; all builds happen in
+temporary directories.
+
+To make this possible, you may need to run "make distclean" in the
+source tree. The configure script will refuse to configure otherwise.
+"""
+
+# Without this, pocketlint does not yet understand the print function.
+from __future__ import print_function
+
+from abc import (
+    ABCMeta,
+    abstractmethod,
+    )
+from argparse import ArgumentParser
+from contextlib import contextmanager
+from datetime import datetime
+from functools import partial
+import json
+from multiprocessing import (
+    JoinableQueue,
+    Process,
+    Queue,
+    )
+from multiprocessing.pool import (
+    Pool,
+    )
+from os import (
+    cpu_count,
+    getcwd,
+    )
+import os.path
+from queue import Empty
+from shutil import rmtree
+from subprocess import (
+    CalledProcessError,
+    check_call,
+    check_output,
+    DEVNULL,
+    )
+from sys import (
+    stderr,
+    stdout,
+    )
+from tempfile import mkdtemp
+from textwrap import dedent
+
+
+CPUS = cpu_count()
+
+GCC_VERSIONS = list(range(8, 14))
+GCC = ['g++-%d' % ver for ver in GCC_VERSIONS]
+CLANG_VERSIONS = list(range(7, 15))
+CLANG = ['clang++-6.0'] + ['clang++-%d' % ver for ver in CLANG_VERSIONS]
+CXX = GCC + CLANG
+
+STDLIB = (
+    '',
+    '-stdlib=libc++',
+    )
+
+OPT = ('-O0', '-O3')
+
+LINK = {
+    'static': ['--enable-static', '--disable-shared'],
+    'dynamic': ['--disable-static', '--enable-shared'],
+}
+
+DEBUG = {
+    'plain': [],
+    'audit': ['--enable-audit'],
+    'maintainer': ['--enable-maintainer-mode'],
+    'full': ['--enable-audit', '--enable-maintainer-mode'],
+}
+
+
+# CMake "generators." Maps a value for cmake's -G option to a command line to
+# run.
+#
+# I prefer Ninja if available, because it's fast. But hey, the default will
+# work.
+#
+# Maps the name of the generator (as used with cmake's -G option) to the
+# actual command line needed to do the build.
+CMAKE_GENERATORS = {
+    'Ninja': ['ninja'],
+    'Unix Makefiles': ['make', '-j%d' % CPUS],
+}
+
+
+class Fail(Exception):
+    """A known, well-handled exception. Doesn't need a traceback."""
+
+
+class Skip(Exception):
+    """We're not doing this build.
It's not an error though."""
+
+
+def run(cmd, output, cwd=None):
+    """Run a command, write output to file-like object."""
+    command_line = ' '.join(cmd)
+    output.write("%s\n\n" % command_line)
+    check_call(cmd, stdout=output, stderr=output, cwd=cwd)
+
+
+def report(output, message):
+    """Report a message to output, and standard output."""
+    print(message, flush=True)
+    output.write('\n\n')
+    output.write(message)
+    output.write('\n')
+
+
+def file_contains(path, text):
+    """Does the file at path contain text?"""
+    with open(path) as stream:
+        for line in stream:
+            if text in line:
+                return True
+    return False
+
+
+@contextmanager
+def tmp_dir():
+    """Create a temporary directory, and clean it up again."""
+    tmp = mkdtemp()
+    try:
+        yield tmp
+    finally:
+        rmtree(tmp)
+
+
+def write_check_code(work_dir):
+    """Write a simple C++ program so we can test whether we can compile it.
+
+    Returns the file's full path.
+    """
+    path = os.path.join(work_dir, "check.cxx")
+    with open(path, 'w') as source:
+        source.write(dedent("""\
+            #include <iostream>
+            int main()
+            {
+                std::cout << "Hello world." << std::endl;
+            }
+            """))
+
+    return path
+
+
+def check_compiler(work_dir, cxx, stdlib, check, verbose=False):
+    """Is the given compiler combo available?"""
+    err_file = os.path.join(work_dir, 'stderr.log')
+    if verbose:
+        err_output = open(err_file, 'w')
+    else:
+        err_output = DEVNULL
+    try:
+        command = [cxx, check]
+        if stdlib != '':
+            command.append(stdlib)
+        check_call(command, cwd=work_dir, stderr=err_output)
+    except (OSError, CalledProcessError):
+        if verbose:
+            with open(err_file) as errors:
+                stdout.write(errors.read())
+        print("Can't build with '%s %s'. Skipping." % (cxx, stdlib))
+        return False
+    else:
+        return True
+
+
+# TODO: Use Pool.
+def check_compilers(compilers, stdlibs, verbose=False):
+    """Check which compiler configurations are viable."""
+    with tmp_dir() as work_dir:
+        check = write_check_code(work_dir)
+        return [
+            (cxx, stdlib)
+            for stdlib in stdlibs
+            for cxx in compilers
+            if check_compiler(
+                work_dir, cxx, stdlib, check=check, verbose=verbose)
+        ]
+
+
+def find_cmake_command():
+    """Figure out a CMake generator we can use, or None."""
+    try:
+        caps = check_output(['cmake', '-E', 'capabilities'])
+    except FileNotFoundError:
+        return None
+
+    names = {generator['name'] for generator in json.loads(caps)['generators']}
+    for gen in CMAKE_GENERATORS.keys():
+        if gen in names:
+            return gen
+    return None
+
+
+class Config:
+    """Configuration for a build.
+
+    These classes must be suitable for pickling, so we can send their objects
+    to worker processes.
+    """
+    __metaclass__ = ABCMeta
+
+    @abstractmethod
+    def name(self):
+        """Return an identifier for this build configuration."""
+
+    def make_log_name(self):
+        """Compose log file name for this build."""
+        return "build-%s.out" % self.name()
+
+
+class Build:
+    """A pending or ongoing build, in its own directory.
+
+    Each step returns True for success, or False for failure.
+
+    These classes must be suitable for pickling, so we can send their objects
+    to worker processes.
+    """
+    def __init__(self, logs_dir, config=None):
+        self.config = config
+        self.log = os.path.join(logs_dir, config.make_log_name())
+        # Start a fresh log file.
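+        # (Note: opening the log with mode 'w' truncates any log left over
+        # from an earlier run that used the same configuration name.)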
+ with open(self.log, 'w') as log: + log.write("Starting %s.\n" % datetime.utcnow()) + self.work_dir = mkdtemp() + + def clean_up(self): + """Delete the build tree.""" + rmtree(self.work_dir) + + @abstractmethod + def configure(self, log): + """Prepare for a build.""" + + @abstractmethod + def build(self, log): + """Build the code, including the tests. Don't run tests though.""" + + def test(self, log): + """Run tests.""" + run( + [os.path.join(os.path.curdir, 'test', 'runner')], log, + cwd=self.work_dir) + + def logging(self, function): + """Call function, pass open write handle for `self.log`.""" +# TODO: Should probably be a decorator. + with open(self.log, 'a') as log: + try: + function(log) + except Exception as error: + log.write("%s\n" % error) + raise + + def do_configure(self): + """Call `configure`, writing output to `self.log`.""" + self.logging(self.configure) + + def do_build(self): + """Call `build`, writing output to `self.log`.""" + self.logging(self.build) + + def do_test(self): + """Call `test`, writing output to `self.log`.""" + self.logging(self.test) + + +class AutotoolsConfig(Config): + """A combination of build options for the "configure" script.""" + def __init__(self, cxx, opt, stdlib, link, link_opts, debug, debug_opts): + self.cxx = cxx + self.opt = opt + self.stdlib = stdlib + self.link = link + self.link_opts = link_opts + self.debug = debug + self.debug_opts = debug_opts + + def name(self): + return '_'.join([ + self.cxx, self.opt, self.stdlib, self.link, self.debug]) + + +class AutotoolsBuild(Build): + """Build using the "configure" script.""" + __metaclass__ = ABCMeta + + def configure(self, log): + configure = [ + os.path.join(getcwd(), "configure"), + "CXX=%s" % self.config.cxx, + ] + + if self.config.stdlib == '': + configure += [ + "CXXFLAGS=%s" % self.config.opt, + ] + else: + configure += [ + "CXXFLAGS=%s %s" % (self.config.opt, self.config.stdlib), + "LDFLAGS=%s" % self.config.stdlib, + ] + + configure += [ + "--disable-documentation", + ] + self.config.link_opts + self.config.debug_opts + + run(configure, log, cwd=self.work_dir) + + def build(self, log): + run(['make', '-j%d' % CPUS], log, cwd=self.work_dir) + # Passing "TESTS=" like this will suppress the actual running of + # the tests. We run them in the "test" stage. + run(['make', '-j%d' % CPUS, 'check', 'TESTS='], log, cwd=self.work_dir) + + +class CMakeConfig(Config): + """Configuration for a CMake build.""" + def __init__(self, generator): + self.generator = generator + self.builder = CMAKE_GENERATORS[generator] + + def name(self): + return "cmake" + + +class CMakeBuild(Build): + """Build using CMake. + + Ignores the config for now. + """ + __metaclass__ = ABCMeta + + def configure(self, log): + source_dir = getcwd() + generator = self.config.generator + run( + ['cmake', '-G', generator, source_dir], output=log, + cwd=self.work_dir) + + def build(self, log): + run(self.config.builder, log, cwd=self.work_dir) + + +def parse_args(): + """Parse command-line arguments.""" + parser = ArgumentParser(description=__doc__) + parser.add_argument('--verbose', '-v', action='store_true') + parser.add_argument( + '--compilers', '-c', default=','.join(CXX), + help="Compilers, separated by commas. Default is %(default)s.") + parser.add_argument( + '--optimize', '-O', default=','.join(OPT), + help=( + "Alternative optimisation options, separated by commas. 
" + "Default is %(default)s.")) + parser.add_argument( + '--stdlibs', '-L', default=','.join(STDLIB), + help=( + "Comma-separated options for choosing standard library. " + "Defaults to %(default)s.")) + parser.add_argument( + '--logs', '-l', default='.', metavar='DIRECTORY', + help="Write build logs to DIRECTORY.") + parser.add_argument( + '--jobs', '-j', default=CPUS, metavar='CPUS', + help=( + "When running 'make', run up to CPUS concurrent processes. " + "Defaults to %(default)s.")) + parser.add_argument( + '--minimal', '-m', action='store_true', + help="Make it as short a run as possible. For testing this script.") + return parser.parse_args() + + +def soft_get(queue, block=True): + """Get an item off `queue`, or `None` if the queue is empty.""" + try: + return queue.get(block) + except Empty: + return None + + +def read_queue(queue, block=True): + """Read entries off `queue`, terminating when it gets a `None`. + + Also terminates when the queue is empty. + """ + entry = soft_get(queue, block) + while entry is not None: + yield entry + entry = soft_get(queue, block) + + +def service_builds(in_queue, fail_queue, out_queue): + """Worker process for "build" stage: process one job at a time. + + Sends successful builds to `out_queue`, and failed builds to `fail_queue`. + + Terminates when it receives a `None`, at which point it will send a `None` + into `out_queue` in turn. + """ + for build in read_queue(in_queue): + try: + build.do_build() + except Exception as error: + fail_queue.put((build, "%s" % error)) + else: + out_queue.put(build) + in_queue.task_done() + + # Mark the end of the queue. + out_queue.put(None) + + +def service_tests(in_queue, fail_queue, out_queue): + """Worker process for "test" stage: test one build at a time. + + Sends successful builds to `out_queue`, and failed builds to `fail_queue`. + + Terminates when it receives a final `None`. Does not send out a final + `None` of its own. + """ + for build in read_queue(in_queue): + try: + build.do_test() + except Exception as error: + fail_queue.put((build, "%s" % error)) + else: + out_queue.put(build) + in_queue.task_done() + + +def report_failures(queue, message): + """Report failures from a failure queue. Return total number.""" + failures = 0 + for build, error in read_queue(queue, block=False): + print("%s: %s - %s" % (message, build.config.name(), error)) + failures += 1 + return failures + + +def count_entries(queue): + """Get and discard all entries from `queue`, return the total count.""" + total = 0 + for _ in read_queue(queue, block=False): + total += 1 + return total + + +def gather_builds(args): + """Produce the list of builds we want to perform.""" + if args.verbose: + print("\nChecking available compilers.") + + compiler_candidates = args.compilers.split(',') + compilers = check_compilers( + compiler_candidates, args.stdlibs.split(','), + verbose=args.verbose) + if list(compilers) == []: + raise Fail( + "Did not find any viable compilers. Tried: %s." 
+            % ', '.join(compiler_candidates))
+
+    opt_levels = args.optimize.split(',')
+    link_types = LINK.items()
+    debug_mixes = DEBUG.items()
+
+    if args.minimal:
+        compilers = compilers[:1]
+        opt_levels = opt_levels[:1]
+        link_types = list(link_types)[:1]
+        debug_mixes = list(debug_mixes)[:1]
+
+    builds = [
+        AutotoolsBuild(
+            args.logs,
+            AutotoolsConfig(
+                opt=opt, link=link, link_opts=link_opts, debug=debug,
+                debug_opts=debug_opts, cxx=cxx, stdlib=stdlib))
+        for opt in sorted(opt_levels)
+        for link, link_opts in sorted(link_types)
+        for debug, debug_opts in sorted(debug_mixes)
+        for cxx, stdlib in compilers
+    ]
+
+    cmake = find_cmake_command()
+    if cmake is not None:
+        builds.append(CMakeBuild(args.logs, CMakeConfig(cmake)))
+    return builds
+
+
+def enqueue(queue, build, *args):
+    """Put `build` on `queue`.
+
+    Ignores additional arguments, so that it can be used as a callback for
+    `Pool`.
+
+    We do this instead of a lambda in order to get the closure right. We want
+    the build for the current iteration, not the last one that was executed
+    before the lambda runs.
+    """
+    queue.put(build)
+
+
+def enqueue_error(queue, build, error):
+    """Put the pair of `build` and `error` on `queue`."""
+    queue.put((build, error))
+
+
+def main(args):
+    """Do it all."""
+    if not os.path.isdir(args.logs):
+        raise Fail("Logs location '%s' is not a directory." % args.logs)
+
+    builds = gather_builds(args)
+    if args.verbose:
+        print("Lined up %d builds." % len(builds))
+
+    # The "configure" step is single-threaded. We can run many at the same
+    # time, even when we're also running a "build" step at the same time.
+    # This means we may run a lot more processes than we have CPUs, but there's
+    # no law against that. There's also I/O time to be covered.
+    configure_pool = Pool()
+
+    # Builds which have failed the "configure" stage, with their errors. This
+    # queue must never stall, so that we can let results pile up here while the
+    # work continues.
+    configure_fails = Queue(len(builds))
+
+    # Waiting list for the "build" stage. It contains Build objects,
+    # terminated by a final None to signify that there are no more builds to be
+    # done.
+    build_queue = JoinableQueue(10)
+
+    # Builds that have failed the "build" stage.
+    build_fails = Queue(len(builds))
+
+    # Waiting list for the "test" stage. It contains Build objects, terminated
+    # by a final None.
+    test_queue = JoinableQueue(10)
+
+    # The "build" step tries to utilise all CPUs, and it may use a fair bit of
+    # memory. Run only one of these at a time, in a single worker process.
+    build_worker = Process(
+        target=service_builds, args=(build_queue, build_fails, test_queue))
+    build_worker.start()
+
+    # Builds that have failed the "test" stage.
+    test_fails = Queue(len(builds))
+
+    # Completed builds. This must never stall.
+    done_queue = JoinableQueue(len(builds))
+
+    # The "test" step can not run concurrently (yet). So, run tests serially
+    # in a single worker process. It takes its jobs directly from the "build"
+    # worker.
+    test_worker = Process(
+        target=service_tests, args=(test_queue, test_fails, done_queue))
+    test_worker.start()
+
+    # Feed all builds into the "configure" pool. Each build which passes this
+    # stage goes into the "build" queue.
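+    # From there the single "build" worker compiles each surviving tree and
+    # hands it to the "test" worker; failures at any stage collect on the
+    # corresponding failure queue and are reported after the workers join.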
+ for build in builds: + configure_pool.apply_async( + build.do_configure, callback=partial(enqueue, build_queue, build), + error_callback=partial(enqueue_error, configure_fails, build)) + if args.verbose: + print("All jobs are underway.") + configure_pool.close() + configure_pool.join() + +# TODO: Async reporting for faster feedback. + configure_fail_count = report_failures(configure_fails, "CONFIGURE FAIL") + if args.verbose: + print("Configure stage done.") + + # Mark the end of the build queue for the build worker. + build_queue.put(None) + + build_worker.join() +# TODO: Async reporting for faster feedback. + build_fail_count = report_failures(build_fails, "BUILD FAIL") + if args.verbose: + print("Build step done.") + + # Mark the end of the test queue for the test worker. + test_queue.put(None) + + test_worker.join() +# TODO: Async reporting for faster feedback. +# TODO: Collate failures into meaningful output, e.g. "shared library fails." + test_fail_count = report_failures(test_fails, "TEST FAIL") + if args.verbose: + print("Test step done.") + + # All done. Clean up. + for build in builds: + build.clean_up() + + ok_count = count_entries(done_queue) + if ok_count == len(builds): + print("All tests OK.") + else: + print( + "Failures during configure: %d - build: %d - test: %d. OK: %d." + % ( + configure_fail_count, + build_fail_count, + test_fail_count, + ok_count, + )) + + +if __name__ == '__main__': + try: + exit(main(parse_args())) + except Fail as failure: + stderr.write("%s\n" % failure) + exit(2) diff --git a/ext/libpqxx-7.7.3/tools/todo b/ext/libpqxx-7.7.3/tools/todo new file mode 100755 index 000000000..87429f4b2 --- /dev/null +++ b/ext/libpqxx-7.7.3/tools/todo @@ -0,0 +1,31 @@ +#! /bin/bash +# +# List "TODO" and "XXX" items in the given files, or throughout the source +# code. + +set -e -u -o pipefail + +# TODO: Make location-independent? +find_source() { + echo configure.ac + find . -name \*.cxx -o -name \*.hxx | sed -e 's|^\./||' | sort + for f in $(ls tools) + do + echo tools/$f + done +} + + +FILES=${*:-$(find_source)} + + +# Search for "$1:" in files $2. +# (This function adds the colon. That way, the search statement itself won't +# show up in the search.) +search_for() { + grep $1: $2 +} + + +search_for XXX "$FILES" || true +search_for TODO "$FILES" || true diff --git a/ext/libpqxx-7.7.3/tools/update-copyright b/ext/libpqxx-7.7.3/tools/update-copyright new file mode 100755 index 000000000..9d1eb5a97 --- /dev/null +++ b/ext/libpqxx-7.7.3/tools/update-copyright @@ -0,0 +1,29 @@ +#! /bin/bash +# +# Update the libpqxx copyright strings in the current directory. +# +# Usage: update-copyright [year] +# +# Where "year" is the new copyright year. Defaults to the current year. +# +# Assumes GNU grep and GNU sed. +set -eu -o pipefail + +# The regexes are a bit awkward because they must work in both grep and sed. +# +# F'rinstance, PREFIX can't include the dash because our replacement string in +# sed would have a backreference (e.g. "\3") immediately followed by a year +# (e.g. 2022), and there's no clear boundary between the backreference number +# and the year: "\32022". +PREFIX='Copyright (c),* 2000' +YEAR='20[0-9][0-9]' +NEW_YEAR="${1:-$(date '+%Y')}" +SUFFIX=',* \(.* and \)*Jeroen T\. Vermeulen' +grep -rIl "$PREFIX-$YEAR$SUFFIX" | + xargs -r sed -i -e "s/\\($PREFIX\\)-$YEAR\\($SUFFIX\\)/\\1-$NEW_YEAR\\2/" + +# This one is so different that I'd rather keep it a special case. +sed \ + -i \ + -e "s/\\(2000\\)-$YEAR\\(,* Jeroen T\\. 
Vermeulen\\)/\1-$NEW_YEAR\\2/" \ + doc/conf.py diff --git a/ext/miniupnpc/minisoap.c b/ext/miniupnpc/minisoap.c index 5b8c0784c..11f4635ad 100644 --- a/ext/miniupnpc/minisoap.c +++ b/ext/miniupnpc/minisoap.c @@ -21,12 +21,14 @@ #include "minisoap.h" #ifdef _WIN32 +#undef OS_STRING #define OS_STRING "Win32" #define MINIUPNPC_VERSION_STRING "2.0" #define UPNP_VERSION_STRING "UPnP/1.1" #endif #ifdef __ANDROID__ +#undef OS_STRING #define OS_STRING "Android" #define MINIUPNPC_VERSION_STRING "2.0" #define UPNP_VERSION_STRING "UPnP/1.1" diff --git a/ext/miniupnpc/miniwget.c b/ext/miniupnpc/miniwget.c index e23f11e37..0c461734f 100644 --- a/ext/miniupnpc/miniwget.c +++ b/ext/miniupnpc/miniwget.c @@ -49,12 +49,14 @@ #endif /* MIN */ #ifdef _WIN32 +#undef OS_STRING #define OS_STRING "Win32" #define MINIUPNPC_VERSION_STRING "2.0" #define UPNP_VERSION_STRING "UPnP/1.1" #endif #ifdef __ANDROID__ +#undef OS_STRING #define OS_STRING "Android" #define MINIUPNPC_VERSION_STRING "2.0" #define UPNP_VERSION_STRING "UPnP/1.1" diff --git a/ext/json/LICENSE.MIT b/ext/nlohmann/LICENSE.MIT similarity index 100% rename from ext/json/LICENSE.MIT rename to ext/nlohmann/LICENSE.MIT diff --git a/ext/json/README.md b/ext/nlohmann/README.md similarity index 100% rename from ext/json/README.md rename to ext/nlohmann/README.md diff --git a/ext/json/json.hpp b/ext/nlohmann/json.hpp similarity index 100% rename from ext/json/json.hpp rename to ext/nlohmann/json.hpp diff --git a/ext/opentelemetry-cpp-1.21.0/.bazelignore b/ext/opentelemetry-cpp-1.21.0/.bazelignore new file mode 100644 index 000000000..56306fc4b --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.bazelignore @@ -0,0 +1,3 @@ +third_party +tools +out diff --git a/ext/opentelemetry-cpp-1.21.0/.bazelrc b/ext/opentelemetry-cpp-1.21.0/.bazelrc new file mode 100644 index 000000000..57cd0a2fa --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.bazelrc @@ -0,0 +1,41 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +# bazel configurations for running tests under sanitizers. +# Based on https://github.com/bazelment/trunk/blob/master/tools/bazel.rc + +# Enable automatic configs based on platform +common --enable_platform_specific_config + +# Make globs that don't match anything fail +common --incompatible_disallow_empty_glob + +# Needed by gRPC to build on some platforms. +build --copt -DGRPC_BAZEL_BUILD + +# Workaround abseil libraries missing symbols +build:windows --dynamic_mode=off + +# Set minimum supported C++ version +build:macos --host_cxxopt=-std=c++14 --cxxopt=-std=c++14 +build:linux --host_cxxopt=-std=c++14 --cxxopt=-std=c++14 +build:windows --host_cxxopt=/std:c++14 --cxxopt=/std:c++14 + +# --config=asan : Address Sanitizer. +common:asan --copt -DADDRESS_SANITIZER +common:asan --copt -fsanitize=address,bool,float-cast-overflow,integer-divide-by-zero,null,return,returns-nonnull-attribute,shift-exponent,signed-integer-overflow,unreachable,vla-bound +common:asan --copt -fsanitize-address-use-after-scope +common:asan --copt -fno-sanitize-recover=all +common:asan --linkopt -fsanitize=address,bool,float-cast-overflow,integer-divide-by-zero,null,return,returns-nonnull-attribute,shift-exponent,signed-integer-overflow,unreachable,vla-bound +common:asan --linkopt -fsanitize-address-use-after-scope +common:asan --linkopt -fno-sanitize-recover=all +common:asan --cc_output_directory_tag=asan + +# --config=tsan : Thread Sanitizer. 
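+# (A config is selected on the command line, e.g. "bazel test --config=tsan //...".)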
+common:tsan --copt -fsanitize=thread +common:tsan --copt -DTHREAD_SANITIZER +common:tsan --linkopt -fsanitize=thread +common:tsan --cc_output_directory_tag=tsan +# This is needed to address false positive problem with abseil.The same setting as gRPC +# https://github.com/google/sanitizers/issues/953 +common:tsan --test_env=TSAN_OPTIONS=report_atomic_races=0 diff --git a/ext/opentelemetry-cpp-1.21.0/.bazelversion b/ext/opentelemetry-cpp-1.21.0/.bazelversion new file mode 100644 index 000000000..21c8c7b46 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.bazelversion @@ -0,0 +1 @@ +7.1.1 diff --git a/ext/opentelemetry-cpp-1.21.0/.clang-format b/ext/opentelemetry-cpp-1.21.0/.clang-format new file mode 100644 index 000000000..1b5d0d488 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.clang-format @@ -0,0 +1,73 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +# See Clang docs: http://clang.llvm.org/docs/ClangFormatStyleOptions.html +BasedOnStyle: Chromium + +# Allow double brackets such as std::vector>. +Standard: Cpp11 + +# Indent 2 spaces at a time. +IndentWidth: 2 + +# Keep lines under 100 columns long. +ColumnLimit: 100 + +# Always break before braces +BreakBeforeBraces: Custom +BraceWrapping: +# TODO(lujc) wait for clang-format-9 support in Chromium tools +# AfterCaseLabel: true + AfterClass: true + AfterControlStatement: true + AfterEnum: true + AfterFunction: true + AfterNamespace: true + AfterStruct: true + AfterUnion: true + BeforeCatch: true + BeforeElse: true + IndentBraces: false + SplitEmptyFunction: false + SplitEmptyRecord: false + SplitEmptyNamespace: false + + # Keeps extern "C" blocks unindented. + AfterExternBlock: false + +# Indent case labels. +IndentCaseLabels: true + +# Right-align pointers and references +PointerAlignment: Right + +# ANGLE likes to align things as much as possible. +AlignOperands: true +AlignConsecutiveAssignments: true + +# Use 2 space negative offset for access modifiers +AccessModifierOffset: -2 + +# TODO(jmadill): Decide if we want this on. Doesn't have an "all or none" mode. +AllowShortCaseLabelsOnASingleLine: false + +# Useful for spacing out functions in classes +KeepEmptyLinesAtTheStartOfBlocks: true + +# Indent nested PP directives. 
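+# (AfterHash keeps the '#' in column 0 and indents the directive name after it,
+# e.g. "#  if FOO" rather than "  #if FOO".)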
+IndentPPDirectives: AfterHash + +# Include blocks style +IncludeBlocks: Preserve + +AttributeMacros: + - OPENTELEMETRY_UNLIKELY + - OPENTELEMETRY_LIKELY + - OPENTELEMETRY_MAYBE_UNUSED + - OPENTELEMETRY_DEPRECATED + - OPENTELEMETRY_API_SINGLETON + - OPENTELEMETRY_LOCAL_SYMBOL + - OPENTELEMETRY_EXPORT + - OPENTELEMETRY_SANITIZER_NO_MEMORY + - OPENTELEMETRY_SANITIZER_NO_THREAD + - OPENTELEMETRY_SANITIZER_NO_ADDRESS \ No newline at end of file diff --git a/ext/opentelemetry-cpp-1.21.0/.clang-tidy b/ext/opentelemetry-cpp-1.21.0/.clang-tidy new file mode 100644 index 000000000..db61b810c --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.clang-tidy @@ -0,0 +1,42 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +Checks: > + -*, + performance-*, + portability-*, + abseil-*, + -abseil-string-find-str-contains, + bugprone-*, + -bugprone-easily-swappable-parameters, + -bugprone-implicit-widening-of-multiplication-result, + -bugprone-inc-dec-in-conditions, + -bugprone-narrowing-conversions, + -bugprone-unchecked-optional-access, + -bugprone-unhandled-exception-at-new, + -bugprone-unused-local-non-trivial-variable, + google-*, + -google-build-using-namespace, + -google-default-arguments, + -google-explicit-constructor, + -google-readability-avoid-underscore-in-googletest-name, + -google-readability-braces-around-statements, + -google-readability-namespace-comments, + -google-readability-todo, + -google-runtime-references, + misc-*, + -misc-const-correctness, + -misc-include-cleaner, + -misc-non-private-member-variables-in-classes, + -misc-unused-alias-decls, + -misc-use-anonymous-namespace, + cppcoreguidelines-*, + -cppcoreguidelines-owning-memory, + -cppcoreguidelines-avoid-do-while, + -cppcoreguidelines-avoid-c-arrays, + -cppcoreguidelines-avoid-magic-numbers, + -cppcoreguidelines-init-variables, + -cppcoreguidelines-macro-usage, + -cppcoreguidelines-non-private-member-variables-in-classes, + -cppcoreguidelines-avoid-non-const-global-variables, + -cppcoreguidelines-pro-* \ No newline at end of file diff --git a/ext/opentelemetry-cpp-1.21.0/.cmake-format.py b/ext/opentelemetry-cpp-1.21.0/.cmake-format.py new file mode 100644 index 000000000..d232ac8eb --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.cmake-format.py @@ -0,0 +1,7 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +# If comment markup is enabled, don't reflow the first comment block in +# eachlistfile. Use this to preserve formatting of your +# copyright/licensestatements. 
+first_comment_is_literal = True diff --git a/ext/opentelemetry-cpp-1.21.0/.copyright-ignore b/ext/opentelemetry-cpp-1.21.0/.copyright-ignore new file mode 100644 index 000000000..81c51bcd5 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.copyright-ignore @@ -0,0 +1,52 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +# Ignore the following directories + +./.git/* +./.github/* +./third_party/benchmark/* +./third_party/boost/* +./third_party/googletest/* +./third_party/ms-gsl/* +./third_party/nlohmann-json/* +./third_party/opentelemetry-proto/* +./third_party/prometheus-cpp/* +./tools/vcpkg/* +./tools/ports/* + +# Third party code + +./api/include/opentelemetry/nostd/internal/absl/* +./exporters/jaeger/thrift-gen/* +./exporters/etw/include/opentelemetry/exporters/etw/TraceLoggingDynamic.h + +# Doc + +./docs/* + +## Ignore the following files patterns + +*.md +*.rst +*.png +*.log +*.patch +*.json +*.nuspec +*.pem + +# Packaging +*/CONTROL + +# LICENSE files +*/LICENSE + +# Ignore the following misc files + +./.bazelignore +./.bazelversion +./docker/.gitignore +.markdownlintignore +./ci/toc.yml +./ci/valgrind-suppressions diff --git a/ext/opentelemetry-cpp-1.21.0/.devcontainer/Dockerfile.conan b/ext/opentelemetry-cpp-1.21.0/.devcontainer/Dockerfile.conan new file mode 100644 index 000000000..05f6ae27f --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.devcontainer/Dockerfile.conan @@ -0,0 +1,55 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 +FROM ubuntu:24.04@sha256:1e622c5f073b4f6bfad6632f2616c7f59ef256e96fe78bf6a595d1dc4376ac02 + +RUN apt update && apt install -y \ + build-essential \ + ca-certificates \ + wget \ + cmake \ + git \ + sudo \ + nano \ + pkg-config \ + ninja-build \ + clang-format \ + clang-tidy \ + autoconf \ + automake \ + libtool \ + python3-pip + +RUN pip install "conan==2.15.1" --break-system-packages + +ARG USER_UID=1000 +ARG USER_GID=1000 +ARG USER_NAME=devuser +ENV USER_NAME=devuser +ENV USER_UID=${USER_UID} +ENV USER_GID=${USER_GID} +ENV INSTALL_PACKAGES= +ENV IS_CONTAINER_BUILD=true + +COPY ./.devcontainer/customize_container.sh /tmp/opentelemetry_cpp/devcontainer/customize_container.sh +RUN /tmp/opentelemetry_cpp/devcontainer/customize_container.sh +USER devuser + +RUN conan profile detect --force + +ARG CONAN_FILE=conanfile_stable.txt +ARG CONAN_BUILD_TYPE=Debug +ARG CXX_STANDARD=17 +WORKDIR /home/devuser/conan +COPY ./install/conan/ . 
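+# The conan files copied above are resolved at image-build time (with
+# --build=missing), so the resulting packages are cached in the image and
+# later workspace builds can reuse them instead of rebuilding from source.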
+ +RUN conan install ./${CONAN_FILE} --build=missing -s build_type=${CONAN_BUILD_TYPE} +ENV CMAKE_TOOLCHAIN_FILE=/home/devuser/conan/build/${CONAN_BUILD_TYPE}/generators/conan_toolchain.cmake +ENV CXX_STANDARD=${CXX_STANDARD} +ENV BUILD_TYPE=${CONAN_BUILD_TYPE} +ENV CONAN_FILE=${CONAN_FILE} + +WORKDIR /workspaces/opentelemetry-cpp + +ENTRYPOINT [] + +CMD ["/bin/bash"] diff --git a/ext/opentelemetry-cpp-1.21.0/.devcontainer/Dockerfile.dev b/ext/opentelemetry-cpp-1.21.0/.devcontainer/Dockerfile.dev new file mode 100644 index 000000000..c0eea6d32 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.devcontainer/Dockerfile.dev @@ -0,0 +1,58 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +FROM otel/cpp_format_tools + +ARG USER_UID=1000 +ARG USER_GID=1000 +ARG INSTALL_PACKAGES= + +ARG CXX_STANDARD=17 +ARG CMAKE_VERSION=3.31.6 +ARG ABSEIL_CPP_VERSION=20230125.3 +ARG PROTOBUF_VERSION=23.3 +ARG GRPC_VERSION=v1.55.0 + +ENV CXX_STANDARD=${CXX_STANDARD} +ENV CMAKE_VERSION=${CMAKE_VERSION} +ENV ABSEIL_CPP_VERSION=${ABSEIL_CPP_VERSION} +ENV PROTOBUF_VERSION=${PROTOBUF_VERSION} +ENV GRPC_VERSION=${GRPC_VERSION} + +COPY ci /opt/ci + +RUN apt update && apt install -y wget \ + ninja-build \ + libcurl4-openssl-dev \ + clang-tidy \ + shellcheck + +RUN cd /opt/ci && bash setup_cmake.sh +RUN cd /opt/ci && bash setup_ci_environment.sh +RUN cd /opt && bash ci/setup_googletest.sh \ + && bash ci/install_abseil.sh \ + && bash ci/install_protobuf.sh \ + && bash ci/setup_grpc.sh -r $GRPC_VERSION -s $CXX_STANDARD -p protobuf -p abseil-cpp + +ADD https://github.com/bazelbuild/bazelisk/releases/download/v1.22.1/bazelisk-linux-amd64 /usr/local/bin + +RUN git config --global core.autocrlf input \ + && chmod +x /usr/local/bin/bazelisk-linux-amd64 + +ENV INSTALL_PACKAGES=${INSTALL_PACKAGES} +ENV USER_NAME=devuser +ENV USER_UID=${USER_UID} +ENV USER_GID=${USER_GID} +ENV IS_CONTAINER_BUILD=true + +COPY ./.devcontainer/customize_container.sh /tmp/opentelemetry_cpp/devcontainer/customize_container.sh +RUN /tmp/opentelemetry_cpp/devcontainer/customize_container.sh +RUN apt install -y npm && npm install -g markdownlint-cli@0.44.0 + +USER devuser + +WORKDIR /workspaces/opentelemetry-cpp + +ENTRYPOINT [] + +CMD ["/bin/bash"] \ No newline at end of file diff --git a/ext/opentelemetry-cpp-1.21.0/.devcontainer/README.md b/ext/opentelemetry-cpp-1.21.0/.devcontainer/README.md new file mode 100644 index 000000000..bea6f8c4d --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.devcontainer/README.md @@ -0,0 +1,65 @@ +# Customizing Your Dev Container + +Customize your dev container using build arguments (for direct Docker builds) or +environment variables (for evaluation in `devcontainer.json`). + +* **CMake version:** + The version of cmake to install. (Default: 3.31.6) + * Docker ARG: + `CMAKE_VERSION` + * Host Environment Variable: + `OTEL_CPP_DEVCONTAINER_CMAKE_VERSION` + +* **CXX standard:** + This is the C++ standard to build from (eg: 17, 20, ...). (Default: 17) + * Docker ARG: + `CXX_STANDARD` + * Host Environment Variable: + `OTEL_CPP_DEVCONTAINER_CXX_STANDARD` + +* **abseil-cpp version:** + This is the version of abseil-cpp that will be used to build protobuf, gRPC, + and opentelemetry-cpp. 
+ * Docker ARG: + `ABSEIL_CPP_VERSION` + * Host Environment Variable: + `OTEL_CPP_DEVCONTAINER_ABSEIL_CPP_VERSION` + +* **Protobuf version:** + * Docker ARG: + `PROTOBUF_VERSION` + * Host Environment Variable: + `OTEL_CPP_DEVCONTAINER_PROTOBUF_VERSION` + +* **gRPC version:** + * Docker ARG: + `GRPC_VERSION` + * Host Environment Variable: + `OTEL_CPP_DEVCONTAINER_GRPC_VERSION` + +* **User ID (UID):** + User ID (Default: `1000`) + * Docker ARG: + `USER_UID` + * Host Environment Variable: + `OTEL_CPP_DEVCONTAINER_USER_UID` + +* **Group ID (GID):** + User group ID (Default: `1000`) + * Docker ARG: + `USER_GID` + * Host Environment Variable: + `OTEL_CPP_DEVCONTAINER_USER_GID` + +* **Install Packages:** + These are the additional packages that will be installed via `apt install` in the devcontainer. This is a space separated list. + * Docker ARG: + `INSTALL_PACKAGES` (Default: ``) + * Host Environment Variable: + `OTEL_CPP_DEVCONTAINER_INSTALL_PACKAGES` (Default: ``) + +## Examples + +* `docker build --build-arg CXX_STANDARD="20" --build-arg INSTALL_PACKAGES="nano gitk"...` +* `export OTEL_CPP_DEVCONTAINER_CXX_STANDARD=20` +* `export OTEL_CPP_DEVCONTAINER_INSTALL_PACKAGES="nano gitk"` diff --git a/ext/opentelemetry-cpp-1.21.0/.devcontainer/customize_container.sh b/ext/opentelemetry-cpp-1.21.0/.devcontainer/customize_container.sh new file mode 100755 index 000000000..ba9614e67 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.devcontainer/customize_container.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -eu + +if [[ $IS_CONTAINER_BUILD != "true" ]]; then + echo "This script should only run inside a Docker container." + exit 1 +fi + +if [[ -n "$INSTALL_PACKAGES" ]]; then + packages=($INSTALL_PACKAGES) + for package in "${packages[@]}"; do + apt install -y "$package" + done +fi + +if [[ $(id "$USER_NAME" 2>/dev/null) ]]; then + echo "User '$USER_NAME' already exists. Removing it." + userdel -rf "$USER_NAME" +elif [[ $(id -u "$USER_UID" 2>/dev/null) ]]; then + OTHER_USER=$(getent passwd "$USER_UID" | cut -d: -f1) + echo "User '$OTHER_USER' exists with UID $USER_UID. Removing it." + userdel -rf "$OTHER_USER" +fi + +if [[ ! $(getent group "$USER_GID" 2>/dev/null) ]]; then + echo "Group '$USER_GID' does not exist. Adding it." + groupadd -g "$USER_GID" "$USER_NAME" +fi + +useradd -m -u "$USER_UID" -g "$USER_GID" -s /bin/bash "$USER_NAME" +echo "Created user '$USER_NAME' (UID: $USER_UID, GID: $USER_GID)." + +echo "$USER_NAME ALL=(ALL) NOPASSWD:ALL" | tee /etc/sudoers.d/"$USER_NAME" + +echo "User and group setup complete." diff --git a/ext/opentelemetry-cpp-1.21.0/.devcontainer/devcontainer.json b/ext/opentelemetry-cpp-1.21.0/.devcontainer/devcontainer.json new file mode 100644 index 000000000..808712242 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.devcontainer/devcontainer.json @@ -0,0 +1,35 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 +// For format details, see https://aka.ms/devcontainer.json. 
For config options, see the README at: +// https://github.com/microsoft/vscode-dev-containers/tree/v0.162.0/containers/javascript-node +{ + "name": "opentelemetry-cpp", + "build": { + "context": "..", + "dockerfile": "Dockerfile.dev", + "args": { + "USER_UID": "${localEnv:OTEL_CPP_DEVCONTAINER_USER_UID:1000}", + "USER_GID": "${localEnv:OTEL_CPP_DEVCONTAINER_USER_GID:1000}", + "INSTALL_PACKAGES": "${localEnv:OTEL_CPP_DEVCONTAINER_INSTALL_PACKAGES:}", + "CMAKE_VERSION": "${localEnv:OTEL_CPP_DEVCONTAINER_CMAKE_VERSION:3.31.6}", + "CXX_STANDARD": "${localEnv:OTEL_CPP_DEVCONTAINER_CXX_STANDARD:17}", + "GRPC_VERSION": "${localEnv:OTEL_CPP_DEVCONTAINER_GRPC_VERSION:v1.55.0}", + "PROTOBUF_VERSION": "${localEnv:OTEL_CPP_DEVCONTAINER_PROTOBUF_VERSION:23.3}", + "ABSEIL_CPP_VERSION":"${localEnv:OTEL_CPP_DEVCONTAINER_ABSEIL_CPP_VERSION:20230125.3}" + } + }, + "customizations": { + "vscode": { + "extensions": [ + "ms-vscode.cpptools", + "ms-azuretools.vscode-docker", + "ms-vscode.cpptools-extension-pack" + ], + "settings": { + "terminal.integrated.shell.linux": "/bin/bash", + } + } + }, + + "remoteUser": "devuser" +} diff --git a/ext/opentelemetry-cpp-1.21.0/.gitattributes b/ext/opentelemetry-cpp-1.21.0/.gitattributes new file mode 100644 index 000000000..ca2467b47 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.gitattributes @@ -0,0 +1,47 @@ +##### Source code ##### + +## C++ and C source files +*.c text eol=lf diff=cpp +*.h text eol=lf diff=cpp +*.cc text eol=lf diff=cpp +*.cpp text eol=lf diff=cpp +*.cxx text eol=lf diff=cpp +*.hpp text eol=lf diff=cpp + +## Python scripts +*.py text eol=lf diff=python + +## Perl scripts/libraries/modules +*.perl text eol=lf diff=perl +*.pl text eol=lf diff=perl +*.pm text eol=lf diff=perl + +## Shell scripts +*.sh text eol=lf +*.bash text eol=lf + +## Windows batch and PowerShell scripts +*.bat text eol=crlf +*.cmd text eol=crlf +*.ps1 text eol=crlf + +##### Other file types ##### + +## Text files and documentation +*.txt text +README* text +INSTALL* text +LICENSE* text + +## Non-text documentation +*.html text diff=html +*.pdf binary +*.rtf binary + +## git files +.gitignore text eol=lf +.gitattributes text eol=lf + +## bazel files +WORKSPACE text eol=lf +BUILD text eol=lf diff --git a/ext/opentelemetry-cpp-1.21.0/.github/.codecov.yaml b/ext/opentelemetry-cpp-1.21.0/.github/.codecov.yaml new file mode 100644 index 000000000..1b9921ac4 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.github/.codecov.yaml @@ -0,0 +1,50 @@ +codecov: + require_ci_to_pass: false + max_report_age: off + +coverage: + precision: 2 + round: up + range: "80...100" + status: + project: + default: + informational: true + target: auto + threshold: 10% + patch: false + +parsers: + gcov: + branch_detection: + conditional: yes + loop: yes + method: no + macro: no + +comment: + layout: "reach,diff,flags,tree" + behavior: default + require_changes: false + +# Relative file path fixing. +# CI file paths must match Git file paths. +# This fix removes the "/home/runner/" prefix +# to coverage report file paths. 
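+# (For example, a hypothetical report path "/home/runner/work/repo/src/foo.cc"
+# would be shortened to "work/repo/src/foo.cc".)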
+fixes: + - "/home/runner/::" + +ignore: + - "docs/**/*" + - "docker/**/*" + - "examples/**/*" + - "bazel/**/*" + - "cmake/**/*" + - "buildscripts/**/*" + - "third_party/**/*" + - "test_common/**/*" + - "tools/**/*" + - ".vscode/**/*" + - ".github/**/*" + - "**/test/**/*" + - "**.md" \ No newline at end of file diff --git a/ext/opentelemetry-cpp-1.21.0/.github/CODEOWNERS b/ext/opentelemetry-cpp-1.21.0/.github/CODEOWNERS new file mode 100644 index 000000000..7a40cd0b0 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.github/CODEOWNERS @@ -0,0 +1,8 @@ +# Code owners file. +# This file controls who is tagged for review for any given pull request. + +# For ETW exporter +exporters/etw/* @reyang @maxgolov @lalitb @ThomsonTan @open-telemetry/cpp-approvers + +# For anything not explicitly taken by someone else: +* @open-telemetry/cpp-approvers diff --git a/ext/opentelemetry-cpp-1.21.0/.github/ISSUE_TEMPLATE/bug_report.md b/ext/opentelemetry-cpp-1.21.0/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 000000000..e0bb4cca2 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,19 @@ +--- +name: Bug Report +about: Create a report to help us improve +labels: bug +--- + +**Describe your environment** Describe any aspect of your environment relevant to the problem, including your platform, build system, version numbers of installed dependencies, etc. If you're reporting a problem with a specific version of a library in this repo, please check whether the problem has been fixed on main branch. + +**Steps to reproduce** +Describe exactly how to reproduce the error. Include a code sample if applicable. + +**What is the expected behavior?** +What did you expect to see? + +**What is the actual behavior?** +What did you see instead? + +**Additional context** +Add any other context about the problem here. diff --git a/ext/opentelemetry-cpp-1.21.0/.github/ISSUE_TEMPLATE/feature_request.md b/ext/opentelemetry-cpp-1.21.0/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 000000000..973549ab2 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,19 @@ +--- +name: Feature Request +about: Suggest an idea for this project +labels: feature-request +--- + +Before opening a feature request against this repo, consider whether the feature should/could be implemented in the [other OpenTelemetry client libraries](https://github.com/open-telemetry/). If so, please [open an issue on opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification/issues/new) first. + +**Is your feature request related to a problem?** +If so, provide a concise description of the problem. + +**Describe the solution you'd like** +What do you want to happen instead? What is the expected behavior? + +**Describe alternatives you've considered** +Which alternative solutions or features have you considered? + +**Additional context** +Add any other context about the feature request here. 
diff --git a/ext/opentelemetry-cpp-1.21.0/.github/dependabot.yml b/ext/opentelemetry-cpp-1.21.0/.github/dependabot.yml new file mode 100644 index 000000000..5cfb4cb89 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.github/dependabot.yml @@ -0,0 +1,13 @@ +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "daily" + labels: + - "GHA" + + - package-ecosystem: "devcontainers" + directory: "/" + schedule: + interval: daily diff --git a/ext/opentelemetry-cpp-1.21.0/.github/pull_request_template.md b/ext/opentelemetry-cpp-1.21.0/.github/pull_request_template.md new file mode 100644 index 000000000..e54760323 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.github/pull_request_template.md @@ -0,0 +1,11 @@ +Fixes # (issue) + +## Changes + +Please provide a brief description of the changes here. + +For significant contributions please make sure you have completed the following items: + +* [ ] `CHANGELOG.md` updated for non-trivial changes +* [ ] Unit tests have been added +* [ ] Changes in public API reviewed \ No newline at end of file diff --git a/ext/opentelemetry-cpp-1.21.0/.github/repository-settings.md b/ext/opentelemetry-cpp-1.21.0/.github/repository-settings.md new file mode 100644 index 000000000..fa86d02cf --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.github/repository-settings.md @@ -0,0 +1,38 @@ +# Process + +This file documents local admin changes for opentelemetry-cpp, +per the community process: https://github.com/open-telemetry/community/blob/main/docs/how-to-configure-new-repository.md + +Please note that the EasyCLA check **MUST** stay **REQUIRED**, +it should never be disabled or bypassed, at the risk of tainting the repository. + +# Guidelines + +The best is to open a PR first that describes the change, +so it can be discussed during review (maybe it is not needed, +maybe there is an alternate solution, ...). + +The PR must add a log entry in this file, detailing: + +* the date the change is implemented +* what is changed exactly (which setting) +* a short rationale + +Admin changes are then applied only when the PR is merged. + +If for some reason a change is implemented in emergency, +before a PR can be discussed and merged, +a PR should still be prepared and pushed after the fact to +describe the settings changed. + +# Log of local changes + +## 2023-11-03 + +Created log file `.github/repository-settings.md`, since admin permissions are now granted to maintainers. + +See https://github.com/open-telemetry/community/issues/1727 + +No setting changed. 
+ + diff --git a/ext/opentelemetry-cpp-1.21.0/.github/workflows/benchmark.yml b/ext/opentelemetry-cpp-1.21.0/.github/workflows/benchmark.yml new file mode 100644 index 000000000..eaad6b280 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.github/workflows/benchmark.yml @@ -0,0 +1,82 @@ +name: OpenTelemetry-cpp benchmarks +on: + push: + branches: + - main + +permissions: + contents: write + deployments: write + +jobs: + benchmark: + name: Run OpenTelemetry-cpp benchmarks + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Mount Bazel Cache + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + env: + cache-name: bazel_cache + with: + path: /home/runner/.cache/bazel + key: bazel_benchmark + - name: setup + run: | + sudo ./ci/setup_ci_environment.sh + sudo ./ci/install_bazelisk.sh + - name: Run benchmark + id: run_benchmarks + run: | + ./ci/do_ci.sh bazel.benchmark + mkdir -p benchmarks + mv api-benchmark_result.json benchmarks + mv sdk-benchmark_result.json benchmarks + mv exporters-benchmark_result.json benchmarks + - uses: actions/upload-artifact@6027e3dd177782cd8ab9af838c04fd81a07f1d47 # main March 2025 + with: + name: benchmark_results + path: benchmarks + store_benchmark: + needs: benchmark + strategy: + matrix: + components: ["api", "sdk", "exporters"] + name: Store benchmark result + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # main March 2025 + with: + name: benchmark_results + path: benchmarks + - name: Print json files + id: print_json + run: | + cat benchmarks/* + - name: Push benchmark result + uses: benchmark-action/github-action-benchmark@d48d326b4ca9ba73ca0cd0d59f108f9e02a381c7 # v1.20.4 + with: + name: OpenTelemetry-cpp ${{ matrix.components }} Benchmark + tool: 'googlecpp' + output-file-path: benchmarks/${{ matrix.components }}-benchmark_result.json + github-token: ${{ secrets.GITHUB_TOKEN }} + auto-push: true + # Show alert with commit comment on detecting possible performance regression + alert-threshold: '200%' + comment-on-alert: true + fail-on-alert: false + gh-pages-branch: gh-pages + benchmark-data-dir-path: benchmarks diff --git a/ext/opentelemetry-cpp-1.21.0/.github/workflows/ci.yml b/ext/opentelemetry-cpp-1.21.0/.github/workflows/ci.yml new file mode 100644 index 000000000..bc585fc26 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.github/workflows/ci.yml @@ -0,0 +1,1247 @@ +name: CI + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +permissions: + contents: read + +jobs: + +# Commented 2024-11-06, lack of workers in github causes CI failures +# arm64_test: +# name: CMake test arm64 (with modern protobuf,grpc and abseil) +# runs-on: actuated-arm64-4cpu-16gb +# steps: +# - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 +# with: +# submodules: 'recursive' +# - name: setup +# env: +# PROTOBUF_VERSION: '23.3' +# ABSEIL_CPP_VERSION: '20230125.3' +# CXX_STANDARD: '14' +# CC: /usr/bin/gcc-10 +# CXX: /usr/bin/g++-10 +# 
run: | +# sudo -E ./ci/setup_gcc10.sh +# sudo -E ./ci/setup_ci_environment.sh +# sudo -E ./ci/setup_cmake.sh +# sudo -E ./ci/setup_googletest.sh +# sudo -E ./ci/install_abseil.sh +# sudo -E ./ci/install_protobuf.sh + + cmake_test: + name: CMake test (prometheus, elasticsearch, zipkin) + runs-on: ubuntu-22.04 + env: + CXX_STANDARD: '17' + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + run: | + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + - name: run cmake tests + run: | + ./ci/do_ci.sh cmake.test + + cmake_fetch_content_test: + name: CMake FetchContent usage with opentelemetry-cpp + runs-on: ubuntu-24.04 + env: + CXX_STANDARD: '17' + BUILD_TYPE: 'Debug' + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + run: | + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + - name: Install abseil, protobuf, and grpc with apt + run: | + sudo -E apt-get update + sudo -E apt-get install -y libabsl-dev libprotobuf-dev libgrpc++-dev protobuf-compiler protobuf-compiler-grpc + - name: run fetch content cmake test + run: | + ./ci/do_ci.sh cmake.fetch_content.test + + cmake_gcc_maintainer_sync_test: + name: CMake gcc 14 (maintainer mode, sync) + runs-on: ubuntu-24.04 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + env: + CC: /usr/bin/gcc-14 + CXX: /usr/bin/g++-14 + PROTOBUF_VERSION: 21.12 + run: | + sudo apt remove needrestart #refer: https://github.com/actions/runner-images/issues/9937 + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + sudo -E ./ci/install_protobuf.sh + - name: setup grpc + run: | + sudo ./ci/setup_grpc.sh + - name: run cmake gcc (maintainer mode, sync) + env: + CC: /usr/bin/gcc-14 + CXX: /usr/bin/g++-14 + run: | + ./ci/do_ci.sh cmake.maintainer.sync.test + - name: generate test cert + env: + CFSSL_VERSION: 1.6.3 + run: | + sudo -E ./tools/setup-cfssl.sh + (cd ./functional/cert; ./generate_cert.sh) + - name: run func test + run: | + (cd ./functional/otlp; ./run_test.sh) + + cmake_gcc_maintainer_async_test: + name: CMake gcc 14 (maintainer mode, async) + runs-on: ubuntu-24.04 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + env: + CC: /usr/bin/gcc-14 + CXX: /usr/bin/g++-14 + PROTOBUF_VERSION: 21.12 + run: | + sudo apt remove needrestart #refer: https://github.com/actions/runner-images/issues/9937 + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh 
+ sudo -E ./ci/install_protobuf.sh + - name: setup grpc + run: | + sudo ./ci/setup_grpc.sh + - name: run cmake gcc (maintainer mode, async) + env: + CC: /usr/bin/gcc-14 + CXX: /usr/bin/g++-14 + run: | + ./ci/do_ci.sh cmake.maintainer.async.test + - name: generate test cert + env: + CFSSL_VERSION: 1.6.3 + run: | + sudo -E ./tools/setup-cfssl.sh + (cd ./functional/cert; ./generate_cert.sh) + - name: run func test + run: | + (cd ./functional/otlp; ./run_test.sh) + + cmake_clang_maintainer_sync_test: + name: CMake clang 18 (maintainer mode, sync) + runs-on: ubuntu-24.04 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + env: + CC: /usr/bin/clang-18 + CXX: /usr/bin/clang++-18 + PROTOBUF_VERSION: 21.12 + run: | + sudo apt remove needrestart #refer: https://github.com/actions/runner-images/issues/9937 + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + sudo -E ./ci/install_protobuf.sh + - name: setup grpc + run: | + sudo ./ci/setup_grpc.sh + - name: run cmake clang (maintainer mode, sync) + env: + CC: /usr/bin/clang-18 + CXX: /usr/bin/clang++-18 + run: | + ./ci/do_ci.sh cmake.maintainer.sync.test + - name: generate test cert + env: + CFSSL_VERSION: 1.6.3 + run: | + sudo -E ./tools/setup-cfssl.sh + (cd ./functional/cert; ./generate_cert.sh) + - name: run func test + run: | + (cd ./functional/otlp; ./run_test.sh) + + cmake_clang_maintainer_async_test: + name: CMake clang 18 (maintainer mode, async) + runs-on: ubuntu-24.04 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + env: + CC: /usr/bin/clang-18 + CXX: /usr/bin/clang++-18 + PROTOBUF_VERSION: 21.12 + run: | + sudo apt remove needrestart #refer: https://github.com/actions/runner-images/issues/9937 + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + sudo -E ./ci/install_protobuf.sh + - name: setup grpc + run: | + sudo ./ci/setup_grpc.sh + - name: run cmake clang (maintainer mode, async) + env: + CC: /usr/bin/clang-18 + CXX: /usr/bin/clang++-18 + run: | + ./ci/do_ci.sh cmake.maintainer.async.test + - name: generate test cert + env: + CFSSL_VERSION: 1.6.3 + run: | + sudo -E ./tools/setup-cfssl.sh + (cd ./functional/cert; ./generate_cert.sh) + - name: run func test + run: | + (cd ./functional/otlp; ./run_test.sh) + + cmake_clang_maintainer_abiv2_test: + name: CMake clang 18 (maintainer mode, abiv2) + runs-on: ubuntu-24.04 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + env: + CC: /usr/bin/clang-18 + CXX: /usr/bin/clang++-18 + PROTOBUF_VERSION: 21.12 + run: | + sudo apt remove needrestart #refer: https://github.com/actions/runner-images/issues/9937 + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + sudo -E 
./ci/install_protobuf.sh + - name: setup grpc + run: | + sudo ./ci/setup_grpc.sh + - name: run cmake clang (maintainer mode, abiv2) + env: + CC: /usr/bin/clang-18 + CXX: /usr/bin/clang++-18 + run: | + ./ci/do_ci.sh cmake.maintainer.abiv2.test + - name: generate test cert + env: + CFSSL_VERSION: 1.6.3 + run: | + sudo -E ./tools/setup-cfssl.sh + (cd ./functional/cert; ./generate_cert.sh) + - name: run func test + run: | + (cd ./functional/otlp; ./run_test.sh) + + cmake_msvc_maintainer_test: + name: CMake msvc (maintainer mode) + runs-on: windows-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + run: | + ./ci/setup_cmake.ps1 + ./ci/setup_windows_ci_environment.ps1 + - name: run tests + run: ./ci/do_ci.ps1 cmake.maintainer.test + + cmake_msvc_maintainer_test_stl_cxx20: + name: CMake msvc (maintainer mode) with C++20 + runs-on: windows-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + run: | + ./ci/setup_cmake.ps1 + ./ci/setup_windows_ci_environment.ps1 + - name: run tests + env: + CXX_STANDARD: '20' + run: ./ci/do_ci.ps1 cmake.maintainer.cxx20.stl.test + + cmake_msvc_maintainer_abiv2_test: + name: CMake msvc (maintainer mode, abiv2) + runs-on: windows-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + run: | + ./ci/setup_cmake.ps1 + ./ci/setup_windows_ci_environment.ps1 + - name: run tests + env: + CXX_STANDARD: '20' + run: ./ci/do_ci.ps1 cmake.maintainer.abiv2.test + + cmake_with_async_export_test: + name: CMake test (without otlp-exporter and with async export) + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + env: + CC: /usr/bin/gcc-12 + CXX: /usr/bin/g++-12 + run: | + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + - name: run cmake tests (without otlp-exporter) + env: + CC: /usr/bin/gcc-12 + CXX: /usr/bin/g++-12 + run: | + ./ci/do_ci.sh cmake.with_async_export.test + + cmake_opentracing_shim_test: + name: CMake test (with opentracing-shim) + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + run: | + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + - name: run cmake tests (enable opentracing-shim) + run: ./ci/do_ci.sh 
cmake.opentracing_shim.test + + cmake_test_cxx14_gcc: + name: CMake C++14 test(GCC) + runs-on: ubuntu-22.04 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + run: | + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + - name: run tests (enable stl) + env: + CXX_STANDARD: '14' + run: ./ci/do_ci.sh cmake.c++14.stl.test + + cmake_test_cxx17_gcc: + name: CMake C++17 test(GCC) + runs-on: ubuntu-22.04 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + run: | + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + - name: run tests (enable stl) + env: + CXX_STANDARD: '17' + run: ./ci/do_ci.sh cmake.c++17.stl.test + + cmake_test_cxx20_gcc: + name: CMake C++20 test(GCC) + runs-on: ubuntu-22.04 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + run: | + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + - name: run tests + env: + CXX_STANDARD: '20' + run: ./ci/do_ci.sh cmake.c++20.test + - name: run tests (enable stl) + env: + CXX_STANDARD: '20' + run: ./ci/do_ci.sh cmake.c++20.stl.test + + cmake_test_cxx20_clang: + name: CMake C++20 test(Clang with libc++) + runs-on: ubuntu-22.04 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + env: + CC: /usr/bin/clang + CXX: /usr/bin/clang++ + CXXFLAGS: "-stdlib=libc++" + run: | + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + - name: run tests + env: + CC: /usr/bin/clang + CXX: /usr/bin/clang++ + CXXFLAGS: "-stdlib=libc++" + CXX_STANDARD: '20' + run: ./ci/do_ci.sh cmake.c++20.test + - name: run tests (enable stl) + env: + CC: /usr/bin/clang + CXX: /usr/bin/clang++ + CXXFLAGS: "-stdlib=libc++" + CXX_STANDARD: '20' + run: ./ci/do_ci.sh cmake.c++20.stl.test + + cmake_test_cxx23_gcc: + name: CMake C++23 test(GCC) + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + run: | + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + - name: run tests + env: + CXX_STANDARD: '23' + run: ./ci/do_ci.sh cmake.c++23.test + - name: run tests (enable stl) + env: + CXX_STANDARD: '23' + run: ./ci/do_ci.sh 
cmake.c++23.stl.test + + cmake_test_cxx23_clang: + name: CMake C++23 test(Clang with libc++) + runs-on: ubuntu-22.04 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + env: + CC: /usr/bin/clang + CXX: /usr/bin/clang++ + CXXFLAGS: "-stdlib=libc++" + run: | + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + - name: run tests + env: + CC: /usr/bin/clang + CXX: /usr/bin/clang++ + CXXFLAGS: "-stdlib=libc++" + CXX_STANDARD: '23' + run: ./ci/do_ci.sh cmake.c++23.test + - name: run tests (enable stl) + env: + CC: /usr/bin/clang + CXX: /usr/bin/clang++ + CXXFLAGS: "-stdlib=libc++" + CXX_STANDARD: '23' + run: ./ci/do_ci.sh cmake.c++23.stl.test + + cmake_otprotocol_test: + name: CMake test (with otlp-exporter) + runs-on: ubuntu-22.04 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + run: | + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + - name: run otlp exporter tests + run: | + sudo ./ci/setup_grpc.sh + ./ci/do_ci.sh cmake.exporter.otprotocol.test + - name: generate test cert + env: + CFSSL_VERSION: 1.6.3 + run: | + sudo -E ./tools/setup-cfssl.sh + (cd ./functional/cert; ./generate_cert.sh) + - name: run func test + run: | + (cd ./functional/otlp; ./run_test.sh) + + cmake_modern_protobuf_grpc_with_abseil_test: + name: CMake test (with modern protobuf,grpc and abseil) + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + env: + PROTOBUF_VERSION: '23.3' + ABSEIL_CPP_VERSION: '20230125.3' + CXX_STANDARD: '14' + run: | + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + sudo -E ./ci/install_abseil.sh + sudo -E ./ci/install_protobuf.sh + - name: run otlp exporter tests + env: + CXX_STANDARD: '14' + run: | + sudo -E ./ci/setup_grpc.sh -m -p protobuf -p abseil-cpp + ./ci/do_ci.sh cmake.exporter.otprotocol.test + + cmake_do_not_install_test: + name: CMake do not install test (with otlp-exporter) + runs-on: ubuntu-22.04 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + run: | + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + - name: run otlp exporter tests + run: | + sudo ./ci/setup_grpc.sh + ./ci/do_ci.sh cmake.do_not_install.test + + cmake_otprotocol_shared_libs_with_static_grpc_test: + name: CMake test (build shared libraries with otlp-exporter and static gRPC) + runs-on: ubuntu-22.04 + steps: + - name: Harden the runner (Audit all 
outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + run: | + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + - name: run otlp exporter tests + run: | + sudo ./ci/setup_grpc.sh -T + ./ci/do_ci.sh cmake.exporter.otprotocol.shared_libs.with_static_grpc.test + + plugin_test: + name: Plugin -> CMake + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + env: + CC: /usr/bin/gcc-12 + CXX: /usr/bin/g++-12 + run: | + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + - name: run tests + env: + CC: /usr/bin/gcc-12 + CXX: /usr/bin/g++-12 + run: ./ci/do_ci.sh cmake.test_example_plugin + + bazel_test: + name: Bazel + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Mount Bazel Cache + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + env: + cache-name: bazel_cache + with: + path: /home/runner/.cache/bazel + key: bazel_test + - name: setup + run: | + sudo ./ci/setup_ci_environment.sh + sudo ./ci/setup_cmake.sh + sudo ./ci/install_bazelisk.sh + - name: run tests + run: ./ci/do_ci.sh bazel.test + + bazel_no_bzlmod_test: + name: Bazel without bzlmod + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Mount Bazel Cache + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + env: + cache-name: bazel_cache + with: + path: /home/runner/.cache/bazel + key: bazel_test + - name: setup + run: | + sudo ./ci/setup_ci_environment.sh + sudo ./ci/setup_cmake.sh + sudo ./ci/install_bazelisk.sh + - name: run tests + run: ./ci/do_ci.sh bazel.no_bzlmod.test + + bazel_test_async: + name: Bazel with async export + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Mount Bazel Cache + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + env: + cache-name: bazel_cache + with: + path: /home/runner/.cache/bazel + key: bazel_test + - name: setup + run: | + sudo ./ci/setup_ci_environment.sh + sudo ./ci/setup_cmake.sh + sudo ./ci/install_bazelisk.sh + - name: run tests + run: ./ci/do_ci.sh bazel.with_async_export.test + + bazel_valgrind: + name: Bazel valgrind + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: 
step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Mount Bazel Cache + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + env: + cache-name: bazel_cache + with: + path: /home/runner/.cache/bazel + key: bazel_valgrind + - name: setup + run: | + sudo ./ci/setup_ci_environment.sh + sudo ./ci/setup_cmake.sh + sudo ./ci/install_bazelisk.sh + - name: run tests + run: ./ci/do_ci.sh bazel.valgrind + + bazel_noexcept: + name: Bazel noexcept + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Mount Bazel Cache + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + env: + cache-name: bazel_cache + with: + path: /home/runner/.cache/bazel + key: bazel_noexcept + - name: setup + run: | + sudo ./ci/setup_ci_environment.sh + sudo ./ci/setup_cmake.sh + sudo ./ci/install_bazelisk.sh + - name: run tests + run: ./ci/do_ci.sh bazel.noexcept + + bazel_nortti: + name: Bazel nortti + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Mount Bazel Cache + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + env: + cache-name: bazel_cache + with: + path: /home/runner/.cache/bazel + key: bazel_nortti + - name: setup + run: | + sudo ./ci/setup_ci_environment.sh + sudo ./ci/setup_cmake.sh + sudo ./ci/install_bazelisk.sh + - name: run tests + run: ./ci/do_ci.sh bazel.nortti + + bazel_asan: + name: Bazel asan config + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Mount Bazel Cache + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + env: + cache-name: bazel_cache + with: + path: /home/runner/.cache/bazel + key: bazel_asan + - name: setup + run: | + sudo ./ci/setup_ci_environment.sh + sudo ./ci/setup_cmake.sh + sudo ./ci/install_bazelisk.sh + - name: run tests + run: ./ci/do_ci.sh bazel.asan + + bazel_tsan: + name: Bazel tsan config + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Mount Bazel Cache + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + env: + cache-name: bazel_cache + with: + path: /home/runner/.cache/bazel + key: bazel_tsan + - name: setup + run: | + sudo ./ci/setup_ci_environment.sh + sudo ./ci/setup_cmake.sh + sudo ./ci/install_bazelisk.sh + - name: run tests + run: ./ci/do_ci.sh bazel.tsan + + bazel_osx: + name: Bazel on MacOS + 
runs-on: macos-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Mount Bazel Cache + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + env: + cache-name: bazel_cache + with: + path: /Users/runner/.cache/bazel + key: bazel_osx + - name: run tests + run: ./ci/do_ci.sh bazel.macos.test + + benchmark: + name: Benchmark + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Mount Bazel Cache + uses: actions/cache@5a3ec84eff668545956fd18022155c47e93e2684 # v4.2.3 + env: + cache-name: bazel_cache + with: + path: /home/runner/.cache/bazel + key: bazel_benchmark + - name: setup + run: | + sudo ./ci/setup_ci_environment.sh + sudo ./ci/install_bazelisk.sh + - name: run tests + run: | + env BENCHMARK_DIR=/benchmark + ./ci/do_ci.sh benchmark + - name: Upload benchmark results + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: benchmark_reports + path: /home/runner/benchmark + + format: + name: Format + runs-on: ubuntu-24.04 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: setup + run: sudo apt remove needrestart && sudo ./ci/install_format_tools.sh #refer: https://github.com/actions/runner-images/issues/9937 + - name: run tests + run: ./ci/do_ci.sh format + + copyright: + name: Copyright + runs-on: ubuntu-22.04 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: check copyright + run: ./tools/check_copyright.sh + + windows: + name: CMake -> exporter proto + runs-on: windows-2019 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + run: | + ./ci/setup_cmake.ps1 + ./ci/setup_windows_ci_environment.ps1 + ./ci/install_windows_protobuf.ps1 + - name: run cmake test + run: ./ci/do_ci.ps1 cmake.test + - name: run otprotocol test + run: ./ci/do_ci.ps1 cmake.exporter.otprotocol.test + + windows-build-dll: + name: CMake -> exporter proto (Build as DLL) + runs-on: windows-2019 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + run: | + ./ci/setup_cmake.ps1 + ./ci/setup_windows_ci_environment.ps1 + ./ci/install_windows_protobuf.ps1 + - name: run cmake test (DLL build) + run: 
./ci/do_ci.ps1 cmake.dll.test + - name: run cmake cxx20 test (DLL build) + run: ./ci/do_ci.ps1 cmake.dll.cxx20.test + - name: run otprotocol test (DLL build) + run: ./ci/do_ci.ps1 cmake.exporter.otprotocol.dll.test + + windows_with_async_export: + name: CMake (With async export) -> exporter proto + runs-on: windows-2019 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + run: | + ./ci/setup_cmake.ps1 + ./ci/setup_windows_ci_environment.ps1 + ./ci/install_windows_protobuf.ps1 + - name: run cmake test + run: ./ci/do_ci.ps1 cmake.with_async_export.test + - name: run otprotocol test + run: ./ci/do_ci.ps1 cmake.exporter.otprotocol.with_async_export.test + + windows_bazel: + name: Bazel Windows + runs-on: windows-2019 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + run: | + ./ci/install_windows_bazelisk.ps1 + - name: run tests + run: ./ci/do_ci.ps1 bazel.build + + windows_plugin_test: + name: Plugin -> CMake Windows + runs-on: windows-2019 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + run: | + ./ci/setup_cmake.ps1 + ./ci/setup_windows_ci_environment.ps1 + - name: run tests + run: ./ci/do_ci.ps1 cmake.test_example_plugin + + code_coverage: + name: Code coverage + runs-on: ubuntu-22.04 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + env: + CC: /usr/bin/gcc-10 + CXX: /usr/bin/g++-10 + run: | + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + - name: run tests and generate report + env: + CC: /usr/bin/gcc-10 + CXX: /usr/bin/g++-10 + run: ./ci/do_ci.sh code.coverage + - name: upload report + uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24 # v5.4.3 + with: + files: /home/runner/build/coverage.info + + markdown-lint: + runs-on: ubuntu-latest + + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - name: check out code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: install markdownlint-cli + run: sudo npm install -g markdownlint-cli@0.44.0 + + - name: run markdownlint + run: markdownlint . 
+ + shellcheck: + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - name: check out code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: install shellcheck + run: sudo apt install --assume-yes shellcheck + - name: run shellcheck + run: find . -name \*.sh | xargs shellcheck --severity=error + + misspell: + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - name: check out code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: install misspell + run: | + curl -L -o ./install-misspell.sh https://git.io/misspell + sh ./install-misspell.sh + - name: run misspell + run: ./bin/misspell -error . + + docfx_check: + name: DocFX check + runs-on: windows-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: install docfx + run: choco install docfx -y --version=2.58.5 + - name: run ./ci/docfx.cmd + shell: cmd + run: ./ci/docfx.cmd + + w3c_trace_context_compliance_v1: + name: W3C Distributed Tracing Validation V1 + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - name: Checkout open-telemetry/opentelemetry-cpp + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: setup + env: + CC: /usr/bin/gcc-12 + CXX: /usr/bin/g++-12 + run: | + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + - name: run w3c trace-context test server (background) + env: + CXX_STANDARD: '14' + run: | + ./ci/do_ci.sh cmake.w3c.trace-context.build-server + cd $HOME/build/ext/test/w3c_tracecontext_http_test_server + ./w3c_tracecontext_http_test_server & + - name: Checkout w3c/trace-context repo + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + repository: w3c/trace-context + path: trace-context + - name: install dependencies + run: | + sudo apt update && sudo apt install python3-pip + sudo pip3 install aiohttp==3.11.18 + - name: run w3c trace-context test suite + env: + SPEC_LEVEL: 1 + run: + | + python ${GITHUB_WORKSPACE}/trace-context/test/test.py http://localhost:30000/test TraceContextTest AdvancedTest + curl http://localhost:30000/stop diff --git a/ext/opentelemetry-cpp-1.21.0/.github/workflows/clang-tidy.yaml b/ext/opentelemetry-cpp-1.21.0/.github/workflows/clang-tidy.yaml new file mode 100644 index 000000000..aec027f64 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.github/workflows/clang-tidy.yaml @@ -0,0 +1,89 @@ +name: clang-tidy + +on: + push: + branches: [main] + pull_request: + branches: [main] + +permissions: + contents: read + +jobs: + clang-tidy: + runs-on: ubuntu-24.04 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: 
actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: recursive + + - name: Setup Environment + env: + PROTOBUF_VERSION: '23.3' + ABSEIL_CPP_VERSION: '20230125.3' + CXX_STANDARD: '14' + run: | + sudo apt update -y + sudo apt install -y --no-install-recommends --no-install-suggests \ + build-essential \ + iwyu \ + cmake \ + libssl-dev \ + libcurl4-openssl-dev \ + libprotobuf-dev \ + protobuf-compiler \ + libgmock-dev \ + libgtest-dev \ + libbenchmark-dev + + if ! command -v clang-tidy &> /dev/null; then + echo "clang-tidy could not be found" + exit 1 + fi + echo "Using clang-tidy version: $(clang-tidy --version)" + echo "clang-tidy installed at: $(which clang-tidy)" + + + - name: Prepare CMake + env: + CC: clang + CXX: clang++ + run: | + echo "Running cmake..." + cmake -B build \ + -DCMAKE_CXX_STANDARD=14 \ + -DWITH_STL=CXX14 \ + -DWITH_OTLP_HTTP=ON \ + -DWITH_OTLP_FILE=ON \ + -DWITH_PROMETHEUS=ON \ + -DWITH_ZIPKIN=ON \ + -DWITH_ELASTICSEARCH=ON \ + -DWITH_OTLP_HTTP_COMPRESSION=ON \ + -DWITH_EXAMPLES=ON \ + -DWITH_EXAMPLES_HTTP=ON \ + -DBUILD_W3CTRACECONTEXT_TEST=ON \ + -DWITH_METRICS_EXEMPLAR_PREVIEW=ON \ + -DWITH_ASYNC_EXPORT_PREVIEW=ON \ + -DCMAKE_EXPORT_COMPILE_COMMANDS=ON + + - name: Run clang-tidy + run: | + cmake --build build --target opentelemetry_proto + jq -r .[].file build/compile_commands.json | grep -vE '/(generated|third_party)/' | xargs -P $(nproc) -n 1 clang-tidy --quiet -p build 2>&1 | tee -a clang-tidy.log + + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: Logs (clang-tidy) + path: ./clang-tidy.log + + - name: Count warnings + run: | + COUNT=$(grep -c "warning:" clang-tidy.log) + echo "clang-tidy reported ${COUNT} warning(s)" + +# TODO: include WITH_OTLP_GRPC flags. 
diff --git a/ext/opentelemetry-cpp-1.21.0/.github/workflows/cmake_install.yml b/ext/opentelemetry-cpp-1.21.0/.github/workflows/cmake_install.yml new file mode 100644 index 000000000..c62d7def8 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.github/workflows/cmake_install.yml @@ -0,0 +1,372 @@ +name: CMake Install Tests + +on: + workflow_dispatch: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +permissions: + contents: read + +jobs: + windows_2022_vcpkg_submodule: + name: Windows 2022 vcpkg submodule versions cxx17 (static libs - dll) + runs-on: windows-2022 + env: + # Set to the latest version of cmake 3.x + CMAKE_VERSION: '3.31.6' + # cxx17 is the default for windows-2022 + CXX_STANDARD: '17' + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Build dependencies with vcpkg submodule + run: | + ./ci/setup_cmake.ps1 + ./ci/setup_windows_ci_environment.ps1 + - name: Run Tests + run: ./ci/do_ci.ps1 cmake.install.test + - name: Run DLL Tests + run: ./ci/do_ci.ps1 cmake.dll.install.test + + windows_2019_vcpkg_submodule_min_cmake: + name: Windows 2019 vcpkg submodule versions minimum cmake cxx14 (static libs) + runs-on: windows-2019 + env: + # cmake 3.15 is the minimum for windows builds (See https://github.com/open-telemetry/opentelemetry-cpp/pull/3349#discussion_r2030319430) + CMAKE_VERSION: '3.15.0' + # cxx14 is the default for windows-2019 + CXX_STANDARD: '14' + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Build dependencies with vcpkg submodule + run: | + ./ci/setup_cmake.ps1 + ./ci/setup_windows_ci_environment.ps1 + - name: Run Tests + run: ./ci/do_ci.ps1 cmake.install.test + + ubuntu_2404_system_packages: + name: Ubuntu 24.04 apt packages cxx17 (static libs - shared libs) + runs-on: ubuntu-24.04 + env: + INSTALL_TEST_DIR: '/home/runner/install_test' + # CMake 3.28 is apt package version for Ubuntu 24.04 + CMAKE_VERSION: '3.28.3' + # cxx17 is the default for Ubuntu 24.04 + CXX_STANDARD: '17' + BUILD_TYPE: 'Debug' + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Install libcurl, zlib, nlohmann-json with apt + run: | + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + - name: Install abseil, protobuf, and grpc with apt + run: | + sudo -E apt-get update + sudo -E apt-get install -y libabsl-dev libprotobuf-dev libgrpc++-dev protobuf-compiler protobuf-compiler-grpc + - name: Run Tests (static libs) + env: + BUILD_SHARED_LIBS: 'OFF' + run: ./ci/do_ci.sh cmake.install.test + - name: Run Tests (shared libs) + env: + BUILD_SHARED_LIBS: 'ON' + run: ./ci/do_ci.sh cmake.install.test + + ubuntu_2404_latest: + name: Ubuntu 24.04 latest versions cxx20 (static libs) + runs-on: ubuntu-24.04 + env: + INSTALL_TEST_DIR: '/home/runner/install_test' + # Set to the latest version of 
cmake 3.x + CMAKE_VERSION: '3.31.6' + # Set to the latest cxx standard supported by opentelemetry-cpp + CXX_STANDARD: '20' + # Versions below set to the latest version available + # The abseil and protobuf versions are taken from + # the grpc submodules at the GRPC_VERSION tag + GOOGLETEST_VERSION: '1.16.0' + ABSEIL_CPP_VERSION: '20240722.1' + PROTOBUF_VERSION: '29.0' + GRPC_VERSION: 'v1.71.0' + BUILD_TYPE: 'Debug' + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Install gtest, libcurl, zlib, nlohmann-json with apt + run: | + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + - name: Build abseil, protobuf, and grpc with ci scripts + run: | + sudo -E ./ci/install_abseil.sh + sudo -E ./ci/install_protobuf.sh + sudo -E ./ci/setup_grpc.sh -r $GRPC_VERSION -s $CXX_STANDARD -p protobuf -p abseil-cpp + - name: Run Tests (static libs) + env: + BUILD_SHARED_LIBS: 'OFF' + run: ./ci/do_ci.sh cmake.install.test + + ubuntu_2204_stable: + name: Ubuntu 22.04 stable versions cxx17 (static libs - shared libs) + runs-on: ubuntu-22.04 + env: + INSTALL_TEST_DIR: '/home/runner/install_test' + # CMake 3.22 is the apt package version for Ubuntu 22.04 + CMAKE_VERSION: '3.22.0' + CXX_STANDARD: '17' + # These are stable versions tested in the main ci workflow + # and defaults in the devcontainer + GOOGLETEST_VERSION: '1.14.0' + ABSEIL_CPP_VERSION: '20230125.3' + PROTOBUF_VERSION: '23.3' + GRPC_VERSION: 'v1.55.0' + BUILD_TYPE: 'Debug' + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Install gtest, libcurl, zlib, nlohmann-json with apt + run: | + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + - name: Build abseil, protobuf, and grpc with ci scripts + run: | + sudo -E ./ci/install_abseil.sh + sudo -E ./ci/install_protobuf.sh + sudo -E ./ci/setup_grpc.sh -r $GRPC_VERSION -s $CXX_STANDARD -p protobuf -p abseil-cpp + - name: Run Tests (static libs) + env: + BUILD_SHARED_LIBS: 'OFF' + run: ./ci/do_ci.sh cmake.install.test + - name: Run Tests (shared libs) + env: + BUILD_SHARED_LIBS: 'ON' + run: ./ci/do_ci.sh cmake.install.test + + ubuntu_2204_minimum: + name: Ubuntu 22.04 minimum versions cxx14 (static libs - shared libs) + runs-on: ubuntu-22.04 + env: + INSTALL_TEST_DIR: '/home/runner/install_test' + # Set to the current minimum version of cmake + CMAKE_VERSION: '3.14.0' + # cxx14 is the default for Ubuntu 22.04 + CXX_STANDARD: '14' + # This is the apt package version of googletest for Ubuntu 22.04 + GOOGLETEST_VERSION: '1.11.0' + # These are minimum versions tested in the main ci workflow + ABSEIL_CPP_VERSION: '20220623.2' + PROTOBUF_VERSION: '21.12' + GRPC_VERSION: 'v1.49.2' + BUILD_TYPE: 'Debug' + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Install 
gtest, libcurl, zlib, nlohmann-json with apt + run: | + sudo -E ./ci/setup_ci_environment.sh + sudo -E ./ci/setup_cmake.sh + sudo -E ./ci/setup_googletest.sh + - name: Build abseil, protobuf, and grpc with ci scripts + run: | + sudo -E ./ci/install_abseil.sh + sudo -E ./ci/install_protobuf.sh + sudo -E ./ci/setup_grpc.sh -r $GRPC_VERSION -s $CXX_STANDARD -p protobuf -p abseil-cpp + - name: Run Tests (static libs) + env: + BUILD_SHARED_LIBS: 'OFF' + run: ./ci/do_ci.sh cmake.install.test + - name: Run Tests (shared libs) + env: + BUILD_SHARED_LIBS: 'ON' + run: ./ci/do_ci.sh cmake.install.test + + ubuntu_2404_conan_stable: + name: Ubuntu 24.04 conan stable versions cxx17 (static libs - shared libs - opentracing shim) + runs-on: ubuntu-24.04 + env: + INSTALL_TEST_DIR: '/home/runner/install_test' + # CMake 3.28 is apt package version for Ubuntu 24.04 + CMAKE_VERSION: '3.28.3' + CXX_STANDARD: '17' + CMAKE_TOOLCHAIN_FILE: /home/runner/conan/build/Debug/generators/conan_toolchain.cmake + BUILD_TYPE: 'Debug' + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Install Conan + run: | + python3 -m pip install pip==25.0.1 + pip install "conan==2.15.1" + conan profile detect --force + - name: Install or build all dependencies with Conan + run: | + sudo -E ./ci/setup_cmake.sh + conan install install/conan/conanfile_stable.txt --build=missing -of /home/runner/conan -s build_type=${BUILD_TYPE} -s compiler.cppstd=${CXX_STANDARD} + - name: Run Tests (static libs) + env: + BUILD_SHARED_LIBS: 'OFF' + run: ./ci/do_ci.sh cmake.install.test + - name: Run Tests (shared libs) + env: + BUILD_SHARED_LIBS: 'ON' + run: ./ci/do_ci.sh cmake.install.test + - name: verify pkgconfig packages + run: | + export PKG_CONFIG_PATH=$INSTALL_TEST_DIR/lib/pkgconfig:$PKG_CONFIG_PATH + ./ci/verify_packages.sh + - name: Run OpenTracing Shim Test + run: ./ci/do_ci.sh cmake.opentracing_shim.install.test + + ubuntu_2404_conan_latest: + name: Ubuntu 24.04 conan latest versions cxx17 (static libs) + runs-on: ubuntu-24.04 + env: + INSTALL_TEST_DIR: '/home/runner/install_test' + # Set to the latest version of cmake 3.x + CMAKE_VERSION: '3.31.6' + CXX_STANDARD: '17' + CMAKE_TOOLCHAIN_FILE: /home/runner/conan/build/Debug/generators/conan_toolchain.cmake + BUILD_TYPE: 'Debug' + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Install Conan + run: | + python3 -m pip install pip==25.0.1 + pip install "conan==2.15.1" + conan profile detect --force + - name: Install or build all dependencies with Conan + run: | + sudo -E ./ci/setup_cmake.sh + conan install install/conan/conanfile_latest.txt --build=missing -of /home/runner/conan -s build_type=${BUILD_TYPE} -s compiler.cppstd=${CXX_STANDARD} + - name: Run Tests (static libs) + env: + BUILD_SHARED_LIBS: 'OFF' + run: ./ci/do_ci.sh cmake.install.test + - name: verify pkgconfig packages + run: | + export PKG_CONFIG_PATH=$INSTALL_TEST_DIR/lib/pkgconfig:$PKG_CONFIG_PATH + ./ci/verify_packages.sh + + macos_14_conan_stable: + name: macOS 14 conan stable versions cxx17 (static libs) + runs-on: macos-14 + env: + 
INSTALL_TEST_DIR: '/Users/runner/install_test' + CMAKE_VERSION: '3.28.3' + CXX_STANDARD: '17' + CMAKE_TOOLCHAIN_FILE: '/Users/runner/conan/build/Debug/generators/conan_toolchain.cmake' + BUILD_TYPE: 'Debug' + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Install Conan and tools + run: | + brew install conan autoconf automake libtool coreutils + ./ci/setup_cmake_macos.sh + conan profile detect --force + - name: Install or build all dependencies with Conan + run: conan install install/conan/conanfile_stable.txt --build=missing -of /Users/runner/conan -s build_type=${BUILD_TYPE} -s compiler.cppstd=${CXX_STANDARD} + - name: Run Tests (static libs) + env: + BUILD_SHARED_LIBS: 'OFF' + run: ./ci/do_ci.sh cmake.install.test + + macos_14_brew_packages: + name: macOS 14 brew latest versions cxx17 (static libs) + runs-on: macos-14 + env: + INSTALL_TEST_DIR: '/Users/runner/install_test' + # Set to the latest version of cmake 3.x + CMAKE_VERSION: '3.31.6' + CXX_STANDARD: '17' + BUILD_TYPE: 'Debug' + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Install Dependencies with Homebrew + run: | + ./ci/setup_cmake_macos.sh + brew install coreutils + brew install googletest + brew install google-benchmark + brew install zlib + brew install abseil + brew install protobuf + brew install grpc + brew install nlohmann-json + brew install prometheus-cpp + - name: Run Tests (static libs) + env: + BUILD_SHARED_LIBS: 'OFF' + run: ./ci/do_ci.sh cmake.install.test \ No newline at end of file diff --git a/ext/opentelemetry-cpp-1.21.0/.github/workflows/codeql-analysis.yml b/ext/opentelemetry-cpp-1.21.0/.github/workflows/codeql-analysis.yml new file mode 100644 index 000000000..9d2fe02d7 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.github/workflows/codeql-analysis.yml @@ -0,0 +1,48 @@ +name: "CodeQL" + +on: + push: + branches: [main] + pull_request: + # The branches below must be a subset of the branches above + branches: [main] + +permissions: + contents: read + +jobs: + CodeQL-Build: + permissions: + actions: read # for github/codeql-action/init to get workflow details + contents: read # for actions/checkout to fetch code + security-events: write # for github/codeql-action/autobuild to send a status report + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + - name: Remove Third_party Modules from Code Scan + run: | + rm -rf third_party + - name: Setup + env: + CC: /usr/bin/gcc-12 + CXX: /usr/bin/g++-12 + GOOGLETEST_VERSION: 1.12.1 + run: | + sudo -E ./ci/setup_googletest.sh + sudo -E ./ci/setup_ci_environment.sh + - name: Initialize CodeQL + uses: github/codeql-action/init@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18 + with: + languages: cpp + - name: Autobuild + uses: 
github/codeql-action/autobuild@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18 + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18 diff --git a/ext/opentelemetry-cpp-1.21.0/.github/workflows/cppcheck.yml b/ext/opentelemetry-cpp-1.21.0/.github/workflows/cppcheck.yml new file mode 100644 index 000000000..53057354b --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.github/workflows/cppcheck.yml @@ -0,0 +1,77 @@ + +name: cppcheck + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +permissions: + contents: read + +jobs: + cppcheck: + runs-on: ubuntu-24.04 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + + - name: Set up dependencies + run: | + sudo apt update -y + sudo apt install -y cppcheck + + - name: Run cppcheck + run: | + cppcheck --version | tee cppcheck.log + cppcheck \ + --force \ + --enable=warning,performance,portability \ + --inline-suppr \ + --suppress=unknownMacro:exporters/etw/include/opentelemetry/exporters/etw/TraceLoggingDynamic.h \ + --language=c++ \ + --std=c++14 \ + -I api/include \ + -I exporters/elasticsearch/include \ + -I exporters/etw/include \ + -I exporters/memory/include \ + -I exporters/ostream/include \ + -I exporters/otlp/include \ + -I exporters/prometheus/include \ + -I exporters/zipkin/include \ + -I ext/include \ + -I opentracing-shim/include \ + -I sdk/include \ + -i build \ + -i test \ + -i third_party \ + -j $(nproc) \ + . 2>&1 | tee --append cppcheck.log + + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + if: success() || failure() + with: + name: Logs (cppcheck) + path: ./cppcheck.log + + - name: Count warnings + run: | + set +e + readonly WARNING_COUNT=`grep -c -E "\[.+\]" cppcheck.log` + echo "cppcheck reported ${WARNING_COUNT} warning(s)" + # Acceptable limit, to decrease over time down to 0 + readonly WARNING_LIMIT=10 + # FAIL the build if WARNING_COUNT > WARNING_LIMIT + if [ $WARNING_COUNT -gt $WARNING_LIMIT ] ; then + exit 1 + # WARN in annotations if WARNING_COUNT > 0 + elif [ $WARNING_COUNT -gt 0 ] ; then + echo "::warning::cppcheck reported ${WARNING_COUNT} warning(s)" + fi diff --git a/ext/opentelemetry-cpp-1.21.0/.github/workflows/dependencies_image.yml b/ext/opentelemetry-cpp-1.21.0/.github/workflows/dependencies_image.yml new file mode 100644 index 000000000..3163dd8ca --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.github/workflows/dependencies_image.yml @@ -0,0 +1,54 @@ +name: 'OpenTelemetry-cpp dependencies image' +on: + schedule: + - cron: "0 3 * * 6" + +permissions: + contents: read + +jobs: + docker_image: + name: Docker Image + runs-on: ubuntu-latest + timeout-minutes: 300 + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - + name: checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - + name: Set up QEMU + uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0 + - + name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0 + - + name: Build Image + uses: 
docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0 + with: + builder: ${{ steps.buildx.outputs.name }} + context: ci/ + file: ./docker/Dockerfile + build-args: BASE_IMAGE=ubuntu:latest + platforms: linux/amd64 + # platforms: linux/amd64,linux/arm64 + push: false + tags: otel-cpp-deps + load: true + - + name: Save Image + run: | + docker images + docker save -o /opt/otel-cpp-deps-debian.tar otel-cpp-deps + - + name: Upload Image + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: otel-cpp-deps + path: /opt/otel-cpp-deps-debian.tar + retention-days: 14 diff --git a/ext/opentelemetry-cpp-1.21.0/.github/workflows/fossa.yml b/ext/opentelemetry-cpp-1.21.0/.github/workflows/fossa.yml new file mode 100644 index 000000000..8b90aa167 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.github/workflows/fossa.yml @@ -0,0 +1,25 @@ +name: FOSSA scanning + +on: + push: + branches: + - main + +permissions: + contents: read + +jobs: + fossa: + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - uses: fossas/fossa-action@3ebcea1862c6ffbd5cf1b4d0bd6b3fe7bd6f2cac # v1.7.0 + with: + api-key: ${{secrets.FOSSA_API_KEY}} + team: OpenTelemetry diff --git a/ext/opentelemetry-cpp-1.21.0/.github/workflows/iwyu.yml b/ext/opentelemetry-cpp-1.21.0/.github/workflows/iwyu.yml new file mode 100644 index 000000000..42881ec6b --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.github/workflows/iwyu.yml @@ -0,0 +1,92 @@ + +name: include-what-you-use + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +permissions: + contents: read + +jobs: + iwyu: + runs-on: ubuntu-latest + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + submodules: 'recursive' + + - name: setup dependencies + run: | + sudo apt update -y + sudo apt install -y --no-install-recommends --no-install-suggests \ + build-essential \ + iwyu \ + ninja-build \ + libssl-dev \ + libcurl4-openssl-dev \ + libprotobuf-dev \ + protobuf-compiler \ + libgmock-dev \ + libgtest-dev \ + libbenchmark-dev + sudo ./ci/setup_cmake.sh + + + - name: setup grpc + run: | + sudo ./ci/setup_grpc.sh + + - name: Prepare CMake + run: | + TOPDIR=`pwd` + mkdir build && cd build + CC="clang" CXX="clang++" cmake \ + -DCMAKE_CXX_STANDARD=14 \ + -DWITH_STL=CXX14 \ + -DCMAKE_CXX_INCLUDE_WHAT_YOU_USE="include-what-you-use;-w;-Xiwyu;--mapping_file=${TOPDIR}/.iwyu.imp;" \ + -DBUILD_TESTING=ON \ + -DBUILD_W3CTRACECONTEXT_TEST=ON \ + -DWITH_OTLP_GRPC=ON \ + -DWITH_OTLP_HTTP=ON \ + -DWITH_OTLP_FILE=ON \ + -DWITH_OPENTRACING=ON \ + -DWITH_OTLP_HTTP_COMPRESSION=ON \ + -DWITH_THREAD_INSTRUMENTATION=ON \ + -DWITH_ZIPKIN=ON \ + -DWITH_PROMETHEUS=ON \ + .. 
+ + - name: iwyu_tool + run: | + cd build + make -k 2>&1 | tee -a iwyu.log + + - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + if: success() || failure() + with: + name: Logs (include-what-you-use) + path: ./build/*.log + + - name: count warnings + run: | + set +e + cd build + readonly WARNING_COUNT=`grep -c "include-what-you-use reported diagnostics:" iwyu.log` + echo "include-what-you-use reported ${WARNING_COUNT} warning(s)" + # Acceptable limit, to decrease over time down to 0 + readonly WARNING_LIMIT=0 + # FAIL the build if WARNING_COUNT > WARNING_LIMIT + if [ $WARNING_COUNT -gt $WARNING_LIMIT ] ; then + exit 1 + # WARN in annotations if WARNING_COUNT > 0 + elif [ $WARNING_COUNT -gt 0 ] ; then + echo "::warning::include-what-you-use reported ${WARNING_COUNT} warning(s)" + fi diff --git a/ext/opentelemetry-cpp-1.21.0/.github/workflows/ossf-scorecard.yml b/ext/opentelemetry-cpp-1.21.0/.github/workflows/ossf-scorecard.yml new file mode 100644 index 000000000..4ecbe8bf8 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.github/workflows/ossf-scorecard.yml @@ -0,0 +1,52 @@ +name: OSSF Scorecard + +on: + push: + branches: + - main + schedule: + - cron: "56 23 * * 6" # once a week + workflow_dispatch: + +permissions: read-all + +jobs: + analysis: + runs-on: ubuntu-latest + permissions: + # Needed for Code scanning upload + security-events: write + # Needed for GitHub OIDC token if publish_results is true + id-token: write + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + with: + persist-credentials: false + + - uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1 + with: + results_file: results.sarif + results_format: sarif + publish_results: true + + # Upload the results as artifacts (optional). Commenting out will disable + # uploads of run results in SARIF format to the repository Actions tab. + # https://docs.github.com/en/actions/advanced-guides/storing-workflow-data-as-artifacts + - name: "Upload artifact" + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: SARIF file + path: results.sarif + retention-days: 5 + + # Upload the results to GitHub's code scanning dashboard (optional). 
+ # Commenting out will disable upload of results to your repo's Code Scanning dashboard + - name: "Upload to code-scanning" + uses: github/codeql-action/upload-sarif@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18 + with: + sarif_file: results.sarif diff --git a/ext/opentelemetry-cpp-1.21.0/.github/workflows/project_management_comment.yml b/ext/opentelemetry-cpp-1.21.0/.github/workflows/project_management_comment.yml new file mode 100644 index 000000000..6e64cf0aa --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.github/workflows/project_management_comment.yml @@ -0,0 +1,28 @@ + +name: Add comment +on: + issues: + types: + - labeled +permissions: + contents: read + +jobs: + add-comment: + if: github.event.label.name == 'help wanted' + runs-on: ubuntu-latest + permissions: + issues: write + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - name: Add comment + uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 + with: + issue-number: ${{ github.event.issue.number }} + body: | + This issue is available for anyone to work on. **Make sure to reference this issue in your pull request.** + :sparkles: Thank you for your contribution! :sparkles: diff --git a/ext/opentelemetry-cpp-1.21.0/.github/workflows/project_management_issue_open.yml b/ext/opentelemetry-cpp-1.21.0/.github/workflows/project_management_issue_open.yml new file mode 100644 index 000000000..14750d1a6 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.github/workflows/project_management_issue_open.yml @@ -0,0 +1,29 @@ +name: OpenTelemetry-cpp project +on: + issues: + types: + - reopened + - opened +permissions: + contents: read + +jobs: + label_issues: + runs-on: ubuntu-latest + permissions: + issues: write + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@0634a2670c59f64b4a01f0f96f84700a4088b9f0 # v2.12.0 + with: + egress-policy: audit + + - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + script: | + github.rest.issues.addLabels({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + labels: ["needs-triage"] + }) diff --git a/ext/opentelemetry-cpp-1.21.0/.github/workflows/stale.yml b/ext/opentelemetry-cpp-1.21.0/.github/workflows/stale.yml new file mode 100644 index 000000000..931403638 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.github/workflows/stale.yml @@ -0,0 +1,21 @@ +name: "Mark stale issues" +on: + schedule: + - cron: "30 1 * * *" + +permissions: + contents: read + +jobs: + stale: + permissions: + issues: write # for actions/stale to close stale issues + pull-requests: write # for actions/stale to close stale PRs + runs-on: ubuntu-latest + steps: + - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0 + with: + stale-issue-message: "This issue was marked as stale due to lack of activity." + days-before-issue-stale: 60 + days-before-close: -1 + exempt-issue-labels: "do-not-stale" diff --git a/ext/opentelemetry-cpp-1.21.0/.gitignore b/ext/opentelemetry-cpp-1.21.0/.gitignore new file mode 100644 index 000000000..580df5ac3 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.gitignore @@ -0,0 +1,84 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +# Ref. 
https://github.com/github/gitignore/blob/master/C%2B%2B.gitignore +# Prerequisites +*.d + +# Compiled Object files +*.slo +*.lo +*.o +*.obj + +# Precompiled Headers +*.gch +*.pch + +# Compiled Dynamic libraries +*.so +*.dylib +*.dll +*.pdb + +# Fortran module files +*.mod +*.smod + +# Compiled Static libraries +*.lai +*.la +*.a +*.lib + +# Executables +*.exe +*.out +*.app + +# Bazel files +MODULE.bazel.lock +/bazel-* + +# Mac +.DS_Store + +# Output directories +/out +/out.* +# Indicator that the tools were deployed +.buildtools + +# Temporary Bazel directories +/bazel-* +/plugin +/build + +tags +.cache/clangd/* + +# Temporary dir used when generating semconv +buildscripts/semantic-convention/tmp-semconv/ + +# Generated cert keys in functional tests +functional/cert/ca.csr +functional/cert/ca.pem +functional/cert/ca-key.pem +functional/cert/client_cert.csr +functional/cert/client_cert.pem +functional/cert/client_cert-key.pem +functional/cert/server_cert.csr +functional/cert/server_cert.pem +functional/cert/server_cert-key.pem +functional/cert/ca_b.csr +functional/cert/ca_b.pem +functional/cert/ca_b-key.pem +functional/cert/client_cert_b.csr +functional/cert/client_cert_b.pem +functional/cert/client_cert_b-key.pem +functional/cert/server_cert_b.csr +functional/cert/server_cert_b.pem +functional/cert/server_cert_b-key.pem +functional/cert/unreadable.pem + +localinstall \ No newline at end of file diff --git a/ext/opentelemetry-cpp-1.21.0/.gitmodules b/ext/opentelemetry-cpp-1.21.0/.gitmodules new file mode 100644 index 000000000..88a78e5f9 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.gitmodules @@ -0,0 +1,39 @@ +[submodule "third_party/prometheus-cpp"] +path = third_party/prometheus-cpp +url = https://github.com/jupp0r/prometheus-cpp +branch = master + +[submodule "tools/vcpkg"] +path = tools/vcpkg +url = https://github.com/Microsoft/vcpkg +branch = master + +[submodule "third_party/ms-gsl"] +path = third_party/ms-gsl +url = https://github.com/microsoft/GSL +branch = main + +[submodule "third_party/googletest"] +path = third_party/googletest +url = https://github.com/google/googletest +branch = main + +[submodule "third_party/benchmark"] +path = third_party/benchmark +url = https://github.com/google/benchmark +branch = main + +[submodule "third_party/opentelemetry-proto"] +path = third_party/opentelemetry-proto +url = https://github.com/open-telemetry/opentelemetry-proto +branch = main + +[submodule "third_party/nlohmann-json"] +path = third_party/nlohmann-json +url = https://github.com/nlohmann/json +branch = master + +[submodule "third_party/opentracing-cpp"] +path = third_party/opentracing-cpp +url = https://github.com/opentracing/opentracing-cpp.git +branch = master diff --git a/ext/opentelemetry-cpp-1.21.0/.iwyu.imp b/ext/opentelemetry-cpp-1.21.0/.iwyu.imp new file mode 100644 index 000000000..123e72f32 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.iwyu.imp @@ -0,0 +1,31 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +# include-what-you-use mapping file + +[ + # Work around for C++ STL + { "include": ["", "private", "", "public"] }, + { "include": ["", "private", "", "public"] }, + { "include": ["", "private", "", "public"] }, + { "include": ["", "private", "", "public"] }, + { "include": ["", "private", "", "public"] }, + + # Local opentelemetry-cpp style + + # We prefer to include for simplicity + { "include": ["", "private", "", "public"] }, + { "include": ["", "private", "", "public"] }, + { "include": ["", "private", "", "public"] }, + { 
"include": ["", "private", "", "public"] }, + { "include": ["", "private", "", "public"] }, + { "include": ["", "private", "", "public"] }, + + # We prefer to include for simplicity + { "include": ["", "private", "", "public"] }, + { "include": ["", "private", "", "public"] }, + + # We prefer to include for simplicity + { "include": ["", "private", "", "public"] }, +] + diff --git a/ext/opentelemetry-cpp-1.21.0/.markdownlint.json b/ext/opentelemetry-cpp-1.21.0/.markdownlint.json new file mode 100644 index 000000000..48c2ae2a3 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.markdownlint.json @@ -0,0 +1,8 @@ +{ + "default": true, + "MD013": + { + "code_blocks": false, + "tables": false + } +} diff --git a/ext/opentelemetry-cpp-1.21.0/.markdownlintignore b/ext/opentelemetry-cpp-1.21.0/.markdownlintignore new file mode 100644 index 000000000..f0cec48db --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/.markdownlintignore @@ -0,0 +1,3 @@ +third_party/** +tools/** +examples/otlp/README.md diff --git a/ext/opentelemetry-cpp-1.21.0/CHANGELOG.md b/ext/opentelemetry-cpp-1.21.0/CHANGELOG.md new file mode 100644 index 000000000..30c94679e --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/CHANGELOG.md @@ -0,0 +1,2816 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## Guideline to update the version + +Increment the: + +* MAJOR version when you make incompatible API/ABI changes, +* MINOR version when you add functionality in a backwards compatible manner, and +* PATCH version when you make backwards compatible bug fixes. + +## [Unreleased] + +## [1.21 2025-05-28] + +* [BUILD] Remove WITH_ABSEIL + [#3318](https://github.com/open-telemetry/opentelemetry-cpp/pull/3318) + +* [INSTALL] Add CMake components to the opentelemetry-cpp package + [#3320](https://github.com/open-telemetry/opentelemetry-cpp/pull/3220) + +* [CI] Harden GitHub Actions + [#3338](https://github.com/open-telemetry/opentelemetry-cpp/pull/3338) + +* [StepSecurity] Harden GibHub Actions, part 2 + [#3340](https://github.com/open-telemetry/opentelemetry-cpp/pull/3340) + +* Bump github/codeql-action from 3.28.12 to 3.28.13 + [#3341](https://github.com/open-telemetry/opentelemetry-cpp/pull/3341) + +* [DEVCONTAINER] expose cmake version setting as docker arg and environment variable + [#3347](https://github.com/open-telemetry/opentelemetry-cpp/pull/3347) + +* [CI] disable bzip2 in conan builds + [#3352](https://github.com/open-telemetry/opentelemetry-cpp/pull/3352) + +* [SEMANTIC CONVENTIONS] Upgrade semantic conventions to 1.32.0 + [#3351](https://github.com/open-telemetry/opentelemetry-cpp/pull/3351) + +* Bump github/codeql-action from 3.28.13 to 3.28.15 + [#3353](https://github.com/open-telemetry/opentelemetry-cpp/pull/3353) + +* [CMAKE] bump cmake minimum required version to 3.14 + [#3349](https://github.com/open-telemetry/opentelemetry-cpp/pull/3349) + +* Bump codecov/codecov-action from 5.4.0 to 5.4.2 + [#3362](https://github.com/open-telemetry/opentelemetry-cpp/pull/3362) + +* [DOC] Fix documentation tags in logger API + [#3371](https://github.com/open-telemetry/opentelemetry-cpp/pull/3371) + +* [CI] fix artifacts download/upload + [#3369](https://github.com/open-telemetry/opentelemetry-cpp/pull/3369) + +* [API] Add Enabled method to Tracer + [#3357](https://github.com/open-telemetry/opentelemetry-cpp/pull/3357) + +* [BUILD] 
Fixes warnings of ciso646 in C++17 + [#3360](https://github.com/open-telemetry/opentelemetry-cpp/pull/3360) + +* Bump github/codeql-action from 3.28.15 to 3.28.16 + [#3377](https://github.com/open-telemetry/opentelemetry-cpp/pull/3377) + +* Bump step-security/harden-runner from 2.11.1 to 2.12.0 + [#3373](https://github.com/open-telemetry/opentelemetry-cpp/pull/3373) + +* Bump docker/build-push-action from 6.15.0 to 6.16.0 + [#3382](https://github.com/open-telemetry/opentelemetry-cpp/pull/3382) + +* Bump actions/download-artifact from 4.2.1 to 4.3.0 + [#3381](https://github.com/open-telemetry/opentelemetry-cpp/pull/3381) + +* [CI] Harden Github actions - pinned-dependencies (part -1) + [#3380](https://github.com/open-telemetry/opentelemetry-cpp/pull/3380) + +* [StepSecurity] ci: Harden GitHub Actions + [#3378](https://github.com/open-telemetry/opentelemetry-cpp/pull/3378) + +* [SDK] Base2 exponential histogram aggregation + [#3346](https://github.com/open-telemetry/opentelemetry-cpp/pull/3346) + +* [StepSecurity] ci: Harden GitHub Actions + [#3379](https://github.com/open-telemetry/opentelemetry-cpp/pull/3379) + +* [BUILD] Fixes glibc++ 5 checking + [#3355](https://github.com/open-telemetry/opentelemetry-cpp/pull/3355) + +* [TEST] Add stress test for histogram metric for multiple threads validation + [#3388](https://github.com/open-telemetry/opentelemetry-cpp/pull/3388) + +* Bump github/codeql-action from 3.28.16 to 3.28.17 + [#3389](https://github.com/open-telemetry/opentelemetry-cpp/pull/3389) + +* [SDK] Optimize PeriodicExportingMetricReader Thread Usage + [#3383](https://github.com/open-telemetry/opentelemetry-cpp/pull/3383) + +* [Metrics SDK] Use nostd::function_ref in AttributesHashMap + [#3393](https://github.com/open-telemetry/opentelemetry-cpp/pull/3393) + +* [SDK] support aggregation of identical instruments + [#3358](https://github.com/open-telemetry/opentelemetry-cpp/pull/3358) + +* [BUILD] Fixes unused var + [#3397](https://github.com/open-telemetry/opentelemetry-cpp/pull/3397) + +* [INSTALL] Unify cmake install functions and dynamically set component dependencies + [#3368](https://github.com/open-telemetry/opentelemetry-cpp/pull/3368) + +* [BUILD] Upgrade nlohmann_json to 3.12.0 + [#3406](https://github.com/open-telemetry/opentelemetry-cpp/pull/3406) + +* [BUILD] Upgrade opentelemetry-proto to 1.6.0 + [#3407](https://github.com/open-telemetry/opentelemetry-cpp/pull/3407) + +* [CMAKE] add generated protobuf headers to the opentelemetry_proto target + [#3400](https://github.com/open-telemetry/opentelemetry-cpp/pull/3400) + +* [MERGE] Fix accidental rollback of nlohmann-json submodule + [#3415](https://github.com/open-telemetry/opentelemetry-cpp/pull/3415) + +* Bump fossas/fossa-action from 1.6.0 to 1.7.0 + [#3414](https://github.com/open-telemetry/opentelemetry-cpp/pull/3414) + +* Bump docker/build-push-action from 6.16.0 to 6.17.0 + [#3420](https://github.com/open-telemetry/opentelemetry-cpp/pull/3420) + +* Bump codecov/codecov-action from 5.4.2 to 5.4.3 + [#3419](https://github.com/open-telemetry/opentelemetry-cpp/pull/3419) + +* [SEMANTIC CONVENTIONS] Upgrade semantic conventions to 1.33 + [#3416](https://github.com/open-telemetry/opentelemetry-cpp/pull/3416) + +* [DOCS] update the INSTALL guide on cmake components + [#3422](https://github.com/open-telemetry/opentelemetry-cpp/pull/3422) + +* Bump github/codeql-action from 3.28.17 to 3.28.18 + [#3423](https://github.com/open-telemetry/opentelemetry-cpp/pull/3423) + +* [CMAKE] update cmake files in examples directory + 
[#3421](https://github.com/open-telemetry/opentelemetry-cpp/pull/3421) + +* [SDK] Fix Base2ExponentialHistogramAggregation Merge with empty buckets + [#3425](https://github.com/open-telemetry/opentelemetry-cpp/pull/3425) + +* [SDK] Fix MetricProducer interface + [#3413](https://github.com/open-telemetry/opentelemetry-cpp/pull/3413) + +* [CMAKE] remove global include_directories usage and rely on target properties + [#3426](https://github.com/open-telemetry/opentelemetry-cpp/pull/3426) + +* [BUILD] remove unused WITH_CURL build flag + [#3429](https://github.com/open-telemetry/opentelemetry-cpp/pull/3429) + +* [SEMANTIC CONVENTIONS] Upgrade to semantic conventions 1.34.0 + [#3428](https://github.com/open-telemetry/opentelemetry-cpp/pull/3428) + +* [EXPORTER] ostream log exporter, fix memory ownership issues + [#3417](https://github.com/open-telemetry/opentelemetry-cpp/pull/3417) + +* [TEST] add all components to the cmake fetch content test + [#3433](https://github.com/open-telemetry/opentelemetry-cpp/pull/3433) + +* [BUILD] Error out when building DLL without MSVC + [#3438](https://github.com/open-telemetry/opentelemetry-cpp/pull/3438) + +* [BUILD] Add missing CMake keyword for target_link_libraries + [#3442](https://github.com/open-telemetry/opentelemetry-cpp/pull/3442) + +* [CMAKE] Remove third-party version mismatch warning + [#3432](https://github.com/open-telemetry/opentelemetry-cpp/pull/3432) + +* Bump docker/build-push-action from 6.17.0 to 6.18.0 + [#3446](https://github.com/open-telemetry/opentelemetry-cpp/pull/3446) + +* [SEMANTIC CONVENTIONS] Fix comment style to preserve markup. + [#3444](https://github.com/open-telemetry/opentelemetry-cpp/pull/3444) + +* [EXPORTER] support unix sockets in grpc client + [#3410](https://github.com/open-telemetry/opentelemetry-cpp/pull/3410) + +* [BUILD] Propagate INTERFACE_COMPILE_DEFINITIONS from API through common_foo_library + [#3440](https://github.com/open-telemetry/opentelemetry-cpp/pull/3440) + +New Features: + +* [SDK] Base2 exponential histogram aggregation + [#3346](https://github.com/open-telemetry/opentelemetry-cpp/pull/3346) + + * Add base2 exponential histogram aggregation. Includes a new aggregation type, + ostream exporter, and otlp/grpc exporter. Updated histogram aggregation and + benchmark tests. + +Important changes: + +* [EXPORTER] ostream log exporter, fixed memory ownership issues + [#3417](https://github.com/open-telemetry/opentelemetry-cpp/pull/3417) + + * In the SDK, the following classes implementation has changed: + + * opentelemetry::sdk::logs::ReadableLogRecord + * opentelemetry::sdk::logs::ReadWriteLogRecord + + * An application implementing a custom log record exporter, + that reuses these classes from the opentelemetry-cpp SDK, + will need code adjustments, in particular for methods: + + * GetBody() + * GetAttributes() + + * Applications not using these SDK classes directly are not affected. + +* [BUILD] Remove WITH_ABSEIL + [#3318](https://github.com/open-telemetry/opentelemetry-cpp/pull/3318) + + * The build option `WITH_ABSEIL` is no longer used, and opentelemetry-cpp + will no longer use any release of abseil provided externally, + for its own use. + + * Instead, opentelemetry-cpp will only use an internal abseil version. + + * This change resolves long standing binary integrity issues, + that occurred in the past when mixing several versions of abseil + in the build. 
+ +## [1.20 2025-04-01] + +* [BUILD] Update opentelemetry-proto version + [#3254](https://github.com/open-telemetry/opentelemetry-cpp/pull/3254) + +* [BUILD] Build break with CURL 7.29.0 + [#3255](https://github.com/open-telemetry/opentelemetry-cpp/pull/3255) + +* [SEMANTIC CONVENTIONS] Upgrade to semantic conventions 1.30.0 + [#3258](https://github.com/open-telemetry/opentelemetry-cpp/pull/3258) + +* [SDK] Add tracer scope configurator + [#3137](https://github.com/open-telemetry/opentelemetry-cpp/pull/3137) + +* [DOC] Add document and example for sharing gRPC Client + [#3260](https://github.com/open-telemetry/opentelemetry-cpp/pull/3260) + +* [SDK] Fix BatchLogRecordProcessor to instrument shutdown + [#3262](https://github.com/open-telemetry/opentelemetry-cpp/pull/3262) + +* [SDK] Support OTEL_SDK_DISABLED environment variable + [#3245](https://github.com/open-telemetry/opentelemetry-cpp/pull/3245) + +* [CI] OTLP in Windows builds + [#3263](https://github.com/open-telemetry/opentelemetry-cpp/pull/3263) + +* [BUILD] Fixes compatibility of type_traits + [#3274](https://github.com/open-telemetry/opentelemetry-cpp/pull/3274) + +* [BUILD] Fix compilation with Regex being disabled + [#3276](https://github.com/open-telemetry/opentelemetry-cpp/pull/3276) + +* [EXPORTER] Support exporting event_name using OTLP Exporter + [#3277](https://github.com/open-telemetry/opentelemetry-cpp/pull/3277) + +* [CI] Add FOSSA scanning workflow + [#3279](https://github.com/open-telemetry/opentelemetry-cpp/pull/3279) + +* [BUILD] Adding typecast without whom c++latest build fails + [#3281](https://github.com/open-telemetry/opentelemetry-cpp/pull/3281) + +* [ADMIN] Add FOSSA badges + [#3280](https://github.com/open-telemetry/opentelemetry-cpp/pull/3280) + +* [BUILD] Fix compiling problems with abiv2 and MSVC + [#3284](https://github.com/open-telemetry/opentelemetry-cpp/pull/3284) + +* [BUILD] Enable old behavior of CMP0092 + [#3269](https://github.com/open-telemetry/opentelemetry-cpp/pull/3269) + +* [SDK] Add meter scope configurator + [#3268](https://github.com/open-telemetry/opentelemetry-cpp/pull/3268) + +* [DEVCONTAINER] Support customization and run as non-root user + [#3270](https://github.com/open-telemetry/opentelemetry-cpp/pull/3270) + +* [ETW] Add configuration to export 64-bit integer as timestamp + [#3286](https://github.com/open-telemetry/opentelemetry-cpp/pull/3286) + +* [API] Deprecate event logger + [#3285](https://github.com/open-telemetry/opentelemetry-cpp/pull/3285) + +* [BUILD] Add link directory to support curl 8.12 + [#3272](https://github.com/open-telemetry/opentelemetry-cpp/pull/3272) + +* [API] Change the param-pack unpacking order to start from left to right + [#3296](https://github.com/open-telemetry/opentelemetry-cpp/pull/3296) + +* [SDK] Implement spec: MetricFilter + [#3235](https://github.com/open-telemetry/opentelemetry-cpp/pull/3235) + +* [SEMANTIC CONVENTIONS] Upgrade semantic conventions to 1.31.0 + [#3297](https://github.com/open-telemetry/opentelemetry-cpp/pull/3297) + +* [SDK] Add logger scope configurator + [#3282](https://github.com/open-telemetry/opentelemetry-cpp/pull/3282) + +* [EXAMPLE] fix buffer overrun in the gRPC sample project + [#3304](https://github.com/open-telemetry/opentelemetry-cpp/pull/3304) + +* [CI] Bump fossas/fossa-action from 1.5.0 to 1.6.0 + [#3305](https://github.com/open-telemetry/opentelemetry-cpp/pull/3305) + +* [TEST] fix segfault in singleton test with cmake on macos-latest + [#3316](https://github.com/open-telemetry/opentelemetry-cpp/pull/3316) + 
+* [TEST] fix test failure with elasticsearch exporter on cxx20 + [#3308](https://github.com/open-telemetry/opentelemetry-cpp/pull/3308) + +* [TEST] otlp grpc exporter retry test fix + [#3311](https://github.com/open-telemetry/opentelemetry-cpp/pull/3311) + +* [SDK] Use OPENTELEMETRY_EXPORT and static local variables + [#3314](https://github.com/open-telemetry/opentelemetry-cpp/pull/3314) + +* [BUILD] Fix elasticsearch exporter json compatibility + [#3313](https://github.com/open-telemetry/opentelemetry-cpp/pull/3313) + +* [BUILD] Fix missing exported definition for OTLP file exporter and forceflush + [#3319](https://github.com/open-telemetry/opentelemetry-cpp/pull/3319) + +* [BUILD] Remove gRPC header including in OtlpGrpcClientFactory + [#3321](https://github.com/open-telemetry/opentelemetry-cpp/pull/3321) + +* [ADMIN] Add Pranav Sharma in cpp-approvers + [#3323](https://github.com/open-telemetry/opentelemetry-cpp/pull/3323) + +* [DEVCONTAINER] fix grpc install + [#3325](https://github.com/open-telemetry/opentelemetry-cpp/pull/3325) + +* [ADMIN] Add dbarker to approvers + [#3331](https://github.com/open-telemetry/opentelemetry-cpp/pull/3331) + +* [CI] Upgrade CI to ubuntu 22.04 + [#3330](https://github.com/open-telemetry/opentelemetry-cpp/pull/3330) + +* [CI] Add ossf-scorecard scanning workflow + [#3332](https://github.com/open-telemetry/opentelemetry-cpp/pull/3332) + +* [CI] pin cmake in ci and devcontainer + [#3336](https://github.com/open-telemetry/opentelemetry-cpp/pull/3336) + +* [METRICS SDK] Fix hash collision in MetricAttributes + [#3322](https://github.com/open-telemetry/opentelemetry-cpp/pull/3322) + +Important changes: + +* [SDK] Support OTEL_SDK_DISABLED environment variable + [#3245](https://github.com/open-telemetry/opentelemetry-cpp/pull/3245) + + * The SDK now exposes the following new methods: + + * opentelemetry::sdk::trace::Provider::SetTracerProvider() + * opentelemetry::sdk::metrics::Provider::SetMeterProvider() + * opentelemetry::sdk::logs::Provider::SetLoggerProvider() + + * These methods do support the `OTEL_SDK_DISABLED` environment variable, + unlike the corresponding existing API Provider classes. + + * Applications are encouraged to migrate from the API to the SDK + `Provider` classes, to benefit from this feature. + + * All the example code has been updated to reflect the new usage. 
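
For illustration only (not part of the vendored changelog): a minimal sketch of configuring tracing through the new SDK-level `Provider` described above, so that `OTEL_SDK_DISABLED` is honored. The header path for `sdk::trace::Provider` and the exact setter signature are assumptions based on the entry above and may differ between releases.

```cpp
// Minimal sketch: install a TracerProvider via the SDK-level Provider (#3245),
// which the changelog above documents as honoring OTEL_SDK_DISABLED.
// Header locations and the setter signature are assumptions.
#include <memory>
#include <utility>

#include "opentelemetry/exporters/ostream/span_exporter_factory.h"
#include "opentelemetry/sdk/trace/provider.h"  // assumed header for sdk::trace::Provider
#include "opentelemetry/sdk/trace/simple_processor_factory.h"
#include "opentelemetry/sdk/trace/tracer_provider_factory.h"

void InitTracing()
{
  auto exporter  = opentelemetry::exporter::trace::OStreamSpanExporterFactory::Create();
  auto processor = opentelemetry::sdk::trace::SimpleSpanProcessorFactory::Create(std::move(exporter));

  std::shared_ptr<opentelemetry::trace::TracerProvider> provider =
      opentelemetry::sdk::trace::TracerProviderFactory::Create(std::move(processor));

  // Unlike the API-level opentelemetry::trace::Provider, the SDK-level setter
  // checks OTEL_SDK_DISABLED before installing the provider (per the entry above).
  opentelemetry::sdk::trace::Provider::SetTracerProvider(provider);
}
```

The same pattern would apply to metrics and logs through `sdk::metrics::Provider::SetMeterProvider()` and `sdk::logs::Provider::SetLoggerProvider()`, as listed in the entry above.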
+ +## [1.19 2025-01-22] + +* [PROMETHEUS_EXPORTER] Fix default for emitting otel_scope attributes + [#3171](https://github.com/open-telemetry/opentelemetry-cpp/pull/3171) + +* [Code health] Include what you use cleanup, part 5 + [#3140](https://github.com/open-telemetry/opentelemetry-cpp/pull/3140) + +* [BUILD] Upgrade cmake + [#3167](https://github.com/open-telemetry/opentelemetry-cpp/pull/3167) + +* [SHIM] Fix string_view mappings between OT and OTel + [#3181](https://github.com/open-telemetry/opentelemetry-cpp/pull/3181) + +* [EXPORTER] Refactor ElasticSearchRecordable + [#3164](https://github.com/open-telemetry/opentelemetry-cpp/pull/3164) + +* [SEMANTIC CONVENTIONS] Upgrade to semantic conventions 1.29.0 + [#3182](https://github.com/open-telemetry/opentelemetry-cpp/pull/3182) + +* [BUILD] Fix cross-compilation with protoc + [#3186](https://github.com/open-telemetry/opentelemetry-cpp/pull/3186) + +* [Code health] Perform cppcheck cleanup + [#3150](https://github.com/open-telemetry/opentelemetry-cpp/pull/3150) + +* [EXPORTER] add instrumentation scope attributes + to otlp proto messages for traces and metrics + [#3185](https://github.com/open-telemetry/opentelemetry-cpp/pull/3185) + +* [SDK] Tracer provider shutdown blocks in-definitively + [#3191](https://github.com/open-telemetry/opentelemetry-cpp/pull/3191) + +* [SEMANTIC CONVENTIONS] Upgrade to weaver 0.11.0 + [#3194](https://github.com/open-telemetry/opentelemetry-cpp/pull/3194) + +* [DOC] Update existing maintaining dependencies doc + [#3195](https://github.com/open-telemetry/opentelemetry-cpp/pull/3195) + +* [TEST] Change is_called_ and got_response_ to use atomic + [#3204](https://github.com/open-telemetry/opentelemetry-cpp/pull/3204) + +* [SEMANTIC CONVENTIONS] update links to openmetrics to reference the v1.0.0 release + [#3205](https://github.com/open-telemetry/opentelemetry-cpp/pull/3205) + +* [CI] Fix CI on ubuntu-latest + [#3207](https://github.com/open-telemetry/opentelemetry-cpp/pull/3207) + +* [BUILD] Build break using protoc 3.14 + [#3211](https://github.com/open-telemetry/opentelemetry-cpp/pull/3211) + +* [TEST] Build the singleton test on windows + [#3183](https://github.com/open-telemetry/opentelemetry-cpp/pull/3183) + +* [BUILD] Add cxx feature detections + [#3203](https://github.com/open-telemetry/opentelemetry-cpp/pull/3203) + +* [SDK] Do not frequently create and destroy http client threads + [#3198](https://github.com/open-telemetry/opentelemetry-cpp/pull/3198) + +* [EXPORTER] Optimize OTLP HTTP compression + [#3178](https://github.com/open-telemetry/opentelemetry-cpp/pull/3178) + +* [SDK] Fix include instrumentation scope attributes in equal method + [#3214](https://github.com/open-telemetry/opentelemetry-cpp/pull/3214) + +* Upgrade to opentelemetry-proto 1.5.0 + [#3210](https://github.com/open-telemetry/opentelemetry-cpp/pull/3210) + +* [TEST] Added support for SELINUX in functional tests + [#3212](https://github.com/open-telemetry/opentelemetry-cpp/pull/3212) + +* [EDITORIAL] fix changelog entry for PR 3185 + [#3217](https://github.com/open-telemetry/opentelemetry-cpp/pull/3217) + +* [TEST] Functional tests for OTLP/gRPC with mutual TLS + [#3227](https://github.com/open-telemetry/opentelemetry-cpp/pull/3227) + +* [SEMCONV] Metrics are incorrectly prefixed with 'metric' + [#3228](https://github.com/open-telemetry/opentelemetry-cpp/pull/3228) + +* [BUILD] Add OTLP/file exporter for dll and examples + [#3231](https://github.com/open-telemetry/opentelemetry-cpp/pull/3231) + +* [Code Health] Include what you use, 
part 6 + [#3216](https://github.com/open-telemetry/opentelemetry-cpp/pull/3216) + +* [CI] Spurious test failures + [#3233](https://github.com/open-telemetry/opentelemetry-cpp/pull/3233) + +* [BUILD] Fix error ‘uint8_t’ does not name a type with gcc-15 + [#3240](https://github.com/open-telemetry/opentelemetry-cpp/pull/3240) + +* [EXPORTER] fix throw in OtlpGrpcMetricExporter with shared grpc client + [#3243](https://github.com/open-telemetry/opentelemetry-cpp/pull/3243) + +* [SDK] Better control of threads executed by opentelemetry-cpp + [#3175](https://github.com/open-telemetry/opentelemetry-cpp/pull/3175) + +* [Code Health] Include what you use, part 7 + [#3238](https://github.com/open-telemetry/opentelemetry-cpp/pull/3238) + +* [SDK] Fix lifetime of GlobalLogHandler + [#3221](https://github.com/open-telemetry/opentelemetry-cpp/pull/3221) + +* [MAINTAINER] Add devcontainer + [#3123](https://github.com/open-telemetry/opentelemetry-cpp/pull/3123) + +* [SDK] enable deriving from ResourceDetector to create a Resource + [#3247](https://github.com/open-telemetry/opentelemetry-cpp/pull/3247) + +* [EXPORTER] Support handling retry-able errors for OTLP/HTTP + [#3223](https://github.com/open-telemetry/opentelemetry-cpp/pull/3223) + +* [CI] Add GRPC in maintainer CI + [#3248](https://github.com/open-telemetry/opentelemetry-cpp/pull/3248) + +* [EXPORTER] Support handling retry-able errors for OTLP/gRPC + [#3219](https://github.com/open-telemetry/opentelemetry-cpp/pull/3219) + +* [SDK] Optimize Metric Processing for Single Collector with Delta Temporality + [#3236](https://github.com/open-telemetry/opentelemetry-cpp/pull/3236) + +New features: + +* [SDK] Better control of threads executed by opentelemetry-cpp + [#3175](https://github.com/open-telemetry/opentelemetry-cpp/pull/3175) + + * This feature provides a way for applications, + when configuring the SDK and exporters, + to participate in the execution path + of internal opentelemetry-cpp threads. + + * The opentelemetry-cpp library provides the following: + + * a new ThreadInstrumentation interface, + * new runtime options structures, to optionally configure the SDK: + * BatchSpanProcessorRuntimeOptions + * PeriodicExportingMetricReaderRuntimeOptions + * BatchLogRecordProcessorRuntimeOptions + * new runtime options structures, + to optionally configure the OTLP HTTP exporters: + * OtlpHttpExporterRuntimeOptions + * OtlpHttpMetricExporterRuntimeOptions + * OtlpHttpLogRecordExporterRuntimeOptions + * new ThreadInstrumentation parameters, + to optionally configure the CURL HttpClient + * new runtime options structures, + to optionally configure the OTLP FILE exporters: + * OtlpFileExporterRuntimeOptions + * OtlpFileMetricExporterRuntimeOptions + * OtlpFileLogRecordExporterRuntimeOptions + * new runtime options structure, + to optionally configure the OTLP FILE client: + * OtlpFileClientRuntimeOptions + + * Using the optional runtime options structures, + an application can subclass the ThreadInstrumentation interface, + and be notified of specific events of interest during the execution + of an internal opentelemetry-cpp thread. + + * This allows an application to call, for example: + + * pthread_setaffinity_np(), for better performances, + * setns(), to control the network namespace used by HTTP CURL connections + * pthread_setname_np(), for better observability from the operating system + * many more specific apis, as needed + + * See the documentation for ThreadInstrumentation for details. 
+ + * A new example program, example_otlp_instrumented_http, + shows how to use the feature, + and add application logic in the thread execution code path. + + * Note that this feature is experimental, + protected by a WITH_THREAD_INSTRUMENTATION_PREVIEW + flag in CMake. Various runtime options structures, + as well as the thread instrumentation interface, + may change without notice before this feature is declared stable. + +* [EXPORTER] Support handling retry-able errors for OTLP/HTTP + [#3223](https://github.com/open-telemetry/opentelemetry-cpp/pull/3223) + + * This feature is experimental, + protected by a WITH_OTLP_RETRY_PREVIEW + flag in CMake. + +* [EXPORTER] Support handling retry-able errors for OTLP/gRPC + [#3219](https://github.com/open-telemetry/opentelemetry-cpp/pull/3219) + + * This feature is experimental, + protected by a WITH_OTLP_RETRY_PREVIEW + flag in CMake. + +## [1.18 2024-11-25] + +* [EXPORTER] Fix crash in ElasticsearchLogRecordExporter + [#3082](https://github.com/open-telemetry/opentelemetry-cpp/pull/3082) + +* [BUILD] Avoid buggy warning with gcc <= 8 + [#3087](https://github.com/open-telemetry/opentelemetry-cpp/pull/3087) + +* [API] Jaeger Propagator should not be deprecated + [#3086](https://github.com/open-telemetry/opentelemetry-cpp/pull/3086) + +* Update bzlmod version + [#3093](https://github.com/open-telemetry/opentelemetry-cpp/pull/3093) + +* [BUILD] Remove std::make_unique + [#3098](https://github.com/open-telemetry/opentelemetry-cpp/pull/3098) + +* [BUILD] Fix compiling problems for gcc 4.8 + [#3100](https://github.com/open-telemetry/opentelemetry-cpp/pull/3100) + +* [TEST] Fix linking order and gmock linking + [#3106](https://github.com/open-telemetry/opentelemetry-cpp/pull/3106) + +* [EXPORTER] Add config options to prometheus exporter + [#3104](https://github.com/open-telemetry/opentelemetry-cpp/pull/3104) + +* [BUILD] Add a CMake option to disable shared libs + [#3095](https://github.com/open-telemetry/opentelemetry-cpp/pull/3095) + +* [EXPORTER] Remove out of date ETW exporter doc + [#3103](https://github.com/open-telemetry/opentelemetry-cpp/pull/3103) + +* [EXPORTER] Add logging for async gRPC errors + [#3108](https://github.com/open-telemetry/opentelemetry-cpp/pull/3108) + +* [BUILD] Remove aligned_storage from nostd + [#3112](https://github.com/open-telemetry/opentelemetry-cpp/pull/3112) + +* [EXPORTER] Elastic Search exporter follow ECS guidelines + [#3107](https://github.com/open-telemetry/opentelemetry-cpp/pull/3107) + +* [INSTALL] Resolve dependencies in opentelemetry-cpp-config.cmake + [#3094](https://github.com/open-telemetry/opentelemetry-cpp/pull/3094) + +* [API] Add synchronous gauge + [#3029](https://github.com/open-telemetry/opentelemetry-cpp/pull/3029) + +* [BUILD] allow building with -DWITH_OTLP_HTTP_COMPRESSION=OFF without zlib + [#3120](https://github.com/open-telemetry/opentelemetry-cpp/pull/3120) + +* [CI] Comment the arm64 CI + [#3125](https://github.com/open-telemetry/opentelemetry-cpp/pull/3125) + +* [API] Comply with W3C Trace Context + [#3115](https://github.com/open-telemetry/opentelemetry-cpp/pull/3115) + +* [EXPORTER] bump prometheus to v1.3.0 + [#3122](https://github.com/open-telemetry/opentelemetry-cpp/pull/3122) + +* [EXPORTER] Log SSL Connection Information + [#3113](https://github.com/open-telemetry/opentelemetry-cpp/pull/3113) + +* [BUILD] Improve how to handle yield() in ARM + [#3129](https://github.com/open-telemetry/opentelemetry-cpp/pull/3129) + +* [BUILD] Fix -Wmissing-template-arg-list-after-template-kw 
warning + [#3133](https://github.com/open-telemetry/opentelemetry-cpp/pull/3133) + +* [EXPORTER]: Elasticsearch exporter put log resource in root instead of under 'resources' + [#3131](https://github.com/open-telemetry/opentelemetry-cpp/pull/3131) + +* [TEST] Rename w3c_tracecontext_test to w3c_tracecontext_http_test_server + [#3132](https://github.com/open-telemetry/opentelemetry-cpp/pull/3132) + +* [BUILD] Patches for building on AIX + [#3127](https://github.com/open-telemetry/opentelemetry-cpp/pull/3127) + +* [SEMANTIC CONVENTIONS] Migration to weaver + [#3105](https://github.com/open-telemetry/opentelemetry-cpp/pull/3105) + +* [SEMANTIC CONVENTIONS] Upgrade to semantic conventions 1.28.0 + [#3139](https://github.com/open-telemetry/opentelemetry-cpp/pull/3139) + +* [EXPORTER] handling of invalid ports in UrlParser + [#3142](https://github.com/open-telemetry/opentelemetry-cpp/pull/3142) + +* [CI] speed up clang-tidy workflow + [#3148](https://github.com/open-telemetry/opentelemetry-cpp/pull/3148) + +* [EXPORTER] Allow to share gRPC clients between OTLP exporters + [#3041](https://github.com/open-telemetry/opentelemetry-cpp/pull/3041) + +* Bump codecov/codecov-action from 4 to 5 + [#3143](https://github.com/open-telemetry/opentelemetry-cpp/pull/3143) + +* [CI] Add cppcheck in the build + [#3151](https://github.com/open-telemetry/opentelemetry-cpp/pull/3151) + +* [BUILD] Fix error message + [#3152](https://github.com/open-telemetry/opentelemetry-cpp/pull/3152) + +* [EXPORTER] fix clang-tidy warnings in UrlParser + [#3146](https://github.com/open-telemetry/opentelemetry-cpp/pull/3146) + +* [EXPORTER] Upgrade to opentelemetry-proto 1.4.0 + [#3157](https://github.com/open-telemetry/opentelemetry-cpp/pull/3157) + +* [TEST] refactor UrlParser tests to use value-paramterized tests + [#3153](https://github.com/open-telemetry/opentelemetry-cpp/pull/3153) + +* [TEST] add a test for ElasticSearchRecordable + [#3154](https://github.com/open-telemetry/opentelemetry-cpp/pull/3154) + +* [BUILD] Fix missing dependency on protoc compiler + [#3159](https://github.com/open-telemetry/opentelemetry-cpp/pull/3159) + +* [bazel] Update prometheus-cpp in MODULE.bazel + [#3162](https://github.com/open-telemetry/opentelemetry-cpp/pull/3162) + +* [bazel] Enable --incompatible_disallow_empty_glob + [#2642](https://github.com/open-telemetry/opentelemetry-cpp/pull/2642) + +* [INSTALL] Fix cmake/opentelemetry-cpp-config.cmake.in + [#3165](https://github.com/open-telemetry/opentelemetry-cpp/pull/3165) + +* [BUILD] Do not set OTELCPP_PROTO_PATH in the CMake cache + [#3160](https://github.com/open-telemetry/opentelemetry-cpp/pull/3160) + +* [BUILD] Fix build for esp32 + [#3155](https://github.com/open-telemetry/opentelemetry-cpp/pull/3155) + +* [bazel] Update opentelemetry-proto in MODULE.bazel + [#3163](https://github.com/open-telemetry/opentelemetry-cpp/pull/3163) + +Important changes: + +* [API] Jaeger Propagator should not be deprecated + [#3086](https://github.com/open-telemetry/opentelemetry-cpp/pull/3086) + + * Deprecation of the Jaeger propagator, as announced on 2023-01-31 + in version 1.8.2, is now reverted. + * This deprecation turned out to be not justified, + as the Jaeger propagator can be used without the (now removed) + Jaeger exporter. 
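
For illustration only (not part of the vendored changelog): since the Jaeger propagator remains supported independently of the removed Jaeger exporter, a minimal sketch of installing it as the global text-map propagator. Header paths are assumptions and may vary by release.

```cpp
// Minimal sketch: register the (un-deprecated) Jaeger propagator globally.
// Header paths are assumptions.
#include "opentelemetry/context/propagation/global_propagator.h"
#include "opentelemetry/context/propagation/text_map_propagator.h"
#include "opentelemetry/nostd/shared_ptr.h"
#include "opentelemetry/trace/propagation/jaeger.h"

void InstallJaegerPropagator()
{
  opentelemetry::context::propagation::GlobalTextMapPropagator::SetGlobalPropagator(
      opentelemetry::nostd::shared_ptr<opentelemetry::context::propagation::TextMapPropagator>(
          new opentelemetry::trace::propagation::JaegerPropagator()));
}
```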
+ +* [EXPORTER] Change log resources location for ElasticsearchLogRecordExporter + [#3119](https://github.com/open-telemetry/opentelemetry-cpp/pull/3131) + + * Moved from `root/resources` to `root` + +* [SEMANTIC CONVENTIONS] Migration to weaver + [#3105](https://github.com/open-telemetry/opentelemetry-cpp/pull/3105) + + * `semantic_convention.h` header files are deprecated, + replaced by `semconv/xxx_attributes.h` header files, + for each `xxx` semantic attribute group. + * See file DEPRECATED.md for details. + +Deprecations: + +* This release contains deprecations, see file DEPRECATED.md for details. + +## [1.17 2024-10-07] + +* [CI] Add a clang-tidy build + [#3001](https://github.com/open-telemetry/opentelemetry-cpp/pull/3001) + +* [BUILD] Upgrade to opentelemetry-proto 1.3.2 + [#2991](https://github.com/open-telemetry/opentelemetry-cpp/pull/2991) + +* [REMOVAL] Remove build option `WITH_DEPRECATED_SDK_FACTORY` + [#2717](https://github.com/open-telemetry/opentelemetry-cpp/pull/2717) + +* [EXPORTER] ForceFlush before canceling the running requests on shutdown + [#2727](https://github.com/open-telemetry/opentelemetry-cpp/pull/2727) + +* [SDK] Fix crash in PeriodicExportingMetricReader + [#2983](https://github.com/open-telemetry/opentelemetry-cpp/pull/2983) + +* [SDK] Fix memory leak in TlsRandomNumberGenerator() constructor + [#2661](https://github.com/open-telemetry/opentelemetry-cpp/pull/2661) + +* [EXPORTER] Ignore exception when create thread in OTLP file exporter + [#3012](https://github.com/open-telemetry/opentelemetry-cpp/pull/3012) + +* [BUILD] Update the version in MODULE.bazel + [#3015](https://github.com/open-telemetry/opentelemetry-cpp/pull/3015) + +* [BUILD] Fix build without vcpkg on Windows when gRPC is disabled + [#3016](https://github.com/open-telemetry/opentelemetry-cpp/pull/3016) + +* [BUILD] Add abi_version_no bazel flag + [#3020](https://github.com/open-telemetry/opentelemetry-cpp/pull/3020) + +* [Code health] Expand iwyu coverage to include unit tests + [#3022](https://github.com/open-telemetry/opentelemetry-cpp/pull/3022) + +* [BUILD] Version opentelemetry_proto/proto_grpc shared libraries + [#2992](https://github.com/open-telemetry/opentelemetry-cpp/pull/2992) + +* [SEMANTIC CONVENTIONS] Upgrade semantic conventions to 1.27.0 + [#3023](https://github.com/open-telemetry/opentelemetry-cpp/pull/3023) + +* [SDK] Support empty histogram buckets + [#3027](https://github.com/open-telemetry/opentelemetry-cpp/pull/3027) + +* [TEST] Fix sync problems in OTLP File exporter tests + [#3031](https://github.com/open-telemetry/opentelemetry-cpp/pull/3031) + +* [SDK] PeriodicExportingMetricReader: future is never set, blocks until timeout + [#3030](https://github.com/open-telemetry/opentelemetry-cpp/pull/3030) + +* [Code Health] Clang Tidy cleanup, Part 2 + [#3038](https://github.com/open-telemetry/opentelemetry-cpp/pull/3038) + +* [Code Health] include-what-you-use cleanup, part 3 + [#3004](https://github.com/open-telemetry/opentelemetry-cpp/pull/3004) + +* [SDK] Fix overflow in timeout logic + [#3046](https://github.com/open-telemetry/opentelemetry-cpp/pull/3046) + +* [TEST] Add missing tests to Bazel build + [#3045](https://github.com/open-telemetry/opentelemetry-cpp/pull/3045) + +* [TEST] update collector tests with debug exporter + [#3050](https://github.com/open-telemetry/opentelemetry-cpp/pull/3050) + +* [EXAMPLE] update collector example with debug exporter + [#3049](https://github.com/open-telemetry/opentelemetry-cpp/pull/3049) + +* [TEST] update references to logging 
exporter + [#3053](https://github.com/open-telemetry/opentelemetry-cpp/pull/3053) + +* [EXAMPLE] Clean the tracer initialization in OStream example + [#3051](https://github.com/open-telemetry/opentelemetry-cpp/pull/3051) + +* [EXPORTER] Fix the format of SpanLink for ETW + [#3054](https://github.com/open-telemetry/opentelemetry-cpp/pull/3054) + +* [EXPORTER] Add in-memory metric exporter + [#3043](https://github.com/open-telemetry/opentelemetry-cpp/pull/3043) + +* [Code Health] include-what-you-use cleanup, part 4 + [#3040](https://github.com/open-telemetry/opentelemetry-cpp/pull/3040) + +* [BUILD] add loongarch info + [#3052](https://github.com/open-telemetry/opentelemetry-cpp/pull/3052) + +* [CI] Update otel-collector version + [#3067](https://github.com/open-telemetry/opentelemetry-cpp/pull/3067) + +* [SDK] Update MetricProducer interface to match spec + [#3044](https://github.com/open-telemetry/opentelemetry-cpp/pull/3044) + +* [EXPORTER] Fix URL in ES exporter, fix ipv6 supporting for http client + [#3081](https://github.com/open-telemetry/opentelemetry-cpp/pull/3081) + +* [EXPORTER] Add HttpHeaders in ElasticsearchLogRecordExporter + [#3083](https://github.com/open-telemetry/opentelemetry-cpp/pull/3083) + +Breaking changes: + +* [REMOVAL] Remove build option `WITH_DEPRECATED_SDK_FACTORY` + [#2717](https://github.com/open-telemetry/opentelemetry-cpp/pull/2717) + + * As announced in opentelemetry-cpp previous release 1.16.0, + CMake option `WITH_DEPRECATED_SDK_FACTORY` was temporary, + and to be removed by the next release. + * This option is now removed. + * Code configuring the SDK must be adjusted, as previously described: + + * [API/SDK] Provider cleanup + [#2664](https://github.com/open-telemetry/opentelemetry-cpp/pull/2664) + + * Before this fix: + * SDK factory methods such as: + * opentelemetry::sdk::trace::TracerProviderFactory::Create() + * opentelemetry::sdk::metrics::MeterProviderFactory::Create() + * opentelemetry::sdk::logs::LoggerProviderFactory::Create() + * opentelemetry::sdk::logs::EventLoggerProviderFactory::Create() + + returned an API object (opentelemetry::trace::TracerProvider) + to the caller. + + * After this fix, these methods return an SDK level object + (opentelemetry::sdk::trace::TracerProvider) to the caller. + * Returning an SDK object is necessary for the application to + cleanup and invoke SDK level methods, such as ForceFlush(), + on a provider. + * The application code that configures the SDK, by calling + the various provider factories, may need adjustment. + * All the examples have been updated, and in particular no + longer perform static_cast do convert an API object to an SDK object. + Please refer to examples for guidance on how to adjust. + +## [1.16.1 2024-07-17] + +* [BUILD] Add bazel missing BUILD file + [#2720](https://github.com/open-telemetry/opentelemetry-cpp/pull/2720) + +* [SDK] Added reserve for spans array in BatchSpanProcessor. + [#2724](https://github.com/open-telemetry/opentelemetry-cpp/pull/2724) + +* [DOC] Update "Using triplets" section in building-with-vcpkg documentation. 
+ [#2726](https://github.com/open-telemetry/opentelemetry-cpp/pull/2726) + +* [DOC] Remove comment for unused LoggerProvider initialization params + [#2972](https://github.com/open-telemetry/opentelemetry-cpp/pull/2972) + +* [SECURITY] Remove OTLP HTTP support for TLS 1.0 and TLS 1.1, + require TLS 1.2 or better + [#2722](https://github.com/open-telemetry/opentelemetry-cpp/pull/2722) + +* [TEST] Fix opentelemetry-collector bind address + [#2989](https://github.com/open-telemetry/opentelemetry-cpp/pull/2989) + +* [EXPORTER] Fix references in AttributeValueVisitor + [#2985](https://github.com/open-telemetry/opentelemetry-cpp/pull/2985) + +* [Code health] include-what-you-use cleanup, part 2 + [#2704](https://github.com/open-telemetry/opentelemetry-cpp/pull/2704) + +* [Code Health] clang-tidy cleanup, part 1 + [#2990](https://github.com/open-telemetry/opentelemetry-cpp/pull/2990) + +* [CI] Build failures with ABSEIL 20240116 and CMAKE 3.30 + [#3002](https://github.com/open-telemetry/opentelemetry-cpp/pull/3002) + +* [CI] Enable bzlmod + [#2995](https://github.com/open-telemetry/opentelemetry-cpp/pull/2995) + +* [Metrics SDK] Fix hash calculation for nostd::string + [#2999](https://github.com/open-telemetry/opentelemetry-cpp/pull/2999) + +Breaking changes: + +* [SECURITY] Remove OTLP HTTP support for TLS 1.0 and TLS 1.1, + require TLS 1.2 or better + [#2722](https://github.com/open-telemetry/opentelemetry-cpp/pull/2722) + * The OTLP HTTP exporter no longer accept options like: + * min_TLS = 1.0 + * min_TLS = 1.1 + * max_TLS = 1.0 + * max_TLS = 1.1 + * When connecting to an OTLP HTTP endpoint, using `https`, + the connection will require TLS 1.2 by default, + unless min_TLS is set to 1.3 + * Plain `http` connections (insecure) are not affected. + +## [1.16.0] 2024-06-21 + +* [BUILD] Upgrade bazel abseil from 20220623.1 to 20230802.2 + [#2650](https://github.com/open-telemetry/opentelemetry-cpp/pull/2650) +* [BUILD] Use nostd::enable_if_t instead of std::enable_if_t + [#2648](https://github.com/open-telemetry/opentelemetry-cpp/pull/2648) +* [EXEMPLAR] Update ExemplarFilter and ExemplarReservoir for spec + [#2372](https://github.com/open-telemetry/opentelemetry-cpp/pull/2372) +* [BUILD] Link CoreFoundation on apple systems + [#2655](https://github.com/open-telemetry/opentelemetry-cpp/pull/2655) +* [SDK] Avoid missing conditional variable update and simplify atomic bool + [#2553](https://github.com/open-telemetry/opentelemetry-cpp/pull/2553) +* [BUILD] Build break in OLTP_FILE tests + [#2659](https://github.com/open-telemetry/opentelemetry-cpp/pull/2659) +* [EXPORTER] General cleanup for is_shutdown flags in exporters. 
+ [#2663](https://github.com/open-telemetry/opentelemetry-cpp/pull/2663) +* [CI] Upgrade Maintainers CI to ubuntu-24.04 + [#2670](https://github.com/open-telemetry/opentelemetry-cpp/pull/2670) +* [BUILD] Upgrade to opentelemetry-proto 1.3.1 + [#2669](https://github.com/open-telemetry/opentelemetry-cpp/pull/2669) +* [API] Return NoopLogRecord from NoopLogger + [#2668](https://github.com/open-telemetry/opentelemetry-cpp/pull/2668) +* [BUILD] Remove the hard-coded separator in tracestate + [#2672](https://github.com/open-telemetry/opentelemetry-cpp/pull/2672) +* [SDK] Fix forceflush may wait for ever + [#2584](https://github.com/open-telemetry/opentelemetry-cpp/pull/2584) +* [API] DO not allow unsafe Logger::EmitLogRecord + [#2673](https://github.com/open-telemetry/opentelemetry-cpp/pull/2673) +* [BUILD] Read default proto version from third_party_release + [#2677](https://github.com/open-telemetry/opentelemetry-cpp/pull/2677) +* [CI] include-what-you-use + [#2629](https://github.com/open-telemetry/opentelemetry-cpp/pull/2629) +* [CI] Upgrade to clang-format 18 + [#2684](https://github.com/open-telemetry/opentelemetry-cpp/pull/2684) +* [CI] Fix CI failures on Ubuntu 24.04 + [#2686](https://github.com/open-telemetry/opentelemetry-cpp/pull/2686) +* [SEMANTIC CONVENTIONS] Upgrade to version 1.26.0 + [#2687](https://github.com/open-telemetry/opentelemetry-cpp/pull/2687) +* [API/SDK] Provider cleanup + [#2664](https://github.com/open-telemetry/opentelemetry-cpp/pull/2664) +* [ETW] Add table name mapping for Logs other than the default Log table + [#2691](https://github.com/open-telemetry/opentelemetry-cpp/pull/2691) +* [CI] Remove benchmark overlay for vcpkg + [#2695](https://github.com/open-telemetry/opentelemetry-cpp/pull/2695) +* [BUILD] Remove the incorrect set of CMAKE_MSVC_RUNTIME_LIBRARY for vcpkg + [#2696](https://github.com/open-telemetry/opentelemetry-cpp/pull/2696) +* [BUILD] CMakeLists.txt: Enable CMAKE_MSVC_RUNTIME_LIBRARY support + [#2652](https://github.com/open-telemetry/opentelemetry-cpp/pull/2652) +* [EXPORTER] OTLP file: use thread-safe file/io + [#2675](https://github.com/open-telemetry/opentelemetry-cpp/pull/2675) +* [bazel] Bump version and deps + [#2679](https://github.com/open-telemetry/opentelemetry-cpp/pull/2679) +* [BUILD] Add support for bzlmod + [#2608](https://github.com/open-telemetry/opentelemetry-cpp/pull/2608) +* [BUILD] Fix Import Abseil-cpp + [#2701](https://github.com/open-telemetry/opentelemetry-cpp/pull/2701) +* [Code health] include-what-you-use cleanup + [#2692](https://github.com/open-telemetry/opentelemetry-cpp/pull/2692) +* [BUILD] Restore Bazel flag removed from public API + [#2702](https://github.com/open-telemetry/opentelemetry-cpp/pull/2702) +* [DOC] Fix typo tace_id -> trace_id in logger.h + [#2703](https://github.com/open-telemetry/opentelemetry-cpp/pull/2703) +* Bump docker/build-push-action from 5 to 6 + [#2705](https://github.com/open-telemetry/opentelemetry-cpp/pull/2705) +* [CI] Enable ARM64 build in CI + [#2699](https://github.com/open-telemetry/opentelemetry-cpp/pull/2699) +* [Code health] Remove Unicode Text from Source files + [#2707](https://github.com/open-telemetry/opentelemetry-cpp/pull/2707) +* [BUILD] Add option WITH_OTLP_GRPC_SSL_MTLS_PREVIEW + [#2714](https://github.com/open-telemetry/opentelemetry-cpp/pull/2714) +* [EXPORTER] All 2xx return codes should be considered successful. 
+ [#2712](https://github.com/open-telemetry/opentelemetry-cpp/pull/2712) + +Important changes: + +* [API/SDK] Provider cleanup + [#2664](https://github.com/open-telemetry/opentelemetry-cpp/pull/2664) + * Before this fix: + * The API class `opentelemetry::trace::Tracer` exposed methods such + as `ForceFlush()`, `ForceFlushWithMicroseconds()`, `Close()` + and `CloseWithMicroseconds()`. + * These methods are meant to be used when configuring the SDK, + and should not be part of the API. Exposing them was an oversight. + * Two of these methods are virtual, and therefore part of the ABI. + * After this fix: + * In `OPENTELEMETRY_ABI_VERSION_NO 1`, nothing is changed, + because removing this code would break the ABI. + * In `OPENTELEMETRY_ABI_VERSION_NO 2`, these methods are moved + from the API to the SDK. This is a breaking change for ABI version 2, + which is still experimental. + * In all cases, instrumenting an application should not + invoke flush or close on a tracer, do not use these methods. + +Breaking changes: + +* [API/SDK] Provider cleanup + [#2664](https://github.com/open-telemetry/opentelemetry-cpp/pull/2664) + * Before this fix: + * SDK factory methods such as: + * opentelemetry::sdk::trace::TracerProviderFactory::Create() + * opentelemetry::sdk::metrics::MeterProviderFactory::Create() + * opentelemetry::sdk::logs::LoggerProviderFactory::Create() + * opentelemetry::sdk::logs::EventLoggerProviderFactory::Create() + returned an API object (opentelemetry::trace::TracerProvider) + to the caller. + * After this fix, these methods return an SDK level object + (opentelemetry::sdk::trace::TracerProvider) to the caller. + * Returning an SDK object is necessary for the application to + cleanup and invoke SDK level methods, such as ForceFlush(), + on a provider. + * The application code that configures the SDK, by calling + the various provider factories, may need adjustment. + * All the examples have been updated, and in particular no + longer perform static_cast do convert an API object to an SDK object. + Please refer to examples for guidance on how to adjust. + * If adjusting application code is impractical, + an alternate and temporary solution is to build with option + WITH_DEPRECATED_SDK_FACTORY=ON in CMake. + * Option WITH_DEPRECATED_SDK_FACTORY=ON will allow to build code + without application changes, posponing changes for later. + * WITH_DEPRECATED_SDK_FACTORY=ON is temporary, only to provide + an easier migration path. Expect this flag to be removed, + as early as by the next release. + +Notes on experimental features: + +* [#2372](https://github.com/open-telemetry/opentelemetry-cpp/issues/2372) + introduced `MeterProvider::SetExemplar()` which accepts en + `ExemplarFilterType` enumeration with `kAlwaysOff`, `kAlwaysOn` and + `kTraceBased`. 
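
For illustration only (not part of the vendored changelog): a hypothetical sketch of the experimental exemplar configuration quoted in the note above. The method and enumeration names are taken verbatim from that note; the header, namespace, and signature are assumptions, and the feature may sit behind a preview build flag.

```cpp
// Hypothetical sketch: enable trace-based exemplar collection on an SDK
// MeterProvider, using the names quoted in the note above. Header, namespace,
// and exact signature are assumptions.
#include "opentelemetry/sdk/metrics/meter_provider.h"

void EnableTraceBasedExemplars(opentelemetry::sdk::metrics::MeterProvider &provider)
{
  provider.SetExemplar(opentelemetry::sdk::metrics::ExemplarFilterType::kTraceBased);
}
```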
+ +## [1.15.0] 2024-04-21 + +* [EXPORTER] Change OTLP HTTP content_type default to binary + [#2564](https://github.com/open-telemetry/opentelemetry-cpp/pull/2564) +* [DOC] Fix OTLP documentation: Default endpoint is wrong for OTLP/HTTP + [#2560](https://github.com/open-telemetry/opentelemetry-cpp/pull/2560) +* [BUILD] Fix old style cast warning + [#2567](https://github.com/open-telemetry/opentelemetry-cpp/pull/2567) +* [EXPORTER] Gzip compression support for OTLP/HTTP and OTLP/gRPC exporter + [#2530](https://github.com/open-telemetry/opentelemetry-cpp/pull/2530) +* [BUILD] update vcpkg submodule to 2024.02.14 + [#2575](https://github.com/open-telemetry/opentelemetry-cpp/pull/2575) +* [SDK] Support for OTEL_SERVICE_NAME + [#2577](https://github.com/open-telemetry/opentelemetry-cpp/pull/2577) +* [EXPORTER] Support URL-encoded values for `OTEL_EXPORTER_OTLP_HEADERS` + [#2579](https://github.com/open-telemetry/opentelemetry-cpp/pull/2579) +* [BUILD] CMake cleanup for message() + [#2582](https://github.com/open-telemetry/opentelemetry-cpp/pull/2582) +* [BUILD] Bump CMake minimum required version to 3.9 + [#2581](https://github.com/open-telemetry/opentelemetry-cpp/pull/2581) +* [BUILD] Provide LIKELY / UNLIKELY macros + [#2580](https://github.com/open-telemetry/opentelemetry-cpp/pull/2580) +* [EXPORTER] OTLP: Fix missing ResourceMetrics SchemaURL + [#2587](https://github.com/open-telemetry/opentelemetry-cpp/pull/2587) +* [ETW] cleanup include path + [#2594](https://github.com/open-telemetry/opentelemetry-cpp/pull/2594) +* Upgrade to googletest 1.14.0 + [#2596](https://github.com/open-telemetry/opentelemetry-cpp/pull/2596) +* Upgrade to nlohmann_json 3.11.3 + [#2595](https://github.com/open-telemetry/opentelemetry-cpp/pull/2595) +* [BAZEL] Move -std=c++14 to .bazelrc + [#2600](https://github.com/open-telemetry/opentelemetry-cpp/pull/2600) +* [BAZEL] Fix -std=c++14 warning on Windows + [#2601](https://github.com/open-telemetry/opentelemetry-cpp/pull/2601) +* Upgrade to benchmark 1.8.3 + [#2597](https://github.com/open-telemetry/opentelemetry-cpp/pull/2597) +* Upgrade to prometheus 1.2.4 + [#2598](https://github.com/open-telemetry/opentelemetry-cpp/pull/2598) +* [DOC] Fix typo: Asynchronouse -> Asynchronous in meter.h + [#2604](https://github.com/open-telemetry/opentelemetry-cpp/pull/2604) +* [BUILD] Do not link prometheus-cpp::util when it doesn't exist + [#2606](https://github.com/open-telemetry/opentelemetry-cpp/pull/2606) +* [SDK] Remove unused variable + [#2609](https://github.com/open-telemetry/opentelemetry-cpp/pull/2609) +* [METRICS SDK] Remove extra OfferMeasurement call + in SyncMetricsStorage::OfferMeasurement + [#2610](https://github.com/open-telemetry/opentelemetry-cpp/pull/2610) +* [MISC] Use set -e on all shell scripts and pass shellcheck --severity=error + [#2616](https://github.com/open-telemetry/opentelemetry-cpp/pull/2616) +* [CI] Add shellcheck --severity=error as a CI step + [#2618](https://github.com/open-telemetry/opentelemetry-cpp/pull/2618) +* [CI] Upgrade to abseil 20240116.1 (CMake only) + [#2599](https://github.com/open-telemetry/opentelemetry-cpp/pull/2599) +* [CI] Benchmark, provide units with --benchmark_min_time + [#2621](https://github.com/open-telemetry/opentelemetry-cpp/pull/2621) +* [EXPORTER] OTLP file exporter + [#2540](https://github.com/open-telemetry/opentelemetry-cpp/pull/2540) +* [CI] Use platform CMake + [#2627](https://github.com/open-telemetry/opentelemetry-cpp/pull/2627) +* [PROTO] Upgrade to opentelemetry-proto 1.2.0 + 
[#2631](https://github.com/open-telemetry/opentelemetry-cpp/pull/2631) +* [SDK] DefaultLogHandler to print errors to std::cerr, add LogLevel::None + [#2622](https://github.com/open-telemetry/opentelemetry-cpp/pull/2622) +* [SEMANTIC CONVENTIONS] Upgrade to semantic convention 1.25.0 + [#2633](https://github.com/open-telemetry/opentelemetry-cpp/pull/2633) +* [DOC] Add readme and examples for OTLP FILE exporters. + [#2638](https://github.com/open-telemetry/opentelemetry-cpp/pull/2638) +* [SEMANTIC CONVENTIONS] Rework on semantic conventions 1.25.0 + [#2640](https://github.com/open-telemetry/opentelemetry-cpp/pull/2640) +* [DOC] Update INSTALL.md + [#2592](https://github.com/open-telemetry/opentelemetry-cpp/pull/2592) + +Important changes: + +* [EXPORTER] Gzip compression support for OTLP/HTTP and OTLP/gRPC exporter + [#2530](https://github.com/open-telemetry/opentelemetry-cpp/pull/2530) + * In the `OtlpHttpExporterOptions` and `OtlpGrpcExporterOptions`, a new + field called compression has been introduced. This field can be set + to "gzip” to enable gzip compression. + * The CMake option `WITH_OTLP_HTTP_COMPRESSION` is introduced to enable + gzip compression support for the OTLP HTTP Exporter and includes a + dependency on zlib. +* [SDK] Change OTLP HTTP content_type default to binary + [#2558](https://github.com/open-telemetry/opentelemetry-cpp/pull/2558) +* [CI] Use platform CMake + [#2627](https://github.com/open-telemetry/opentelemetry-cpp/pull/2627) + * The `CI` in github no longer install a different version of `cmake`. + * It now always use the `cmake` provided by the platform. + * As part of this change, the script `ci/setup_cmake.sh` was renamed + to `ci/setup_googletest.sh`, for clarity, now that this script + only installs googletest. +* [SDK] DefaultLogHandler to print to std::cerr, add LogLevel::None + [#2622](https://github.com/open-telemetry/opentelemetry-cpp/pull/2622) + * Change DefaultLogHandler output + * Before, the default internal logger, DefaultLogHandler, + used to print to std::cout. + * Now, DefaultLogHandler prints errors and warnings to std::cerr, + as expected, while printing info and debug messages to std::cout. + * Applications that expected to find the opentelemetry-cpp internal + error log in std::cout may need adjustments, either by looking + at std::cerr instead, or by using a custom log handler. + * Additional LogLevel::None + * LogLevel::None is a new supported log level, which does not print + any message. + * Custom log handlers may need to implement a new case, to avoid + compiler warnings. + * Numbering of log levels like OTEL_INTERNAL_LOG_LEVEL_ERROR + has changed, which requires to rebuild, as the SDK ABI differs. + +## [1.14.2] 2024-02-27 + +* [SDK] Fix observable attributes drop + [#2557](https://github.com/open-telemetry/opentelemetry-cpp/pull/2557) + +## [1.14.1] 2024-02-23 + +* [SDK] Restore Recordable API compatibility with versions < 1.14.0 + [#2547](https://github.com/open-telemetry/opentelemetry-cpp/pull/2547) +* [DOC] Add missing CHANGELOG. 
+ [#2549](https://github.com/open-telemetry/opentelemetry-cpp/pull/2549) +* [EXPORTER] Error when grpc endpoint is empty + [#2507](https://github.com/open-telemetry/opentelemetry-cpp/pull/2507) +* [DOC] Fix typo in benchmarks.rst + [#2542](https://github.com/open-telemetry/opentelemetry-cpp/pull/2542) + +Important changes: + +* [SDK] Restore Recordable API compatibility with versions < 1.14.0 + [#2547](https://github.com/open-telemetry/opentelemetry-cpp/pull/2547) + * For third party _extending_ the SDK, release 1.14.0 introduced + an API breaking change compared to 1.13.0 + * This fix restores API (but not ABI) compatibility of + release 1.14.1 with release 1.13.0. + * This allows to build a third party exporter with no source code changes, + for both releases 1.14.1 and 1.13.0. + +## [1.14.0] 2024-02-16 + +* [BUILD] Add DLL build CI pipeline with CXX20 + [#2465](https://github.com/open-telemetry/opentelemetry-cpp/pull/2465) +* [EXPORTER] Set `is_monotonic` flag for Observable Counters + [#2478](https://github.com/open-telemetry/opentelemetry-cpp/pull/2478) +* [PROTO] Upgrade to opentelemetry-proto v1.1.0 + [#2488](https://github.com/open-telemetry/opentelemetry-cpp/pull/2488) +* [BUILD] Introduce CXX 20 CI pipeline for MSVC/Windows + [#2450](https://github.com/open-telemetry/opentelemetry-cpp/pull/2450) +* [API] Propagation: fix for hex conversion to binary for odd hex strings + [#2533](https://github.com/open-telemetry/opentelemetry-cpp/pull/2533) +* [DOC] Fix calendar link + [#2532](https://github.com/open-telemetry/opentelemetry-cpp/pull/2532) +* [ETW EXPORTER] Remove namespace using in ETW exporter which affects global + namespace + [#2531](https://github.com/open-telemetry/opentelemetry-cpp/pull/2531) +* [BUILD] Don't invoke vcpkg from this repo with CMAKE_TOOLCHAIN_FILE set + [#2527](https://github.com/open-telemetry/opentelemetry-cpp/pull/2527) +* [EXPORTER] Async exporting for otlp grpc + [#2407](https://github.com/open-telemetry/opentelemetry-cpp/pull/2407) +* [METRICS SDK] Fix attribute filtering for synchronous instruments. + [#2472](https://github.com/open-telemetry/opentelemetry-cpp/pull/2472) +* [BUILD] Better handling of OPENTELEMETRY_STL_VERSION under Bazel. 
+ [#2503](https://github.com/open-telemetry/opentelemetry-cpp/pull/2503) +* [DOC] Fixes CI markdown error MD055 - Table pipe style + [#2517](https://github.com/open-telemetry/opentelemetry-cpp/pull/2517) +* [API] Propagators: do not overwrite the active span with a default invalid + span [#2511](https://github.com/open-telemetry/opentelemetry-cpp/pull/2511) +* [BUILD] Updated the recorded vcpkg submodule version + [#2513](https://github.com/open-telemetry/opentelemetry-cpp/pull/2513) +* [BUILD] Remove unnecessary usage/includes of nostd/type_traits + [#2509](https://github.com/open-telemetry/opentelemetry-cpp/pull/2509) +* [SEMANTIC CONVENTIONS] Upgrade to version 1.24.0 + [#2461](https://github.com/open-telemetry/opentelemetry-cpp/pull/2461) +* [EXAMPLES] Improve readme of Metrics example + [#2510](https://github.com/open-telemetry/opentelemetry-cpp/pull/2510) +* [BUILD] Clang-15 warning about `__has_trivial_destructor` + [#2502](https://github.com/open-telemetry/opentelemetry-cpp/pull/2502) +* [BUILD] Fix multiple assignment operators for SpinLockMutex + [#2501](https://github.com/open-telemetry/opentelemetry-cpp/pull/2501) +* [BUILD] Alternative way of exporting symbols (generating .def file) + [#2476](https://github.com/open-telemetry/opentelemetry-cpp/pull/2476) +* [CI] Make cmake.c++20*.test actually use C++20 and add cmake.c++23*.test + [#2496](https://github.com/open-telemetry/opentelemetry-cpp/pull/2496) +* [DOCUMENTATION] Add api reference documentation for logs + [#2497](https://github.com/open-telemetry/opentelemetry-cpp/pull/2497) +* [BUILD] Fix variable shadow + [#2498](https://github.com/open-telemetry/opentelemetry-cpp/pull/2498) +* [BUILD] Fix checks on __cplusplus under MSVC, do not assume /Zc + [#2493](https://github.com/open-telemetry/opentelemetry-cpp/pull/2493) +* [EXAMPLES] Use logs API instead of logs bridge API in the example + [#2494](https://github.com/open-telemetry/opentelemetry-cpp/pull/2494) +* [EXPORTER] Fix forward protocol encoding for ETW exporter + [#2473](https://github.com/open-telemetry/opentelemetry-cpp/pull/2473) +* [BUILD] Skip patch alias target + [#2457](https://github.com/open-telemetry/opentelemetry-cpp/pull/2457) +* [EXPORTER] Rename populate_otel_scope to without_otel_scope + [#2479](https://github.com/open-telemetry/opentelemetry-cpp/pull/2479) +* [EXPORTER SDK] Additional fixes after NOMINMAX removal on Windows + [#2475](https://github.com/open-telemetry/opentelemetry-cpp/pull/2475) +* [EXPORTER] Do not use regex in `CleanUpString` because some implementations of + STL may crash. 
+ [#2464](https://github.com/open-telemetry/opentelemetry-cpp/pull/2464) +* [EXPORTER] Fix Aggregation type detection in OTLP Exporter + [#2467](https://github.com/open-telemetry/opentelemetry-cpp/pull/2467) +* [EXPORTER] Add option to disable Prometheus otel_scope_name and + otel_scope_version attributes + [#2451](https://github.com/open-telemetry/opentelemetry-cpp/pull/2451) +* [SEMANTIC CONVENTIONS] Code generation script fails on SELINUX + [#2455](https://github.com/open-telemetry/opentelemetry-cpp/pull/2455) +* [BUILD] Fix removing of NOMINMAX on Windows + [#2449](https://github.com/open-telemetry/opentelemetry-cpp/pull/2449) +* [BUILD] Accept path list in OPENTELEMETRY_EXTERNAL_COMPONENT_PATH + [#2439](https://github.com/open-telemetry/opentelemetry-cpp/pull/2439) +* [BUILD] Remove gmock from GTEST_BOTH_LIBRARIES + [#2437](https://github.com/open-telemetry/opentelemetry-cpp/pull/2437) +* [REMOVAL] Remove option WITH_OTLP_HTTP_SSL_PREVIEW + [#2435](https://github.com/open-telemetry/opentelemetry-cpp/pull/2435) + +Important changes: + +Breaking changes: + +* [REMOVAL] Remove option WITH_OTLP_HTTP_SSL_PREVIEW + [#2435](https://github.com/open-telemetry/opentelemetry-cpp/pull/2435) + * CMake options `WITH_OTLP_HTTP_SSL_PREVIEW` and + `WITH_OTLP_HTTP_SSL_TLS_PREVIEW` are removed. Building opentelemetry-cpp + without SSL support is no longer possible. + +* [PROTO] Upgrade to opentelemetry-proto v1.1.0 + [#2488](https://github.com/open-telemetry/opentelemetry-cpp/pull/2488) + * Class `opentelemetry::sdk::trace::Recordable` has a new virtual method, + `SetTraceFlags()`. + * This is an incompatible change for the SDK Recordable API and ABI. + * Applications _configuring_ the SDK are not affected. + * Third parties providing SDK _extensions_ are affected, + and must provide a `SetTraceFlags()` implementation, + starting with opentelemetry-cpp 1.14.0. + +## [1.13.0] 2023-12-06 + +* [BUILD] Remove WITH_REMOVE_METER_PREVIEW, use WITH_ABI_VERSION_2 instead + [#2370](https://github.com/open-telemetry/opentelemetry-cpp/pull/2370) +* [SDK] Metrics ObservableRegistry Cleanup + [#2376](https://github.com/open-telemetry/opentelemetry-cpp/pull/2376) +* [BUILD] Make WITH_OTLP_HTTP_SSL_PREVIEW mainstream + [#2378](https://github.com/open-telemetry/opentelemetry-cpp/pull/2378) +* [SDK] Creating DoubleUpDownCounter with no matching view + [#2379](https://github.com/open-telemetry/opentelemetry-cpp/pull/2379) +* [API] Add InstrumentationScope attributes in TracerProvider::GetTracer() + [#2371](https://github.com/open-telemetry/opentelemetry-cpp/pull/2371) +* [BUILD] DLL export interface for Metrics + [#2344](https://github.com/open-telemetry/opentelemetry-cpp/pull/2344) +* [BUILD] enum CanonicalCode names too generic... 
conflict with old C defines + [#2385](https://github.com/open-telemetry/opentelemetry-cpp/pull/2385) +* [BUILD] Fix cpack broken package version + [#2386](https://github.com/open-telemetry/opentelemetry-cpp/pull/2386) +* [API] Add a new AddLink() operation to Span + [#2380](https://github.com/open-telemetry/opentelemetry-cpp/pull/2380) +* [opentracing-shim] Add check for sampled context + [#2390](https://github.com/open-telemetry/opentelemetry-cpp/pull/2390) +* [BUILD] Fix exported definitions when building DLL with STL + [#2387](https://github.com/open-telemetry/opentelemetry-cpp/pull/2387) +* [BUILD] Add missing includes to runtime_context_test + [#2395](https://github.com/open-telemetry/opentelemetry-cpp/pull/2395) +* [ADMIN] Add file .github/repository-settings.md + [#2392](https://github.com/open-telemetry/opentelemetry-cpp/pull/2392) +* [SDK] Fix GetLogger with empty library name + [#2398](https://github.com/open-telemetry/opentelemetry-cpp/pull/2398) +* [TEST] Fix compiling problem and removed -DENABLE_TEST + [#2401](https://github.com/open-telemetry/opentelemetry-cpp/pull/2401) +* [BUILD] Check windows options are not passed to non-Windows build + [#2399](https://github.com/open-telemetry/opentelemetry-cpp/pull/2399) +* [EXPORTER] Rework OTLP/HTTP and OTLP/GRPC exporter options + [#2388](https://github.com/open-telemetry/opentelemetry-cpp/pull/2388) +* [Build] Update vcpkg to latest release + [#2412](https://github.com/open-telemetry/opentelemetry-cpp/pull/2412) +* [SDK] Cardinality limits for metrics streams + (Sync Instruments + Delta Temporality) + [#2255](https://github.com/open-telemetry/opentelemetry-cpp/pull/2255) +* [EXPORTER] Prometheus: Add unit to names, convert to word + [#2213](https://github.com/open-telemetry/opentelemetry-cpp/pull/2213) +* [Metrics] Make context optional for histogram instruments in Metrics SDK + [#2416](https://github.com/open-telemetry/opentelemetry-cpp/pull/2416) +* [BUILD] Fix references to trace namespace to be fully qualified + [#2422](https://github.com/open-telemetry/opentelemetry-cpp/pull/2422) +* [BUILD] Bump third_party/googletest to same version as bazel + [#2421](https://github.com/open-telemetry/opentelemetry-cpp/pull/2421) +* [BUILD] Remove defining NOMINMAX from api + [#2420](https://github.com/open-telemetry/opentelemetry-cpp/pull/2420) +* [BUILD] 'uint8_t' not declared in this scope with gcc 13.2.1 + [#2423](https://github.com/open-telemetry/opentelemetry-cpp/pull/2423) +* [BUILD] Improve the handling of OPENTELEMETRY_HAVE_WORKING_REGEX + [#2430](https://github.com/open-telemetry/opentelemetry-cpp/pull/2430) +* [SEMANTIC CONVENTION] Upgrade to semconv 1.23.1 + [#2428](https://github.com/open-telemetry/opentelemetry-cpp/pull/2428) +* [BUILD] Use fully qualified references to trace/common namespace + [#2424](https://github.com/open-telemetry/opentelemetry-cpp/pull/2424) +* [API] Create root span with active span + [#2427](https://github.com/open-telemetry/opentelemetry-cpp/pull/2427) +* [REMOVAL] Remove ZPAGES + [#2433](https://github.com/open-telemetry/opentelemetry-cpp/pull/2433) + +Important changes: + +* [API] Add InstrumentationScope attributes in TracerProvider::GetTracer() + [#2371](https://github.com/open-telemetry/opentelemetry-cpp/pull/2371) + * TracerProvider::GetTracer() now accepts InstrumentationScope attributes. + * Because this is an `ABI` breaking change, the fix is only available + with the `CMake` option `WITH_ABI_VERSION_2=ON`. 
+  * When building with `CMake` option `WITH_ABI_VERSION_1=ON` (by default)
+    the `ABI` is unchanged, and the fix is not available.
+
+* [API] Add a new AddLink() operation to Span
+  [#2380](https://github.com/open-telemetry/opentelemetry-cpp/pull/2380)
+  * New `API` Span::AddLink() adds a single link to a span.
+  * New `API` Span::AddLinks() adds multiple links to a span.
+  * Because this is an `ABI` breaking change, the fix is only available
+    with the `CMake` option `WITH_ABI_VERSION_2=ON`.
+  * When building with `CMake` option `WITH_ABI_VERSION_1=ON` (by default)
+    the `ABI` is unchanged, and the fix is not available.
+
+* [BUILD] Make WITH_OTLP_HTTP_SSL_PREVIEW mainstream
+  [#2378](https://github.com/open-telemetry/opentelemetry-cpp/pull/2378)
+  * The experimental `CMake` option `WITH_OTLP_HTTP_SSL_PREVIEW`
+    is now promoted to stable. The default is changed to `ON`.
+  * The experimental `CMake` option `WITH_OTLP_HTTP_SSL_TLS_PREVIEW`
+    is now promoted to stable. The default is changed to `ON`.
+  * These build options are scheduled to be removed by the next release;
+    building without SSL/TLS will then no longer be possible.
+
+* [EXPORTER] Rework OTLP/HTTP and OTLP/GRPC exporter options
+  [#2388](https://github.com/open-telemetry/opentelemetry-cpp/pull/2388)
+  * `OtlpGrpcMetricExporterOptions` used to honor `_TRACES_`
+    environment variables, instead of `_METRICS_` environment variables.
+  * The implementation of `OtlpGrpcMetricExporterOptions` is now fixed.
+  * Please check configuration variables to make sure
+    `_METRICS_` variables are set as expected.
+
+Breaking changes:
+
+* [BUILD] Remove WITH_REMOVE_METER_PREVIEW, use WITH_ABI_VERSION_2 instead
+  [#2370](https://github.com/open-telemetry/opentelemetry-cpp/pull/2370)
+  * The experimental `CMake` option `WITH_REMOVE_METER_PREVIEW` is removed;
+    use option `WITH_ABI_VERSION_2` instead.
+
+* [BUILD] enum CanonicalCode names too generic... conflict with old C defines
+  [#2385](https://github.com/open-telemetry/opentelemetry-cpp/pull/2385)
+  * Header file `opentelemetry/trace/canonical_code.h` is unused,
+    and is now removed.
+  * This header should not be included directly in an application.
+    If it is, please remove any remaining include directives.
+
+* [BUILD] Fix exported definitions when building DLL with STL
+  [#2387](https://github.com/open-telemetry/opentelemetry-cpp/pull/2387)
+  * The MeterSelector, MeterSelectorFactory, InstrumentSelector,
+    and InstrumentSelectorFactory APIs now use const std::string&
+    instead of nostd::string_view for name, version and schema to
+    maintain a single export definition for DLL.
+
+* [EXPORTER] Rework OTLP/HTTP and OTLP/GRPC exporter options
+  [#2388](https://github.com/open-telemetry/opentelemetry-cpp/pull/2388)
+  * `OtlpGrpcLogRecordExporter` incorrectly used `OtlpGrpcExporterOptions`,
+    which are options for traces and not logs.
+  * This created a bug: `OtlpGrpcLogRecordExporter` honored `_TRACES_`
+    environment variables instead of `_LOGS_` environment variables.
+  * `OtlpGrpcLogRecordExporter` is changed to use
+    `OtlpGrpcLogRecordExporterOptions` instead, fixing the bug.
+  * User code that initializes the SDK with a GRPC Log exporter,
+    and uses exporter options, should be adjusted to replace
+    `OtlpGrpcExporterOptions` with `OtlpGrpcLogRecordExporterOptions`,
+    as sketched below.
+  * Please check configuration variables to make sure
+    `_LOGS_` variables are set as expected.
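+  * A minimal sketch of this adjustment (hypothetical application code; the
+    factory, header names and endpoint below are assumptions, not taken from
+    this changelog):
+
+    ```cpp
+    // Hypothetical snippet: switch the gRPC log exporter to its own options type.
+    #include "opentelemetry/exporters/otlp/otlp_grpc_log_record_exporter_factory.h"
+    #include "opentelemetry/exporters/otlp/otlp_grpc_log_record_exporter_options.h"
+
+    namespace otlp = opentelemetry::exporter::otlp;
+
+    auto MakeLogExporter()
+    {
+      // Was: otlp::OtlpGrpcExporterOptions opts;  // honored _TRACES_ variables
+      otlp::OtlpGrpcLogRecordExporterOptions opts;  // honors _LOGS_ variables
+      opts.endpoint = "localhost:4317";             // illustrative endpoint
+      return otlp::OtlpGrpcLogRecordExporterFactory::Create(opts);
+    }
+    ```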
+ +* [REMOVAL] Remove ZPAGES + [#2433](https://github.com/open-telemetry/opentelemetry-cpp/pull/2433) + * As announced in release 1.12.0, + the deprecated ZPAGES exporter is now removed. + +## [1.12.0] 2023-10-16 + +* [BUILD] Support `pkg-config` + [#2269](https://github.com/open-telemetry/opentelemetry-cpp/pull/2269) +* [CI] Do not automatically close stale issues + [#2277](https://github.com/open-telemetry/opentelemetry-cpp/pull/2277) +* [CI] Benchmark workflow fails, C++14 required to build grpc + [#2278](https://github.com/open-telemetry/opentelemetry-cpp/pull/2278) +* [SDK] Increase metric name maximum length from 63 to 255 characters + [#2284](https://github.com/open-telemetry/opentelemetry-cpp/pull/2284) +* [SEMANTIC CONVENTION] Deprecated semconv (in the spec) + not deprecated (in C++) + [#2285](https://github.com/open-telemetry/opentelemetry-cpp/pull/2285) +* [SDK] Remove unused member variables from SyncMetricStorage + [#2294](https://github.com/open-telemetry/opentelemetry-cpp/pull/2294) +* [DEPRECATION] Deprecate ZPAGES + [#2291](https://github.com/open-telemetry/opentelemetry-cpp/pull/2291) +* [API] Deliver ABI breaking changes + [#2222](https://github.com/open-telemetry/opentelemetry-cpp/pull/2222) +* [SDK] Allow metric instrument names to contain / characters + [#2310](https://github.com/open-telemetry/opentelemetry-cpp/pull/2310) +* [SDK] Fix Observable Counters/UpDownCounters + [#2298](https://github.com/open-telemetry/opentelemetry-cpp/pull/2298) +* [SDK] Add exemplar reservoir to async metric storage + [#2319](https://github.com/open-telemetry/opentelemetry-cpp/pull/2319) +* [TEST] Fix lifetime issues in prometheus test utils + [#2322](https://github.com/open-telemetry/opentelemetry-cpp/pull/2322) +* [EXPORTER] Prometheus: Remove explicit timestamps from metric points + [#2324](https://github.com/open-telemetry/opentelemetry-cpp/pull/2324) +* [EXPORTER] Prometheus: Handle attribute key collisions from sanitation + [#2326](https://github.com/open-telemetry/opentelemetry-cpp/pull/2326) +* [EXPORTER] Prometheus cleanup, test with TranslateToPrometheus + [#2329](https://github.com/open-telemetry/opentelemetry-cpp/pull/2329) +* [SDK] Fix log message in Meter::RegisterSyncMetricStorage + [#2325](https://github.com/open-telemetry/opentelemetry-cpp/pull/2325) +* [DOC] Simplify the project status section + [#2332](https://github.com/open-telemetry/opentelemetry-cpp/pull/2332) +* [EXPORTER] Prometheus: Sanitize labels according to spec + [#2330](https://github.com/open-telemetry/opentelemetry-cpp/pull/2330) +* [SDK] Fix deadlock when shuting down http client + [#2337](https://github.com/open-telemetry/opentelemetry-cpp/pull/2337) +* [Exporter] Group spans by resource and instrumentation scope + in OTLP export requests + [#2335](https://github.com/open-telemetry/opentelemetry-cpp/pull/2335) +* [BUILD] Need fine-grained HAVE_CPP_STDLIB + [#2304](https://github.com/open-telemetry/opentelemetry-cpp/pull/2304) +* [API] Add InstrumentationScope attributes in MeterProvider::GetMeter() + [#2224](https://github.com/open-telemetry/opentelemetry-cpp/pull/2224) +* [REMOVAL] Drop C++11 support + [#2342](https://github.com/open-telemetry/opentelemetry-cpp/pull/2342) +* [EXPORTER] prometheus: add otel_scope_name and otel_scope_version labels + [#2293](https://github.com/open-telemetry/opentelemetry-cpp/pull/2293) +* [EXPORTER] Export resource for prometheus + [#2301](https://github.com/open-telemetry/opentelemetry-cpp/pull/2301) +* [BUILD] error: read-only reference ‘value’ used as ‘asm’ output 
+  [#2354](https://github.com/open-telemetry/opentelemetry-cpp/pull/2354)
+* [BUILD] Build break with external CMake nlohmann_json package
+  [#2353](https://github.com/open-telemetry/opentelemetry-cpp/pull/2353)
+* [BUILD] Upgrade libcurl to version 8.4.0
+  [#2358](https://github.com/open-telemetry/opentelemetry-cpp/pull/2358)
+* [BUILD] Fix opentracing-shim when added in super project
+  [#2356](https://github.com/open-telemetry/opentelemetry-cpp/pull/2356)
+* [BUILD] Fix protoc searching with non-imported protobuf::protoc target
+  [#2362](https://github.com/open-telemetry/opentelemetry-cpp/pull/2362)
+* [BUILD] Support to use different cmake package CONFIG of dependencies
+  [#2263](https://github.com/open-telemetry/opentelemetry-cpp/pull/2263)
+* [SEMANTIC CONVENTION] Upgrade to semconv 1.22.0
+  [#2368](https://github.com/open-telemetry/opentelemetry-cpp/pull/2368)
+
+Important changes:
+
+* [API] Add InstrumentationScope attributes in MeterProvider::GetMeter()
+  [#2224](https://github.com/open-telemetry/opentelemetry-cpp/pull/2224)
+  * MeterProvider::GetMeter() now accepts InstrumentationScope attributes.
+  * Because this is an `ABI` breaking change, the fix is only available
+    with the `CMake` option `WITH_ABI_VERSION_2=ON`.
+  * When building with `CMake` option `WITH_ABI_VERSION_1=ON` (by default)
+    the `ABI` is unchanged, and the fix is not available.
+
+Breaking changes:
+
+* [BUILD] Need fine-grained HAVE_CPP_STDLIB
+  [#2304](https://github.com/open-telemetry/opentelemetry-cpp/pull/2304)
+  * In `CMAKE`, the boolean option `WITH_STL` was changed to an option
+    that accepts the values `OFF`, `ON`, `CXX11`, `CXX14`, `CXX17`,
+    `CXX20` and `CXX23`.
+  * Application makefiles that did not set WITH_STL need to use
+    `WITH_STL=OFF` instead (this is the default).
+  * Application makefiles that did set WITH_STL need to use
+    `WITH_STL=ON` instead, or may choose to pick a specific value.
+  * In the `API` header files, the preprocessor symbol `HAVE_CPP_STDLIB`
+    is no longer used.
+  * Applications that previously set `HAVE_CPP_STDLIB` need to set
+    `OPENTELEMETRY_STL_VERSION=<version>` instead, to build with a
+    specific STL version (2011, 2014, 2017, 2020, 2023).
+  * The opentelemetry-cpp makefile no longer sets
+    CMAKE_CXX_STANDARD by itself.
+    Instead, the CMAKE_CXX_STANDARD and/or `-std=c++` compiler options
+    used by the caller are honored.
+  * Applications that set neither CMAKE_CXX_STANDARD nor a `-std=c++`
+    option may need to provide a C++ standard in their makefiles
+    (see the sketch after this list).
+
+* [REMOVAL] Drop C++11 support
+  [#2342](https://github.com/open-telemetry/opentelemetry-cpp/pull/2342)
+  * Building with C++11 is no longer supported.
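+
+A minimal sketch of the `WITH_STL` / `OPENTELEMETRY_STL_VERSION` migration
+described above; the standard and STL version chosen here are illustrative
+assumptions, not requirements:
+
+```cmake
+# Building opentelemetry-cpp itself: WITH_STL is now an enumerated value.
+#   Before:  cmake -DWITH_STL=ON  ..
+#   After:   cmake -DWITH_STL=CXX17 ..   # or OFF, ON, CXX11, CXX14, CXX20, CXX23
+
+# In an application that previously defined HAVE_CPP_STDLIB when consuming the
+# API headers: pick a C++ standard yourself and define OPENTELEMETRY_STL_VERSION.
+set(CMAKE_CXX_STANDARD 17)  # opentelemetry-cpp no longer sets this for you
+add_compile_definitions(OPENTELEMETRY_STL_VERSION=2017)  # replaces HAVE_CPP_STDLIB
+```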
+
+Deprecations:
+
+* [DEPRECATION] Deprecate ZPAGES
+  [#2291](https://github.com/open-telemetry/opentelemetry-cpp/pull/2291)
+
+## [1.11.0] 2023-08-21
+
+* [BUILD] Fix more cases for symbol name for 32-bit win32 DLL build
+  [#2264](https://github.com/open-telemetry/opentelemetry-cpp/pull/2264)
+* [BUILD] added public link of `opentelemetry_proto_grpc` against gRPC lib (only
+  if gRPC library is shared)
+  [#2268](https://github.com/open-telemetry/opentelemetry-cpp/pull/2268)
+* [CI] use ubuntu-latest for tsan CI
+  [#2267](https://github.com/open-telemetry/opentelemetry-cpp/pull/2267)
+* [SDK] Fixing an apparent logging macro bug
+  [#2265](https://github.com/open-telemetry/opentelemetry-cpp/pull/2265)
+* [BUILD] Support protobuf 3.22 or upper
+  [#2163](https://github.com/open-telemetry/opentelemetry-cpp/pull/2163)
+* [BUILD] Remove extra includes
+  [#2252](https://github.com/open-telemetry/opentelemetry-cpp/pull/2252)
+* [LOGS API SDK] Mark logs signal as stable API/SDK
+  [#2229](https://github.com/open-telemetry/opentelemetry-cpp/pull/2229)
+* [SEMANTIC CONVENTIONS] Upgrade to 1.21.0
+  [#2248](https://github.com/open-telemetry/opentelemetry-cpp/pull/2248)
+* [SDK] Valgrind errors on std::atomic variables
+  [#2244](https://github.com/open-telemetry/opentelemetry-cpp/pull/2244)
+* [BUILD] Fix compile with clang 16 and libc++
+  [#2242](https://github.com/open-telemetry/opentelemetry-cpp/pull/2242)
+* [Metrics SDK] Add unit to Instrument selection criteria
+  [#2236](https://github.com/open-telemetry/opentelemetry-cpp/pull/2236)
+* [SDK] Add OStreamLogRecordExporterFactory
+  [#2240](https://github.com/open-telemetry/opentelemetry-cpp/pull/2240)
+* [SDK] Add support for LowMemory metrics temporality
+  [#2234](https://github.com/open-telemetry/opentelemetry-cpp/pull/2234)
+* [CI] Misc build scripts cleanup
+  [#2232](https://github.com/open-telemetry/opentelemetry-cpp/pull/2232)
+* [CI] Upgrade GoogleTest version from 1.12.1 to 1.13.0
+  [#2114](https://github.com/open-telemetry/opentelemetry-cpp/pull/2114)
+* [BUILD] include cstdint
+  [#2230](https://github.com/open-telemetry/opentelemetry-cpp/pull/2230)
+* [EXPORTER] Support protobuf 3.22 or upper
+  [#2163](https://github.com/open-telemetry/opentelemetry-cpp/pull/2163)
+* [SDK] Mark logs signal as stable API/SDK
+  [#2229](https://github.com/open-telemetry/opentelemetry-cpp/pull/2229)
+
+Breaking changes:
+
+* [SDK] Add unit to Instrument selection criteria
+  [#2236](https://github.com/open-telemetry/opentelemetry-cpp/pull/2236)
+  * The `View` constructor and the `ViewFactory::Create` method now take a
+    `unit` criteria as an optional third argument.
+  * Please adjust SDK configuration code accordingly (see the sketch below).
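+
+A minimal sketch of a view created with the new optional `unit` criteria;
+the view name, description, unit and header path below are illustrative
+assumptions:
+
+```cpp
+#include "opentelemetry/sdk/metrics/view/view_factory.h"
+
+namespace metrics_sdk = opentelemetry::sdk::metrics;
+
+// Hypothetical view; the third argument is the new optional unit criteria.
+auto MakeDurationView()
+{
+  return metrics_sdk::ViewFactory::Create("http_server_duration_view",
+                                          "Server request duration",
+                                          "ms");
+}
+```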
+ +## [1.10.0] 2023-07-11 + +* [REMOVAL] Remove the jaeger exporter + [#2031](https://github.com/open-telemetry/opentelemetry-cpp/pull/2031) + +* [CI] Add a C++11 build + [#2152](https://github.com/open-telemetry/opentelemetry-cpp/pull/2152) + +* [CI] Add Include what you use + [#2214](https://github.com/open-telemetry/opentelemetry-cpp/pull/2214) + +* [CI] opentelemetry-cpp project CI + [#2071](https://github.com/open-telemetry/opentelemetry-cpp/pull/2071) + +* [CI] Do not tag pull_request with the "need-triage" label + [#2228](https://github.com/open-telemetry/opentelemetry-cpp/pull/2228) + +* [BUILD] Fixing CMake to build GTest on Windows + [#1887](https://github.com/open-telemetry/opentelemetry-cpp/pull/1887) + +* [BUILD] Remove option WITH_OTLP + [#2161](https://github.com/open-telemetry/opentelemetry-cpp/pull/2161) + +* [BUILD] Link to opentelemetry_logs even without OTLP + [#2177](https://github.com/open-telemetry/opentelemetry-cpp/pull/2177) + +* [BUILD] Avoid dependency on protobuf from the OTLP HTTP metrics exporter header + [#2179](https://github.com/open-telemetry/opentelemetry-cpp/pull/2179) + +* [BUILD] Add ctime header to metrics_exporter.cc + [#2187](https://github.com/open-telemetry/opentelemetry-cpp/pull/2187) + +* [BUILD] Fix the exported symbol name for 32-bit win32 DLL + [#2190](https://github.com/open-telemetry/opentelemetry-cpp/pull/2190) + +* [BUILD] Upgrade to opentelemetry-proto 0.20.0 + [#2195](https://github.com/open-telemetry/opentelemetry-cpp/pull/2195) + +* [BUILD] SDK Header files cleanup, use forward declarations + [#2182](https://github.com/open-telemetry/opentelemetry-cpp/pull/2182) + +* [BUILD] Enable building otel-cpp extensions from main repo + [#1937](https://github.com/open-telemetry/opentelemetry-cpp/pull/1937) + +* [BUILD] Fix if check on environment variable and add CMake variable + [#2207](https://github.com/open-telemetry/opentelemetry-cpp/pull/2207) + +* [BUILD] Add `OPENTELEMETRY_CPP_FOUND` into cmake CONFIG file + [#2215](https://github.com/open-telemetry/opentelemetry-cpp/pull/2215) + +* [BUILD] Upgrade opentelemetry-proto to 1.0.0 + [#2216](https://github.com/open-telemetry/opentelemetry-cpp/pull/2216) + +* [BUILD] Include nostd/string_view which is used in severity.h + [#2219](https://github.com/open-telemetry/opentelemetry-cpp/pull/2219) + +* [TEST] Expand api singleton test to cover explicit dlopen() + [#2164](https://github.com/open-telemetry/opentelemetry-cpp/pull/2164) + +* [API] Remove include_trace_context + [#2194](https://github.com/open-telemetry/opentelemetry-cpp/pull/2194) + +* [API] Remove Meters + [#2205](https://github.com/open-telemetry/opentelemetry-cpp/pull/2205) + +* [SDK] Add AdaptingCircularBufferCounter for exponential histograms + [#2158](https://github.com/open-telemetry/opentelemetry-cpp/pull/2158) + +* [SDK] Add base2 exponential histogram indexer + [#2173](https://github.com/open-telemetry/opentelemetry-cpp/pull/2173) + +* [SDK] Simplify SDK version + [#2180](https://github.com/open-telemetry/opentelemetry-cpp/pull/2180) + +* [SDK] Add benchmark for base2 exponential histogram indexer + [#2181](https://github.com/open-telemetry/opentelemetry-cpp/pull/2181) + +* [SDK] Provide builders to avoid exposing Metrics SDK internals + [#2189](https://github.com/open-telemetry/opentelemetry-cpp/pull/2189) + +* [SDK] MeterProvider should own MeterContext, not share it + [#2218](https://github.com/open-telemetry/opentelemetry-cpp/pull/2218) + +* [SDK] TracerProvider should own TracerContext, not share it + 
[#2221](https://github.com/open-telemetry/opentelemetry-cpp/pull/2221) + +* [EXPORTER] Change OTLP Json field name to camelCase + [#2162](https://github.com/open-telemetry/opentelemetry-cpp/pull/2162) + +* [EXPORTER] Support empty arrays in `OtlpRecordable` attributes + [#2166](https://github.com/open-telemetry/opentelemetry-cpp/pull/2166) + +* [EXPORTER] set is_monotonic only for instrument type kCounter + [#2171](https://github.com/open-telemetry/opentelemetry-cpp/pull/2171) + +* [EXPORTER] Fixed HTTP CURL for 32bits platforms + [#2178](https://github.com/open-telemetry/opentelemetry-cpp/pull/2178) + +* [EXPORTER] Fix OTLP HTTP exporting in sync mode + [#2193](https://github.com/open-telemetry/opentelemetry-cpp/pull/2193) + +* [EXPORTER] Prometheus exporter sanitizes invalid characters + [#1934](https://github.com/open-telemetry/opentelemetry-cpp/pull/1934) + +* [EXPORTER] Prometheus: Error on ingesting samples + with different value but same timestamp + [#2200](https://github.com/open-telemetry/opentelemetry-cpp/pull/2200) + +* [EXPORTER] OTLP GRPC mTLS support + [#2120](https://github.com/open-telemetry/opentelemetry-cpp/pull/2120) + +* [DOC] Small fix for Histogram documentation + [#2156](https://github.com/open-telemetry/opentelemetry-cpp/pull/2156) + +* [DOC] Move Reiley Yang to emeritus + [#2198](https://github.com/open-telemetry/opentelemetry-cpp/pull/2198) + +Important changes: + +* [API] Remove Meters + [#2205](https://github.com/open-telemetry/opentelemetry-cpp/pull/2205) + * The CMake option `WITH_REMOVE_METER_PREVIEW` was added. + * This option is experimental, and may change in the future. + * Enabling it is an ABI breaking change. + +Breaking changes: + +* [REMOVAL] Remove the jaeger exporter + [#2031](https://github.com/open-telemetry/opentelemetry-cpp/pull/2031) + * The CMake `WITH_JAEGER` option has been removed + * Please remove usage of `WITH_JAEGER` from user scripts and makefiles. + +* [SDK] MeterProvider should own MeterContext, not share it + [#2218](https://github.com/open-telemetry/opentelemetry-cpp/pull/2218) + * The `MeterProvider` constructor now takes a `unique_ptr` on + `MeterContext`, instead of a `shared_ptr`. + * Please adjust SDK configuration code accordingly. + +* [SDK] TracerProvider should own TracerContext, not share it + [#2221](https://github.com/open-telemetry/opentelemetry-cpp/pull/2221) + * The `TracerProvider` constructor now takes a `unique_ptr` on + `TracerContext`, instead of a `shared_ptr`. + * The `LoggerProvider` constructor now takes a `unique_ptr` on + `LoggerContext`, instead of a `shared_ptr`. + * Please adjust SDK configuration code accordingly. + +## [1.9.1] 2023-05-26 + +* [DEPRECATION] Drop C++11 support + [#2146](https://github.com/open-telemetry/opentelemetry-cpp/pull/2146) + +* [CI] Upgrade Bazel and Bazelisk version + [#2118](https://github.com/open-telemetry/opentelemetry-cpp/pull/2118) +* [CI] Upgrade Google Benchmark version from 1.6.0 to 1.7.1 + [#2116](https://github.com/open-telemetry/opentelemetry-cpp/pull/2116) +* [CI] Upgrade Nlohmann JSON library version from 3.10.5 to 3.11.2 + [#2115](https://github.com/open-telemetry/opentelemetry-cpp/pull/2115) + +* [BUILD] Missed include + [#2143](https://github.com/open-telemetry/opentelemetry-cpp/pull/2143) +* [BUILD] Add opentelemetry_proto_grpc and allow build shared + opentelemetry_proto and opentelemetry_proto_grpc on non-Windows platform. 
+  [#2097](https://github.com/open-telemetry/opentelemetry-cpp/pull/2097)
+* [BUILD] Warning cleanup, single character wrapped by std::string
+  [#2137](https://github.com/open-telemetry/opentelemetry-cpp/pull/2137)
+* [BUILD] Add missing target dependencies
+  [#2128](https://github.com/open-telemetry/opentelemetry-cpp/pull/2128)
+* [BUILD] Fix if JSON library already added another CMake target
+  [#2126](https://github.com/open-telemetry/opentelemetry-cpp/pull/2126)
+* [BUILD] shared libraries with version suffix, along with the symbolic link
+  [#2109](https://github.com/open-telemetry/opentelemetry-cpp/pull/2109)
+* [BUILD] Show warning message if WITH_OTLP is enabled
+  [#2112](https://github.com/open-telemetry/opentelemetry-cpp/pull/2112)
+* [BUILD] Add missing STL header.
+  [#2107](https://github.com/open-telemetry/opentelemetry-cpp/pull/2107)
+* [BUILD] Build break with old curl, macro CURL_VERSION_BITS unknown
+  [#2102](https://github.com/open-telemetry/opentelemetry-cpp/pull/2102)
+* [BUILD] Transitive dependency issue with the otlp http exporter
+  [#2154](https://github.com/open-telemetry/opentelemetry-cpp/pull/2154)
+
+* [TEST] Add unit test for log body implicit conversions.
+  [#2136](https://github.com/open-telemetry/opentelemetry-cpp/pull/2136)
+* [TEST] Add event id to logger benchmark method
+  [#2133](https://github.com/open-telemetry/opentelemetry-cpp/pull/2133)
+
+* [API] Fix inclusion header files and use forward declaration
+  [#2124](https://github.com/open-telemetry/opentelemetry-cpp/pull/2124)
+* [API] Add user facing Logging API and Benchmarks
+  [#2094](https://github.com/open-telemetry/opentelemetry-cpp/pull/2094)
+
+* [SDK] SDK support for the new OTel log
+  [#2123](https://github.com/open-telemetry/opentelemetry-cpp/pull/2123)
+
+* [EXPORTER] Fixed HTTP session cleanup on shutdown
+  [#2111](https://github.com/open-telemetry/opentelemetry-cpp/pull/2111)
+* [EXPORTER] Delegate all API calls of gRPC into
+  opentelemetry_exporter_otlp_grpc_client,
+  and make it contains all symbols needed.
+  [#2005](https://github.com/open-telemetry/opentelemetry-cpp/pull/2005)
+
+* [DOC] Add Marc as maintainer.
+  [#2027](https://github.com/open-telemetry/opentelemetry-cpp/pull/2027)
+
+Breaking changes:
+
+* Add opentelemetry_proto_grpc and move gRPC sources into it.
+  [#2097](https://github.com/open-telemetry/opentelemetry-cpp/pull/2097)
+  * There will be no breaking changes for users who only use OTLP exporters and
+    do not directly use opentelemetry-cpp::proto. However, it is important to
+    note that `opentelemetry-cpp::proto` no longer contains the generated gRPC
+    code, and all components that depend on that gRPC code should also link to
+    `opentelemetry-cpp::proto_grpc` (see the CMake sketch at the end of this
+    section).
+
+Deprecations:
+
+* The Jaeger Exporter is deprecated; see [DEPRECATED](./DEPRECATED.md) for details.
+* C++11 support is ending; C++14 will be supported instead.
+  See [DEPRECATED](./DEPRECATED.md) for details.
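+
+A minimal CMake sketch of the linking adjustment described in the breaking
+change above; the `my_component` target and source file are hypothetical:
+
+```cmake
+# Hypothetical consumer that uses the generated gRPC code directly.
+add_library(my_component my_component.cc)
+target_link_libraries(my_component
+  PRIVATE
+    opentelemetry-cpp::proto       # generated protobuf messages
+    opentelemetry-cpp::proto_grpc) # generated gRPC code now lives here
+```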
+ +## [1.9.0] 2023-04-12 + +* [CI] Make build environment parallel (Windows) + [#2080](https://github.com/open-telemetry/opentelemetry-cpp/pull/2080) +* [CI] Make build environment parallel (Linux) + [#2076](https://github.com/open-telemetry/opentelemetry-cpp/pull/2076) +* [CI] Remove separate run of metrics ostream example + [#2030](https://github.com/open-telemetry/opentelemetry-cpp/pull/2030) + +* [BUILD] Include directory path added for Zipkin exporter example + [#2069](https://github.com/open-telemetry/opentelemetry-cpp/pull/2069) +* [BUILD] Ignore more warning in generated protobuf files + [#2067](https://github.com/open-telemetry/opentelemetry-cpp/pull/2067) +* [BUILD] Clean warnings in ETW exporters + [#2063](https://github.com/open-telemetry/opentelemetry-cpp/pull/2063) +* [BUILD] Fix default value of OPENTELEMETRY_INSTALL_default + [#2062](https://github.com/open-telemetry/opentelemetry-cpp/pull/2062) + +* [SEMANTIC CONVENTIONS] Upgrade to version 1.20.0 + [#2088](https://github.com/open-telemetry/opentelemetry-cpp/pull/2088) +* [SEMANTIC CONVENTIONS] Upgrade to version 1.19.0 + [#2017](https://github.com/open-telemetry/opentelemetry-cpp/pull/2017) + +* [API] Checking indices before dereference in string utils + [#2040](https://github.com/open-telemetry/opentelemetry-cpp/pull/2040) +* [API] Export factory class of log provider + [#2041](https://github.com/open-telemetry/opentelemetry-cpp/pull/2041) + +* [SDK] Implement Forceflush for Periodic Metric Reader + [#2064](https://github.com/open-telemetry/opentelemetry-cpp/pull/2064) +* [SDK] Add `ForceFlush` for all `LogRecordExporter` and `SpanExporter` + [#2000](https://github.com/open-telemetry/opentelemetry-cpp/pull/2000) +* [SDK] Fix schema URL precedence bug in `Resource::Merge` + [#2036](https://github.com/open-telemetry/opentelemetry-cpp/pull/2036) +* [SDK] Use sdk_start_ts for MetricData start_ts for instruments having + cumulative aggregation temporality. 
+  [#2086](https://github.com/open-telemetry/opentelemetry-cpp/pull/2086)
+
+* [EXPORTER] Add OTLP HTTP SSL support
+  [#1793](https://github.com/open-telemetry/opentelemetry-cpp/pull/1793)
+* [EXPORTER] GRPC endpoint scheme should take precedence over OTEL_EXPORTER_OTLP_TRACES_INSECURE
+  [#2060](https://github.com/open-telemetry/opentelemetry-cpp/pull/2060)
+
+* [EXAMPLES] Remove unused 'alerting' section from prometheus.yml in examples
+  [#2055](https://github.com/open-telemetry/opentelemetry-cpp/pull/2055)
+* [EXAMPLES] Fix view names in Prometheus example
+  [#2034](https://github.com/open-telemetry/opentelemetry-cpp/pull/2034)
+
+* [DOC] Fix some docs typo
+  [#2057](https://github.com/open-telemetry/opentelemetry-cpp/pull/2057)
+* [DOC] Update OpenTracing shim README.md
+  [#2028](https://github.com/open-telemetry/opentelemetry-cpp/pull/2028)
+* [DOC] INSTALL doc clarifications
+  [#2078](https://github.com/open-telemetry/opentelemetry-cpp/pull/2078)
+
+Important changes:
+
+* [EXPORTER] GRPC endpoint scheme should take precedence over OTEL_EXPORTER_OTLP_TRACES_INSECURE
+  [#2060](https://github.com/open-telemetry/opentelemetry-cpp/pull/2060)
+  * The logic to decide whether or not an OTLP GRPC exporter uses SSL has
+    changed to comply with the specification:
+  * Before this change, the following settings were evaluated, in order:
+    * OTEL_EXPORTER_OTLP_TRACES_INSECURE (starting with 1.8.3)
+    * OTEL_EXPORTER_OTLP_INSECURE (starting with 1.8.3)
+    * OTEL_EXPORTER_OTLP_TRACES_SSL_ENABLE
+    * OTEL_EXPORTER_OTLP_SSL_ENABLE
+  * With this change, the following settings are evaluated, in order:
+    * The GRPC endpoint scheme, if provided:
+      * "https" implies SSL,
+      * "http" implies no SSL.
+    * OTEL_EXPORTER_OTLP_TRACES_INSECURE
+    * OTEL_EXPORTER_OTLP_INSECURE
+    * OTEL_EXPORTER_OTLP_TRACES_SSL_ENABLE
+    * OTEL_EXPORTER_OTLP_SSL_ENABLE
+  * As a result, a behavior change for GRPC SSL is possible,
+    because the endpoint scheme now takes precedence.
+    Please verify configuration settings for the GRPC endpoint.
+* [SDK] Add `ForceFlush` for all `LogRecordExporter` and `SpanExporter`
+  [#2000](https://github.com/open-telemetry/opentelemetry-cpp/pull/2000)
+  * `LogRecordExporter` and `SpanExporter` add a new virtual function
+    `ForceFlush`; users who implement a customized `LogRecordExporter` or
+    `SpanExporter` should also implement this function.
+    There is no impact if users only use the factories to create exporters.
+
+Deprecations:
+
+* The Jaeger Exporter is deprecated; see [DEPRECATED](./DEPRECATED.md) for details.
+
+## [1.8.3] 2023-03-06
+
+* Provide version major/minor/patch macros
+  [#2014](https://github.com/open-telemetry/opentelemetry-cpp/pull/2014)
+* [BUILD] Add `OPENTELEMETRY_INSTALL` to allow user to skip install targets.
+ [#2022](https://github.com/open-telemetry/opentelemetry-cpp/pull/2022) +* [SDK] Rename the global SDK version variables to avoid naming clash + [#2011](https://github.com/open-telemetry/opentelemetry-cpp/pull/2011) +* [BUILD] Fix typo in CMakeLists.txt + [#2010](https://github.com/open-telemetry/opentelemetry-cpp/pull/2010) +* [EXPORTER] fix Prometheus test iterator iterator increment + [#2006](https://github.com/open-telemetry/opentelemetry-cpp/pull/2006) +* [SDK]Add attributes for InstrumentationScope + [#2004](https://github.com/open-telemetry/opentelemetry-cpp/pull/2004) +* [METRICS SDK] Performance improvement in measurement processing + [#1993](https://github.com/open-telemetry/opentelemetry-cpp/pull/1993) +* [EXAMPLE] Add example for logs ostream exporter + [#1992](https://github.com/open-telemetry/opentelemetry-cpp/pull/1992) +* [ETW Exporter] Support serialize span/log attributes into JSON + [#1991](https://github.com/open-telemetry/opentelemetry-cpp/pull/1991) +* [ETW Exporter]Do not overwrite ParentId when setting attribute on Span + [#1989](https://github.com/open-telemetry/opentelemetry-cpp/pull/1989) +* Upgrade prometheus-cpp to v1.1.0 + [#1954](https://github.com/open-telemetry/opentelemetry-cpp/pull/1954) +* Convert Prometheus Exporter to Pull MetricReader + [#1953](https://github.com/open-telemetry/opentelemetry-cpp/pull/1953) +* [DOCS] Add alpine packages to INSTALL.md + [#1957](https://github.com/open-telemetry/opentelemetry-cpp/pull/1957) +* [METRICS SDK] Add benchmark tests for Sum Aggregation. + [#1948](https://github.com/open-telemetry/opentelemetry-cpp/pull/1948) +* [BUILD] Build OpenTelemetry SDK and exporters into DLL + [#1932](https://github.com/open-telemetry/opentelemetry-cpp/pull/1932) +* [CI] Enforce copyright check in CI + [#1965](https://github.com/open-telemetry/opentelemetry-cpp/pull/1965) +* [BUILD] Fix typo GENENV -> GETENV + [#1972](https://github.com/open-telemetry/opentelemetry-cpp/pull/1972) +* [SEMANTIC CONVENTIONS] Upgrade to version 1.18.0 + [#1974](https://github.com/open-telemetry/opentelemetry-cpp/pull/1974) +* [EXT] Fix thread-safety when shutdown. + [#1977](https://github.com/open-telemetry/opentelemetry-cpp/pull/1977) +* [SDK] Fix missing ObservedTimestamp. + [#1985](https://github.com/open-telemetry/opentelemetry-cpp/pull/1985) +* [METRICS SDK] fix variable names + [#1987](https://github.com/open-telemetry/opentelemetry-cpp/pull/1987) +* [EXPORTER] Fix Prometheus server crash on listening to already used port + [#1986](https://github.com/open-telemetry/opentelemetry-cpp/pull/1986) +* [EXPORTER] Boolean environment variables not parsed per the spec + [#1982](https://github.com/open-telemetry/opentelemetry-cpp/pull/1982) +* [EXPORTER] Opentracing shim + [#1909](https://github.com/open-telemetry/opentelemetry-cpp/pull/1909) + +## [1.8.2] 2023-01-31 + +* Remove redundant macro check in nostd::shared_ptr [#1939](https://github.com/open-telemetry/opentelemetry-cpp/pull/1939) +* Fix typo in packages.cmake causing incorrect nuget package versions [#1936](https://github.com/open-telemetry/opentelemetry-cpp/pull/1936) +* [METRICS] Custom Aggregation support [#1899](https://github.com/open-telemetry/opentelemetry-cpp/pull/1899) +* Small fix in INSTALL.md for enabling building package. 
[#1930](https://github.com/open-telemetry/opentelemetry-cpp/pull/1930) +* [METRICS] Fix warning for misconfiguration of PeriodicExportingMetricReader [#1929](https://github.com/open-telemetry/opentelemetry-cpp/pull/1929) +* Make macros.h available for all source files via version.h [#1918](https://github.com/open-telemetry/opentelemetry-cpp/pull/1918) +* [METRICS] Histogram Aggregation: Fix bucket detection logic, + performance improvements, and benchmark tests [#1869](https://github.com/open-telemetry/opentelemetry-cpp/pull/1869) +* Remove unused namespace alias for nostd [#1914](https://github.com/open-telemetry/opentelemetry-cpp/pull/1914) +* [METRICS] Update meter.h [#1907](https://github.com/open-telemetry/opentelemetry-cpp/pull/1907) +* sdk::resource::Resource::Merge should be const [#1905](https://github.com/open-telemetry/opentelemetry-cpp/pull/1905) +* [METRICS] Collect and Export metric data before + PeriodicMetricReader shutdown. [#1860](https://github.com/open-telemetry/opentelemetry-cpp/pull/1860) +* [ETW EXPORTER] Add Virtual destructor for TailSampler, Update Maintainer + mode warnings for MSVC [#1897](https://github.com/open-telemetry/opentelemetry-cpp/pull/1897) +* Fix #1867 Orderly shutdown in examples [#1868](https://github.com/open-telemetry/opentelemetry-cpp/pull/1868) +* [METRICS] minor metrics handling optimizations [#1890](https://github.com/open-telemetry/opentelemetry-cpp/pull/1890) +* fix SpinLockMutex for Intel Compiler [#1885](https://github.com/open-telemetry/opentelemetry-cpp/pull/1885) +* [LOGS] Change BatchLogRecordProcessorFactory::Create to static method [#1876](https://github.com/open-telemetry/opentelemetry-cpp/pull/1876) +* Enable generating deb, rpm, NuGet, tgz, zip package through cmake build [#1662](https://github.com/open-telemetry/opentelemetry-cpp/pull/1662) +* Updated clone command in INSTALL.md [#1818](https://github.com/open-telemetry/opentelemetry-cpp/pull/1818) +* Small cleanup to remove old metrics design docs [#1855](https://github.com/open-telemetry/opentelemetry-cpp/pull/1855) +* [BUILD] Fix build error with older version of VS2017 compiler. [1857](https://github.com/open-telemetry/opentelemetry-cpp/pull/1857) +* [EXPORTERS] Enable setting Span endtime for ETW exporter [#1846](https://github.com/open-telemetry/opentelemetry-cpp/pull/1846) +* [REMOVAL] Remove deprecated experimental semantic conventions [#1743](https://github.com/open-telemetry/opentelemetry-cpp/pull/1743) +* [EXPORTERS] Fix console debug logs for otlp exporters. 
+  [#1848](https://github.com/open-telemetry/opentelemetry-cpp/pull/1848)
+* [LOGS] Add `include_trace_context` and `EventLogger` [#1884](https://github.com/open-telemetry/opentelemetry-cpp/pull/1884)
+* [METRICS] Change BatchLogRecordProcessorFactory::Create to static method
+* [BUILD] Fix OTELCPP_MAINTAINER_MODE [#1844](https://github.com/open-telemetry/opentelemetry-cpp/pull/1844)
+* [BUILD] Fix compatibility when using clang and libc++, upgrade GTest and
+  cmake when using C++20 [#1852](https://github.com/open-telemetry/opentelemetry-cpp/pull/1852)
+* [SEMANTIC CONVENTIONS] Upgrade to version 1.16.0
+  [#1854](https://github.com/open-telemetry/opentelemetry-cpp/pull/1854)
+* [SDK] BatchSpanProcessor now logs a warning when dropping a span because the
+  queue is full
+  [#1871](https://github.com/open-telemetry/opentelemetry-cpp/pull/1871)
+* [BUILD] Migrate from @bazel_tools//platforms to [Bazel Platforms](https://github.com/bazelbuild/platforms)
+  to enable Bazel 6.0.0 compatibility [#1873](https://github.com/open-telemetry/opentelemetry-cpp/pull/1873)
+* [BUILD] Cleanup CMake makefiles for nlohmann_json
+  [#1912](https://github.com/open-telemetry/opentelemetry-cpp/pull/1912)
+* [BUILD] Cleanup CMake makefiles for CURL usage
+  [#1916](https://github.com/open-telemetry/opentelemetry-cpp/pull/1916)
+* [SEMANTIC CONVENTIONS] Upgrade to version 1.17.0
+  [#1927](https://github.com/open-telemetry/opentelemetry-cpp/pull/1927)
+* [MAINTAINER DOC] Define and document a deprecation process,
+  [DEPRECATION] Deprecate the Jaeger exporter,
+  implemented by [#1923](https://github.com/open-telemetry/opentelemetry-cpp/pull/1923)
+* [BUILD] OTLP HTTP Exporter has build warnings in maintainer mode
+  [#1943](https://github.com/open-telemetry/opentelemetry-cpp/pull/1943)
+
+Deprecations:
+
+* [MAINTAINER DOC] Define and document a deprecation process,
+  [#1923](https://github.com/open-telemetry/opentelemetry-cpp/pull/1923)
+  * A new file, [DEPRECATED](./DEPRECATED.md), lists all the code currently
+    deprecated.
+  * A new [deprecation process](./docs/deprecation-process.md) details the plan to
+    deprecate and later remove code.
+* [DEPRECATION] Deprecate the Jaeger exporter
+  [#1923](https://github.com/open-telemetry/opentelemetry-cpp/pull/1923)
+  * The Jaeger Exporter is deprecated; see [DEPRECATED](./DEPRECATED.md) for details.
+
+Important changes:
+
+* [BUILD] Cleanup CMake makefiles for CURL usage
+  [#1916](https://github.com/open-telemetry/opentelemetry-cpp/pull/1916)
+  * CMake option `WITH_OTLP_HTTP`
+    * Before this change, the CMake option `WITH_OTLP_HTTP` was unpredictable,
+      sometimes set to ON and sometimes set to OFF by default,
+      depending on whether a CURL package was found or not.
+      The option `WITH_OTLP_HTTP` was sometimes not displayed in the ccmake
+      UI, making it impossible to even discover there is an option of that name.
+    * With this change, CMake option `WITH_OTLP_HTTP` is always OFF by
+      default. WITH_OTLP_HTTP MUST be set to ON explicitly to build the
+      OTLP HTTP exporter. The option is always visible in the ccmake UI.
+  * CMake option `BUILD_W3CTRACECONTEXT_TEST`
+    * Before this change, the W3C trace context tests were built, or not,
+      unpredictably, depending on whether a CURL package was present.
+      In particular, the build could ignore the W3C trace
+      context tests even when BUILD_W3CTRACECONTEXT_TEST=ON.
+    * With this change, option BUILD_W3CTRACECONTEXT_TEST is honored.
+ * HTTP client/server examples + * Before this change, the HTTP client/server examples were built, or + not, in an unpredictable way, depending on the presence, or not, of a + CURL package. + * With this change, a new option `WITH_EXAMPLES_HTTP` is used to + build the HTTP client/server examples. + +## [1.8.1] 2022-12-04 + +* [ETW Exporter] Tail based sampling support [#1780](https://github.com/open-telemetry/opentelemetry-cpp/pull/1780) +* [EXPORTERS] fix typo [affecting otlp exported histogram metrics max uint] [#1827](https://github.com/open-telemetry/opentelemetry-cpp/pull/1827) +* [EXPORTERS] fix enum-compare-switch warning [#1833](https://github.com/open-telemetry/opentelemetry-cpp/pull/1833) +* [METRICS] Change default temporality as "Cumulative" for OTLP metrics +exporters [#1828](https://github.com/open-telemetry/opentelemetry-cpp/pull/1828) +* [BUILD] Moved otlp_grpc_utils.cc to opentelemetry_exporter_otlp_grpc_client. +[#1829](https://github.com/open-telemetry/opentelemetry-cpp/pull/1829) +* Fix type mismatch when move nostd::shared_ptr [#1815](https://github.com/open-telemetry/opentelemetry-cpp/pull/1815) +* [BUILD] Fix Prometheus target name [#1820](https://github.com/open-telemetry/opentelemetry-cpp/pull/1820) +* Clean unused docker files [#1817](https://github.com/open-telemetry/opentelemetry-cpp/pull/1817) +* [BUILD] Fix default bazel build [#1816](https://github.com/open-telemetry/opentelemetry-cpp/pull/1816) +* [BUILD] move client::nosend under test_common [#1811](https://github.com/open-telemetry/opentelemetry-cpp/pull/1811) +* [BUILD] Fix opentelemetry-proto file exists check [#1824](https://github.com/open-telemetry/opentelemetry-cpp/pull/1824) + +## [1.8.0] 2022-11-27 + +* [DOC] Update Metrics status in README.md [#1722](https://github.com/open-telemetry/opentelemetry-cpp/pull/1722) +* [DOC] Remove misleading comments about ABI compatibility for nostd::span [#1731](https://github.com/open-telemetry/opentelemetry-cpp/pull/1731) +* [BUILD] Bump abseil-cpp for cmake CI [#1807](https://github.com/open-telemetry/opentelemetry-cpp/pull/1807) +* [Exporter] Add status code to OTLP grpc trace log [#1792](https://github.com/open-telemetry/opentelemetry-cpp/pull/1792) +* [Exporter] add fix for prometheus exporter build [#1795](https://github.com/open-telemetry/opentelemetry-cpp/pull/1795) +* [BUILD] Add option WITH_BENCHMARK to disable building benchmarks [#1794](https://github.com/open-telemetry/opentelemetry-cpp/pull/1794) +* [BUILD] Fix CI benchmark [#1799](https://github.com/open-telemetry/opentelemetry-cpp/pull/1799) +* [BUILD] bump to gRPC v1.48.1 for bazel CIs [#1786](https://github.com/open-telemetry/opentelemetry-cpp/pull/1786) +* [BUILD] Fix CI build [#1798](https://github.com/open-telemetry/opentelemetry-cpp/pull/1798) +* [BUILD] Fix clang-format in CI [#1796](https://github.com/open-telemetry/opentelemetry-cpp/pull/1796) +* Fix session lock of OtlpHttpClient [#1760](https://github.com/open-telemetry/opentelemetry-cpp/pull/1760) +* [Metrics SDK] Add MeterContext::ForEachMeter() method to process callbacks on + Meter in thread-safe manner [#1783](https://github.com/open-telemetry/opentelemetry-cpp/pull/1783) +* [DOC] Document that clang-format version 10.0 is used. 
[#1782](https://github.com/open-telemetry/opentelemetry-cpp/pull/1782) +* [BUILD] Upgrade bazel build to use abseil-cpp-20220623.1 [#1779](https://github.com/open-telemetry/opentelemetry-cpp/pull/1779) +* Fix GlobalLogHandler singleton creation order [#1767](https://github.com/open-telemetry/opentelemetry-cpp/pull/1767) +* [Metrics SDK] Change Prometheus CMake target name [#1765](https://github.com/open-telemetry/opentelemetry-cpp/pull/1765) +* [DOC] Cleanup INSTALL.md [#1757](https://github.com/open-telemetry/opentelemetry-cpp/pull/1757) +* [DOC] Format config options in OTLP exporter readme [#1748](https://github.com/open-telemetry/opentelemetry-cpp/pull/1748) +* [DOC] Cleanup ENABLE_METRICS_PREVIEW [#1745](https://github.com/open-telemetry/opentelemetry-cpp/pull/1745) +* [Build] Multiple CURL packages leads to invalid build (#1738) [#1739](https://github.com/open-telemetry/opentelemetry-cpp/pull/1739) +* [Metrics SDK] Cleanup ENABLE_METRICS_PREVIEW [#1735](https://github.com/open-telemetry/opentelemetry-cpp/pull/1735) +* [Logs SDK] LogProcessor, LogExporter class name [#1736](https://github.com/open-telemetry/opentelemetry-cpp/pull/1736) +* [Metrics SDK] Cleanup of old _metric api/sdk [#1734](https://github.com/open-telemetry/opentelemetry-cpp/pull/1734) +* [ETW Exporter] Fix span timestamp(s) precision to nanoseconds [#1726](https://github.com/open-telemetry/opentelemetry-cpp/pull/1726) +* [LOGS SDK] Rename LogProcessor and LogExporter to LogRecordProcessor and LogRecordExporter + [#1727](https://github.com/open-telemetry/opentelemetry-cpp/pull/1727) +* [METRICS SDK] - Remove old metrics from Github CI + [#1733](https://github.com/open-telemetry/opentelemetry-cpp/pull/1733) +* [BUILD] Add CMake OTELCPP_PROTO_PATH [#1730](https://github.com/open-telemetry/opentelemetry-cpp/pull/1730) +* [SEMANTIC CONVENTIONS] Upgrade to version 1.15.0 + [#1761](https://github.com/open-telemetry/opentelemetry-cpp/pull/1761) +* [LOGS SDK] New LogRecord and logs::Recordable implementations. + [#1766](https://github.com/open-telemetry/opentelemetry-cpp/pull/1766) + +Deprecation notes: + +* [Deprecation] Deprecate experimental semantic conventions + [#1744](https://github.com/open-telemetry/opentelemetry-cpp/pull/1744) + * The file + `api/include/opentelemetry/trace/experimental_semantic_conventions.h` + is deprecated, and will be removed in a future release. + Use file + `api/include/opentelemetry/trace/semantic_conventions.h` + instead. + + * The file + `sdk/include/opentelemetry/sdk/resource/experimental_semantic_conventions.h` + is deprecated, and will be removed in a future release. + Use file + `sdk/include/opentelemetry/sdk/resource/semantic_conventions.h` + instead. + + * The function, declared in the global namespace + `uint32_t hashCode(const char *str, uint32_t h = 0)` + is deprecated, and will be removed in a future release. + No replacement will be provided. + Note that function `opentelemetry::utils::hashCode`, + declared in the ETW exporter, is not affected by this deprecation. + +Breaking changes: + +* [SEMANTIC CONVENTIONS] Upgrade to version 1.15.0 + [#1761](https://github.com/open-telemetry/opentelemetry-cpp/pull/1761) + * Naming of semantic conventions has changed from uppercase constants, + like `SemanticConventions::SERVICE_NAME`, + to camel case, like `SemanticConventions::kServiceName`. + This is necessary to avoid collisions with macros in general, + which breaks the build on some platforms. 
+ * Semantic conventions are flagged as experimental, + which is why this change is done in this release. + +## [1.7.0] 2022-10-28 + +* [METRICS SDK] Validate Instrument meta data (name, unit, description) [#1713](https://github.com/open-telemetry/opentelemetry-cpp/pull/1713) +* [DOCS] Document libthrift 0.12.0 doesn't work with Jaeger exporter [#1714](https://github.com/open-telemetry/opentelemetry-cpp/pull/1714) +* [Metrics SDK] Add Monotonic Property to Sum Aggregation, and +unit tests for Up Down Counter [#1675](https://github.com/open-telemetry/opentelemetry-cpp/pull/1675) +* [Metrics SDK] Move Metrics Exemplar processing behind feature flag [#1710](https://github.com/open-telemetry/opentelemetry-cpp/pull/1710) +* [Metrics API/SDK] Change Meter API/SDK to return nostd::unique_ptr + for Sync Instruments [#1707](https://github.com/open-telemetry/opentelemetry-cpp/pull/1707) +which includes breaking change in the Metrics api and sdk. +* [BUILD] Add e2e test to asan & tsan CI [#1670](https://github.com/open-telemetry/opentelemetry-cpp/pull/1670) +* [BUILD] Add otlp-grpc example bazel [#1708](https://github.com/open-telemetry/opentelemetry-cpp/pull/1708) +* [TRACE SDK] Fix debug log of OTLP HTTP exporter and ES log exporter [#1703](https://github.com/open-telemetry/opentelemetry-cpp/pull/1703) +* [METRICS SDK] Fix a potential precision loss on integer in +ReservoirCellIndexFor [#1696](https://github.com/open-telemetry/opentelemetry-cpp/pull/1696) +* [METRICS SDK] Fix Histogram crash [#1685](https://github.com/open-telemetry/opentelemetry-cpp/pull/1685) +* [METRICS SDK] Fix:1676 Segfault when short export period is used for metrics [#1682](https://github.com/open-telemetry/opentelemetry-cpp/pull/1682) +* [METRICS SDK] Add timeout support to MeterContext::ForceFlush [#1673](https://github.com/open-telemetry/opentelemetry-cpp/pull/1673) +* [DOCS] - Minor updates to OStream Metrics exporter documentation [#1679](https://github.com/open-telemetry/opentelemetry-cpp/pull/1679) +* [DOCS] Fix:#1575 API Documentation for Metrics SDK and API [#1678](https://github.com/open-telemetry/opentelemetry-cpp/pull/1678) +* [BUILD] Fixed compiler warnings [#1677](https://github.com/open-telemetry/opentelemetry-cpp/pull/1677) +* [METRICS SDK] Fix threading issue between Meter::RegisterSyncMetricStorage + and Meter::Collect [#1666](https://github.com/open-telemetry/opentelemetry-cpp/pull/1666) +* [METRICS SDK] Fix data race on MeterContext::meters_ [#1668](https://github.com/open-telemetry/opentelemetry-cpp/pull/1668) +* [METRICS SDK] Fix observable Gauge metrics generation [#1651](https://github.com/open-telemetry/opentelemetry-cpp/pull/1651) +* [BUILD] Detect ARCH=sparc in CMake [#1660](https://github.com/open-telemetry/opentelemetry-cpp/pull/1660) +* [SDK] Add user agent for OTLP http/grpc client [#1657](https://github.com/open-telemetry/opentelemetry-cpp/pull/1657) +* [BUILD] Fix clang and gcc warnings [#1658](https://github.com/open-telemetry/opentelemetry-cpp/pull/1658) +* [Metrics SDK] Add Metrics ExemplarFilter and ExemplarReservoir [#1584](https://github.com/open-telemetry/opentelemetry-cpp/pull/1584) +* [LOGS SDK] Rename OnReceive to OnEmit [#1652](https://github.com/open-telemetry/opentelemetry-cpp/pull/1652) +* [METRICS SDK] Fix Observable Gauge does not reflect updated values, +and send the old value always [#1641](https://github.com/open-telemetry/opentelemetry-cpp/pull/1641) +* [Metrics SDK] Change boundary type to double for Explicit Bucket Histogram Aggregation, +and change default bucket range 
[#1626](https://github.com/open-telemetry/opentelemetry-cpp/pull/1626) +* [METRICS SDK] Fix occasional Segfault with LongCounter instrument [#1638](https://github.com/open-telemetry/opentelemetry-cpp/pull/1638) +* [BUILD] Bump vcpk to 2022.08.15 [#1633](https://github.com/open-telemetry/opentelemetry-cpp/pull/1633) +* [BUILD] Bump gRPC to v1.48.1 for CMake Linux CI [#1608](https://github.com/open-telemetry/opentelemetry-cpp/pull/1608) +* [Metrics] Switch to explicit 64 bit integers [#1686](https://github.com/open-telemetry/opentelemetry-cpp/pull/1686) + which includes breaking change in the Metrics api and sdk. +* [Metrics SDK] Add support for Pull Metric Exporter [#1701](https://github.com/open-telemetry/opentelemetry-cpp/pull/1701) + which includes breaking change in the Metrics api. +* [BUILD] Add CMake OTELCPP_MAINTAINER_MODE [#1650](https://github.com/open-telemetry/opentelemetry-cpp/pull/1650) +* [SEMANTIC CONVENTIONS] Upgrade to version 1.14.0 [#1697](https://github.com/open-telemetry/opentelemetry-cpp/pull/1697) + +Notes: + +Metrics API/SDK GA release includes PRs [#1686](https://github.com/open-telemetry/opentelemetry-cpp/pull/1686), +[#1701](https://github.com/open-telemetry/opentelemetry-cpp/pull/1701), and +[#1707](https://github.com/open-telemetry/opentelemetry-cpp/pull/1707) +with breaking changes in the Metrics API and SDK. + +## [1.6.1] 2022-09-22 + +* [BUILD] Upgrade opentelemetry-proto to v0.19.0 [#1579](https://github.com/open-telemetry/opentelemetry-cpp/pull/1579) +* [METRICS EXPORTER] Add `OtlpGrpcMetricExporterFactory` and `OtlpHttpMetricExporterFactory`. + [#1606](https://github.com/open-telemetry/opentelemetry-cpp/pull/1606) +* [SEMANTIC CONVENTIONS] Upgrade to version 1.13.0 [#1624](https://github.com/open-telemetry/opentelemetry-cpp/pull/1624) +* [BUILD] Fixes span creation benchmark issue. [#1622](https://github.com/open-telemetry/opentelemetry-cpp/pull/1622) +* [BUILD] Fix more build warnings (#1616) [#1620](https://github.com/open-telemetry/opentelemetry-cpp/pull/1620) +* [SDK gRPC]: Fix out-of-bounds access of string_view in GrpcClientCarrier in + the example + [#1619](https://github.com/open-telemetry/opentelemetry-cpp/pull/1619) +* [EXPORTER ETW] Add Trace flags in SpanContext [#1618](https://github.com/open-telemetry/opentelemetry-cpp/pull/1618) +* [SDK] resource sdk: Update Resource::Merge function docs [#1615](https://github.com/open-telemetry/opentelemetry-cpp/pull/1615) +* [BUILD] Fix build warnings [#1613](https://github.com/open-telemetry/opentelemetry-cpp/pull/1613) +* [API BUILD] Fix header only api singletons (#1520) [#1604](https://github.com/open-telemetry/opentelemetry-cpp/pull/1604) +* [METRICS SDK] Fix default value of + `OtlpHttpMetricExporterOptions::aggregation_temporality`. + [#1601](https://github.com/open-telemetry/opentelemetry-cpp/pull/1601) +* [METRICS EXAMPLE] Example for OTLP gRPC exporter for Metrics. [#1598](https://github.com/open-telemetry/opentelemetry-cpp/pull/1598) +* [SDK] Fix `LoggerContext::Shutdown` and tsan of `OtlpHttpClient` [#1592](https://github.com/open-telemetry/opentelemetry-cpp/pull/1592) +* [METRICS SDK] Fix 1585 - Multiple cumulative metric collections without + measurement recording. 
+  [#1586](https://github.com/open-telemetry/opentelemetry-cpp/pull/1586)
+* [BUILD] metrics warnings [#1583](https://github.com/open-telemetry/opentelemetry-cpp/pull/1583)
+* [METRICS SDK] Fix ObservableInstrument::RemoveCallback [#1582](https://github.com/open-telemetry/opentelemetry-cpp/pull/1582)
+* [SDK] Add error log when getting a http error code [#1581](https://github.com/open-telemetry/opentelemetry-cpp/pull/1581)
+* [EXPORTER] ETW Exporter - Add support for Sampler and ID Generator [#1547](https://github.com/open-telemetry/opentelemetry-cpp/pull/1547)
+
+Notes:
+
+While the [OpenTelemetry semantic
+conventions](https://github.com/open-telemetry/opentelemetry-specification/tree/main/semantic_conventions)
+are still in an experimental state, PR
+[#1624](https://github.com/open-telemetry/opentelemetry-cpp/pull/1624) upgraded
+them from 1.12.0 to 1.13.0, which **MAY** break instrumentation libraries. Please
+update the semantic conventions in your instrumentation library as needed.
+
+## [1.6.0] 2022-08-15
+
+* [METRICS SDK] Calling Observable Instruments callback during metrics
+  collection
+  [#1554](https://github.com/open-telemetry/opentelemetry-cpp/pull/1554)
+* [METRICS CI] Add CI jobs for new and deprecated metrics [#1531](https://github.com/open-telemetry/opentelemetry-cpp/pull/1531)
+* [METRICS BUILD] Fix metrics asan and tsan CI [#1562](https://github.com/open-telemetry/opentelemetry-cpp/pull/1562)
+* [METRICS SDK] remove throw check from metrics with noexcept [#1560](https://github.com/open-telemetry/opentelemetry-cpp/pull/1560)
+* [METRICS SDK] fix metrics race condition [#1552](https://github.com/open-telemetry/opentelemetry-cpp/pull/1552)
+* [METRICS SDK] Fix metrics context circular reference [#1535](https://github.com/open-telemetry/opentelemetry-cpp/pull/1535)
+* [METRICS EXPORTER] Improve scope/instrument names in metrics ostream exporter [#1544](https://github.com/open-telemetry/opentelemetry-cpp/pull/1544)
+* [METRICS BUILD] fix IWYU error in instruments.h [#1555](https://github.com/open-telemetry/opentelemetry-cpp/pull/1555)
+* [EXPORTER] Prometheus exporter support Gauge Type [#1553](https://github.com/open-telemetry/opentelemetry-cpp/pull/1553)
+* [METRICS SDK] Fix default Metric view name [#1515](https://github.com/open-telemetry/opentelemetry-cpp/pull/1515)
+* [SDK] Fix infinitely waiting when shutdown with more than one running http
+  sessions.
+  [#1549](https://github.com/open-telemetry/opentelemetry-cpp/pull/1549)
+* [METRICS SDK] Fix OTLP gRPC Metrics env variables [#1543](https://github.com/open-telemetry/opentelemetry-cpp/pull/1543)
+* [METRICS SDK] Metric aggregation temporality controls [#1541](https://github.com/open-telemetry/opentelemetry-cpp/pull/1541)
+* [METRICS SDK] Histogram min/max support [#1540](https://github.com/open-telemetry/opentelemetry-cpp/pull/1540)
+* [METRICS EXPORTER] ostream exporter should print out resource attributes [#1523](https://github.com/open-telemetry/opentelemetry-cpp/pull/1523)
+* [METRICS SDK] Support multiple async callbacks [#1495](https://github.com/open-telemetry/opentelemetry-cpp/pull/1495)
+* [BUILD] Fix code scanning alert [#1530](https://github.com/open-telemetry/opentelemetry-cpp/pull/1530)
+* [BUILD] Fix several compiling/linking errors [#1539](https://github.com/open-telemetry/opentelemetry-cpp/pull/1539)
+* [TRACE SDK] Add SpanData getter for Span Recordable [#1508](https://github.com/open-telemetry/opentelemetry-cpp/pull/1508)
+* [LOG SDK] Fix log sdk builder (#1486) [#1524](https://github.com/open-telemetry/opentelemetry-cpp/pull/1524)
+* [METRICS SDK] Add configuration options for Aggregation creation [#1513](https://github.com/open-telemetry/opentelemetry-cpp/pull/1513)
+* [METRICS TEST] Fix metrics unit test memory leak [#1533](https://github.com/open-telemetry/opentelemetry-cpp/pull/1533)
+* [LOG SDK] Add log sdk builders (#1486) [#1524](https://github.com/open-telemetry/opentelemetry-cpp/pull/1524)
+
+## [1.5.0] 2022-07-29
+
+* [EXPORTER BUILD] Add resources to dep list of prometheus exporter test [#1527](https://github.com/open-telemetry/opentelemetry-cpp/pull/1527)
+* [BUILD] Don't require applications using jaeger exporter to know about libcurl
+  [#1518](https://github.com/open-telemetry/opentelemetry-cpp/pull/1518)
+* [EXPORTER] Inline print_value() in ostream exporter [#1512](https://github.com/open-telemetry/opentelemetry-cpp/pull/1512)
+* [SDK] fix: UrlParser incorrectly parses urls like `http://abc.com/xxx@xxx/a/b`
+  [#1511](https://github.com/open-telemetry/opentelemetry-cpp/pull/1511)
+* [SDK] Rename `InstrumentationLibrary` to `InstrumentationScope` [#1507](https://github.com/open-telemetry/opentelemetry-cpp/pull/1507)
+* [BUILD] Try to build nlohmann-json only when it is depended on. [#1505](https://github.com/open-telemetry/opentelemetry-cpp/pull/1505)
+* [EXPORTER BUILD] Link opentelemetry_api to ETW exporter test [#1503](https://github.com/open-telemetry/opentelemetry-cpp/pull/1503)
+* [SDK] Automatically add semantic conventions from the spec (#873) [#1497](https://github.com/open-telemetry/opentelemetry-cpp/pull/1497)
+* [SDK] Use template class for in-memory data. [#1496](https://github.com/open-telemetry/opentelemetry-cpp/pull/1496)
+* [SDK] fix compiler warnings [#1499](https://github.com/open-telemetry/opentelemetry-cpp/pull/1499)
+* [TRACE SDK] Add trace sdk builders (#1393) [#1471](https://github.com/open-telemetry/opentelemetry-cpp/pull/1471)
+* [METRICS BUILD] Enable bazel build for metrics proto files [#1489](https://github.com/open-telemetry/opentelemetry-cpp/pull/1489)
+* [METRICS EXPORTER] Add metrics OTLP/HTTP exporter [#1487](https://github.com/open-telemetry/opentelemetry-cpp/pull/1487)
+* [EXPORTER] fix otlp grpc exporter naming [#1488](https://github.com/open-telemetry/opentelemetry-cpp/pull/1488)
+* [BUILD] Remove `--config Debug` when installing.
[#1480](https://github.com/open-telemetry/opentelemetry-cpp/pull/1480) +* [EXPORTER] Fix endpoint in otlp grpc exporter [#1474](https://github.com/open-telemetry/opentelemetry-cpp/pull/1474) +* [EXAMPLE] Fix memory ownership of InMemorySpanExporter (#1473) [#1471](https://github.com/open-telemetry/opentelemetry-cpp/pull/1471) +* [EXPORTER TESTS] Prometheus unit test [#1461](https://github.com/open-telemetry/opentelemetry-cpp/pull/1461) +* [DOCS] Update docs to use relative code links. [#1447](https://github.com/open-telemetry/opentelemetry-cpp/pull/1447) +* [SDK] Remove reference to deprecated InstrumentationLibrary in OTLP [#1469](https://github.com/open-telemetry/opentelemetry-cpp/pull/1469) +* [SDK] Fix trace kIsSampled flag set incorrectly [#1465](https://github.com/open-telemetry/opentelemetry-cpp/pull/1465) +* [METRICS EXPORTER] OTLP gRPC Metrics Exporter [#1454](https://github.com/open-telemetry/opentelemetry-cpp/pull/1454) +* [EXPORTER] fix prometheus exporter failure type [#1460](https://github.com/open-telemetry/opentelemetry-cpp/pull/1460) +* [SDK] Fix build warnings about missing aggregates. [#1368](https://github.com/open-telemetry/opentelemetry-cpp/pull/1368) +* [EXT] `curl::HttpClient` use `curl_multi_handle` instead of creating a thread + for every request and it's able to reuse connections now. ([#1317](https://github.com/open-telemetry/opentelemetry-cpp/pull/1317)) +* [SEMANTIC CONVENTIONS] Upgrade to version 1.12.0 [#873](https://github.com/open-telemetry/opentelemetry-cpp/pull/873) + +## [1.4.1] 2022-06-19 + +* [METRICS SDK] Fix variables inizialization [#1430](https://github.com/open-telemetry/opentelemetry-cpp/pull/1430) +* [DOCS] Fixed broken link to OpenTelemetry.io (#1445) [#1446](https://github.com/open-telemetry/opentelemetry-cpp/pull/1446) +* [BUILD] Upgrade nlohmann_json to 3.10.5 (#1438) [#1441](https://github.com/open-telemetry/opentelemetry-cpp/pull/1441) +* [METRICS SDK] fix histogram [#1440](https://github.com/open-telemetry/opentelemetry-cpp/pull/1440) +* [DOCS] Fix GettingStarted documentation for Jaeger HTTP exporter (#1347) [#1439](https://github.com/open-telemetry/opentelemetry-cpp/pull/1439) +* [BUILD] install sdk-config.h [#1419](https://github.com/open-telemetry/opentelemetry-cpp/pull/1419) +* [EXAMPLE] Log current timestamp instead of epoch time [#1434](https://github.com/open-telemetry/opentelemetry-cpp/pull/1434) +* [METRICS SDK] Add attributes/dimensions to metrics ostream exporter [#1400](https://github.com/open-telemetry/opentelemetry-cpp/pull/1400) +* [SDK] Fix global log handle symbols when using dlopen [#1420](https://github.com/open-telemetry/opentelemetry-cpp/pull/1420) +* [METRICS] Only record non-negative / finite / Non-NAN histogram values([#1427](https://github.com/open-telemetry/opentelemetry-cpp/pull/1427)) +* [ETW EXPORTER] Fix ETW log exporter header inclusion [#1426](https://github.com/open-telemetry/opentelemetry-cpp/pull/1426) +* [ETW EXPORTER] Copy string_view passed to ETW exporter in PropertyVariant [#1425](https://github.com/open-telemetry/opentelemetry-cpp/pull/1425) +* [METRICS API/SDK] Pass state to async callback function. 
[#1408](https://github.com/open-telemetry/opentelemetry-cpp/pull/1408) +* [BUILD] fix nlohmann_json's (third party) include dir [#1415](https://github.com/open-telemetry/opentelemetry-cpp/pull/1415) +* [SDK] fix: WaitOnSocket select error when sockfd above FD_SETSIZE [#1410](https://github.com/open-telemetry/opentelemetry-cpp/pull/1410) +* [SDK] fix OTEL_INTERNAL_LOG_INFO [#1407](https://github.com/open-telemetry/opentelemetry-cpp/pull/1407) +* [DOCS] Document Getting Started with Prometheus and Grafana [#1396](https://github.com/open-telemetry/opentelemetry-cpp/pull/1396) + +## [1.4.0] 2022-05-17 + +* [API SDK] Upgrade proto to v0.17.0, update log data model ([#1383](https://github.com/open-telemetry/opentelemetry-cpp/pull/1383)) +* [BUILD] Alpine image ([#1382](https://github.com/open-telemetry/opentelemetry-cpp/pull/1382)) +* [LOGS SDK] Get span_id from context when Logger::Log received invalid span_id + ([#1398](https://github.com/open-telemetry/opentelemetry-cpp/pull/1398)) +* [METRICS SDK] Connect async storage with async instruments ([#1388](https://github.com/open-telemetry/opentelemetry-cpp/pull/1388)) +* [DOCS] Getting started document using ostream exporter ([#1394](https://github.com/open-telemetry/opentelemetry-cpp/pull/1394)) +* [BUILD] Fix missing link to nlohmann_json ([#1390](https://github.com/open-telemetry/opentelemetry-cpp/pull/1390)) +* [SDK] Fix sharing resource in batched exported spans ([#1386](https://github.com/open-telemetry/opentelemetry-cpp/pull/1386)) +* [PROTOCOL \& LOGS] Upgrade proto to v0.17.0, update log data model ([#1383](https://github.com/open-telemetry/opentelemetry-cpp/pull/1383)) +* [METRICS SDK] Remove un-necessary files. ([#1379](https://github.com/open-telemetry/opentelemetry-cpp/pull/1379)) +* [EXPORTER] Prometheus exporter meters and instrument name ([#1378](https://github.com/open-telemetry/opentelemetry-cpp/pull/1378)) +* [API] Add noexcept/const qualifier at missing places for Trace API. 
([#1374](https://github.com/open-telemetry/opentelemetry-cpp/pull/1374)) +* [SDK] Fix empty tracestate header propagation ([#1373](https://github.com/open-telemetry/opentelemetry-cpp/pull/1373)) +* [METRICS SDK] Reuse temporal metric storage for sync storage ([#1369](https://github.com/open-telemetry/opentelemetry-cpp/pull/1369)) +* [SDK] Fix baggage propagation for empty/invalid baggage context ([#1367](https://github.com/open-telemetry/opentelemetry-cpp/pull/1367)) +* [BUILD] Export opentelemetry_otlp_recordable ([#1365](https://github.com/open-telemetry/opentelemetry-cpp/pull/1365)) +* [TESTS] Disable test on prometheus-cpp which not need ([#1363](https://github.com/open-telemetry/opentelemetry-cpp/pull/1363)) +* [METRICS] Fix class member initialization order ([#1360](https://github.com/open-telemetry/opentelemetry-cpp/pull/1360)) +* [METRICS SDK] Simplify SDK Configuration: Use View with default aggregation if + no matching View is configured + ([#1358](https://github.com/open-telemetry/opentelemetry-cpp/pull/1358)) +* [BUILD] Add missing include guard ([#1357](https://github.com/open-telemetry/opentelemetry-cpp/pull/1357)) +* [ETW EXPORTER] Fix scalar delete against array ([#1356](https://github.com/open-telemetry/opentelemetry-cpp/pull/1356)) +* [ETW EXPORTER] Conditional include for codecvt header ([#1355](https://github.com/open-telemetry/opentelemetry-cpp/pull/1355)) +* [BUILD] Use latest TraceLoggingDynamic.h ([#1354](https://github.com/open-telemetry/opentelemetry-cpp/pull/1354)) +* [SDK] Add explicit type cast in baggage UrlDecode ([#1353](https://github.com/open-telemetry/opentelemetry-cpp/pull/1353)) +* [METRICS SDK] Remove exporter registration to meter provider ([#1350](https://github.com/open-telemetry/opentelemetry-cpp/pull/1350)) +* [METRICS SDK] Fix output time in metrics OStream exporter ([#1346](https://github.com/open-telemetry/opentelemetry-cpp/pull/1346)) +* [BUILD] ostream metrics cmake ([#1344](https://github.com/open-telemetry/opentelemetry-cpp/pull/1344)) +* [BUILD] Link `opentelemetry_ext` with `opentelemetry_api` ([#1336](https://github.com/open-telemetry/opentelemetry-cpp/pull/1336)) +* [METRICS SDK] Enable metric collection for Async Instruments - Delta and + Cumulative + ([#1334](https://github.com/open-telemetry/opentelemetry-cpp/pull/1334)) +* [BUILD] Dependencies image as artifact ([#1333](https://github.com/open-telemetry/opentelemetry-cpp/pull/1333)) +* [EXAMPLE] Prometheus example ([#1332](https://github.com/open-telemetry/opentelemetry-cpp/pull/1332)) +* [METRICS EXPORTER] Prometheus exporter ([#1331](https://github.com/open-telemetry/opentelemetry-cpp/pull/1331)) +* [METRICS] Metrics histogram example ([#1330](https://github.com/open-telemetry/opentelemetry-cpp/pull/1330)) +* [TESTS] Replace deprecated googletest API ([#1327](https://github.com/open-telemetry/opentelemetry-cpp/pull/1327)) +* [BUILD] Fix Ninja path ([#1326](https://github.com/open-telemetry/opentelemetry-cpp/pull/1326)) +* [API] Update yield logic for ARM processor ([#1325](https://github.com/open-telemetry/opentelemetry-cpp/pull/1325)) +* [BUILD] Fix metrics compiler warnings ([#1328](https://github.com/open-telemetry/opentelemetry-cpp/pull/1328)) +* [METRICS SDK] Implement Merge and Diff operation for Histogram Aggregation ([#1303](https://github.com/open-telemetry/opentelemetry-cpp/pull/1303)) + +Notes: + +While opentelemetry-cpp Logs are still in experimental stage, +[#1383](https://github.com/open-telemetry/opentelemetry-cpp/pull/1383) updated +opentelemetry-proto to 0.17.0, 
which includes some breaking change in the +protocol, like +[this](https://github.com/open-telemetry/opentelemetry-proto/pull/373). This +makes `name` parameter for our log API unnecessary. However, this parameter is +marked deprecated instead of being removed in this release, and it will be +removed in future release. + +## [1.3.0] 2022-04-11 + +* [ETW EXPORTER] ETW provider handle cleanup ([#1322](https://github.com/open-telemetry/opentelemetry-cpp/pull/1322)) +* [BUILD] Move public definitions into `opentelemetry_api`. ([#1314](https://github.com/open-telemetry/opentelemetry-cpp/pull/1314)) +* [METRICS] OStream example ([#1312](https://github.com/open-telemetry/opentelemetry-cpp/pull/1312)) +* [BUILD] Rename `http_client_curl` to `opentelemetry_http_client_curl` ([#1301](https://github.com/open-telemetry/opentelemetry-cpp/pull/1301)) +* [METRICS SDK] Add InstrumentationInfo and Resource to the metrics data to be + exported. + ([#1299](https://github.com/open-telemetry/opentelemetry-cpp/pull/1299)) +* [TESTS] Add building test without RTTI ([#1294](https://github.com/open-telemetry/opentelemetry-cpp/pull/1294)) +* [METRICS SDK] Implement periodic exporting metric reader ([#1286](https://github.com/open-telemetry/opentelemetry-cpp/pull/1286)) +* [SDK] Bugfix: span SetAttribute crash ([#1283](https://github.com/open-telemetry/opentelemetry-cpp/pull/1283)) +* [BUG] Remove implicitly deleted default constructor ([#1267](https://github.com/open-telemetry/opentelemetry-cpp/pull/1267)) +* [METRICS SDK] Synchronous Metric collection (Delta , Cumulative) ([#1265](https://github.com/open-telemetry/opentelemetry-cpp/pull/1265)) +* [METRICS SDK] Metrics exemplar round 1 ([#1264](https://github.com/open-telemetry/opentelemetry-cpp/pull/1264)) +* [EXPORTER] Fix: use CURLOPT_TIMEOUT_MS to config OtlpHttpExporter's timeout + instead of CURLOPT_TIMEOUT + ([#1261](https://github.com/open-telemetry/opentelemetry-cpp/pull/1261)) +* [EXPORTER] Jaeger Exporter - Populate Span Links ([#1251](https://github.com/open-telemetry/opentelemetry-cpp/pull/1251)) +* [SDK] Reorder the destructor of members in LoggerProvider and TracerProvider ([#1245](https://github.com/open-telemetry/opentelemetry-cpp/pull/1245)) +* [METRICS SDK] Enable metric collection from MetricReader ([#1241](https://github.com/open-telemetry/opentelemetry-cpp/pull/1241)) +* [METRICS SDK] Asynchronous Aggregation storage ([#1232](https://github.com/open-telemetry/opentelemetry-cpp/pull/1232)) +* [METRICS SDK] Synchronous Instruments - Aggregation Storage(s) creation for + configured views + ([#1219](https://github.com/open-telemetry/opentelemetry-cpp/pull/1219)) +* [BUILD] Added s390x arch into CMake build. ([#1216](https://github.com/open-telemetry/opentelemetry-cpp/pull/1216)) +* [API] Allow extension of the lifetime of ContextStorage. 
([#1214](https://github.com/open-telemetry/opentelemetry-cpp/pull/1214)) +* [METRICS SDK] Add Aggregation storage ([#1213](https://github.com/open-telemetry/opentelemetry-cpp/pull/1213)) +* [TESTS] Fix ostream_log_test Mac ([#1208](https://github.com/open-telemetry/opentelemetry-cpp/pull/1208)) +* [BUILD] Update grpc to v1.43.2 to support VS2022/MSVC 19.30 and bazel 5.0 ([#1207](https://github.com/open-telemetry/opentelemetry-cpp/pull/1207)) +* [DOCS] Benchmark documentation ([#1205](https://github.com/open-telemetry/opentelemetry-cpp/pull/1205)) +* [DOCS] Fix errors in SDK documentation ([#1201](https://github.com/open-telemetry/opentelemetry-cpp/pull/1201)) +* [METRICS EXPORTER] Ostream metric exporter ([#1196](https://github.com/open-telemetry/opentelemetry-cpp/pull/1196)) +* [Metrics SDK] Filtering metrics attributes ([#1191](https://github.com/open-telemetry/opentelemetry-cpp/pull/1191)) +* [Metrics SDK] Sync and Async Instruments SDK ([#1184](https://github.com/open-telemetry/opentelemetry-cpp/pull/1184)) +* [Metrics SDK] Add Aggregation as part of metrics SDK. ([#1178](https://github.com/open-telemetry/opentelemetry-cpp/pull/1178)) +* [BUILD] Cmake: thrift requires boost headers, include them as + Boost_INCLUDE_DIRS + ([#1100](https://github.com/open-telemetry/opentelemetry-cpp/pull/1100)) + +Notes: + +[#1301](https://github.com/open-telemetry/opentelemetry-cpp/pull/1301) added +`opentelemetry_` as prefix to http_client_curl library for resolving potential +naming conflict, this could break existing cmake build if http_client_curl is +listed as explicit dependency in user's cmake file. + +## [1.2.0] 2022-01-31 + +* [CI] Continuous benchmark tests as part of the CI ([#1174](https://github.com/open-telemetry/opentelemetry-cpp/pull/1174)) +* [API] Allow to use external abseil for bazel targets ([#1172](https://github.com/open-telemetry/opentelemetry-cpp/pull/1172)) +* [EXPORTER] Importing gsl::span if std::span is not available ([#1167](https://github.com/open-telemetry/opentelemetry-cpp/pull/1167)) +* [EXPORTER] Synchronized calls to Exporter::Export & Shutdown ([#1164](https://github.com/open-telemetry/opentelemetry-cpp/pull/1164)) +* [EXPORTER] OTLP http exporter block thread ([#1163](https://github.com/open-telemetry/opentelemetry-cpp/pull/1163)) +* [TESTS] Jaeger: ThriftSender unit test ([#1162](https://github.com/open-telemetry/opentelemetry-cpp/pull/1162)) +* [EXPORTER] InMemorySpanExporter shutdown fix ([#1161](https://github.com/open-telemetry/opentelemetry-cpp/pull/1161)) +* [EXPORTER] Fix leak in Jaeger exporter ([#1160](https://github.com/open-telemetry/opentelemetry-cpp/pull/1160)) +* [TESTS] ZipkinExporter unit-tests ([#1155](https://github.com/open-telemetry/opentelemetry-cpp/pull/1155)) +* [SDK] Logger: propagating resources through LoggerProvider ([#1154](https://github.com/open-telemetry/opentelemetry-cpp/pull/1154)) +* [SDK] Logger: support for instrumentation library ([#1149](https://github.com/open-telemetry/opentelemetry-cpp/pull/1149)) +* [SDK] Add log level for internal log of sdk ([#1147](https://github.com/open-telemetry/opentelemetry-cpp/pull/1147)) +* [METRICS] Metrics SDK: View API ([#1110](https://github.com/open-telemetry/opentelemetry-cpp/pull/1110)) + +Notes on experimental features: + +[#1149](https://github.com/open-telemetry/opentelemetry-cpp/pull/1149) and +[#1154](https://github.com/open-telemetry/opentelemetry-cpp/pull/1154) from +above CHANGELOG introduced API changes which are not backward compatible with +previous logs, please update API 
package to this release if +`ENABLE_LOGS_PREVIEW` is turned on (it is turned off by default). + +## [1.1.1] 2021-12-20 + +* [SDK] Rename OTEL_CPP_GET_ATTR macro, and define it using fully qualified attr + function + ([#1140](https://github.com/open-telemetry/opentelemetry-cpp/pull/1140)) +* [SDK] Default resource attributes and attributes in OTEL_RESOURCE_ATTRIBUTES + are missing when using Otlp*LogExporter + ([#1082](https://github.com/open-telemetry/opentelemetry-cpp/pull/1082)) +* [METRICS] Add Meter and MeterProvider in the SDK + ([#1078](https://github.com/open-telemetry/opentelemetry-cpp/pull/1078)) +* [EXPORTER] ZipkinExporter shutdown + ([#1153](https://github.com/open-telemetry/opentelemetry-cpp/pull/1153)) +* [EXPORTER] Jaeger exporter shutdown + ([#1150](https://github.com/open-telemetry/opentelemetry-cpp/pull/1150)) +* [EXPORTER] Bugfix: `jaeger::TUDPTransport::write` crash when `getaddrinfo` + returns error + ([#1116](https://github.com/open-telemetry/opentelemetry-cpp/pull/1116)) +* [EXPORTER] Bugfix: Jaeger exporter: extend supported attributes types + ([#1106](https://github.com/open-telemetry/opentelemetry-cpp/pull/1106)) +* [EXPORTER] Fix otlp generates null span ids + ([#1113](https://github.com/open-telemetry/opentelemetry-cpp/pull/1113)) +* [EXPORTER] Jaeger bazel (Linux only) + ([#1077](https://github.com/open-telemetry/opentelemetry-cpp/pull/1077)) +* [DOCS] Add note on DLL support + ([#1137](https://github.com/open-telemetry/opentelemetry-cpp/pull/1137)) +* [DOCS] Improve the instructions for Bazel build + ([#1136](https://github.com/open-telemetry/opentelemetry-cpp/pull/1136)) +* [DOCS] Document dependencies + ([#1119](https://github.com/open-telemetry/opentelemetry-cpp/pull/1119)) +* [DOCS] Dockerfile for quick demo/troubleshooting purpose + ([#905](https://github.com/open-telemetry/opentelemetry-cpp/pull/905)) +* [TESTS] Fix data race in BM_ThreadYieldSpinLockThrashing + ([#1099](https://github.com/open-telemetry/opentelemetry-cpp/pull/1099)) +* [EXAMPLE] Otlp gRPC log example + ([#1083](https://github.com/open-telemetry/opentelemetry-cpp/pull/1083)) +* [BUILD] C++20 not Building with VS2019 + ([#1144](https://github.com/open-telemetry/opentelemetry-cpp/pull/1144)) +* [BUILD] Mark tags to bazel + targets([#1075](https://github.com/open-telemetry/opentelemetry-cpp/pull/1075)) + +## [1.1.0] 2021-11-19 + +* [BUILD] build release tarball when nlohmann-json not installed + ([#1074](https://github.com/open-telemetry/opentelemetry-cpp/pull/1074)) +* [SDK] Bugfix: regex is neither working on GCC 4.9.x + ([#1069](https://github.com/open-telemetry/opentelemetry-cpp/pull/1069)) +* [SDK] Improvement: span_id should not break strict aliasing. 
+ ([#1068](https://github.com/open-telemetry/opentelemetry-cpp/pull/1068)) +* [EXAMPLE] OTLP HTTP log example + ([#1062](https://github.com/open-telemetry/opentelemetry-cpp/pull/1062)) +* [SDK] OTLP gRPC log export should fail after shutdown + ([#1064](https://github.com/open-telemetry/opentelemetry-cpp/pull/1064)) +* [BUILD] Building otlp exporter from the release tarball + ([#1056](https://github.com/open-telemetry/opentelemetry-cpp/pull/1056)) +* [METRICS] Move old metrics implementation to different directory, and rename + targets to \_deprecated + ([#1053](https://github.com/open-telemetry/opentelemetry-cpp/pull/1053)) +* [EXPORTER] Add OTLP/gRPC Log Exporter + ([#1048](https://github.com/open-telemetry/opentelemetry-cpp/pull/1048)) +* [EXPORTER] Prometheus Exporter + ([#1031](https://github.com/open-telemetry/opentelemetry-cpp/pull/1031)) +* [EXPORTER] Add OTLP/HTTP Log Exporter + ([#1030](https://github.com/open-telemetry/opentelemetry-cpp/pull/1030)) +* [SDK] fix: issue 368- consistent namespace scope resolution + ([#1008](https://github.com/open-telemetry/opentelemetry-cpp/pull/1008)) + +## [1.0.1] 2021-10-21 + +* [EXPORTER] Exports span attributes to ETW + ([#1021](https://github.com/open-telemetry/opentelemetry-cpp/pull/1021)) +* [BUILD] cmake: add FindThrift.cmake find module method for thrift + ([#1020](https://github.com/open-telemetry/opentelemetry-cpp/pull/1020)) +* [BUILD] Fix nlohmann_json package dependency + ([#1017](https://github.com/open-telemetry/opentelemetry-cpp/pull/1017)) +* [EXPORTER] Change OTLP/HTTP default port from 4317 to 4318 + ([#1018](https://github.com/open-telemetry/opentelemetry-cpp/pull/1018)) +* [EXPORTER] ETW Log Exporter + ([#1006](https://github.com/open-telemetry/opentelemetry-cpp/pull/1006)) +* [API] Adding new Logger:log() method + ([#1005](https://github.com/open-telemetry/opentelemetry-cpp/pull/1005)) +* [EXPORTER] Remove scheme from OTLP endpoint before passing to gRPC + ([#988](https://github.com/open-telemetry/opentelemetry-cpp/pull/988)) +* [BUILD] Update opentelemetry-proto for bazel build to 0.9.0 + ([#984](https://github.com/open-telemetry/opentelemetry-cpp/pull/984)) +* [BUILD] Cross compling grpc_cpp_plugin not found bug + ([#982](https://github.com/open-telemetry/opentelemetry-cpp/pull/982)) +* [EXPORTER] Support environment variables for both `OtlpGrpcExporter` and + `OtlpHttpExporter` + ([#983](https://github.com/open-telemetry/opentelemetry-cpp/pull/983)) +* [API/SDK] Add schema_url support to both Resource and InstrumentationLibrary + ([#979](https://github.com/open-telemetry/opentelemetry-cpp/pull/979)) +* [BUILD] Fix build issue where _memcpy_ was not declared in scope + ([#985](https://github.com/open-telemetry/opentelemetry-cpp/issues/985)) + +## [1.0.0] 2021-09-16 + +### API + +* Document DefaultSpan, remove DefaultTracer + ([#959](https://github.com/open-telemetry/opentelemetry-cpp/pull/959)) +* Separate baggage<->Context api from Baggage Propagator + ([#963](https://github.com/open-telemetry/opentelemetry-cpp/pull/963)) +* Remove unused public API to_span_ptr + ([#964](https://github.com/open-telemetry/opentelemetry-cpp/pull/964)) +* :collision: Make span context management public + ([#967](https://github.com/open-telemetry/opentelemetry-cpp/pull/967)) +* Support determining parent span from Context while creating new Span + ([#969](https://github.com/open-telemetry/opentelemetry-cpp/pull/969)) +* :collision: Traces: Add note on experimental semantic convention + implementation, prefix semantics headers with 
experimental tag
+  ([#970](https://github.com/open-telemetry/opentelemetry-cpp/pull/970))
+* Increment OPENTELEMETRY_ABI_VERSION_NO to 1
+  ([#980](https://github.com/open-telemetry/opentelemetry-cpp/pull/980))
+
+### SDK
+
+* Clean up `GetEnvironmentVariable` and remove unused variable under `NO_GETENV`
+  ([#976](https://github.com/open-telemetry/opentelemetry-cpp/pull/976))
+* :collision: Resources: Add note on experimental semantic convention
+  implementation, prefix semantics headers with experimental tag
+  ([#970](https://github.com/open-telemetry/opentelemetry-cpp/pull/970))
+
+### OTLP Exporter
+
+* :bug: Ignore status description if status code is not Error
+  ([#962](https://github.com/open-telemetry/opentelemetry-cpp/pull/962))
+* :collision: Make Otlp exporter configuration environment variables
+  specs-compliant
+  ([#974](https://github.com/open-telemetry/opentelemetry-cpp/pull/974))
+
+### Zipkin Exporter
+
+* :bug: Don't set parentId in case parentId is empty
+  ([#943](https://github.com/open-telemetry/opentelemetry-cpp/pull/943))
+* :rocket: Extend zipkin exporter with ability to provide headers
+  ([#951](https://github.com/open-telemetry/opentelemetry-cpp/pull/951))
+
+### DOCS
+
+* :book: Add getting-started documentation for SDK:
+  ([#942](https://github.com/open-telemetry/opentelemetry-cpp/pull/942))
+* :book: Remove unnecessary spaces and spelling of gRPC in README
+  ([#965](https://github.com/open-telemetry/opentelemetry-cpp/pull/965))
+
+### BUILD
+
+* Disable bazel build for gcc 4.8, upgrade versions for grpc(v1.39.1) and
+  bazel(4.2.0), document bazel support
+  ([#953](https://github.com/open-telemetry/opentelemetry-cpp/pull/953))
+* Move CMake config template to cmake folder
+  ([#958](https://github.com/open-telemetry/opentelemetry-cpp/pull/958))
+* Enable CMake to search the new package variable `_ROOT`
+  ([#975](https://github.com/open-telemetry/opentelemetry-cpp/pull/975))
+* :bug: Do not override CMAKE_CXX_STANDARD
+  ([#977](https://github.com/open-telemetry/opentelemetry-cpp/pull/977))
+
+### :construction: Experimental Features (Will change in future)
+
+* Semantic Conventions for traces - As the spec is still experimental, the
+  implementation will change in the future (see the sketch after this list).
+* Semantic Conventions for resources - As the spec is still experimental, the
+  implementation will change in the future.
+* Logs and Metrics API & SDK - These are not compliant, and are behind a feature
+  flag. Not recommended for use yet.
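+
+To illustrate how the experimental semantic conventions surface in user code,
+here is a minimal, hypothetical sketch (not part of this release; the
+instrumentation name, function, and attribute values are made up) that records
+trace semantic-convention attributes through the stable span API, spelling the
+keys as plain strings so that a convention version bump only touches these
+string literals; the dedicated semantic-convention headers are experimental and
+their symbols may change:
+
+```cpp
+// Hypothetical instrumentation snippet, for illustration only.
+#include "opentelemetry/trace/provider.h"
+
+void InstrumentRequest()
+{
+  // Obtain a tracer from the globally registered provider.
+  auto provider = opentelemetry::trace::Provider::GetTracerProvider();
+  auto tracer   = provider->GetTracer("http-client-instrumentation", "1.0.0");
+
+  auto span = tracer->StartSpan("HTTP GET");
+  // Attribute keys follow the (experimental) trace semantic conventions.
+  span->SetAttribute("http.method", "GET");
+  span->SetAttribute("http.url", "https://example.com/");
+  span->SetAttribute("http.status_code", 200);
+  span->End();
+}
+```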
+ +## [1.0.0-rc4] 2021-08-04 + +* [EXPORTER] `BREAKING CHANGE` Fix default HTTP port for OTLP HTTP Exporter + ([#939](https://github.com/open-telemetry/opentelemetry-cpp/pull/939)) +* [API] Fix timeout calculation for Tracer::Flush() and Tracer::Close() + ([#937](https://github.com/open-telemetry/opentelemetry-cpp/pull/937)) +* [API] Fix for Stack::Resize that new_capacity is not assigned to capacity_ + ([#934](https://github.com/open-telemetry/opentelemetry-cpp/pull/934)) +* [SDK] Fix race condition in circular buffer simulation test + ([#931](https://github.com/open-telemetry/opentelemetry-cpp/pull/931)) +* [EXPORTER] Fix error logging in Jaeger Exporter + ([#930](https://github.com/open-telemetry/opentelemetry-cpp/pull/930)) +* [BUILD] Use latest grpc version (v1.39.0) for cmake build of otlp exporter + ([#927](https://github.com/open-telemetry/opentelemetry-cpp/pull/927)) +* [EXPORTER] Add Jaeger Thrift HTTP exporter + ([#926](https://github.com/open-telemetry/opentelemetry-cpp/pull/926)) +* [SDK] Move env-var read logic to common dir, and optional reading of env-var + ([#924](https://github.com/open-telemetry/opentelemetry-cpp/pull/924)) +* [EXPORTER] Remove recordable header from CMake install rules for exporters + ([#923](https://github.com/open-telemetry/opentelemetry-cpp/pull/923)) +* [EXPORTER] `BREAKING CHANGE` Rename Recordable to JaegerRecordable in Jaeger + exporter + ([#919](https://github.com/open-telemetry/opentelemetry-cpp/pull/919)) +* [EXPORTER] `BREAKING CHANGE` Rename Jaeger exporter target + ([#918](https://github.com/open-telemetry/opentelemetry-cpp/pull/918)) +* [EXPORTER] Add Zipkin exporter example + ([#917](https://github.com/open-telemetry/opentelemetry-cpp/pull/917)) +* [EXPORTER] Bazel build for Zipkin exorter + ([#916](https://github.com/open-telemetry/opentelemetry-cpp/pull/916)) +* [BUILD] Allow to use local GSL + ([#915](https://github.com/open-telemetry/opentelemetry-cpp/pull/915)) +* [DOCS] Document vcpkg toolchain configuration + ([#913](https://github.com/open-telemetry/opentelemetry-cpp/pull/913)) +* [SDK] Fix for resource deletion after tracer provider shutdown + ([#911](https://github.com/open-telemetry/opentelemetry-cpp/pull/911)) +* [BUILD] Add bazel build for grpc example + ([#910](https://github.com/open-telemetry/opentelemetry-cpp/pull/910)) +* [EXPORTER] Add resource and instrumentation library support for + OStreamSpanExporter + ([#906](https://github.com/open-telemetry/opentelemetry-cpp/pull/906)) +* [API] Adding semantic-convention attributes for trace + ([#868](https://github.com/open-telemetry/opentelemetry-cpp/pull/868)) + +## [1.0.0-rc3] 2021-07-12 + +* [DOCS] Add doxygen reference docs for SDK + ([#902](https://github.com/open-telemetry/opentelemetry-cpp/pull/902)) +* [EXPORTER] Jaeger Exporter - Populate resource attributes into process tags in + Jaeger ([#897](https://github.com/open-telemetry/opentelemetry-cpp/pull/897)) +* [EXPORTER] Zipkin Exporter - Report Event timestamp into microseconds + ([#896](https://github.com/open-telemetry/opentelemetry-cpp/pull/896)) +* [EXPORTER] Jaeger Exporter - Handle span events + ([#895](https://github.com/open-telemetry/opentelemetry-cpp/pull/895)) +* [DOCS] Fix API docs for Scope object + ([#894](https://github.com/open-telemetry/opentelemetry-cpp/pull/894)) +* [BUILD] Fix Linking error for libcurl on some macOS environments + ([#893](https://github.com/open-telemetry/opentelemetry-cpp/pull/893)) +* [API] Context cleanup from Runtime Storage + 
([#885](https://github.com/open-telemetry/opentelemetry-cpp/pull/885)) +* [DOCS] Document strategy to avoid conflict between two different versions of + Abseil ([#883](https://github.com/open-telemetry/opentelemetry-cpp/pull/883/)) +* [EXPORTER] ETW Exporter - Document example for ETW Exporter + ([#882](https://github.com/open-telemetry/opentelemetry-cpp/pull/882)) +* [SDK] Create Span with Valid spanId and traceId irrespective of Sampling + decision + ([#879](https://github.com/open-telemetry/opentelemetry-cpp/pull/879)) +* [EXPORTER] Jaeger Exporter - Rename bswap macros to avoid clash on some + systems ([#876](https://github.com/open-telemetry/opentelemetry-cpp/pull/876)) +* [API] Add Semantics Conventions attributes for Resources + ([#872](https://github.com/open-telemetry/opentelemetry-cpp/pull/872)) +* [BUILD] Use nlohmann-json from submodules if not already installed + ([#870](https://github.com/open-telemetry/opentelemetry-cpp/pull/870)) + +## [1.0.0-rc2] 2021-06-18 + +* [EXPORTER] Jaeger Exporter - Support for Instrumentation Library + ([#864](https://github.com/open-telemetry/opentelemetry-cpp/pull/864)) +* [TESTS] Adding benchmark tests for baggage api + ([#861](https://github.com/open-telemetry/opentelemetry-cpp/pull/861)) +* [BUILD] Fix for GCC9/C++20 Support for using STL for modern C++ features + ([#860](https://github.com/open-telemetry/opentelemetry-cpp/pull/860)) +* [TESTS] Adding benchmark tests for span create api + ([#856](https://github.com/open-telemetry/opentelemetry-cpp/pull/856)) +* [BUILD] Fix for using Abseil library for modern C++ features + ([#850](https://github.com/open-telemetry/opentelemetry-cpp/pull/850)) +* [BUILD] Fix issues with win32/x86 compilation + ([#847](https://github.com/open-telemetry/opentelemetry-cpp/pull/847)) +* [DOCS] Document OSS dependencies and their licenses + ([#844](https://github.com/open-telemetry/opentelemetry-cpp/pull/844)) +* [BUILD] Various fixes to build with Visual Studio 2015 + ([#840](https://github.com/open-telemetry/opentelemetry-cpp/pull/840)) +* [INSTRUMENTATION] HTTPClient: Change support for full URL argument + ([#833](https://github.com/open-telemetry/opentelemetry-cpp/pull/833)) +* [EXPORTER] Jaeger Exporter - fix endianness of Jaeger IDs for transmission + ([#832](https://github.com/open-telemetry/opentelemetry-cpp/pull/832)) +* [INSTRUMENTATION] fix protobuf compilation warning in gRPC example + ([#830](https://github.com/open-telemetry/opentelemetry-cpp/pull/830)) +* [EXPORTER] `BREAKING CHANGE` - Add OTLP/HTTP+JSON Protocol exporter; Rename + `OtlpExporter` to `OtlpGrpcExporter` + ([#810](https://github.com/open-telemetry/opentelemetry-cpp/pull/810)) + +## [1.0.0-rc1] 2021-06-04 + +* [BUILD] Enable Jaeger exporter build in Windows + ([#815](https://github.com/open-telemetry/opentelemetry-cpp/pull/815)) +* [DOCS] Versioning doc update to clarify release and versioning policy + ([#811](https://github.com/open-telemetry/opentelemetry-cpp/pull/811)) +* [LOGS] Move Logging implementation under feature-flag + ([#807](https://github.com/open-telemetry/opentelemetry-cpp/pull/807)) +* [BUILD] Filter metric headers files from `opentelemetry-api` and + `opentelemetry-sdk` targets if metrics feature-flag is disabled + ([#806](https://github.com/open-telemetry/opentelemetry-cpp/pull/806)) +* [BUILD] Fix install rule for ostream exporter, Jaeger, ETW, ElasticSearch + ([#805](Fix install rule for header files of ostream exporter)) +* [API/SDK] Switch from mpark::variant to absl::variant as default + 
([#771](https://github.com/open-telemetry/opentelemetry-cpp/pull/771)) +* [API/SDK] Support `const char *` as acceptable data type for attributes and + resources + ([#771](https://github.com/open-telemetry/opentelemetry-cpp/pull/771)) +* [EXAMPLE] gRPC instrumentation example with context propagation + ([#729](https://github.com/open-telemetry/opentelemetry-cpp/pull/729)) + +## [0.7.0] 2021-05-26 + +* [METRICS] Move metrics api/sdk under preview feature flag + ([#745](https://github.com/open-telemetry/opentelemetry-cpp/pull/745)) +* [DOCS] Add instructions to build using Bazel + ([#747](https://github.com/open-telemetry/opentelemetry-cpp/pull/747)) +* [DOCS] Update copyright headers + ([#754](https://github.com/open-telemetry/opentelemetry-cpp/pull/754)) +* [EXPORTER] Populate resource to OTLP proto data + ([#758](https://github.com/open-telemetry/opentelemetry-cpp/pull/758)) +* [CI] Add CodeQL security scan CI workflow + ([#770](https://github.com/open-telemetry/opentelemetry-cpp/pull/770)) +* [BUILD] Enable building API only CMake Project + ([#778](https://github.com/open-telemetry/opentelemetry-cpp/pull/778)) +* [SDK] Fix for sampling of root span + ([#784](https://github.com/open-telemetry/opentelemetry-cpp/pull/784)) +* [CI] Add Jaeger exporter to CMake CI build + ([#786](https://github.com/open-telemetry/opentelemetry-cpp/pull/786)) +* [API] `BREAKING CHANGE` - Tracer::WithActiveSpan() to return Scope object + intead of unique_ptr + ([#788](https://github.com/open-telemetry/opentelemetry-cpp/pull/788)) +* [DOCS] Add docs for nested spans and context propagation in readthedocs + ([#792](https://github.com/open-telemetry/opentelemetry-cpp/pull/792)) +* [CI] Output verbose error for failed unit-test in CI + ([#796](https://github.com/open-telemetry/opentelemetry-cpp/pull/796)) + +## [0.6.0] 2021-05-11 + +* [EXPORTER] Add Jaeger exporter + ([#534](https://github.com/open-telemetry/opentelemetry-cpp/pull/534)) +* [SDK] Support multiple processors + ([#692](https://github.com/open-telemetry/opentelemetry-cpp/pull/692)) +* [SDK] Add instrumentation library and multiple tracer support + ([#693](https://github.com/open-telemetry/opentelemetry-cpp/pull/693)) +* [SDK] Resource integration with Exporters + ([#706](https://github.com/open-telemetry/opentelemetry-cpp/pull/706)) +* [EXAMPLE] Enhance existing http example with propagation + ([#727](https://github.com/open-telemetry/opentelemetry-cpp/pull/727)) + +## [0.5.0] 2021-04-26 + +* [SDK] Support custom span-id and trace-id generator + ([#681](https://github.com/open-telemetry/opentelemetry-cpp/pull/681)) +* [SDK] Add SpanContext (and TraceState) to Recordable + ([#667](https://github.com/open-telemetry/opentelemetry-cpp/pull/667)) +* [SDK] Global Propagator + ([#668](https://github.com/open-telemetry/opentelemetry-cpp/pull/668)) +* [SDK] Create SharedContext for updating span pipeline + ([#650](https://github.com/open-telemetry/opentelemetry-cpp/pull/650)) +* [API] Baggage implementation + ([#676](https://github.com/open-telemetry/opentelemetry-cpp/pull/676)) +* [API] Move class from opentelemetry::core namespace to opentelemetry::common + namespace + ([#686](https://github.com/open-telemetry/opentelemetry-cpp/pull/686)) + +## [0.4.0] 2021-04-12 + +* [EXPORTER] ETW Exporter enhancements + ([#519](https://github.com/open-telemetry/opentelemetry-cpp/pull/519)) +* [EXPORTER] Read Zipkin endpoint from environment variable. 
+  ([#624](https://github.com/open-telemetry/opentelemetry-cpp/pull/624))
+* [EXPORTER] Split Zpages webserver hosting from Exporter
+  ([#626](https://github.com/open-telemetry/opentelemetry-cpp/pull/626))
+* [EXPORTER] ETW Exporter Usage Instructions
+  ([#628](https://github.com/open-telemetry/opentelemetry-cpp/pull/628))
+* [INSTRUMENTATION] HTTP Client/Server Instrumentation example
+  ([#632](https://github.com/open-telemetry/opentelemetry-cpp/pull/632))
+* [EXPORTER] Enable tls authentication for otlp grpc exporter
+  ([#635](https://github.com/open-telemetry/opentelemetry-cpp/pull/635))
+* [API] Refactoring trace_state to reuse common functionality in baggage
+  ([#638](https://github.com/open-telemetry/opentelemetry-cpp/pull/638/files))
+
+## [0.3.0] 2021-03-19
+
+* [EXPORTER] Added Zipkin Exporter.
+  ([#471](https://github.com/open-telemetry/opentelemetry-cpp/pull/471))
+* [API] Added Jaeger propagator.
+  ([#599](https://github.com/open-telemetry/opentelemetry-cpp/pull/599))
+* [PROPAGATOR] Added Composite Propagator
+  ([#597](https://github.com/open-telemetry/opentelemetry-cpp/pull/597))
+* [API] Propagate traceflag from parent
+  ([#603](https://github.com/open-telemetry/opentelemetry-cpp/pull/603))
+* [DOCS] Add sphinx support for api doc generation
+  ([#595](https://github.com/open-telemetry/opentelemetry-cpp/pull/595))
+* [SDK] Add service.name if missing in Resource
+  ([#616](https://github.com/open-telemetry/opentelemetry-cpp/pull/616))
+
+## [0.2.0] 2021-03-02
+
+* [SDK] Added `ForceFlush` to `TracerProvider`.
+  ([#588](https://github.com/open-telemetry/opentelemetry-cpp/pull/588))
+* [SDK] Added Resource API.
+  ([#502](https://github.com/open-telemetry/opentelemetry-cpp/pull/502))
+* [API] Modified TraceState support for w3c trace context as per specs.
+* [SDK] TraceState implementation as per spec
+  ([#551](https://github.com/open-telemetry/opentelemetry-cpp/pull/551))
+* [API] Added B3 Propagator.
+  ([#523](https://github.com/open-telemetry/opentelemetry-cpp/pull/523))
+* [Exporter] Added ETW Exporter.
+  ([#376](https://github.com/open-telemetry/opentelemetry-cpp/pull/376))
+* [CI] Enable cache for Bazel for faster builds.
+  ([#505](https://github.com/open-telemetry/opentelemetry-cpp/pull/505))
+
+## [0.0.1] 2020-12-16
+
+### Added
+
+* Trace API and SDK experimental
+* OTLP Exporter
+
+### Changed
+
+### Removed
diff --git a/ext/opentelemetry-cpp-1.21.0/CMakeLists.txt b/ext/opentelemetry-cpp-1.21.0/CMakeLists.txt
new file mode 100644
index 000000000..4e65f0493
--- /dev/null
+++ b/ext/opentelemetry-cpp-1.21.0/CMakeLists.txt
@@ -0,0 +1,927 @@
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+cmake_minimum_required(VERSION 3.14)
+
+# See https://cmake.org/cmake/help/latest/policy/CMP0074.html required by
+# certain versions of zlib which CURL depends on.
+if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.12")
+  cmake_policy(SET CMP0074 NEW)
+endif()
+
+# Allow to use normal variable for option()
+if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.13")
+  cmake_policy(SET CMP0077 NEW)
+endif()
+
+# Prefer CMAKE_MSVC_RUNTIME_LIBRARY if possible
+if(POLICY CMP0091)
+  cmake_policy(SET CMP0091 NEW)
+endif()
+
+if(POLICY CMP0092)
+  # https://cmake.org/cmake/help/latest/policy/CMP0092.html#policy:CMP0092 Make
+  # sure the /W3 is not removed from CMAKE_CXX_FLAGS since CMake 3.15
+  cmake_policy(SET CMP0092 OLD)
+endif()
+
+# MSVC RTTI flag /GR should not be added to CMAKE_CXX_FLAGS by default.
@see +# https://cmake.org/cmake/help/latest/policy/CMP0117.html +if(CMAKE_VERSION VERSION_GREATER_EQUAL "3.20.0") + cmake_policy(SET CMP0117 NEW) +endif() + +project(opentelemetry-cpp) + +# Mark variables as used so cmake doesn't complain about them +mark_as_advanced(CMAKE_TOOLCHAIN_FILE) + +# Note: CMAKE_FIND_PACKAGE_PREFER_CONFIG requires cmake 3.15. Prefer cmake +# CONFIG search mode to find dependencies. This is important to properly find +# protobuf versions 3.22.0 and above due to the abseil-cpp dependency. +set(CMAKE_FIND_PACKAGE_PREFER_CONFIG TRUE) + +# Don't use customized cmake modules if vcpkg is used to resolve dependence. +if(NOT DEFINED CMAKE_TOOLCHAIN_FILE) + list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/modules/") +endif() + +if(EXISTS "${CMAKE_SOURCE_DIR}/third_party_release") + file(STRINGS "${CMAKE_SOURCE_DIR}/third_party_release" third_party_tags) + foreach(third_party ${third_party_tags}) + string(REGEX REPLACE "^[ ]+" "" third_party ${third_party}) + string(REGEX MATCH "^[^=]+" third_party_name ${third_party}) + string(REPLACE "${third_party_name}=" "" third_party_tag ${third_party}) + set(${third_party_name} "${third_party_tag}") + endforeach() +endif() + +if(DEFINED ENV{ARCH}) + # Architecture may be specified via ARCH environment variable + set(ARCH $ENV{ARCH}) +else() + # Autodetection logic that populates ARCH variable + if(CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*") + # Windows may report AMD64 even if target is 32-bit + if(CMAKE_SIZEOF_VOID_P EQUAL 8) + set(ARCH x64) + elseif(CMAKE_SIZEOF_VOID_P EQUAL 4) + set(ARCH x86) + endif() + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "i686.*|i386.*|x86.*") + # Windows may report x86 even if target is 64-bit + if(CMAKE_SIZEOF_VOID_P EQUAL 8) + set(ARCH x64) + elseif(CMAKE_SIZEOF_VOID_P EQUAL 4) + set(ARCH x86) + endif() + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "powerpc") + # AIX will report the processor as 'powerpc' even if building in 64-bit mode + if(CMAKE_SIZEOF_VOID_P EQUAL 8) + set(ARCH ppc64) + else() + set(ARCH ppc32) + endif() + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES + "^(aarch64.*|AARCH64.*|arm64.*|ARM64.*)") + set(ARCH arm64) + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(arm.*|ARM.*)") + set(ARCH arm) + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64le") + set(ARCH ppc64le) + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64") + set(ARCH ppc64) + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(mips.*|MIPS.*)") + set(ARCH mips) + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(riscv.*|RISCV.*)") + set(ARCH riscv) + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(s390x.*|S390X.*)") + set(ARCH s390x) + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(sparc.*|SPARC.*)") + set(ARCH sparc) + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(loongarch.*|LOONGARCH.*)") + set(ARCH loongarch) + else() + message( + FATAL_ERROR + "opentelemetry-cpp: unrecognized target processor ${CMAKE_SYSTEM_PROCESSOR} configuration!" 
+ ) + endif() +endif() +message(STATUS "Building for architecture ARCH=${ARCH}") + +# Autodetect vcpkg toolchain +if(DEFINED ENV{VCPKG_ROOT} AND NOT DEFINED CMAKE_TOOLCHAIN_FILE) + set(CMAKE_TOOLCHAIN_FILE + "$ENV{VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake" + CACHE STRING "") +endif() + +if(VCPKG_CHAINLOAD_TOOLCHAIN_FILE) + include("${VCPKG_CHAINLOAD_TOOLCHAIN_FILE}") +endif() + +option(WITH_ABI_VERSION_1 "ABI version 1" ON) +option(WITH_ABI_VERSION_2 "EXPERIMENTAL: ABI version 2 preview" OFF) + +file(READ "${CMAKE_CURRENT_LIST_DIR}/api/include/opentelemetry/version.h" + OPENTELEMETRY_CPP_HEADER_VERSION_H) + +# +# We do not want to have WITH_ABI_VERSION = "1" or "2", and instead prefer two +# distinct flags, WITH_ABI_VERSION_1 and WITH_ABI_VERSION_2. +# +# This allows: +# +# * to have a specific option description for each ABI +# * to mark experimental/stable/deprecated on flags, for clarity +# * to search for exact abi usage move easily, discouraging: +# +# * cmake -DWITH_ABI_VERSION=${ARG} +# +# While not supported, having distinct WITH_ABI_VERSION_1 and WITH_ABI_VERSION_2 +# flags also opens the possibility to support multiple ABI concurrently, should +# that become necessary. +# +if(WITH_ABI_VERSION_1 AND WITH_ABI_VERSION_2) + # + # Only one ABI is supported in a build. + # + message( + FATAL_ERROR "Set either WITH_ABI_VERSION_1 or WITH_ABI_VERSION_2, not both") +endif() + +if(WITH_ABI_VERSION_2) + set(OPENTELEMETRY_ABI_VERSION_NO "2") +elseif(WITH_ABI_VERSION_1) + set(OPENTELEMETRY_ABI_VERSION_NO "1") +else() + if(OPENTELEMETRY_CPP_HEADER_VERSION_H MATCHES + "OPENTELEMETRY_ABI_VERSION_NO[ \t\r\n]+\"?([0-9]+)\"?") + math(EXPR OPENTELEMETRY_ABI_VERSION_NO ${CMAKE_MATCH_1}) + else() + message( + FATAL_ERROR + "OPENTELEMETRY_ABI_VERSION_NO not found on ${CMAKE_CURRENT_LIST_DIR}/api/include/opentelemetry/version.h" + ) + endif() +endif() + +message(STATUS "OPENTELEMETRY_ABI_VERSION_NO=${OPENTELEMETRY_ABI_VERSION_NO}") + +if(OPENTELEMETRY_CPP_HEADER_VERSION_H MATCHES + "OPENTELEMETRY_VERSION[ \t\r\n]+\"?([^\"]+)\"?") + set(OPENTELEMETRY_VERSION ${CMAKE_MATCH_1}) +else() + message( + FATAL_ERROR + "OPENTELEMETRY_VERSION not found on ${CMAKE_CURRENT_LIST_DIR}/api/include/opentelemetry/version.h" + ) +endif() + +message(STATUS "OPENTELEMETRY_VERSION=${OPENTELEMETRY_VERSION}") + +option(WITH_NO_DEPRECATED_CODE "Do not include deprecated code" OFF) + +set(WITH_STL + "OFF" + CACHE STRING "Which version of the Standard Library for C++ to use") + +option(WITH_GSL + "Whether to use Guidelines Support Library for C++ latest features" OFF) + +set(OPENTELEMETRY_INSTALL_default ON) +if(NOT CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) + set(OPENTELEMETRY_INSTALL_default OFF) +endif() +option(OPENTELEMETRY_INSTALL "Whether to install opentelemetry targets" + ${OPENTELEMETRY_INSTALL_default}) + +include("${PROJECT_SOURCE_DIR}/cmake/tools.cmake") + +if(NOT WITH_STL STREQUAL "OFF") + # These definitions are needed for test projects that do not link against + # opentelemetry-api library directly. We ensure that variant implementation + # (absl::variant or std::variant) in variant unit test code is consistent with + # the global project build definitions. 
Optimize for speed to reduce the hops + if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + if(CMAKE_BUILD_TYPE MATCHES Debug) + # Turn off optimizations for DEBUG + set(MSVC_CXX_OPT_FLAG "/Od") + else() + string(REGEX MATCH "\/O" result ${CMAKE_CXX_FLAGS}) + if(NOT ${result} MATCHES "\/O") + set(MSVC_CXX_OPT_FLAG "/O2") + endif() + endif() + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MSVC_CXX_OPT_FLAG}") + endif() +endif() + +option(WITH_OTLP_RETRY_PREVIEW + "Whether to enable experimental retry functionality" OFF) + +option(WITH_OTLP_GRPC_SSL_MTLS_PREVIEW + "Whether to enable mTLS support fro gRPC" OFF) + +option(WITH_OTLP_GRPC "Whether to include the OTLP gRPC exporter in the SDK" + OFF) + +option(WITH_OTLP_HTTP "Whether to include the OTLP http exporter in the SDK" + OFF) + +option(WITH_OTLP_FILE "Whether to include the OTLP file exporter in the SDK" + OFF) + +option( + WITH_OTLP_HTTP_COMPRESSION + "Whether to include gzip compression for the OTLP http exporter in the SDK" + OFF) + +option(WITH_CURL_LOGGING "Whether to enable select CURL verbosity in OTel logs" + OFF) + +option(WITH_ZIPKIN "Whether to include the Zipkin exporter in the SDK" OFF) + +option(WITH_PROMETHEUS "Whether to include the Prometheus Client in the SDK" + OFF) + +option(WITH_ELASTICSEARCH + "Whether to include the Elasticsearch Client in the SDK" OFF) + +option(WITH_NO_GETENV "Whether the platform supports environment variables" OFF) + +option(BUILD_TESTING "Whether to enable tests" ON) + +option(WITH_BENCHMARK "Whether to build benchmark program" ON) + +option(BUILD_W3CTRACECONTEXT_TEST "Whether to build w3c trace context" OFF) + +option(OTELCPP_MAINTAINER_MODE "Build in maintainer mode (-Wall -Werror)" OFF) + +option(WITH_OPENTRACING "Whether to include the Opentracing shim" OFF) + +option(OTELCPP_VERSIONED_LIBS "Whether to generate the versioned shared libs" + OFF) + +# +# This option is experimental, subject to change in the spec: +# +# * https://github.com/open-telemetry/opentelemetry-specification/issues/2232 +# +option(WITH_REMOVE_METER_PREVIEW + "EXPERIMENTAL, ABI BREAKING: Allow to remove a meter" OFF) + +if(OTELCPP_VERSIONED_LIBS AND NOT BUILD_SHARED_LIBS) + message(FATAL_ERROR "OTELCPP_VERSIONED_LIBS=ON requires BUILD_SHARED_LIBS=ON") +endif() + +if(WIN32) + option(WITH_ETW "Whether to include the ETW Exporter in the SDK" ON) +else() + if(DEFINED (WITH_ETW)) + message(FATAL_ERROR "WITH_ETW is only supported on Windows") + endif() +endif(WIN32) + +# Do not convert deprecated message to error +if(CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang|AppleClang") + add_compile_options(-Wno-error=deprecated-declarations) +endif() + +option( + WITH_API_ONLY + "Only build the API (use as a header-only library). Overrides WITH_EXAMPLES and all options to enable exporters" + OFF) +option(WITH_EXAMPLES "Whether to build examples" ON) + +# This requires CURL, OFF by default. +option( + WITH_EXAMPLES_HTTP + "Whether to build http client/server examples. 
Requires WITH_EXAMPLES and CURL" + OFF) + +option(WITH_FUNC_TESTS "Whether to build functional tests" ON) + +option(WITH_ASYNC_EXPORT_PREVIEW "Whether to enable async export" OFF) + +# Exemplar specs status is experimental, so behind feature flag by default +option(WITH_METRICS_EXEMPLAR_PREVIEW + "Whether to enable exemplar within metrics" OFF) + +# Experimental, so behind feature flag by default +option(WITH_THREAD_INSTRUMENTATION_PREVIEW + "Whether to enable thread instrumentation" OFF) + +option(OPENTELEMETRY_SKIP_DYNAMIC_LOADING_TESTS + "Whether to build test libraries that are always linked as shared libs" + OFF) + +# +# Verify options dependencies +# + +if(WITH_EXAMPLES_HTTP AND NOT WITH_EXAMPLES) + message(FATAL_ERROR "WITH_EXAMPLES_HTTP=ON requires WITH_EXAMPLES=ON") +endif() + +find_package(Threads) + +function(install_windows_deps) + # Bootstrap vcpkg from CMake and auto-install deps in case if we are missing + # deps on Windows. Respect the target architecture variable. + set(VCPKG_TARGET_ARCHITECTURE + ${ARCH} + PARENT_SCOPE) + message(STATUS "Installing build tools and dependencies...") + set(ENV{ARCH} ${ARCH}) + execute_process( + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/tools/setup-buildtools.cmd) + set(CMAKE_TOOLCHAIN_FILE + ${CMAKE_CURRENT_SOURCE_DIR}/tools/vcpkg/scripts/buildsystems/vcpkg.cmake + CACHE FILEPATH "") + message( + STATUS + "Make sure that vcpkg.cmake is set as the CMAKE_TOOLCHAIN_FILE at the START of the cmake build process! + Can be command-line arg (cmake -DCMAKE_TOOLCHAIN_FILE=...) or set in your editor of choice." + ) + +endfunction() + +function(set_target_version target_name) + if(OTELCPP_VERSIONED_LIBS) + set_target_properties( + ${target_name} PROPERTIES VERSION ${OPENTELEMETRY_VERSION} + SOVERSION ${OPENTELEMETRY_ABI_VERSION_NO}) + endif() +endfunction() + +if(MSVC) + # Options for Visual C++ compiler: /Zc:__cplusplus - report an updated value + # for recent C++ language standards. Without this option MSVC returns the + # value of __cplusplus="199711L" + if(MSVC_VERSION GREATER 1900) + # __cplusplus flag is not supported by Visual Studio 2015 + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Zc:__cplusplus") + endif() +endif() + +# include GNUInstallDirs before include cmake/opentelemetry-proto.cmake because +# opentelemetry-proto installs targets to the location variables defined in +# GNUInstallDirs. 
+include(GNUInstallDirs) + +if(WITH_PROMETHEUS) + find_package(prometheus-cpp CONFIG QUIET) + if(NOT prometheus-cpp_FOUND) + message(STATUS "Trying to use local prometheus-cpp from submodule") + if(EXISTS ${PROJECT_SOURCE_DIR}/third_party/prometheus-cpp/.git) + set(SAVED_ENABLE_TESTING ${ENABLE_TESTING}) + set(SAVED_CMAKE_CXX_CLANG_TIDY ${CMAKE_CXX_CLANG_TIDY}) + set(SAVED_CMAKE_CXX_INCLUDE_WHAT_YOU_USE + ${CMAKE_CXX_INCLUDE_WHAT_YOU_USE}) + set(ENABLE_TESTING OFF) + set(CMAKE_CXX_CLANG_TIDY "") + set(CMAKE_CXX_INCLUDE_WHAT_YOU_USE "") + add_subdirectory(third_party/prometheus-cpp) + set(ENABLE_TESTING ${SAVED_ENABLE_TESTING}) + set(CMAKE_CXX_CLANG_TIDY ${SAVED_CMAKE_CXX_CLANG_TIDY}) + set(CMAKE_CXX_INCLUDE_WHAT_YOU_USE + ${SAVED_CMAKE_CXX_INCLUDE_WHAT_YOU_USE}) + + # Get the version of the prometheus-cpp submodule + find_package(Git QUIET) + if(Git_FOUND) + execute_process( + COMMAND ${GIT_EXECUTABLE} describe --tags --always + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/third_party/prometheus-cpp + OUTPUT_VARIABLE prometheus-cpp_VERSION + OUTPUT_STRIP_TRAILING_WHITESPACE) + string(REGEX REPLACE "^v" "" prometheus-cpp_VERSION + "${prometheus-cpp_VERSION}") + endif() + + message( + STATUS + "Using local prometheus-cpp from submodule. Version = ${prometheus-cpp_VERSION}" + ) + else() + message( + FATAL_ERROR + "\nprometheus-cpp package was not found. Please either provide it manually or clone with submodules. " + "To initialize, fetch and checkout any nested submodules, you can use the following command:\n" + "git submodule update --init --recursive") + endif() + else() + message(STATUS "Using external prometheus-cpp") + endif() +endif() + +if(WITH_OTLP_GRPC + OR WITH_OTLP_HTTP + OR WITH_OTLP_FILE) + + # Including the CMakeFindDependencyMacro resolves an error from + # gRPCConfig.cmake on some grpc versions. See + # https://github.com/grpc/grpc/pull/33361 for more details. + include(CMakeFindDependencyMacro) + + # Protobuf 3.22+ depends on abseil-cpp and must be found using the cmake + # find_package CONFIG search mode. The following attempts to find Protobuf + # using the CONFIG mode first, and if not found, falls back to the MODULE + # mode. See https://gitlab.kitware.com/cmake/cmake/-/issues/24321 for more + # details. + find_package(Protobuf CONFIG) + if(NOT Protobuf_FOUND) + find_package(Protobuf MODULE) + if(Protobuf_FOUND AND Protobuf_VERSION VERSION_GREATER_EQUAL "3.22.0") + message( + WARNING + "Found Protobuf version ${Protobuf_VERSION} using MODULE mode. " + "Linking errors may occur. 
Protobuf 3.22+ depends on abseil-cpp " + "and should be found using the CONFIG mode.") + endif() + endif() + + if(WITH_OTLP_GRPC) + find_package(gRPC CONFIG) + endif() + if((NOT Protobuf_FOUND) OR (WITH_OTLP_GRPC AND NOT gRPC_FOUND)) + if(WIN32 AND (NOT DEFINED CMAKE_TOOLCHAIN_FILE)) + install_windows_deps() + endif() + + if(WIN32 AND (NOT DEFINED CMAKE_TOOLCHAIN_FILE)) + message(FATAL_ERROR "Windows dependency installation failed!") + endif() + if(WIN32) + include(${CMAKE_TOOLCHAIN_FILE}) + endif() + + if(NOT Protobuf_FOUND) + find_package(Protobuf CONFIG REQUIRED) + endif() + if(NOT gRPC_FOUND AND WITH_OTLP_GRPC) + find_package(gRPC CONFIG) + endif() + if(WIN32) + # Always use x64 protoc.exe + if(NOT EXISTS "${Protobuf_PROTOC_EXECUTABLE}") + set(Protobuf_PROTOC_EXECUTABLE + ${CMAKE_CURRENT_SOURCE_DIR}/tools/vcpkg/packages/protobuf_x64-windows/tools/protobuf/protoc.exe + ) + endif() + endif() + endif() + # Latest Protobuf imported targets and without legacy module support + if(TARGET protobuf::protoc) + if(CMAKE_CROSSCOMPILING AND Protobuf_PROTOC_EXECUTABLE) + set(PROTOBUF_PROTOC_EXECUTABLE ${Protobuf_PROTOC_EXECUTABLE}) + else() + project_build_tools_get_imported_location(PROTOBUF_PROTOC_EXECUTABLE + protobuf::protoc) + # If protobuf::protoc is not a imported target, then we use the target + # directly for fallback + if(NOT PROTOBUF_PROTOC_EXECUTABLE) + set(PROTOBUF_PROTOC_EXECUTABLE protobuf::protoc) + endif() + endif() + elseif(Protobuf_PROTOC_EXECUTABLE) + # Some versions of FindProtobuf.cmake uses mixed case instead of uppercase + set(PROTOBUF_PROTOC_EXECUTABLE ${Protobuf_PROTOC_EXECUTABLE}) + endif() + include(CMakeDependentOption) + + message(STATUS "PROTOBUF_PROTOC_EXECUTABLE=${PROTOBUF_PROTOC_EXECUTABLE}") + set(SAVED_CMAKE_CXX_CLANG_TIDY ${CMAKE_CXX_CLANG_TIDY}) + set(CMAKE_CXX_CLANG_TIDY "") + include(cmake/opentelemetry-proto.cmake) + set(CMAKE_CXX_CLANG_TIDY ${SAVED_CMAKE_CXX_CLANG_TIDY}) +endif() + +# +# Do we need HTTP CLIENT CURL ? +# + +if(WITH_OTLP_HTTP + OR WITH_ELASTICSEARCH + OR WITH_ZIPKIN + OR BUILD_W3CTRACECONTEXT_TEST + OR WITH_EXAMPLES_HTTP) + set(WITH_HTTP_CLIENT_CURL ON) +else() + set(WITH_HTTP_CLIENT_CURL OFF) +endif() + +# +# Do we need CURL ? +# + +if((NOT WITH_API_ONLY) AND WITH_HTTP_CLIENT_CURL) + # No specific version required. + find_package(CURL REQUIRED) + # Set the CURL_VERSION from the legacy CURL_VERSION_STRING Required for CMake + # versions below 4.0 + if(NOT DEFINED CURL_VERSION AND DEFINED CURL_VERSION_STRING) + set(CURL_VERSION ${CURL_VERSION_STRING}) + endif() +endif() + +# +# Do we need ZLIB ? +# + +if((NOT WITH_API_ONLY) + AND WITH_HTTP_CLIENT_CURL + AND WITH_OTLP_HTTP_COMPRESSION) + # No specific version required. + find_package(ZLIB REQUIRED) + # Set the ZLIB_VERSION from the legacy ZLIB_VERSION_STRING Required for CMake + # versions below 3.26 + if(NOT DEFINED ZLIB_VERSION AND DEFINED ZLIB_VERSION_STRING) + set(ZLIB_VERSION ${ZLIB_VERSION_STRING}) + endif() +endif() + +# +# Do we need NLOHMANN_JSON ? 
+#
+
+if(WITH_ELASTICSEARCH
+   OR WITH_ZIPKIN
+   OR WITH_OTLP_HTTP
+   OR WITH_OTLP_FILE
+   OR BUILD_W3CTRACECONTEXT_TEST
+   OR WITH_ETW)
+  set(USE_NLOHMANN_JSON ON)
+else()
+  set(USE_NLOHMANN_JSON OFF)
+endif()
+
+if((NOT WITH_API_ONLY) AND USE_NLOHMANN_JSON)
+  include(cmake/nlohmann-json.cmake)
+endif()
+
+if(OTELCPP_MAINTAINER_MODE)
+  if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+    message(STATUS "Building with gcc in maintainer mode.")
+
+    add_compile_options(-Wall)
+    add_compile_options(-Werror)
+    add_compile_options(-Wextra)
+
+    # Tested with GCC 9.4 on github.
+    if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 9.4)
+      message(STATUS "Building with additional warnings for gcc.")
+
+      # Relaxed warnings
+
+      # Enforced warnings
+
+      # C++ options only
+      add_compile_options($<$<STREQUAL:$<COMPILE_LANGUAGE>,CXX>:-Wextra-semi>)
+      add_compile_options(
+        $<$<STREQUAL:$<COMPILE_LANGUAGE>,CXX>:-Woverloaded-virtual>)
+      add_compile_options(
+        $<$<STREQUAL:$<COMPILE_LANGUAGE>,CXX>:-Wsuggest-override>)
+      add_compile_options(
+        $<$<STREQUAL:$<COMPILE_LANGUAGE>,CXX>:-Wold-style-cast>)
+
+      # C and C++
+      add_compile_options(-Wcast-qual)
+      add_compile_options(-Wformat-security)
+      add_compile_options(-Wlogical-op)
+      add_compile_options(-Wmissing-include-dirs)
+      add_compile_options(-Wstringop-truncation)
+      add_compile_options(-Wundef)
+      add_compile_options(-Wvla)
+    endif()
+  elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+    message(STATUS "Building with clang in maintainer mode.")
+
+    add_compile_options(-Wall)
+    add_compile_options(-Werror)
+    add_compile_options(-Wextra)
+
+    # Tested with Clang 11.0 on github.
+    if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 11.0)
+      message(STATUS "Building with additional warnings for clang.")
+
+      # Relaxed warnings
+      add_compile_options(-Wno-error=unused-private-field)
+
+      # Enforced warnings
+      add_compile_options(-Wcast-qual)
+      add_compile_options(-Wconditional-uninitialized)
+      add_compile_options(-Wextra-semi)
+      add_compile_options(-Wformat-security)
+      add_compile_options(-Wheader-hygiene)
+      add_compile_options(-Winconsistent-missing-destructor-override)
+      add_compile_options(-Winconsistent-missing-override)
+      add_compile_options(-Wnewline-eof)
+      add_compile_options(-Wnon-virtual-dtor)
+      add_compile_options(-Woverloaded-virtual)
+      add_compile_options(-Wrange-loop-analysis)
+      add_compile_options(-Wundef)
+      add_compile_options(-Wundefined-reinterpret-cast)
+      add_compile_options(-Wvla)
+      add_compile_options(-Wold-style-cast)
+    endif()
+  elseif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
+    message(STATUS "Building with msvc in maintainer mode.")
+
+    add_compile_options(/WX)
+    add_compile_options(/W4)
+
+    # Relaxed warnings
+    add_compile_options(/wd4100)
+    add_compile_options(/wd4125)
+    add_compile_options(/wd4566)
+    add_compile_options(/wd4127)
+    add_compile_options(/wd4512)
+    add_compile_options(/wd4267)
+    add_compile_options(/wd4996)
+
+    # Enforced warnings
+    add_compile_options(/we4265) # 'class': class has virtual functions, but
+                                 # destructor is not virtual
+    add_compile_options(/we5204) # A class with virtual functions has
+                                 # non-virtual trivial destructor.
+
+  elseif()
+    message(FATAL_ERROR "Building with unknown compiler in maintainer mode.")
+  endif()
+endif(OTELCPP_MAINTAINER_MODE)
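+
+# Note (editorial): the $<$<STREQUAL:$<COMPILE_LANGUAGE>,CXX>:...> generator
+# expressions above gate a warning flag so it is added only when the current
+# translation unit is compiled as C++; the remaining flags apply to both C and
+# C++ sources. Maintainer mode is meant for builds of opentelemetry-cpp itself
+# (e.g. configured with -DOTELCPP_MAINTAINER_MODE=ON) and turns the warning
+# sets above into hard errors via -Werror / /WX.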
+
+list(APPEND CMAKE_PREFIX_PATH "${CMAKE_BINARY_DIR}")
+
+include(CTest)
+if(BUILD_TESTING)
+  if(EXISTS ${CMAKE_BINARY_DIR}/lib/libgtest.a)
+    # Prefer GTest from build tree. GTest is not always working with
+    # CMAKE_PREFIX_PATH
+    set(GTEST_INCLUDE_DIRS
+        ${CMAKE_CURRENT_SOURCE_DIR}/third_party/googletest/googletest/include
+        ${CMAKE_CURRENT_SOURCE_DIR}/third_party/googletest/googlemock/include)
+    if(TARGET gtest)
+      set(GTEST_BOTH_LIBRARIES gtest gtest_main)
+    else()
+      set(GTEST_BOTH_LIBRARIES ${CMAKE_BINARY_DIR}/lib/libgtest.a
+                               ${CMAKE_BINARY_DIR}/lib/libgtest_main.a)
+    endif()
+  elseif(WIN32)
+    # Make sure we are always bootstrapped with vcpkg on Windows
+    find_package(GTest)
+    if(NOT (GTEST_FOUND OR GTest_FOUND))
+      if(DEFINED CMAKE_TOOLCHAIN_FILE)
+        message(
+          FATAL_ERROR
+            "Please install GTest with the CMAKE_TOOLCHAIN_FILE at ${CMAKE_TOOLCHAIN_FILE}"
+        )
+      else()
+        install_windows_deps()
+        include(${CMAKE_TOOLCHAIN_FILE})
+        find_package(GTest REQUIRED)
+      endif()
+    endif()
+  else()
+    # Prefer GTest installed by OS distro, brew or vcpkg package manager
+    find_package(GTest REQUIRED)
+  endif()
+  if(NOT GTEST_BOTH_LIBRARIES)
+    # New GTest package names
+    if(TARGET GTest::gtest)
+      set(GTEST_BOTH_LIBRARIES GTest::gtest GTest::gtest_main)
+    elseif(TARGET GTest::GTest)
+      set(GTEST_BOTH_LIBRARIES GTest::GTest GTest::Main)
+    endif()
+  endif()
+  if(GTEST_INCLUDE_DIRS)
+    include_directories(SYSTEM ${GTEST_INCLUDE_DIRS})
+  endif()
+  message(STATUS "GTEST_INCLUDE_DIRS = ${GTEST_INCLUDE_DIRS}")
+  message(STATUS "GTEST_BOTH_LIBRARIES = ${GTEST_BOTH_LIBRARIES}")
+
+  # Try to find gmock
+  if(NOT GMOCK_LIB AND TARGET GTest::gmock)
+    set(GMOCK_LIB GTest::gmock)
+  elseif(MSVC)
+    # Explicitly specify that we consume GTest from shared library. The rest of
+    # code logic below determines whether we link Release or Debug flavor of
+    # the library. These flavors have different prefix on Windows, gmock and
+    # gmockd respectively.
+    add_definitions(-DGTEST_LINKED_AS_SHARED_LIBRARY=1)
+    if(GMOCK_LIB)
+      # unset GMOCK_LIB to force find_library to redo the lookup, as the cached
+      # entry could cause linking to incorrect flavor of gmock and leading to
+      # runtime error.
+ unset(GMOCK_LIB CACHE) + endif() + endif() + if(NOT GMOCK_LIB) + if(MSVC AND CMAKE_BUILD_TYPE STREQUAL "Debug") + find_library(GMOCK_LIB gmockd PATH_SUFFIXES lib) + else() + find_library(GMOCK_LIB gmock PATH_SUFFIXES lib) + endif() + # Reset GMOCK_LIB if it was not found, or some target may link + # GMOCK_LIB-NOTFOUND + if(NOT GMOCK_LIB) + unset(GMOCK_LIB) + unset(GMOCK_LIB CACHE) + endif() + endif() + + enable_testing() + if(WITH_BENCHMARK) + # Benchmark respects the CMAKE_PREFIX_PATH + find_package(benchmark CONFIG REQUIRED) + endif() +endif() + +# Record build config and versions +message(STATUS "---------------------------------------------") +message(STATUS "build settings") +message(STATUS "---------------------------------------------") +message(STATUS "OpenTelemetry: ${OPENTELEMETRY_VERSION}") +message(STATUS "OpenTelemetry ABI: ${OPENTELEMETRY_ABI_VERSION_NO}") +message(STATUS "ARCH: ${ARCH}") +message(STATUS "CXX: ${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION}") +message(STATUS "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}") +message(STATUS "CXXFLAGS: ${CMAKE_CXX_FLAGS}") +message(STATUS "CMAKE_CXX_STANDARD: ${CMAKE_CXX_STANDARD}") +message(STATUS "CMAKE_TOOLCHAIN_FILE: ${CMAKE_TOOLCHAIN_FILE}") +message(STATUS "BUILD_SHARED_LIBS: ${BUILD_SHARED_LIBS}") + +message(STATUS "---------------------------------------------") +message(STATUS "opentelemetry-cpp build options") +message(STATUS "---------------------------------------------") +message(STATUS "WITH_API_ONLY: ${WITH_API_ONLY}") +message(STATUS "WITH_NO_DEPRECATED_CODE: ${WITH_NO_DEPRECATED_CODE}") +message(STATUS "WITH_ABI_VERSION_1: ${WITH_ABI_VERSION_1}") +message(STATUS "WITH_ABI_VERSION_2: ${WITH_ABI_VERSION_2}") +message(STATUS "OTELCPP_VERSIONED_LIBS: ${OTELCPP_VERSIONED_LIBS}") +message(STATUS "OTELCPP_MAINTAINER_MODE: ${OTELCPP_MAINTAINER_MODE}") +message(STATUS "WITH_STL: ${WITH_STL}") +message(STATUS "WITH_GSL: ${WITH_GSL}") +message(STATUS "WITH_NO_GETENV: ${WITH_NO_GETENV}") + +message(STATUS "---------------------------------------------") +message(STATUS "opentelemetry-cpp cmake component options") +message(STATUS "---------------------------------------------") +message(STATUS "WITH_OTLP_GRPC: ${WITH_OTLP_GRPC}") +message(STATUS "WITH_OTLP_HTTP: ${WITH_OTLP_HTTP}") +message(STATUS "WITH_OTLP_FILE: ${WITH_OTLP_FILE}") +message(STATUS "WITH_HTTP_CLIENT_CURL: ${WITH_HTTP_CLIENT_CURL}") +message(STATUS "WITH_ZIPKIN: ${WITH_ZIPKIN}") +message(STATUS "WITH_PROMETHEUS: ${WITH_PROMETHEUS}") +message(STATUS "WITH_ELASTICSEARCH: ${WITH_ELASTICSEARCH}") +message(STATUS "WITH_OPENTRACING: ${WITH_OPENTRACING}") +message(STATUS "WITH_ETW: ${WITH_ETW}") +message(STATUS "OPENTELEMETRY_BUILD_DLL: ${OPENTELEMETRY_BUILD_DLL}") + +message(STATUS "---------------------------------------------") +message(STATUS "feature preview options") +message(STATUS "---------------------------------------------") +message(STATUS "WITH_ASYNC_EXPORT_PREVIEW: ${WITH_ASYNC_EXPORT_PREVIEW}") +message( + STATUS + "WITH_THREAD_INSTRUMENTATION_PREVIEW: ${WITH_THREAD_INSTRUMENTATION_PREVIEW}" +) +message( + STATUS "WITH_METRICS_EXEMPLAR_PREVIEW: ${WITH_METRICS_EXEMPLAR_PREVIEW}") +message(STATUS "WITH_REMOVE_METER_PREVIEW: ${WITH_REMOVE_METER_PREVIEW}") +message( + STATUS "WITH_OTLP_GRPC_SSL_MTLS_PREVIEW: ${WITH_OTLP_GRPC_SSL_MTLS_PREVIEW}") +message(STATUS "WITH_OTLP_RETRY_PREVIEW: ${WITH_OTLP_RETRY_PREVIEW}") +message(STATUS "---------------------------------------------") +message(STATUS "third-party options") +message(STATUS 
"---------------------------------------------") +message(STATUS "WITH_NLOHMANN_JSON: ${USE_NLOHMANN_JSON}") +message(STATUS "WITH_CURL_LOGGING: ${WITH_CURL_LOGGING}") +message(STATUS "WITH_OTLP_HTTP_COMPRESSION: ${WITH_OTLP_HTTP_COMPRESSION}") +message(STATUS "---------------------------------------------") +message(STATUS "examples and test options") +message(STATUS "---------------------------------------------") +message(STATUS "WITH_BENCHMARK: ${WITH_BENCHMARK}") +message(STATUS "WITH_EXAMPLES: ${WITH_EXAMPLES}") +message(STATUS "WITH_EXAMPLES_HTTP: ${WITH_EXAMPLES_HTTP}") +message(STATUS "WITH_FUNC_TESTS: ${WITH_FUNC_TESTS}") +message(STATUS "BUILD_W3CTRACECONTEXT_TEST: ${BUILD_W3CTRACECONTEXT_TEST}") +message(STATUS "BUILD_TESTING: ${BUILD_TESTING}") +message(STATUS "---------------------------------------------") +message(STATUS "versions") +message(STATUS "---------------------------------------------") +message(STATUS "CMake: ${CMAKE_VERSION}") +message(STATUS "GTest: ${GTest_VERSION}") +message(STATUS "benchmark: ${benchmark_VERSION}") +if(WITH_GSL) + message(STATUS "GSL: ${GSL_VERSION}") +endif() +if(absl_FOUND) + message(STATUS "Abseil: ${absl_VERSION}") +endif() +if(Protobuf_FOUND) + message(STATUS "Protobuf: ${Protobuf_VERSION}") +endif() +if(gRPC_FOUND) + message(STATUS "gRPC: ${gRPC_VERSION}") +endif() +if(CURL_FOUND) + message(STATUS "CURL: ${CURL_VERSION}") +endif() +if(ZLIB_FOUND) + message(STATUS "ZLIB: ${ZLIB_VERSION}") +endif() +if(USE_NLOHMANN_JSON) + message(STATUS "nlohmann-json: ${nlohmann_json_VERSION}") +endif() +if(prometheus-cpp_FOUND) + message(STATUS "prometheus-cpp: ${prometheus-cpp_VERSION}") +endif() +message(STATUS "---------------------------------------------") + +include("${PROJECT_SOURCE_DIR}/cmake/otel-install-functions.cmake") + +include(CMakePackageConfigHelpers) + +if(DEFINED OPENTELEMETRY_BUILD_DLL) + if(NOT WIN32) + message(FATAL_ERROR "Build DLL is only supported on Windows!") + endif() + if(NOT MSVC) + message(WARNING "Build DLL is supposed to work with MSVC!") + endif() + if(WITH_STL) + message( + WARNING "Build DLL with C++ STL could cause binary incompatibility!") + endif() + add_definitions(-DOPENTELEMETRY_BUILD_EXPORT_DLL) +endif() + +add_subdirectory(api) + +if(WITH_OPENTRACING) + find_package(OpenTracing CONFIG QUIET) + if(NOT OpenTracing_FOUND) + set(OPENTRACING_DIR "third_party/opentracing-cpp") + message(STATUS "Trying to use local ${OPENTRACING_DIR} from submodule") + if(EXISTS "${PROJECT_SOURCE_DIR}/${OPENTRACING_DIR}/.git") + set(SAVED_BUILD_TESTING ${BUILD_TESTING}) + set(BUILD_TESTING OFF) + set(SAVED_CMAKE_CXX_INCLUDE_WHAT_YOU_USE + ${CMAKE_CXX_INCLUDE_WHAT_YOU_USE}) + set(CMAKE_CXX_INCLUDE_WHAT_YOU_USE "") + add_subdirectory(${OPENTRACING_DIR}) + set(BUILD_TESTING ${SAVED_BUILD_TESTING}) + set(CMAKE_CXX_INCLUDE_WHAT_YOU_USE + ${SAVED_CMAKE_CXX_INCLUDE_WHAT_YOU_USE}) + else() + message( + FATAL_ERROR + "\nopentracing-cpp package was not found. Please either provide it manually or clone with submodules. 
" + "To initialize, fetch and checkout any nested submodules, you can use the following command:\n" + "git submodule update --init --recursive") + endif() + else() + message(STATUS "Using external opentracing-cpp") + endif() + add_subdirectory(opentracing-shim) +endif() + +if(NOT WITH_API_ONLY) + set(BUILD_TESTING ${BUILD_TESTING}) + + add_subdirectory(sdk) + add_subdirectory(ext) + add_subdirectory(exporters) + + if(BUILD_TESTING) + add_subdirectory(test_common) + endif() + if(WITH_EXAMPLES) + add_subdirectory(examples) + endif() + if(WITH_FUNC_TESTS) + add_subdirectory(functional) + endif() +endif() + +include(cmake/opentelemetry-build-external-component.cmake) +include(cmake/patch-imported-config.cmake) + +if(OPENTELEMETRY_INSTALL) + # Install the cmake config and version files + otel_install_cmake_config() + + # Install the components and associated files + otel_install_components() + + # Install the thirdparty dependency definition file + otel_install_thirdparty_definitions() + + if(BUILD_PACKAGE) + include(cmake/package.cmake) + include(CPack) + endif() +endif() diff --git a/ext/opentelemetry-cpp-1.21.0/CMakeSettings.json b/ext/opentelemetry-cpp-1.21.0/CMakeSettings.json new file mode 100644 index 000000000..02d068a3a --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/CMakeSettings.json @@ -0,0 +1,126 @@ +{ + "configurations": [ + { + "name": "nostd-x64-Debug", + "generator": "Ninja", + "configurationType": "Debug", + "inheritEnvironments": [ + "msvc_x64_x64" + ], + "buildRoot": "${projectDir}\\out\\vs2019\\${name}", + "installRoot": "${projectDir}\\out\\vs2019\\${name}\\install", + "cmakeCommandArgs": "", + "buildCommandArgs": "", + "ctestCommandArgs": "", + "variables": [ + { + "name": "WITH_OTLP_GRPC", + "value": "True", + "type": "BOOL" + }, + { + "name": "WITH_EXAMPLES", + "value": "true", + "type": "BOOL" + } + ] + }, + { + "name": "nostd-x64-Release", + "generator": "Ninja", + "configurationType": "Release", + "inheritEnvironments": [ + "msvc_x64_x64" + ], + "buildRoot": "${projectDir}\\out\\vs2019\\${name}", + "installRoot": "${projectDir}\\out\\vs2019\\${name}\\install", + "cmakeCommandArgs": "", + "buildCommandArgs": "", + "ctestCommandArgs": "", + "cmakeToolchain": "", + "variables": [ + { + "name": "WITH_OTLP_GRPC", + "value": "True", + "type": "BOOL" + }, + { + "name": "WITH_EXAMPLES", + "value": "true", + "type": "BOOL" + } + ] + }, + { + "name": "stdlib-x64-Debug", + "generator": "Ninja", + "configurationType": "Debug", + "inheritEnvironments": [ + "msvc_x64_x64" + ], + "buildRoot": "${projectDir}\\out\\vs2019\\${name}", + "installRoot": "${projectDir}\\out\\vs2019\\${name}\\install", + "cmakeCommandArgs": "", + "buildCommandArgs": "", + "ctestCommandArgs": "", + "variables": [ + { + "name": "WITH_STL", + "value": "True", + "type": "BOOL" + }, + { + "name": "WITH_OTLP_GRPC", + "value": "True", + "type": "BOOL" + }, + { + "name": "WITH_EXAMPLES", + "value": "true", + "type": "BOOL" + }, + { + "name": "WITH_PROMETHEUS", + "value": "True", + "type": "BOOL" + } + ] + }, + { + "name": "stdlib-x64-Release", + "generator": "Ninja", + "configurationType": "Release", + "inheritEnvironments": [ + "msvc_x64_x64" + ], + "buildRoot": "${projectDir}\\out\\vs2019\\${name}", + "installRoot": "${projectDir}\\out\\vs2019\\${name}\\install", + "cmakeCommandArgs": "", + "buildCommandArgs": "", + "ctestCommandArgs": "", + "cmakeToolchain": "", + "variables": [ + { + "name": "WITH_STL", + "value": "True", + "type": "BOOL" + }, + { + "name": "WITH_OTLP_GRPC", + "value": "True", + "type": 
"BOOL" + }, + { + "name": "WITH_EXAMPLES", + "value": "true", + "type": "BOOL" + }, + { + "name": "WITH_PROMETHEUS", + "value": "True", + "type": "BOOL" + } + ] + } + ] +} diff --git a/ext/opentelemetry-cpp-1.21.0/CODE_OF_CONDUCT.md b/ext/opentelemetry-cpp-1.21.0/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..fee94ae58 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/CODE_OF_CONDUCT.md @@ -0,0 +1,3 @@ +# OpenTelemetry Community Code of Conduct + +OpenTelemetry follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/ext/opentelemetry-cpp-1.21.0/CONTRIBUTING.md b/ext/opentelemetry-cpp-1.21.0/CONTRIBUTING.md new file mode 100644 index 000000000..fda99e097 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/CONTRIBUTING.md @@ -0,0 +1,322 @@ +# Contributing to opentelemetry-cpp + +The OpenTelemetry C/C++ special interest group (SIG) meets regularly. See the +OpenTelemetry [community](https://github.com/open-telemetry/community#cc-sdk) +repo for information on this and other language SIGs. + +See the [public meeting +notes](https://docs.google.com/document/d/1i1E4-_y4uJ083lCutKGDhkpi3n4_e774SBLi9hPLocw/edit) +for a summary description of past meetings. To request edit access, join the +meeting or get in touch on +[Slack](https://cloud-native.slack.com/archives/C01N3AT62SJ). + +See the [community membership +document](https://github.com/open-telemetry/community/blob/main/community-membership.md) +on how to become a +[**Member**](https://github.com/open-telemetry/community/blob/main/community-membership.md#member), +[**Approver**](https://github.com/open-telemetry/community/blob/main/community-membership.md#approver) +and +[**Maintainer**](https://github.com/open-telemetry/community/blob/main/community-membership.md#maintainer). + +## Development + +OpenTelemetry C++ uses the [Google naming +convention](https://google.github.io/styleguide/cppguide.html#Naming). + +Code is formatted automatically and enforced by CI. + +### Build and Run Code Examples + +Note: these instructions apply to examples configured with Bazel, see +example-specific documentation for other build automation tools. + +Install the latest bazel version by following the steps listed +[here](https://docs.bazel.build/versions/master/install.html). + +Select an example of interest from the [examples +folder](https://github.com/open-telemetry/opentelemetry-cpp/tree/main/examples). +Inside each example directory is a `BUILD` file containing instructions for +Bazel. Find the binary name of your example by inspecting the contents of this +`BUILD` file. + +Build the example from the root of the opentelemetry-cpp directory using Bazel. +Replace `` with the identifier found in the previous step: + +```sh +bazel build //examples/: +``` + +Run the resulting executable to see telemetry from the application as it calls +the instrumented library: + +```sh +bazel-bin/examples// +``` + +For instance, building and running the `simple` example can be done as follows: + +```sh +bazel build //examples/simple:example_simple +bazel-bin/examples/simple/example_simple +``` + +### DevContainer Setup for Project + +This guide provides instructions on how to set up and use the development +container (`devcontainer`) environment to streamline testing and development +for this project. With the DevContainer, you can work in a consistent environment +configured with all the necessary dependencies and tools. 
+ +#### Prerequisites + +Before getting started, ensure you have the following installed: + +* **Docker**: DevContainers require Docker for containerization. +* **Visual Studio Code (VSCode)** with the **Remote - Containers** extension. + +#### Getting Started + +* **Open the Project in DevContainer**: + + Open the project in VSCode. When prompted to "Reopen in Container," select + this option. If you’re not prompted, you can manually open the container by + selecting **Remote-Containers: Reopen in Container** from the command palette + (`F1` or `Ctrl+Shift+P`). + +* **Container Setup**: + + The DevContainer environment will automatically build based on the configuration + files provided (e.g., `.devcontainer/devcontainer.json`). This setup will install + required dependencies, tools, and environment variables needed for the project. + +* **Container Customization**: + See `.devcontainer/README.md` for devcontainer configuration options. + +#### Available Commands + +Once inside the DevContainer, you can use the following commands to run tests +and CI workflows. + +##### 1. Run Tests with Bazelisk + +To run tests with Bazelisk using specific compilation options, use: + +```bash +bazelisk-linux-amd64 test --copt=-DENABLE_LOGS_PREVIEW +--test_output=errors --cache_test_results=no --copt=-DENABLE_TEST //exporters/otlp/... +``` + +###### Command Breakdown + +* `--copt=-DENABLE_LOGS_PREVIEW`: Enables preview logs. +* `--test_output=errors`: Shows only the errors in the test output. +* `--cache_test_results=no`: Forces Bazel to re-run tests without caching. +* `--copt=-DENABLE_TEST`: Enables testing capabilities for the target code. +* `//exporters/otlp/...`: Specifies the test target path. + +##### 2. Run CI Script + +You can also run the CI script provided to perform testing with the +following command as an +example: + +```bash +bash ci/do_ci.sh cmake.exporter.otprotocol.test +``` + +This command initiates the CI pipeline, executing tests specifically for the +**cmake.exporter.otprotocol** module. + +#### Troubleshooting + +If you encounter issues: + +* **Rebuild the DevContainer**: From the command palette, run + **Remote-Containers: Rebuild Container** to reinitialize the environment. +* **Check Bazelisk and CI Script Logs**: Inspect logs for any configuration or + dependency issues. + +#### Additional Notes + +* You can adjust compiler options (`--copt`) as needed to test additional flags + or enable/disable specific features. +* The test results will be displayed in the terminal within the DevContainer for + easy debugging. + +#### Resources + +* **Bazelisk Documentation**: [https://github.com/bazelbuild/bazelisk](https://github.com/bazelbuild/bazelisk) +* **VSCode DevContainer Documentation**: [https://code.visualstudio.com/docs/remote/containers](https://code.visualstudio.com/docs/remote/containers) + +### Docker Development Image + +The `.devcontainer/Dockerfile.dev` +dockerfile can be built directly with the following command. + +```sh + docker build -t opentelemetry-cpp-dev -f ./.devcontainer/Dockerfile.dev . +``` + +You can customize the image using build arguments + to match permissions with the host user. + +```sh + docker build -t opentelemetry-cpp-dev \ + --build-arg USER_UID="$(id -u)" \ + --build-arg USER_GID="$(id -g)" \ + -f ./.devcontainer/Dockerfile.dev . 
+ +``` + +Run an interactive bash session binding your host + opentelemetry-cpp directory to the container's workspace: + +```sh +docker run -it -v "$PWD:/workspaces/opentelemetry-cpp" opentelemetry-cpp-dev bash +``` + +## Pull Requests + +### How to Send Pull Requests + +Everyone is welcome to contribute code to `opentelemetry-cpp` via GitHub pull +requests (PRs). + +To create a new PR, fork the project in GitHub and clone the upstream repo: + +```sh +git clone --recursive https://github.com/open-telemetry/opentelemetry-cpp.git +``` + +Add your fork as a remote: + +```sh +git remote add fork https://github.com/YOUR_GITHUB_USERNAME/opentelemetry-cpp.git +``` + +If you haven't, make sure you are loading the submodules required to build +OpenTelemetry + +```sh +git submodule init +git submodule update +``` + +The source code is automatically formatted using clang-format. + +The output can vary between versions, so make sure to install `clang-format` +version `10.0`, and have `clang-format-10` in your execution path, +so that the helper script `tools/format.sh` can find it. + +Check out a new branch, make modifications and push the branch to your fork: + +```sh +git checkout -b feature +# edit files +tools/format.sh +git commit +git push fork feature +``` + +If you made changes to the Markdown documents (`*.md` files), install the latest +[`markdownlint-cli`](https://github.com/igorshubovych/markdownlint-cli) and run: + +```sh +markdownlint . +``` + +If you modified shell scripts (`*.sh` files), install `shellcheck` and run: + +```sh +shellcheck --severity=error .sh +``` + +Open a pull request against the main `opentelemetry-cpp` repo. + +To run tests locally, please read the [CI instructions](ci/README.md). + +### How to Receive Comments + +* If the PR is not ready for review, please put `[WIP]` in the title, tag it as + `work-in-progress`, or mark it as + [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/). +* Make sure [CLA](https://identity.linuxfoundation.org/projects/cncf) is signed + and CI is clear. +* For non-trivial changes, please update the [CHANGELOG](./CHANGELOG.md). + +### How to Get PRs Merged + +A PR is considered to be **ready to merge** when: + +* It has received two approvals with at least one approval from + [Approver](https://github.com/open-telemetry/community/blob/main/community-membership.md#approver) + / + [Maintainer](https://github.com/open-telemetry/community/blob/main/community-membership.md#maintainer) + (at different company). +* A pull request opened by an Approver / Maintainer can be merged with only one + approval from Approver / Maintainer (at different company). +* Major feedback items/points are resolved. +* It has been open for review for at least one working day. This gives people + reasonable time to review. +* Trivial changes (typo, cosmetic, doc, etc.) don't have to wait for one day. +* Urgent fixes can take exceptions as long as it has been actively communicated. + +Any Maintainer can merge the PR once it is **ready to merge**. Maintainer can +make conscious judgement to merge pull requests which have not strictly met +above mentioned requirements. + +If a PR has been stuck (e.g. there are lots of debates and people couldn't agree +on each other), the owner should try to get people aligned by: + +* Consolidating the perspectives and putting a summary in the PR. 
It is + recommended to add a link into the PR description, which points to a comment + with a summary in the PR conversation +* Stepping back to see if it makes sense to narrow down the scope of the PR or + split it up. + +If none of the above worked and the PR has been stuck for more than 2 weeks, the +owner should bring it to the OpenTelemetry C++ SIG meeting. See +[README.md](README.md#contributing) for the meeting link. + +## Design Choices + +As with other OpenTelemetry clients, opentelemetry-cpp follows the +[opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification). + +It's especially valuable to read through the [library +guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/library-guidelines.md). + +## Useful Resources + +Hi! If you’re looking at this document, these resources will provide you the +knowledge to get started as a newcomer to the OpenTelemetry project. They will +help you understand the OpenTelemetry Project, its components, and specifically +the C++ repository. + +### Reading Resources + +* Medium + [article](https://medium.com/opentelemetry/how-to-start-contributing-to-opentelemetry-b23991ad91f4) + (October 2019) on how to start contributing to the OpenTelemetry project. +* Medium + [article](https://medium.com/opentelemetry/opentelemetry-beyond-getting-started-5ac43cd0fe26) + (January 2020) describing the overarching goals and use cases for + OpenTelemetry. + +### Relevant Documentation + +* [OpenTelemetry + Specification](https://github.com/open-telemetry/opentelemetry-specification) +* The OpenTelemetry Specification describes the requirements and expectations + of for all OpenTelemetry implementations. + +* Read through the OpenTelemetry C++ documentation +* The + [API](https://opentelemetry-cpp.readthedocs.io/en/latest/api/api.html) + and + [SDK](https://opentelemetry-cpp.readthedocs.io/en/latest/sdk/sdk.html) + documentation provides a lot of information on what the classes and their + functions are used for. + +Please contribute! You’re welcome to add more information if you come across any +helpful resources. diff --git a/ext/opentelemetry-cpp-1.21.0/DEPRECATED.md b/ext/opentelemetry-cpp-1.21.0/DEPRECATED.md new file mode 100644 index 000000000..ec97f420f --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/DEPRECATED.md @@ -0,0 +1,139 @@ +# Deprecated + +This document lists all the items currently deprecated in opentelemetry-cpp. + +Deprecated items will be removed in the future. 
+ +## Guidelines + +### Maintainer guidelines + +See the [deprecation-process](./docs/deprecation-process.md) + +## [TEMPLATE] + +### New Deprecation Title (Template) + +#### Announcement (Template) + +#### Motivation (Template) + +#### Scope (Template) + +#### Mitigation (Template) + +#### Planned removal (Template) + +## [Platforms] + +N/A + +## [Compilers] + +N/A + +## [Third party dependencies] + +N/A + +## [Build tools] + +N/A + +## [Build scripts] + +N/A + +## [opentelemetry-cpp API] + +N/A + +## [opentelemetry-cpp SDK] + +N/A + +## [opentelemetry-cpp Exporter] + +N/A + +## [Documentation] + +N/A + +## Semantic conventions + +### Header files "semantic_conventions.h" + +#### Announcement (semantic_conventions.h) + +Deprecation is announced as part of the migration to weaver: + +* `Version:` release following opentelemetry-cpp 1.17.0 +* `Date:` Nov 9, 2024 +* `PR:` [PR 3105](https://github.com/open-telemetry/opentelemetry-cpp/pull/3105) + +#### Motivation (semantic_conventions.h) + +The header files for semantic conventions are generated automatically. +The tooling to generate these files is changing: + +* before, the build-tool repository was used +* now, the weaver repository is used + +Changes in tooling allows to generate code that is better organized, +with dedicated header files per group of semantic conventions, +instead of a single header file for everything. + +#### Scope (semantic_conventions.h) + +The following files: + +* `api/include/opentelemetry/trace/semantic_conventions.h` +* `sdk/include/opentelemetry/sdk/resource/semantic_conventions.h` + +are now deprecated. + +They correspond to semantic conventions v1.27.0, +and will no longer be maintained up to date. + +These files will be removed in the future. + +#### Mitigation (semantic_conventions.h) + +Two things have changed: + +* the header file to use +* the symbol name to use. + +Before, the semantic convention for `url.full` was: + +* declared in file `semantic_conventions.h` +* declared as symbol `SemanticConventions::kUrlFull` + +Now, the `url.full` convention, which is part or the `url` group, is: + +* declared in file `semconv/url_attributes.h` +* declared as symbol `semconv::url::kUrlFull` + +Application code that uses semantic conventions must be adjusted +accordingly. + +In addition, semantic conventions that are not marked as stable +are generated in a different header file, placed under directory +`incubating`, to better separate stable and non stable code. + +For example, file `semconv/incubating/url_attributes.h` +defines `semconv::url::kUrlDomain`, +which is not marked as stable in semconv v1.27.0 + +#### Planned removal (semantic_conventions.h) + +The following files: + +* `api/include/opentelemetry/trace/semantic_conventions.h` +* `sdk/include/opentelemetry/sdk/resource/semantic_conventions.h` + +will be removed. + +The removal date is planned for July 1, 2025. +This allows more than six months for applications to adjust. diff --git a/ext/opentelemetry-cpp-1.21.0/INSTALL.md b/ext/opentelemetry-cpp-1.21.0/INSTALL.md new file mode 100644 index 000000000..296a61c44 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/INSTALL.md @@ -0,0 +1,452 @@ +# Building opentelemetry-cpp + +[CMake](https://cmake.org/) and [Bazel](https://bazel.build) are the official +build systems for opentelemetry-cpp. 
+ +## Dependencies + +You can link OpenTelemetry C++ SDK with libraries provided in +[dependencies.md](https://github.com/open-telemetry/opentelemetry-cpp/blob/main/docs/dependencies.md) +(complete list of libraries with versions used in our CI can be found +[here](https://github.com/open-telemetry/opentelemetry-cpp/blob/main/third_party_release)). + +## Build instructions using CMake + +### Prerequisites for CMake build + +- A supported platform (e.g. Windows, macOS or Linux). Refer to [Platforms + Supported](./README.md#supported-development-platforms) for more information. +- A compatible C++ compiler supporting at least C++14. Major compilers are + supported. Refer to [Supported Compilers](./README.md#supported-c-versions) + for more information. +- [Git](https://git-scm.com/) for fetching opentelemetry-cpp source code from + repository. To install Git, consult the [Set up + Git](https://help.github.com/articles/set-up-git/) guide on GitHub. +- [CMake](https://cmake.org/) for building opentelemetry-cpp API, SDK with their + unittests. The minimum CMake version is 3.14. + CMake 3.15+ is recommended on Windows due to known CI test failures with 3.14. + To install CMake, + consult the [Installing CMake](https://cmake.org/install/) guide. +- [GoogleTest](https://github.com/google/googletest) framework to build and run + the unittests. Refer to + [third_party_release](https://github.com/open-telemetry/opentelemetry-cpp/blob/main/third_party_release#L5) + for version of GoogleTest used in CI. To install GoogleTest, consult the + [GoogleTest Build + Instructions](https://github.com/google/googletest/blob/master/googletest/README.md#generic-build-instructions). +- [Google Benchmark](https://github.com/google/benchmark) framework to build and + run benchmark tests. Refer to + [third_party_release](https://github.com/open-telemetry/opentelemetry-cpp/blob/main/third_party_release#L4) + for version of Benchmark used in CI. To install Benchmark, consult the + [GoogleBenchmark Build + Instructions](https://github.com/google/benchmark#installation). +- Apart from above core requirements, the Exporters and Propagators have their + build dependencies. + +### Building dependencies for the OTLP exporters + +The opentelemetry-cpp OTLP exporters depend on Protobuf and gRPC + (in the case of the otlp grpc exporters). +Protobuf (since version 3.22.0) and gRPC depend on Abseil. +For cmake builds, it is best practice to build and install Abseil +, Protobuf, and gPRC as independent packages - +configuring cmake for Protobuf and gRPC to build against + the installed packages instead of using their submodule option. + +If building and installing Protobuf and gRPC manually with cmake the + recommended approach is: + +1. Choose the desired tag version of grpc. Find the compatible versions of abseil + and protobuf by inspecting the submodules of grpc at that tag. +2. Build and install the required version of abseil +3. Build and install the required version of protobuf + - Set the cmake option of Protobuf to build against the installed + package of Abseil (`protobuf_ABSL_PROVIDER=package`) +4. Build and install the required version of grpc + - Set the cmake option of grpc to build against the installed packages + of Abseil and Protobuf (cmake options - `gRPC_ABSL_PROVIDER=package` and `gRPC_PROTOBUF_PROVIDER=package`) + +### Building as standalone CMake Project + +1. 
Getting the opentelemetry-cpp source with its submodules: + + ```console + # Change to the directory where you want to create the code repository + $ cd ~ + $ mkdir source && cd source && git clone --recurse-submodules https://github.com/open-telemetry/opentelemetry-cpp + Cloning into 'opentelemetry-cpp'... + ... + Resolving deltas: 100% (3225/3225), done. + $ + ``` + +2. Navigate to the repository cloned above, and create the `CMake` build + configuration. + + ```console + $ cd opentelemetry-cpp + $ mkdir build && cd build && cmake .. + -- The C compiler identification is GNU 9.3.0 + -- The CXX compiler identification is GNU 9.3.0 + ... + -- Configuring done + -- Generating done + -- Build files have been written to: /home//source/opentelemetry-cpp/build + $ + ``` + + Some of the available cmake build variables we can use during cmake + configuration: + + - `-DCMAKE_POSITION_INDEPENDENT_CODE=ON` : Please note that with default + configuration, the code is compiled without `-fpic` option, so it is not + suitable for inclusion in shared libraries. To enable the code for + inclusion in shared libraries, this variable is used. + - `-DBUILD_SHARED_LIBS=ON` : To build shared libraries for the targets. + Please refer to note [below](#building-shared-libs-for-windows) for + Windows DLL support. + - `-DWITH_OTLP_GRPC=ON` : To enable building OTLP GRPC exporter. + - `-DWITH_OTLP_HTTP=ON` : To enable building OTLP HTTP exporter. + - `-DWITH_PROMETHEUS=ON` : To enable building prometheus exporter. + - `-DOPENTELEMETRY_INSTALL=ON`: To install `otel-cpp` library needed + for external code linking. + +3. Once the build configuration is created, build the CMake targets - this + includes building SDKs and unittests for API and SDK. Note that since API is + header only library, no separate build is triggered for it. + + ```console + $ cmake --build . --target all + Scanning dependencies of target timestamp_test + [ 0%] Building CXX object api/test/core/CMakeFiles/timestamp_test.dir/timestamp_test.cc.o + [ 1%] Linking CXX executable timestamp_test + ... + Scanning dependencies of target w3c_tracecontext_test + [ 99%] Building CXX object ext/test/w3c_tracecontext_test/CMakeFiles/w3c_tracecontext_test.dir/main.cc.o + [100%] Linking CXX executable w3c_tracecontext_test + [100%] Built target w3c_tracecontext_test + $ + ``` + +4. Once CMake tests are built, run them with `ctest` command + + ```console + $ ctest + Test project /tmp/opentelemetry-cpp/build + Start 1: trace.SystemTimestampTest.Construction + ... + Start 380: ext.http.urlparser.UrlParserTests.BasicTests + ... + 100% tests passed, 0 tests failed out of 380 + $ + ``` + +5. Optionally install the header files for API, and generated targets and header + files for SDK at custom/default install location. + + ```console + $ cmake --install . --prefix // + -- Installing: //lib/cmake/opentelemetry-cpp/opentelemetry-cpp-config.cmake + -- Installing: //lib/cmake/opentelemetry-cpp/opentelemetry-cpp-config-version.cmake + ... + $ + ``` + +### Incorporating into an external CMake Project + +There are two approaches to incoporate `opentelemetry-cpp` into + an external CMake project: + +1. Build and install `opentelemetry-cpp` then use `find_package` + to import its targets + + ```cmake + # Find all installed components and link all imported targets + find_package(opentelemetry-cpp CONFIG REQUIRED) + ... 
+ target_include_directories(foo PRIVATE ${OPENTELEMETRY_CPP_INCLUDE_DIRS}) + target_link_libraries(foo PRIVATE ${OPENTELEMETRY_CPP_LIBRARIES}) + ``` + + ```cmake + # Find a specific component and link its imported target(s) + find_package(opentelemetry-cpp CONFIG REQUIRED COMPONENTS api) + ... + target_link_libraries(foo PRIVATE opentelemetry-cpp::api) + ``` + +2. Use CMake's [FetchContent](https://cmake.org/cmake/help/latest/module/FetchContent.html) + module to fetch and build `opentelemetry-cpp` then make its targets available + + ```cmake + # Fetch from an existing clone and build + include(FetchContent) + FetchContent_Declare(opentelemetry-cpp SOURCE_DIR "") + FetchContent_MakeAvailable(opentelemetry-cpp) + ... + target_link_libraries(foo PRIVATE opentelemetry-cpp::api) + ``` + + ```cmake + # Clone and build opentelemetry-cpp from a git tag + include(FetchContent) + FetchContent_Declare( + opentelemetry-cpp + GIT_REPOSITORY https://github.com/open-telemetry/opentelemetry-cpp.git + GIT_TAG v1.20.0) + FetchContent_MakeAvailable(opentelemetry-cpp) + ... + target_link_libraries(foo PRIVATE opentelemetry-cpp::api) + ``` + +In both cases the project's built or imported CMake targets will be + available in the `opentelemetry-cpp` namespace (ie: `opentelemetry-cpp::api`) + +#### Using opentelemetry-cpp package components + +> **Note:** `opentelemetry-cpp` CMake package components were introduced in `v1.21.0` + +The `opentelemetry-cpp` package supports using the `COMPONENTS` argument to +`find_package`. The following example illustrates using this feature to include +and link the `api` header only target to an instrumented `foo_lib` while only including +and linking the `sdk` and `otlp_grpc_exporter` targets to the `foo_app`. + +```cmake +# foo_lib/CMakeLists.txt +find_package(opentelemetry-cpp CONFIG REQUIRED COMPONENTS api) +add_library(foo_lib foo.cpp) +target_link_libraries(foo_lib PRIVATE opentelemetry-cpp::api) +``` + +```cmake +# foo_app/CMakeLists.txt +find_package(opentelemetry-cpp CONFIG REQUIRED COMPONENTS api sdk exporters_otlp_grpc) +add_executable(foo_app main.cpp) +target_link_libraries(foo_app PRIVATE foo_lib opentelemetry-cpp::api opentelemetry-cpp::trace opentelemetry-cpp::otlp_grpc_exporter ) +``` + +The following table provides the mapping between components and targets. Components +and targets available in the installation depends on the opentelemetry-cpp package +build configuration. 
+ +> **Note:** components `exporters_elasticsearch` and `exporters_etw` + may be moved out of the core package and to `opentelemetry-cpp-contrib` + in a future release + +| Component | Targets | +|----------------------------|--------------------------------------------------------------------------------------------------| +| **api** | opentelemetry-cpp::api | +| **sdk** | opentelemetry-cpp::sdk | +| | opentelemetry-cpp::version | +| | opentelemetry-cpp::common | +| | opentelemetry-cpp::resources | +| | opentelemetry-cpp::trace | +| | opentelemetry-cpp::metrics | +| | opentelemetry-cpp::logs | +| **ext_common** | opentelemetry-cpp::ext | +| **ext_http_curl** | opentelemetry-cpp::http_client_curl | +| **ext_dll** | opentelemetry-cpp::opentelemetry_cpp | +| **exporters_in_memory** | opentelemetry-cpp::in_memory_span_exporter | +| | opentelemetry-cpp::in_memory_metric_exporter | +| **exporters_ostream** | opentelemetry-cpp::ostream_log_record_exporter | +| | opentelemetry-cpp::ostream_metrics_exporter | +| | opentelemetry-cpp::ostream_span_exporter | +| **exporters_otlp_common** | opentelemetry-cpp::proto | +| | opentelemetry-cpp::otlp_recordable | +| **exporters_otlp_file** | opentelemetry-cpp::otlp_file_client | +| | opentelemetry-cpp::otlp_file_exporter | +| | opentelemetry-cpp::otlp_file_log_record_exporter | +| | opentelemetry-cpp::otlp_file_metric_exporter | +| **exporters_otlp_grpc** | opentelemetry-cpp::proto_grpc | +| | opentelemetry-cpp::otlp_grpc_client | +| | opentelemetry-cpp::otlp_grpc_exporter | +| | opentelemetry-cpp::otlp_grpc_log_record_exporter | +| | opentelemetry-cpp::otlp_grpc_metrics_exporter | +| **exporters_otlp_http** | opentelemetry-cpp::otlp_http_client | +| | opentelemetry-cpp::otlp_http_exporter | +| | opentelemetry-cpp::otlp_http_log_record_exporter | +| | opentelemetry-cpp::otlp_http_metric_exporter | +| **exporters_prometheus** | opentelemetry-cpp::prometheus_exporter | +| **exporters_elasticsearch**| opentelemetry-cpp::elasticsearch_log_record_exporter | +| **exporters_etw** | opentelemetry-cpp::etw_exporter | +| **exporters_zipkin** | opentelemetry-cpp::zipkin_trace_exporter | +| **shims_opentracing** | opentelemetry-cpp::opentracing_shim | + +## Build instructions using Bazel + +NOTE: Experimental, and not supported for all the components. Make sure the +[GoogleTest](https://github.com/google/googletest) installation may fail if +there is a different version of googletest already installed in system-defined +path. + +### Prerequisites for Bazel build + +- A supported platform (e.g. Windows, macOS or Linux). Refer to [Platforms +Supported](./README.md#supported-development-platforms) for more information. +- A compatible C++ compiler supporting at least C++14. Major compilers are +supported. Refer to [Supported Compilers](./README.md#supported-c-versions) for +more information. +- [Git](https://git-scm.com/) for fetching opentelemetry-cpp source code from +repository. To install Git, consult the [Set up +Git](https://help.github.com/articles/set-up-git/) guide on GitHub. +- [Bazel](https://www.bazel.build/) for building opentelemetry-cpp API, SDK with +their unittests. We use 3.7.2 in our build system. + +To install Bazel, consult the [Installing +Bazel](https://docs.bazel.build/versions/3.7.0/install.html) guide. + +### Building as standalone Bazel Project + +1. 
Getting the opentelemetry-cpp source: + + ```console + # Change to the directory where you want to create the code repository + $ cd ~ + $ mkdir source && cd source + $ git clone https://github.com/open-telemetry/opentelemetry-cpp + Cloning into 'opentelemetry-cpp'... + ... + Resolving deltas: 100% (3225/3225), done. + $ + ``` + +2. Navigate to the repository cloned above, download the dependencies and build + the source code: + + ```console + $ cd opentelemetry-cpp + $ bazel build //... + bazel build -- //... -//exporters/otlp/... -//exporters/prometheus/... + Extracting Bazel installation... + Starting local Bazel server and connecting to it... + INFO: Analyzed 121 targets (98 packages loaded, 3815 targets configured). + INFO: Found 121 targets... + INFO: From Compiling sdk/src/trace/tracer_context.cc: + ... + + ``` + +3. Once Bazel tests are built, run them with `bazel test //...` command + + ```console + $ bazel test //... + .. + $ + ``` + +4. The build artifacts will be located under `bazel-bin` + +### Incorporating into an existing Bazel Project + +- WORKSPACE file: + +```console +http_archive( + name = "io_opentelemetry_cpp", + sha256 = "", + strip_prefix = "opentelemetry-cpp-1.0.1", + urls = [ + "https://github.com/open-telemetry/opentelemetry-cpp/archive/refs/tags/v1.0.1.tar.gz" + ], +) + +# Load OpenTelemetry dependencies after load. +load("@io_opentelemetry_cpp//bazel:repository.bzl", "opentelemetry_cpp_deps") + +opentelemetry_cpp_deps() + +# (required after v1.8.0) Load extra dependencies required for OpenTelemetry +load("@io_opentelemetry_cpp//bazel:extra_deps.bzl", "opentelemetry_extra_deps") + +opentelemetry_extra_deps() + +# Load gRPC dependencies after load. +load("@com_github_grpc_grpc//bazel:grpc_deps.bzl", "grpc_deps") + +grpc_deps() + +# Load extra gRPC dependencies due to https://github.com/grpc/grpc/issues/20511 +load("@com_github_grpc_grpc//bazel:grpc_extra_deps.bzl", "grpc_extra_deps") + +grpc_extra_deps() + +``` + +- Component level BUILD file: + +```console +cc_library( + name = "" + ... + deps = [ + "@io_opentelemetry_cpp//api", + "@io_opentelemetry_cpp//exporters/otlp:otlp_exporter", + "@io_opentelemetry_cpp//sdk/src/trace", + ... + ], + ... +) +``` + +## Building shared libs for Windows + +Windows DLL build is supported under **preview**. Please check the +[doc](./docs/build-as-dll.md) for more details. + +## Generating binary packages + +OpenTelemetry C++ supports generating platform specific binary packages from CMake +configuration. The packages generated through this mayn't be production ready, +and user may have to customize it further before using it as distribution. + +- Linux : deb, rpm, tgz +- MacOS : tgz +- Windows : NuGet, zip + +This requires platform specific package generators already installed. The package +generation can subsequently be enabled by using BUILD_PACKAGE option during cmake +configuration + + ```console + $ cd opentelemetry-cpp + $ mkdir build && cd build && cmake -DBUILD_PACKAGE=ON .. + + -- Package name: opentelemetry-cpp-1.8.1-ubuntu-20.04-x86_64.deb + -- Configuring done + -- Generating done + ... + $ + ``` + +Once build is complete as specified in [standalone build section](#building-as-standalone-cmake-project), +the package can be generated as below. + + ```console + $ cpack -C debug + CPack: Create package using DEB + ... + CPack: - package: /home//opentelemetry-cpp/build/opentelemetry-cpp-1.8.1-ubuntu-20.04-x86_64.deb generated. 
+ $ + ``` + +## Using Package Managers + +If you are using [Conan](https://www.conan.io/) to manage your dependencies, add +[`opentelemetry-cpp/x.y.z`](https://conan.io/center/opentelemetry-cpp) to your +`conanfile`'s requires, where `x.y.z` is the release version you want to use. +Please file issues [here](https://github.com/conan-io/conan-center-index/issues) +if you experience problems with the packages. + +If you are using [vcpkg](https://github.com/Microsoft/vcpkg/) on your project +for external dependencies, then you can install the [opentelemetry-cpp +package](https://github.com/microsoft/vcpkg/tree/master/ports/opentelemetry-cpp) +with `vcpkg install opentelemetry-cpp` and follow the then displayed +descriptions. Please see the vcpkg project for any issues regarding the +packaging. + +If you are using [alpine linux](https://www.alpinelinux.org/) you can install +the [opentelemetry-cpp packages](https://pkgs.alpinelinux.org/packages?name=opentelemetry-cpp-*) +with `apk add -X http://dl-cdn.alpinelinux.org/alpine/edge/testing opentelemetry-cpp-dev`. + +Please note, these packages are not officially provided and maintained by +OpenTelemetry C++ project, and are just listed here to consolidate all such +efforts for ease of developers. diff --git a/ext/opentelemetry-cpp-1.21.0/LICENSE b/ext/opentelemetry-cpp-1.21.0/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/ext/opentelemetry-cpp-1.21.0/MODULE.bazel b/ext/opentelemetry-cpp-1.21.0/MODULE.bazel new file mode 100644 index 000000000..59d844090 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/MODULE.bazel @@ -0,0 +1,25 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +module( + name = "opentelemetry-cpp", + version = "1.21.0", + compatibility_level = 0, + repo_name = "io_opentelemetry_cpp", +) + +bazel_dep(name = "abseil-cpp", version = "20240116.1", repo_name = "com_google_absl") +bazel_dep(name = "bazel_skylib", version = "1.5.0") +bazel_dep(name = "curl", version = "8.8.0") +bazel_dep(name = "grpc", version = "1.63.1.bcr.1", repo_name = "com_github_grpc_grpc") +bazel_dep(name = "nlohmann_json", version = "3.12.0", repo_name = "github_nlohmann_json") +bazel_dep(name = "opentelemetry-proto", version = "1.6.0", repo_name = "com_github_opentelemetry_proto") +bazel_dep(name = "opentracing-cpp", version = "1.6.0", repo_name = "com_github_opentracing") +bazel_dep(name = "platforms", version = "0.0.8") +bazel_dep(name = "prometheus-cpp", version = "1.3.0", repo_name = "com_github_jupp0r_prometheus_cpp") +bazel_dep(name = "protobuf", version = "26.0", repo_name = "com_google_protobuf") +bazel_dep(name = "rules_proto", version = "5.3.0-21.7") +bazel_dep(name = "zlib", version = "1.3.1.bcr.1") + +bazel_dep(name = "google_benchmark", version = "1.8.3", dev_dependency = True, repo_name = "com_github_google_benchmark") +bazel_dep(name = "googletest", version = "1.14.0.bcr.1", dev_dependency = True, repo_name = "com_google_googletest") diff --git a/ext/opentelemetry-cpp-1.21.0/README.md b/ext/opentelemetry-cpp-1.21.0/README.md new file mode 100644 index 000000000..01645aa8c --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/README.md @@ -0,0 +1,127 @@ +# OpenTelemetry C++ + +[![Slack](https://img.shields.io/badge/slack-@cncf/otel/cpp-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01N3AT62SJ) +[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-cpp/branch/main/graphs/badge.svg?)](https://codecov.io/gh/open-telemetry/opentelemetry-cpp/) +[![Build +Status](https://github.com/open-telemetry/opentelemetry-cpp/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/open-telemetry/opentelemetry-cpp/actions) +[![Release](https://img.shields.io/github/v/release/open-telemetry/opentelemetry-cpp?include_prereleases&style=)](https://github.com/open-telemetry/opentelemetry-cpp/releases/) +[![FOSSA License Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-cpp.svg?type=shield&issueType=license)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-cpp?ref=badge_shield&issueType=license) +[![FOSSA Security Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-cpp.svg?type=shield&issueType=security)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-cpp?ref=badge_shield&issueType=security) + +The C++ [OpenTelemetry](https://opentelemetry.io/) client. + +## Project Status + +**Stable** across all 3 signals i.e. `Logs`, `Metrics`, and `Traces`. + +See [Spec Compliance +Matrix](https://github.com/open-telemetry/opentelemetry-specification/blob/main/spec-compliance-matrix.md) +to understand which portions of the specification has been implemented in this +repo. 
+ +## Supported C++ Versions + +Code shipped from this repository generally supports the following versions of +C++ standards: + +* ISO/IEC 14882:2014 (C++14) +* ISO/IEC 14882:2017 (C++17) +* ISO/IEC 14882:2020 (C++20) + +Any exceptions to this are noted in the individual `README.md` files. + +Please note that supporting the [C Programming +Language](https://en.wikipedia.org/wiki/C_(programming_language)) is not a goal +of the current project. + +## Supported Development Platforms + + Our CI pipeline builds and tests on following `x86-64` platforms: + +| Platform | Build type | +|---------------------------------------------------------------------|---------------| +| ubuntu-22.04 (GCC 10, GCC 12, Clang 14) | CMake, Bazel | +| ubuntu-20.04 (GCC 9.4.0 - default compiler) | CMake, Bazel | +| ubuntu-20.04 (GCC 9.4.0 with -std=c++14/17/20 flags) | CMake, Bazel | +| macOS 12.7 (Xcode 14.2) | Bazel | +| Windows Server 2019 (Visual Studio Enterprise 2019) | CMake, Bazel | +| Windows Server 2022 (Visual Studio Enterprise 2022) | CMake | + +In general, the code shipped from this repository should build on all platforms +having C++ compiler with [supported C++ standards](#supported-c-versions). + +## Dependencies + +Please refer to [Dependencies.md](docs/dependencies.md) for OSS Dependencies and +license requirements. + +## Installation + +Please refer to [INSTALL.md](./INSTALL.md). + +## Getting Started + +As an application owner or the library author, you can find the getting started +guide and reference documentation on +[opentelemetry-cpp.readthedocs.io](https://opentelemetry-cpp.readthedocs.io/en/latest/) + +The `examples/simple` directory contains a minimal program demonstrating how to +instrument a small library using a simple `processor` and console `exporter`, +along with build files for CMake and Bazel. + +## Contributing + +See [CONTRIBUTING.md](CONTRIBUTING.md) + +We meet weekly, and the time of the meeting alternates between Monday at 13:00 +PT and Wednesday at 9:00 PT. The meeting is subject to change depending on +contributors' availability. Check the [OpenTelemetry community +calendar](https://github.com/open-telemetry/community#calendar) +for specific dates and Zoom meeting links. + +Meeting notes are available as a public [Google +doc](https://docs.google.com/document/d/1i1E4-_y4uJ083lCutKGDhkpi3n4_e774SBLi9hPLocw/edit?usp=sharing). +For edit access, get in touch on +[Slack](https://cloud-native.slack.com/archives/C01N3AT62SJ). 
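+
+To make the Getting Started section above concrete, the following is a rough,
+illustrative sketch of what a program like `examples/simple` does: it wires an
+ostream ("console") exporter to a simple span processor, installs the resulting
+tracer provider globally, and emits one span. Names such as `example_library`
+and `do_work` are placeholders, and the exact factory headers and signatures
+may differ between releases.
+
+```cpp
+#include <memory>
+#include <utility>
+
+#include "opentelemetry/exporters/ostream/span_exporter_factory.h"
+#include "opentelemetry/sdk/trace/simple_processor_factory.h"
+#include "opentelemetry/sdk/trace/tracer_provider_factory.h"
+#include "opentelemetry/trace/provider.h"
+
+namespace trace_api      = opentelemetry::trace;
+namespace trace_sdk      = opentelemetry::sdk::trace;
+namespace trace_exporter = opentelemetry::exporter::trace;
+
+int main()
+{
+  // Exporter -> simple (synchronous) processor -> tracer provider.
+  auto exporter  = trace_exporter::OStreamSpanExporterFactory::Create();
+  auto processor = trace_sdk::SimpleSpanProcessorFactory::Create(std::move(exporter));
+  std::shared_ptr<trace_api::TracerProvider> provider =
+      trace_sdk::TracerProviderFactory::Create(std::move(processor));
+
+  // Install the provider globally so instrumented libraries can find it.
+  trace_api::Provider::SetTracerProvider(provider);
+
+  // Instrumented code: create and end a span, which the exporter prints to stdout.
+  auto tracer = provider->GetTracer("example_library", "1.0.0");
+  auto span   = tracer->StartSpan("do_work");
+  span->End();
+  return 0;
+}
+```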
+ +[Maintainers](https://github.com/open-telemetry/community/blob/main/community-membership.md#maintainer) +([@open-telemetry/cpp-maintainers](https://github.com/orgs/open-telemetry/teams/cpp-maintainers)): + +* [Ehsan Saei](https://github.com/esigo) +* [Lalit Kumar Bhasin](https://github.com/lalitb), Microsoft +* [Marc Alff](https://github.com/marcalff), Oracle +* [Tom Tan](https://github.com/ThomsonTan), Microsoft + +[Approvers](https://github.com/open-telemetry/community/blob/main/community-membership.md#approver) +([@open-telemetry/cpp-approvers](https://github.com/orgs/open-telemetry/teams/cpp-approvers)): + +* [Doug Barker](https://github.com/dbarker) +* [Josh Suereth](https://github.com/jsuereth), Google +* [Pranav Sharma](https://github.com/psx95), Google +* [WenTao Ou](https://github.com/owent), Tencent + +[Emeritus +Maintainer/Approver/Triager](https://github.com/open-telemetry/community/blob/main/community-membership.md#emeritus-maintainerapprovertriager): + +* [Alolita Sharma](https://github.com/alolita) +* [Emil Mikulic](https://github.com/g-easy) +* [Jodee Varney](https://github.com/jodeev) +* [Johannes Tax](https://github.com/pyohannes) +* [Max Golovanov](https://github.com/maxgolov) +* [Reiley Yang](https://github.com/reyang) +* [Ryan Burn](https://github.com/rnburn) + +### Thanks to all the people who have contributed + +[![contributors](https://contributors-img.web.app/image?repo=open-telemetry/opentelemetry-cpp)](https://github.com/open-telemetry/opentelemetry-cpp/graphs/contributors) + +## Release Schedule + +See the [release +notes](https://github.com/open-telemetry/opentelemetry-cpp/releases) for +existing releases. + +See the [project +milestones](https://github.com/open-telemetry/opentelemetry-cpp/milestones) for +details on upcoming releases. The dates and features described in issues and +milestones are estimates, and subject to change. diff --git a/ext/opentelemetry-cpp-1.21.0/RELEASING.md b/ext/opentelemetry-cpp-1.21.0/RELEASING.md new file mode 100644 index 000000000..852332e82 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/RELEASING.md @@ -0,0 +1,96 @@ +# Release Process + +## Pre Release + +1: Upgrade to latest [dependencies](docs/maintaining-dependencies.md) +if required. + +2: Make sure all relevant changes for this release are included under +`Unreleased` section in `CHANGELOG.md` and are in language that non-contributors +to the project can understand. + +3: Run the pre-release script. It creates a branch `pre_release_` and +updates `CHANGELOG.md` with the ``: + +```sh +./buildscripts/pre_release.sh -t +``` + +4: Verify that CHANGELOG.md is updated properly: + +```sh +git diff main +``` + +5: Push the changes to upstream and create a Pull Request on GitHub. Be sure to +include the curated changes from the [Changelog](./CHANGELOG.md) in the +description. + +## Tag + +Once the above Pull Request has been approved and merged it is time to tag the +merged commit. + +***IMPORTANT***: It is critical you use the same tag that you used in the +Pre-Release step! Failure to do so will leave things in a broken state. 
+
+1: Note down the commit hash of the master branch after the above PR is
+merged: ``
+
+```sh
+git show -s --format=%H
+```
+
+2: Create a GitHub tag on this commit hash:
+
+```sh
+git tag -a "" -s -m "Version " ""
+```
+
+3: Push the tag to the upstream remote
+
+```sh
+git push upstream
+```
+
+## Versioning
+
+Once the tag is created, it's time to use that tag for runtime versioning.
+
+1: Create a new branch for updating version information in
+`./sdk/src/version.cc`.
+
+```sh
+git checkout -b update_version_${tag} master
+```
+
+2: Run the pre-commit script to update the version:
+
+```sh
+./buildscripts/pre-commit
+```
+
+3: Check if any changes made since the last release broke ABI compatibility. If yes,
+update `OPENTELEMETRY_ABI_VERSION_NO` in
+[version.h](api/include/opentelemetry/version.h).
+
+4: Push the changes to upstream and create a Pull Request on GitHub.
+
+5: Once the changes are merged, move the tag created earlier to the new commit hash
+from step 4.
+
+```sh
+git tag -f
+git push --tags --force
+```
+
+## Release
+
+Finally, create a Release for the new `` on GitHub. The release body
+should include all the release notes from the Changelog for this release.
+
+## Post Release
+
+Update the OpenTelemetry.io document
+[here](https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/instrumentation/cpp)
+by sending a Pull Request.
diff --git a/ext/opentelemetry-cpp-1.21.0/Versioning.md b/ext/opentelemetry-cpp-1.21.0/Versioning.md
new file mode 100644
index 000000000..b6cbe490e
--- /dev/null
+++ b/ext/opentelemetry-cpp-1.21.0/Versioning.md
@@ -0,0 +1,107 @@
+# Versioning
+
+This document describes the versioning policy for this repository.
+
+## Goals
+
+### API and SDK Compatibility
+
+Once the API for a given signal (spans, logs, metrics, baggage) has been
+officially released, that API module will function with any SDK that has the
+same MAJOR version and equal or greater MINOR or PATCH version. For example,
+an application compiled with API v1.1 is compatible with SDK v1.1.2, v1.2.0, etc.
+
+For example, libraries that are instrumented with `opentelemetry 1.0.1` will
+function in applications using `opentelemetry 1.11.33` or `opentelemetry 1.3.4`,
+but may not work in applications using `opentelemetry 2.0.0`.
+
+### ABI Stability
+
+Refer to the [ABI Policy](./docs/abi-policy.md) for more details. To summarise:
+
+* The API is header only, and uses ABI compliant interfaces. However, ABI
+  stability is not guaranteed for the SDK.
+* In case of ABI breaking changes, a new `inline namespace` version will be
+  introduced, and existing linked applications can continue using the older
+  version unless they relink with the newer version.
+
+## Release Policy
+
+* Release versions will follow [SemVer 2.0](https://semver.org/).
+* Only a single source package containing the API, SDK, and the exporters
+  required by the specification will be released. All these components are
+  always versioned and released together. For example, any changes in one of the
+  exporters would result in a version update of the entire source package even
+  though there are no changes in the API, SDK and other exporters.
+* Experimental releases: New (unstable) telemetry signals and features will be
+  introduced behind a feature flag protected by a preprocessor macro.
+
+  ```cpp
+  #ifdef FEATURE_FLAG
+
+  #endif
+  ```
+
+  As we deliver the package in source form, and the user is responsible for
+  building it for their platform, the user must be aware of these feature flags
+  (documented in the [CHANGELOG.md](CHANGELOG.md) file). The user must enable
+  them explicitly through their build system (CMake, Bazel or others) to use any
+  preview features.
+
+  The guidelines for creating feature flags are:
+
+  * Naming:
+    * `ENABLE__PREVIEW` : For an experimental release of a signal's API/SDK,
+      e.g. `METRICS_PREVIEW`, `LOGS_PREVIEW`,
+    * `ENABLE___PREVIEW` : For an experimental release of
+      any feature within a stable signal. For example, `TRACING_JAEGER_PREVIEW` to
+      release the experimental Jaeger exporter for tracing.
+  * Cleanup: It is good practice to keep feature flags as short-lived as
+    possible, and it is also important to keep their number low. They should be
+    used such that it is easy to remove/clean them up once the experimental
+    feature is stable.
+
+* New signals will be stabilized via a **minor version bump**, and are not
+  allowed to break existing stable interfaces. Feature flags will be removed
+  once we have a stable implementation for the signal.
+
+* As an exception, small experimental features in otherwise stable
+  signals/components may not necessarily be released under a feature flag. These
+  would be flagged as experimental by adding a `NOTE` in its header file -
+  either at the beginning of the file, or as the comment for the experimental API
+  methods. Also, if the complete header is experimental, it would be prefixed as
+  `experimental_`. As an example, the semantic conventions for the trace signal
+  are experimental at the time of writing and are within
+  `experimental_semantic_conventions.h`
+
+* Code under the "*::detail" namespace implements internal details, and is NOT
+  part of the public interface. Also, any API not documented in the [public
+  documentation](https://opentelemetry-cpp.readthedocs.io/en/latest/) is NOT
+  part of the public interface.
+
+* GitHub releases will be made for all released versions.
+
+## Example Versioning Lifecycle
+
+Purely for illustration purposes, not intended to represent actual releases:
+
+* v0.0.1 release:
+  * Contains experimental API and SDK of trace (without feature flag)
+  * No API and SDK of logging and metrics available
+* v1.0.0-rc1 release:
+  * Pre-release, no API/ABI guarantees, but more stable than alpha/beta.
+  * Contains pre-release API and SDK of trace, baggage and resource
+  * experimental metrics and logging API/SDK behind feature flag
+* v1.0.0: ( with traces )
+  * Contains stable API and SDK of trace, baggage and resource
+  * experimental metrics and logging API/SDK behind feature flag
+* v1.5.0 release (with metrics)
+  * Contains stable API and SDK of metrics, trace, baggage, resource.
+  * experimental logging API/SDK behind feature flag
+* v1.10.0 release (with logging)
+  * Contains stable API and SDK of logging, metrics, trace, baggage, resource.
+
+### Before moving to version 1.0.0
+
+* Major version zero (0.y.z) is for initial development. Anything MAY change at
+  any time. The public API SHOULD NOT be considered stable.
diff --git a/ext/opentelemetry-cpp-1.21.0/WORKSPACE b/ext/opentelemetry-cpp-1.21.0/WORKSPACE
new file mode 100644
index 000000000..a52788ece
--- /dev/null
+++ b/ext/opentelemetry-cpp-1.21.0/WORKSPACE
@@ -0,0 +1,23 @@
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+workspace(name = "io_opentelemetry_cpp")
+
+# Load our direct dependencies.
+load("//bazel:repository.bzl", "opentelemetry_cpp_deps") + +opentelemetry_cpp_deps() + +load("//bazel:extra_deps.bzl", "opentelemetry_extra_deps") + +opentelemetry_extra_deps() + +# Load gRPC dependencies after load. +load("@com_github_grpc_grpc//bazel:grpc_deps.bzl", "grpc_deps") + +grpc_deps() + +# Load extra gRPC dependencies due to https://github.com/grpc/grpc/issues/20511 +load("@com_github_grpc_grpc//bazel:grpc_extra_deps.bzl", "grpc_extra_deps") + +grpc_extra_deps() diff --git a/ext/opentelemetry-cpp-1.21.0/WORKSPACE.bzlmod b/ext/opentelemetry-cpp-1.21.0/WORKSPACE.bzlmod new file mode 100644 index 000000000..0db1b0462 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/WORKSPACE.bzlmod @@ -0,0 +1,4 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +# Disables the default WORKSPACE when using bzlmod diff --git a/ext/opentelemetry-cpp-1.21.0/api/BUILD b/ext/opentelemetry-cpp-1.21.0/api/BUILD new file mode 100644 index 000000000..fb3139356 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/BUILD @@ -0,0 +1,81 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +load("@bazel_skylib//rules:common_settings.bzl", "bool_flag", "int_flag", "string_flag") + +package(default_visibility = ["//visibility:public"]) + +CPP_STDLIBS = [ + "none", + "best", + "2014", + "2017", + "2020", + "2023", +] + +string_flag( + name = "with_cxx_stdlib", + build_setting_default = "best", + values = CPP_STDLIBS, +) + +cc_library( + name = "api", + hdrs = glob(["include/**/*.h"]), + defines = select({ + ":set_cxx_stdlib_none": [], + ### automatic selection + ":set_cxx_stdlib_best": ["OPENTELEMETRY_STL_VERSION=(__cplusplus/100)"], + # See https://learn.microsoft.com/en-us/cpp/build/reference/zc-cplusplus + ":set_cxx_stdlib_best_and_msvc": ["OPENTELEMETRY_STL_VERSION=(_MSVC_LANG/100)"], + ### manual selection + ":set_cxx_stdlib_2014": ["OPENTELEMETRY_STL_VERSION=2014"], + ":set_cxx_stdlib_2017": ["OPENTELEMETRY_STL_VERSION=2017"], + ":set_cxx_stdlib_2020": ["OPENTELEMETRY_STL_VERSION=2020"], + ":set_cxx_stdlib_2023": ["OPENTELEMETRY_STL_VERSION=2023"], + "//conditions:default": [], + }) + select({ + ":abi_version_no_1": ["OPENTELEMETRY_ABI_VERSION_NO=1"], + ":abi_version_no_2": ["OPENTELEMETRY_ABI_VERSION_NO=2"], + }), + strip_include_prefix = "include", + tags = ["api"], + deps = [ + "@com_google_absl//absl/base", + "@com_google_absl//absl/strings", + "@com_google_absl//absl/types:variant", + ], +) + +[config_setting( + name = "set_cxx_stdlib_%s" % v, + flag_values = {":with_cxx_stdlib": v}, +) for v in CPP_STDLIBS] + +config_setting( + name = "set_cxx_stdlib_best_and_msvc", + constraint_values = ["@bazel_tools//tools/cpp:msvc"], + flag_values = {":with_cxx_stdlib": "best"}, +) + +bool_flag( + name = "with_abseil", + build_setting_default = False, + deprecation = "The value of this flag is ignored. Bazel builds always depend on Abseil for its pre-adopted `std::` types. 
You should remove this flag from your build command.", +) + +int_flag( + name = "abi_version_no", + build_setting_default = 1, +) + +config_setting( + name = "abi_version_no_1", + flag_values = {":abi_version_no": "1"}, +) + +config_setting( + name = "abi_version_no_2", + flag_values = {":abi_version_no": "2"}, +) diff --git a/ext/opentelemetry-cpp-1.21.0/api/CMakeLists.txt b/ext/opentelemetry-cpp-1.21.0/api/CMakeLists.txt new file mode 100644 index 000000000..0707464b3 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/CMakeLists.txt @@ -0,0 +1,140 @@ +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +add_library(opentelemetry_api INTERFACE) +target_include_directories( + opentelemetry_api + INTERFACE "$" + "$") + +set_target_properties(opentelemetry_api PROPERTIES EXPORT_NAME api) + +otel_add_component( + COMPONENT + api + TARGETS + opentelemetry_api + FILES_DIRECTORY + "include/opentelemetry" + FILES_DESTINATION + "include" + FILES_MATCHING + PATTERN + "*.h") + +if(OPENTELEMETRY_INSTALL) + unset(TARGET_DEPS) +endif() + +if(BUILD_TESTING) + add_subdirectory(test) +endif() + +if(WITH_NO_DEPRECATED_CODE) + target_compile_definitions(opentelemetry_api + INTERFACE OPENTELEMETRY_NO_DEPRECATED_CODE) +endif() + +if(WITH_STL STREQUAL "OFF") + message(STATUS "Building WITH_STL=OFF") +elseif(WITH_STL STREQUAL "CXX11") + message(STATUS "Building WITH_STL=CXX11") + target_compile_definitions(opentelemetry_api + INTERFACE OPENTELEMETRY_STL_VERSION=2011) +elseif(WITH_STL STREQUAL "CXX14") + message(STATUS "Building WITH_STL=CXX14") + target_compile_definitions(opentelemetry_api + INTERFACE OPENTELEMETRY_STL_VERSION=2014) +elseif(WITH_STL STREQUAL "CXX17") + message(STATUS "Building WITH_STL=CXX17") + target_compile_definitions(opentelemetry_api + INTERFACE OPENTELEMETRY_STL_VERSION=2017) +elseif(WITH_STL STREQUAL "CXX20") + message(STATUS "Building WITH_STL=CXX20") + target_compile_definitions(opentelemetry_api + INTERFACE OPENTELEMETRY_STL_VERSION=2020) +elseif(WITH_STL STREQUAL "CXX23") + message(STATUS "Building WITH_STL=CXX23") + target_compile_definitions(opentelemetry_api + INTERFACE OPENTELEMETRY_STL_VERSION=2023) +elseif(WITH_STL STREQUAL "ON") + message(STATUS "Building WITH_STL=ON") + # "ON" corresponds to "CXX23" at this time. + target_compile_definitions(opentelemetry_api + INTERFACE OPENTELEMETRY_STL_VERSION=2023) +else() + message( + FATAL_ERROR "WITH_STL must be ON, OFF, CXX11, CXX14, CXX17, CXX20 or CXX23") +endif() + +if(WITH_GSL) + target_compile_definitions(opentelemetry_api INTERFACE HAVE_GSL) + + # Guidelines Support Library path. Used if we are not on not get C++20. 
+ # + find_package(Microsoft.GSL QUIET) + if(TARGET Microsoft.GSL::GSL) + target_link_libraries(opentelemetry_api INTERFACE Microsoft.GSL::GSL) + list(APPEND TARGET_DEPS "gsl") + else() + set(GSL_DIR third_party/ms-gsl) + target_include_directories( + opentelemetry_api INTERFACE "$") + endif() +endif() + +if(WITH_NO_GETENV) + target_compile_definitions(opentelemetry_api INTERFACE NO_GETENV) +endif() + +if(WIN32) + if(WITH_ETW) + target_compile_definitions(opentelemetry_api INTERFACE HAVE_MSGPACK) + endif() +endif() + +if(WITH_ASYNC_EXPORT_PREVIEW) + target_compile_definitions(opentelemetry_api INTERFACE ENABLE_ASYNC_EXPORT) +endif() + +target_compile_definitions( + opentelemetry_api + INTERFACE OPENTELEMETRY_ABI_VERSION_NO=${OPENTELEMETRY_ABI_VERSION_NO}) + +if(WITH_OTLP_RETRY_PREVIEW) + target_compile_definitions(opentelemetry_api + INTERFACE ENABLE_OTLP_RETRY_PREVIEW) +endif() + +if(WITH_OTLP_GRPC_SSL_MTLS_PREVIEW) + target_compile_definitions(opentelemetry_api + INTERFACE ENABLE_OTLP_GRPC_SSL_MTLS_PREVIEW) +endif() + +if(WITH_METRICS_EXEMPLAR_PREVIEW) + target_compile_definitions(opentelemetry_api + INTERFACE ENABLE_METRICS_EXEMPLAR_PREVIEW) +endif() + +if(WITH_THREAD_INSTRUMENTATION_PREVIEW) + target_compile_definitions(opentelemetry_api + INTERFACE ENABLE_THREAD_INSTRUMENTATION_PREVIEW) +endif() + +if(WITH_OTLP_HTTP_COMPRESSION) + target_compile_definitions(opentelemetry_api + INTERFACE ENABLE_OTLP_COMPRESSION_PREVIEW) +endif() + +if(APPLE) + target_link_libraries(opentelemetry_api INTERFACE "-framework CoreFoundation") +endif() + +include(${PROJECT_SOURCE_DIR}/cmake/pkgconfig.cmake) + +if(OPENTELEMETRY_INSTALL) + opentelemetry_add_pkgconfig( + api "OpenTelemetry API" + "A header-only library to support instrumentation with OpenTelemetry." + "${TARGET_DEPS}") +endif() diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/baggage/baggage.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/baggage/baggage.h new file mode 100644 index 000000000..6c799cda2 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/baggage/baggage.h @@ -0,0 +1,299 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include + +#include "opentelemetry/common/kv_properties.h" +#include "opentelemetry/common/macros.h" +#include "opentelemetry/nostd/shared_ptr.h" +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE + +namespace baggage +{ + +class OPENTELEMETRY_EXPORT Baggage +{ +public: + static constexpr size_t kMaxKeyValuePairs = 180; + static constexpr size_t kMaxKeyValueSize = 4096; + static constexpr size_t kMaxSize = 8192; + static constexpr char kKeyValueSeparator = '='; + static constexpr char kMembersSeparator = ','; + static constexpr char kMetadataSeparator = ';'; + + Baggage() noexcept : kv_properties_(new common::KeyValueProperties()) {} + Baggage(size_t size) noexcept : kv_properties_(new common::KeyValueProperties(size)) {} + + template + Baggage(const T &keys_and_values) noexcept + : kv_properties_(new common::KeyValueProperties(keys_and_values)) + {} + + OPENTELEMETRY_API_SINGLETON static nostd::shared_ptr GetDefault() + { + static nostd::shared_ptr baggage{new Baggage()}; + return baggage; + } + + /* Get value for key in the baggage + @returns true if key is found, false otherwise + */ + bool GetValue(nostd::string_view key, std::string &value) const noexcept + { + return kv_properties_->GetValue(key, value); + } + + /* Returns shared_ptr of new 
baggage object which contains new key-value pair. If key or value is + invalid, copy of current baggage is returned + */ + nostd::shared_ptr Set(const nostd::string_view &key, + const nostd::string_view &value) noexcept + { + + nostd::shared_ptr baggage(new Baggage(kv_properties_->Size() + 1)); + const bool valid_kv = IsValidKey(key) && IsValidValue(value); + + if (valid_kv) + { + baggage->kv_properties_->AddEntry(key, value); + } + + // add rest of the fields. + kv_properties_->GetAllEntries( + [&baggage, &key, &valid_kv](nostd::string_view e_key, nostd::string_view e_value) { + // if key or value was not valid, add all the entries. Add only remaining entries + // otherwise. + if (!valid_kv || key != e_key) + { + baggage->kv_properties_->AddEntry(e_key, e_value); + } + + return true; + }); + + return baggage; + } + + // @return all key-values entries by repeatedly invoking the function reference passed as argument + // for each entry + bool GetAllEntries( + nostd::function_ref callback) const noexcept + { + return kv_properties_->GetAllEntries(callback); + } + + // delete key from the baggage if it exists. Returns shared_ptr of new baggage object. + // if key does not exist, copy of current baggage is returned. + // Validity of key is not checked as invalid keys should never be populated in baggage in the + // first place. + nostd::shared_ptr Delete(nostd::string_view key) noexcept + { + // keeping size of baggage same as key might not be found in it + nostd::shared_ptr baggage(new Baggage(kv_properties_->Size())); + kv_properties_->GetAllEntries( + [&baggage, &key](nostd::string_view e_key, nostd::string_view e_value) { + if (key != e_key) + baggage->kv_properties_->AddEntry(e_key, e_value); + return true; + }); + return baggage; + } + + // Returns shared_ptr of baggage after extracting key-value pairs from header + static nostd::shared_ptr FromHeader(nostd::string_view header) noexcept + { + if (header.size() > kMaxSize) + { + // header size exceeds maximum threshold, return empty baggage + return GetDefault(); + } + + common::KeyValueStringTokenizer kv_str_tokenizer(header); + size_t cnt = kv_str_tokenizer.NumTokens(); // upper bound on number of kv pairs + if (cnt > kMaxKeyValuePairs) + { + cnt = kMaxKeyValuePairs; + } + + nostd::shared_ptr baggage(new Baggage(cnt)); + bool kv_valid; + nostd::string_view key, value; + + while (kv_str_tokenizer.next(kv_valid, key, value) && baggage->kv_properties_->Size() < cnt) + { + if (!kv_valid || (key.size() + value.size() > kMaxKeyValueSize)) + { + // if kv pair is not valid, skip it + continue; + } + + // NOTE : metadata is kept as part of value only as it does not have any semantic meaning. + // but, we need to extract it (else Decode on value will return error) + nostd::string_view metadata; + auto metadata_separator = value.find(kMetadataSeparator); + if (metadata_separator != std::string::npos) + { + metadata = value.substr(metadata_separator); + value = value.substr(0, metadata_separator); + } + + bool err = 0; + auto key_str = UrlDecode(common::StringUtil::Trim(key), err); + auto value_str = UrlDecode(common::StringUtil::Trim(value), err); + + if (err == false && IsValidKey(key_str) && IsValidValue(value_str)) + { + if (!metadata.empty()) + { + value_str.append(metadata.data(), metadata.size()); + } + baggage->kv_properties_->AddEntry(key_str, value_str); + } + } + + return baggage; + } + + // Creates string from baggage object. 
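+  // (Illustrative note, not part of the upstream header.) Given the entries
+  // {"k1": "v1"} and {"k2": "v 2;meta"}, the serialized header is
+  //   k1=v1,k2=v+2;meta
+  // i.e. members are joined with ',', key and value with '=', keys and values
+  // are URL-encoded (a space becomes '+'), and any ";metadata" suffix is
+  // appended without encoding.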
+ std::string ToHeader() const noexcept + { + std::string header_s; + bool first = true; + kv_properties_->GetAllEntries([&](nostd::string_view key, nostd::string_view value) { + if (!first) + { + header_s.push_back(kMembersSeparator); + } + else + { + first = false; + } + header_s.append(UrlEncode(key)); + header_s.push_back(kKeyValueSeparator); + + // extracting metadata from value. We do not encode metadata + auto metadata_separator = value.find(kMetadataSeparator); + if (metadata_separator != std::string::npos) + { + header_s.append(UrlEncode(value.substr(0, metadata_separator))); + auto metadata = value.substr(metadata_separator); + header_s.append(std::string(metadata.data(), metadata.size())); + } + else + { + header_s.append(UrlEncode(value)); + } + return true; + }); + return header_s; + } + +private: + static bool IsPrintableString(nostd::string_view str) + { + for (const auto ch : str) + { + if (ch < ' ' || ch > '~') + { + return false; + } + } + + return true; + } + + static bool IsValidKey(nostd::string_view key) { return key.size() && IsPrintableString(key); } + + static bool IsValidValue(nostd::string_view value) { return IsPrintableString(value); } + + // Uri encode key value pairs before injecting into header + // Implementation inspired from : https://golang.org/src/net/url/url.go?s=7851:7884#L264 + static std::string UrlEncode(nostd::string_view str) + { + auto to_hex = [](char c) -> char { + static const char *hex = "0123456789ABCDEF"; + return hex[c & 15]; + }; + + std::string ret; + + for (auto c : str) + { + if (std::isalnum(c) || c == '-' || c == '_' || c == '.' || c == '~') + { + ret.push_back(c); + } + else if (c == ' ') + { + ret.push_back('+'); + } + else + { + ret.push_back('%'); + ret.push_back(to_hex(c >> 4)); + ret.push_back(to_hex(c & 15)); + } + } + + return ret; + } + + // Uri decode key value pairs after extracting from header + static std::string UrlDecode(nostd::string_view str, bool &err) + { + auto IsHex = [](char c) { + return std::isdigit(c) || (c >= 'A' && c <= 'F') || (c >= 'a' && c <= 'f'); + }; + + auto from_hex = [](char c) -> char { + // c - '0' produces integer type which could trigger error/warning when casting to char, + // but the cast is safe here. + return static_cast(std::isdigit(c) ? c - '0' : std::toupper(c) - 'A' + 10); + }; + + std::string ret; + + for (size_t i = 0; i < str.size(); i++) + { + if (str[i] == '%') + { + if (i + 2 >= str.size() || !IsHex(str[i + 1]) || !IsHex(str[i + 2])) + { + err = 1; + return ""; + } + ret.push_back(from_hex(str[i + 1]) << 4 | from_hex(str[i + 2])); + i += 2; + } + else if (str[i] == '+') + { + ret.push_back(' '); + } + else if (std::isalnum(str[i]) || str[i] == '-' || str[i] == '_' || str[i] == '.' || + str[i] == '~') + { + ret.push_back(str[i]); + } + else + { + err = 1; + return ""; + } + } + + return ret; + } + +private: + // Store entries in a C-style array to avoid using std::array or std::vector. 
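+  // (Illustrative note, not part of the upstream header.) The API headers use
+  // the nostd:: wrappers rather than std:: containers here so that the
+  // header-only API does not expose standard-library types across the ABI
+  // boundary; see the ABI policy referenced in Versioning.md.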
+ nostd::unique_ptr kv_properties_; +}; + +} // namespace baggage + +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/baggage/baggage_context.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/baggage/baggage_context.h new file mode 100644 index 000000000..a0016353b --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/baggage/baggage_context.h @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "opentelemetry/baggage/baggage.h" +#include "opentelemetry/context/context.h" +#include "opentelemetry/nostd/shared_ptr.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE + +namespace baggage +{ + +static const std::string kBaggageHeader = "baggage"; + +inline nostd::shared_ptr GetBaggage(const context::Context &context) noexcept +{ + context::ContextValue context_value = context.GetValue(kBaggageHeader); + if (nostd::holds_alternative>(context_value)) + { + return nostd::get>(context_value); + } + static nostd::shared_ptr empty_baggage{new Baggage()}; + return empty_baggage; +} + +inline context::Context SetBaggage(context::Context &context, + const nostd::shared_ptr &baggage) noexcept +{ + return context.SetValue(kBaggageHeader, baggage); +} + +} // namespace baggage +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/baggage/propagation/baggage_propagator.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/baggage/propagation/baggage_propagator.h new file mode 100644 index 000000000..d75409ed6 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/baggage/propagation/baggage_propagator.h @@ -0,0 +1,60 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include + +#include "opentelemetry/baggage/baggage.h" +#include "opentelemetry/baggage/baggage_context.h" +#include "opentelemetry/context/context.h" +#include "opentelemetry/context/propagation/text_map_propagator.h" +#include "opentelemetry/nostd/function_ref.h" +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace baggage +{ +namespace propagation +{ + +class BaggagePropagator : public context::propagation::TextMapPropagator +{ +public: + void Inject(context::propagation::TextMapCarrier &carrier, + const context::Context &context) noexcept override + { + auto baggage = baggage::GetBaggage(context); + auto header = baggage->ToHeader(); + if (header.size()) + { + carrier.Set(kBaggageHeader, header); + } + } + + context::Context Extract(const context::propagation::TextMapCarrier &carrier, + context::Context &context) noexcept override + { + nostd::string_view baggage_str = carrier.Get(baggage::kBaggageHeader); + auto baggage = baggage::Baggage::FromHeader(baggage_str); + + if (baggage->ToHeader().size()) + { + return baggage::SetBaggage(context, baggage); + } + else + { + return context; + } + } + + bool Fields(nostd::function_ref callback) const noexcept override + { + return callback(kBaggageHeader); + } +}; +} // namespace propagation +} // namespace baggage +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/attribute_value.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/attribute_value.h new file mode 100644 index 000000000..af4cc83d4 --- /dev/null +++ 
b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/attribute_value.h @@ -0,0 +1,82 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include + +#include "opentelemetry/nostd/span.h" +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/nostd/variant.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace common +{ +/// OpenTelemetry signals can be enriched by adding attributes. The +/// \c AttributeValue type is defined as a variant of all attribute value +/// types the OpenTelemetry C++ API supports. +/// +/// The following attribute value types are supported by the OpenTelemetry +/// specification: +/// - Primitive types: string, boolean, double precision floating point +/// (IEEE 754-1985) or signed 64 bit integer. +/// - Homogenous arrays of primitive type values. +/// +/// \warning +/// \parblock The OpenTelemetry C++ API currently supports several attribute +/// value types that are not covered by the OpenTelemetry specification: +/// - \c uint64_t +/// - \c nostd::span +/// - \c nostd::span +/// +/// Those types are reserved for future use and currently should not be +/// used. There are no guarantees around how those values are handled by +/// exporters. +/// \endparblock +using AttributeValue = + nostd::variant, + nostd::span, + nostd::span, + nostd::span, + nostd::span, + nostd::span, + // Not currently supported by the specification, but reserved for future use. + // Added to provide support for all primitive C++ types. + uint64_t, + // Not currently supported by the specification, but reserved for future use. + // Added to provide support for all primitive C++ types. + nostd::span, + // Not currently supported by the specification, but reserved for future use. + // See https://github.com/open-telemetry/opentelemetry-specification/issues/780 + nostd::span>; + +enum AttributeType +{ + kTypeBool, + kTypeInt, + kTypeInt64, + kTypeUInt, + kTypeDouble, + kTypeCString, + kTypeString, + kTypeSpanBool, + kTypeSpanInt, + kTypeSpanInt64, + kTypeSpanUInt, + kTypeSpanDouble, + kTypeSpanString, + kTypeUInt64, + kTypeSpanUInt64, + kTypeSpanByte +}; + +} // namespace common +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/key_value_iterable.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/key_value_iterable.h new file mode 100644 index 000000000..9d43e1571 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/key_value_iterable.h @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "opentelemetry/common/attribute_value.h" +#include "opentelemetry/nostd/function_ref.h" +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace common +{ +/** + * Supports internal iteration over a collection of key-value pairs. + */ +class KeyValueIterable +{ +public: + virtual ~KeyValueIterable() = default; + + /** + * Iterate over key-value pairs + * @param callback a callback to invoke for each key-value. If the callback returns false, + * the iteration is aborted. 
+ * @return true if every key-value pair was iterated over + */ + virtual bool ForEachKeyValue(nostd::function_ref + callback) const noexcept = 0; + + /** + * @return the number of key-value pairs + */ + virtual size_t size() const noexcept = 0; +}; + +/** + * Supports internal iteration over a collection of key-value pairs. + */ +class NoopKeyValueIterable : public KeyValueIterable +{ +public: + ~NoopKeyValueIterable() override = default; + + /** + * Iterate over key-value pairs + * @param callback a callback to invoke for each key-value. If the callback returns false, + * the iteration is aborted. + * @return true if every key-value pair was iterated over + */ + bool ForEachKeyValue( + nostd::function_ref) const noexcept override + { + return true; + } + + /** + * @return the number of key-value pairs + */ + size_t size() const noexcept override { return 0; } +}; + +} // namespace common +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/key_value_iterable_view.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/key_value_iterable_view.h new file mode 100644 index 000000000..e22fb6f06 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/key_value_iterable_view.h @@ -0,0 +1,146 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "opentelemetry/common/attribute_value.h" +#include "opentelemetry/common/key_value_iterable.h" +#include "opentelemetry/nostd/function_ref.h" +#include "opentelemetry/nostd/span.h" +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/nostd/type_traits.h" +#include "opentelemetry/nostd/utility.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace common +{ +// NOTE - code within `detail` namespace implements internal details, and not part +// of the public interface. +namespace detail +{ +inline void take_key_value(nostd::string_view, common::AttributeValue) {} + +template +auto is_key_value_iterable_impl(T iterable) + -> decltype(take_key_value(std::begin(iterable)->first, std::begin(iterable)->second), + nostd::size(iterable), + std::true_type{}); + +std::false_type is_key_value_iterable_impl(...); + +template +struct is_key_value_iterable +{ + static const bool value = decltype(detail::is_key_value_iterable_impl(std::declval()))::value; +}; +} // namespace detail + +/** + * @brief Container for key-value pairs that can transform every value in it to one of types + * listed in common::AttributeValue. It may contain value types that are not directly map'able + * to primitive value types. In that case the `ForEachKeyValue` method acts as a transform to + * convert the value type to one listed under AtributeValue (bool, int32_t, int64_t, uint32_t, + * uint64_t, double, nostd::string_view, or arrays of primite types). For example, if UUID, + * GUID, or UTF-16 string type is passed as one of values stored inside this container, the + * container itself may provide a custom implementation of `ForEachKeyValue` to transform the + * 'non-standard' type to one of the standard types. 
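+ *
+ * (Illustrative usage, not part of the upstream header; assumes a std::map of
+ * std::string pairs, which satisfies detail::is_key_value_iterable.)
+ *
+ *   std::map<std::string, std::string> attrs{{"service.name", "demo"}};
+ *   auto view = common::MakeKeyValueIterableView(attrs);
+ *   view.ForEachKeyValue([](nostd::string_view k, common::AttributeValue v) {
+ *     return true;  // keep iterating
+ *   });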
+ */ +template +class KeyValueIterableView final : public KeyValueIterable +{ + +public: + explicit KeyValueIterableView(const T &container) noexcept : container_{&container} {} + + // KeyValueIterable + bool ForEachKeyValue(nostd::function_ref + callback) const noexcept override + { + auto iter = std::begin(*container_); + auto last = std::end(*container_); + for (; iter != last; ++iter) + { + if (!callback(iter->first, iter->second)) + { + return false; + } + } + return true; + } + + size_t size() const noexcept override { return nostd::size(*container_); } + +private: + const T *container_; +}; + +template ::value> * = nullptr> +KeyValueIterableView MakeKeyValueIterableView(const T &container) noexcept +{ + return KeyValueIterableView(container); +} + +/** + * Utility function to help to make a attribute view from initializer_list + * + * @param attributes + * @return nostd::span> + */ +inline static nostd::span> +MakeAttributes(std::initializer_list> + attributes) noexcept +{ + return nostd::span>{ + attributes.begin(), attributes.end()}; +} + +/** + * Utility function to help to make a attribute view from a span + * + * @param attributes + * @return nostd::span> + */ +inline static nostd::span> +MakeAttributes( + nostd::span> attributes) noexcept +{ + return attributes; +} + +/** + * Utility function to help to make a attribute view from a KeyValueIterable + * + * @param attributes + * @return common::KeyValueIterable + */ +inline static const common::KeyValueIterable &MakeAttributes( + const common::KeyValueIterable &attributes) noexcept +{ + return attributes; +} + +/** + * Utility function to help to make a attribute view from a key-value iterable object + * + * @param attributes + * @return nostd::span> + */ +template < + class ArgumentType, + nostd::enable_if_t::value> * = nullptr> +inline static common::KeyValueIterableView MakeAttributes( + const ArgumentType &arg) noexcept +{ + return common::KeyValueIterableView(arg); +} + +} // namespace common +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/kv_properties.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/kv_properties.h new file mode 100644 index 000000000..b5accab40 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/kv_properties.h @@ -0,0 +1,272 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "opentelemetry/common/key_value_iterable_view.h" +#include "opentelemetry/common/string_util.h" +#include "opentelemetry/nostd/function_ref.h" +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/nostd/unique_ptr.h" +#include "opentelemetry/version.h" + +#include +#include +#include + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace common +{ + +// Constructor parameter for KeyValueStringTokenizer +struct KeyValueStringTokenizerOptions +{ + char member_separator = ','; + char key_value_separator = '='; + bool ignore_empty_members = true; +}; + +// Tokenizer for key-value headers +class KeyValueStringTokenizer +{ +public: + KeyValueStringTokenizer( + nostd::string_view str, + const KeyValueStringTokenizerOptions &opts = KeyValueStringTokenizerOptions()) noexcept + : str_(str), opts_(opts), index_(0) + {} + + static nostd::string_view GetDefaultKeyOrValue() + { + static std::string default_str = ""; + return default_str; + } + + // Returns next key value in the string header + // @param valid_kv : if the found kv pair is valid or not + // @param key : key in kv 
pair + // @param key : value in kv pair + // @returns true if next kv pair was found, false otherwise. + bool next(bool &valid_kv, nostd::string_view &key, nostd::string_view &value) noexcept + { + valid_kv = true; + while (index_ < str_.size()) + { + bool is_empty_pair = false; + size_t end = str_.find(opts_.member_separator, index_); + if (end == std::string::npos) + { + end = str_.size() - 1; + } + else if (end == index_) // empty pair. do not update end + { + is_empty_pair = true; + } + else + { + end--; + } + + auto list_member = StringUtil::Trim(str_, index_, end); + if (list_member.size() == 0 || is_empty_pair) + { + // empty list member + index_ = end + 2 - is_empty_pair; + if (opts_.ignore_empty_members) + { + continue; + } + + valid_kv = true; + key = GetDefaultKeyOrValue(); + value = GetDefaultKeyOrValue(); + return true; + } + + auto key_end_pos = list_member.find(opts_.key_value_separator); + if (key_end_pos == std::string::npos) + { + // invalid member + valid_kv = false; + } + else + { + key = list_member.substr(0, key_end_pos); + value = list_member.substr(key_end_pos + 1); + } + + index_ = end + 2; + + return true; + } + + // no more entries remaining + return false; + } + + // Returns total number of tokens in header string + size_t NumTokens() const noexcept + { + size_t cnt = 0, begin = 0; + while (begin < str_.size()) + { + ++cnt; + size_t end = str_.find(opts_.member_separator, begin); + if (end == std::string::npos) + { + break; + } + + begin = end + 1; + } + + return cnt; + } + + // Resets the iterator + void reset() noexcept { index_ = 0; } + +private: + nostd::string_view str_; + KeyValueStringTokenizerOptions opts_; + size_t index_; +}; + +// Class to store fixed size array of key-value pairs of string type +class KeyValueProperties +{ + // Class to store key-value pairs of string types +public: + class Entry + { + public: + Entry() : key_(nullptr), value_(nullptr) {} + + // Copy constructor + Entry(const Entry ©) + { + key_ = CopyStringToPointer(copy.key_.get()); + value_ = CopyStringToPointer(copy.value_.get()); + } + + // Copy assignment operator + Entry &operator=(Entry &other) + { + key_ = CopyStringToPointer(other.key_.get()); + value_ = CopyStringToPointer(other.value_.get()); + return *this; + } + + // Move contructor and assignment operator + Entry(Entry &&other) = default; + Entry &operator=(Entry &&other) = default; + + // Creates an Entry for a given key-value pair. + Entry(nostd::string_view key, nostd::string_view value) + { + key_ = CopyStringToPointer(key); + value_ = CopyStringToPointer(value); + } + + // Gets the key associated with this entry. + nostd::string_view GetKey() const noexcept { return key_.get(); } + + // Gets the value associated with this entry. + nostd::string_view GetValue() const noexcept { return value_.get(); } + + // Sets the value for this entry. This overrides the previous value. + void SetValue(nostd::string_view value) noexcept { value_ = CopyStringToPointer(value); } + + private: + // Store key and value as raw char pointers to avoid using std::string. + nostd::unique_ptr key_; + nostd::unique_ptr value_; + + // Copies string into a buffer and returns a unique_ptr to the buffer. + // This is a workaround for the fact that memcpy doesn't accept a const destination. 
+ nostd::unique_ptr CopyStringToPointer(nostd::string_view str) + { + char *temp = new char[str.size() + 1]; + memcpy(temp, str.data(), str.size()); + temp[str.size()] = '\0'; + return nostd::unique_ptr(temp); + } + }; + + // Maintain the number of entries in entries_. + size_t num_entries_; + + // Max size of allocated array + size_t max_num_entries_; + + // Store entries in a C-style array to avoid using std::array or std::vector. + nostd::unique_ptr entries_; + +public: + // Create Key-value list of given size + // @param size : Size of list. + KeyValueProperties(size_t size) noexcept + : num_entries_(0), max_num_entries_(size), entries_(new Entry[size]) + {} + + // Create Empty Key-Value list + KeyValueProperties() noexcept : num_entries_(0), max_num_entries_(0), entries_(nullptr) {} + + template ::value>::type> + KeyValueProperties(const T &keys_and_values) noexcept + : num_entries_(0), + max_num_entries_(keys_and_values.size()), + entries_(new Entry[max_num_entries_]) + { + for (auto &e : keys_and_values) + { + Entry entry(e.first, e.second); + (entries_.get())[num_entries_++] = std::move(entry); + } + } + + // Adds new kv pair into kv properties + void AddEntry(nostd::string_view key, nostd::string_view value) noexcept + { + if (num_entries_ < max_num_entries_) + { + Entry entry(key, value); + (entries_.get())[num_entries_++] = std::move(entry); + } + } + + // Returns all kv pair entries + bool GetAllEntries( + nostd::function_ref callback) const noexcept + { + for (size_t i = 0; i < num_entries_; i++) + { + auto &entry = (entries_.get())[i]; + if (!callback(entry.GetKey(), entry.GetValue())) + { + return false; + } + } + return true; + } + + // Return value for key if exists, return false otherwise + bool GetValue(nostd::string_view key, std::string &value) const noexcept + { + for (size_t i = 0; i < num_entries_; i++) + { + auto &entry = (entries_.get())[i]; + if (entry.GetKey() == key) + { + const auto &entry_value = entry.GetValue(); + value = std::string(entry_value.data(), entry_value.size()); + return true; + } + } + return false; + } + + size_t Size() const noexcept { return num_entries_; } +}; +} // namespace common +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/macros.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/macros.h new file mode 100644 index 000000000..71d12a57b --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/macros.h @@ -0,0 +1,523 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +/* + OPENTELEMETRY_HAVE_BUILTIN&OPENTELEMETRY_HAVE_FEATURE + + Checks whether the compiler supports a Clang Feature Checking Macro, and if + so, checks whether it supports the provided builtin function "x" where x + is one of the functions noted in + https://clang.llvm.org/docs/LanguageExtensions.html + + Note: Use this macro to avoid an extra level of #ifdef __has_builtin check. 
+ http://releases.llvm.org/3.3/tools/clang/docs/LanguageExtensions.html +*/ +#if !defined(OPENTELEMETRY_HAVE_BUILTIN) +# ifdef __has_builtin +# define OPENTELEMETRY_HAVE_BUILTIN(x) __has_builtin(x) +# else +# define OPENTELEMETRY_HAVE_BUILTIN(x) 0 +# endif +#endif + +#if !defined(OPENTELEMETRY_HAVE_FEATURE) +# ifdef __has_feature +# define OPENTELEMETRY_HAVE_FEATURE(f) __has_feature(f) +# else +# define OPENTELEMETRY_HAVE_FEATURE(f) 0 +# endif +#endif + +/* + has feature + + OPENTELEMETRY_HAVE_ATTRIBUTE + + A function-like feature checking macro that is a wrapper around + `__has_attribute`, which is defined by GCC 5+ and Clang and evaluates to a + nonzero constant integer if the attribute is supported or 0 if not. + + It evaluates to zero if `__has_attribute` is not defined by the compiler. + + GCC: https://gcc.gnu.org/gcc-5/changes.html + Clang: https://clang.llvm.org/docs/LanguageExtensions.html +*/ +#if !defined(OPENTELEMETRY_HAVE_ATTRIBUTE) +# ifdef __has_attribute +# define OPENTELEMETRY_HAVE_ATTRIBUTE(x) __has_attribute(x) +# else +# define OPENTELEMETRY_HAVE_ATTRIBUTE(x) 0 +# endif +#endif + +/* + OPENTELEMETRY_HAVE_CPP_ATTRIBUTE + + A function-like feature checking macro that accepts C++11 style attributes. + It's a wrapper around `__has_cpp_attribute`, defined by ISO C++ SD-6 + (https://en.cppreference.com/w/cpp/experimental/feature_test). If we don't + find `__has_cpp_attribute`, will evaluate to 0. +*/ +#if !defined(OPENTELEMETRY_HAVE_CPP_ATTRIBUTE) +# if defined(__cplusplus) && defined(__has_cpp_attribute) +// NOTE: requiring __cplusplus above should not be necessary, but +// works around https://bugs.llvm.org/show_bug.cgi?id=23435. +# define OPENTELEMETRY_HAVE_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) +# else +# define OPENTELEMETRY_HAVE_CPP_ATTRIBUTE(x) 0 +# endif +#endif + +/* + Expected usage pattern: + + if OPENTELEMETRY_LIKELY_CONDITION (ptr != nullptr) + { + do_something_likely(); + } else { + do_something_unlikely(); + } + + This pattern works with gcc/clang and __builtin_expect(), + as well as with C++20. + It is unclear if __builtin_expect() will be deprecated + in favor of C++20 [[likely]] or not. + + OPENTELEMETRY_LIKELY_CONDITION is preferred over OPENTELEMETRY_LIKELY, + to be revisited when C++20 is required. 
+*/ + +#if !defined(OPENTELEMETRY_LIKELY_CONDITION) && defined(__cplusplus) +// Only use likely with C++20 +# if __cplusplus >= 202002L +// GCC 9 has likely attribute but do not support declare it at the beginning of statement +# if defined(__has_cpp_attribute) && (defined(__clang__) || !defined(__GNUC__) || __GNUC__ > 9) +# if __has_cpp_attribute(likely) +# define OPENTELEMETRY_LIKELY_CONDITION(C) (C) [[likely]] +# endif +# endif +# endif +#endif +#if !defined(OPENTELEMETRY_LIKELY_CONDITION) && (defined(__clang__) || defined(__GNUC__)) +// Only use if supported by the compiler +# define OPENTELEMETRY_LIKELY_CONDITION(C) (__builtin_expect(!!(C), true)) +#endif +#ifndef OPENTELEMETRY_LIKELY_CONDITION +// Do not use likely annotations +# define OPENTELEMETRY_LIKELY_CONDITION(C) (C) +#endif + +#if !defined(OPENTELEMETRY_UNLIKELY_CONDITION) && defined(__cplusplus) +// Only use unlikely with C++20 +# if __cplusplus >= 202002L +// GCC 9 has unlikely attribute but do not support declare it at the beginning of statement +# if defined(__has_cpp_attribute) && (defined(__clang__) || !defined(__GNUC__) || __GNUC__ > 9) +# if __has_cpp_attribute(unlikely) +# define OPENTELEMETRY_UNLIKELY_CONDITION(C) (C) [[unlikely]] +# endif +# endif +# endif +#endif +#if !defined(OPENTELEMETRY_UNLIKELY_CONDITION) && (defined(__clang__) || defined(__GNUC__)) +// Only use if supported by the compiler +# define OPENTELEMETRY_UNLIKELY_CONDITION(C) (__builtin_expect(!!(C), false)) +#endif +#ifndef OPENTELEMETRY_UNLIKELY_CONDITION +// Do not use unlikely annotations +# define OPENTELEMETRY_UNLIKELY_CONDITION(C) (C) +#endif + +/* + Expected usage pattern: + + if (ptr != nullptr) + OPENTELEMETRY_LIKELY + { + do_something_likely(); + } else { + do_something_unlikely(); + } + + This pattern works starting with C++20. + See https://en.cppreference.com/w/cpp/language/attributes/likely + + Please use OPENTELEMETRY_LIKELY_CONDITION instead for now. 
+*/ + +#if !defined(OPENTELEMETRY_LIKELY) && defined(__cplusplus) +// Only use likely with C++20 +# if __cplusplus >= 202002L +// GCC 9 has likely attribute but do not support declare it at the beginning of statement +# if defined(__has_cpp_attribute) && (defined(__clang__) || !defined(__GNUC__) || __GNUC__ > 9) +# if __has_cpp_attribute(likely) +# define OPENTELEMETRY_LIKELY [[likely]] +# endif +# endif +# endif +#endif + +#ifndef OPENTELEMETRY_LIKELY +# define OPENTELEMETRY_LIKELY +#endif + +#if !defined(OPENTELEMETRY_UNLIKELY) && defined(__cplusplus) +// Only use unlikely with C++20 +# if __cplusplus >= 202002L +// GCC 9 has unlikely attribute but do not support declare it at the beginning of statement +# if defined(__has_cpp_attribute) && (defined(__clang__) || !defined(__GNUC__) || __GNUC__ > 9) +# if __has_cpp_attribute(unlikely) +# define OPENTELEMETRY_UNLIKELY [[unlikely]] +# endif +# endif +# endif +#endif + +#ifndef OPENTELEMETRY_UNLIKELY +# define OPENTELEMETRY_UNLIKELY +#endif + +/// \brief Declare variable as maybe unused +/// usage: +/// OPENTELEMETRY_MAYBE_UNUSED int a; +/// class OPENTELEMETRY_MAYBE_UNUSED a; +/// OPENTELEMETRY_MAYBE_UNUSED int a(); +/// +#if defined(__cplusplus) && __cplusplus >= 201703L +# define OPENTELEMETRY_MAYBE_UNUSED [[maybe_unused]] +#elif defined(__clang__) +# define OPENTELEMETRY_MAYBE_UNUSED __attribute__((unused)) +#elif defined(__GNUC__) && ((__GNUC__ >= 4) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 1))) +# define OPENTELEMETRY_MAYBE_UNUSED __attribute__((unused)) +#elif (defined(_MSC_VER) && _MSC_VER >= 1910) && (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) +# define OPENTELEMETRY_MAYBE_UNUSED [[maybe_unused]] +#else +# define OPENTELEMETRY_MAYBE_UNUSED +#endif + +#ifndef OPENTELEMETRY_RTTI_ENABLED +# if defined(__clang__) +# if __has_feature(cxx_rtti) +# define OPENTELEMETRY_RTTI_ENABLED +# endif +# elif defined(__GNUG__) +# if defined(__GXX_RTTI) +# define OPENTELEMETRY_RTTI_ENABLED +# endif +# elif defined(_MSC_VER) +# if defined(_CPPRTTI) +# define OPENTELEMETRY_RTTI_ENABLED +# endif +# endif +#endif + +#if defined(__cplusplus) && __cplusplus >= 201402L +# define OPENTELEMETRY_DEPRECATED [[deprecated]] +#elif defined(__clang__) +# define OPENTELEMETRY_DEPRECATED __attribute__((deprecated)) +#elif defined(__GNUC__) +# define OPENTELEMETRY_DEPRECATED __attribute__((deprecated)) +#elif defined(_MSC_VER) +# if _MSC_VER >= 1910 && defined(_MSVC_LANG) && _MSVC_LANG >= 201703L +# define OPENTELEMETRY_DEPRECATED [[deprecated]] +# else +# define OPENTELEMETRY_DEPRECATED __declspec(deprecated) +# endif +#else +# define OPENTELEMETRY_DEPRECATED +#endif + +#if defined(__cplusplus) && __cplusplus >= 201402L +# define OPENTELEMETRY_DEPRECATED_MESSAGE(msg) [[deprecated(msg)]] +#elif defined(__clang__) +# define OPENTELEMETRY_DEPRECATED_MESSAGE(msg) __attribute__((deprecated(msg))) +#elif defined(__GNUC__) +# define OPENTELEMETRY_DEPRECATED_MESSAGE(msg) __attribute__((deprecated(msg))) +#elif defined(_MSC_VER) +# if _MSC_VER >= 1910 && defined(_MSVC_LANG) && _MSVC_LANG >= 201703L +# define OPENTELEMETRY_DEPRECATED_MESSAGE(msg) [[deprecated(msg)]] +# else +# define OPENTELEMETRY_DEPRECATED_MESSAGE(msg) __declspec(deprecated(msg)) +# endif +#else +# define OPENTELEMETRY_DEPRECATED_MESSAGE(msg) +#endif + +// Regex support +#if (__GNUC__ == 4 && (__GNUC_MINOR__ == 8 || __GNUC_MINOR__ == 9)) +# define OPENTELEMETRY_HAVE_WORKING_REGEX 0 +#else +# define OPENTELEMETRY_HAVE_WORKING_REGEX 1 +#endif + +/* clang-format off */ + +/** + @page HEADER_ONLY_SINGLETON 
Header only singleton. + + @section ELF_SINGLETON + + For clang and gcc, the desired coding pattern is as follows. + + @verbatim + class Foo + { + // (a) + __attribute__((visibility("default"))) + // (b) + T& get_singleton() + { + // (c) + static T singleton; + return singleton; + } + }; + @endverbatim + + (a) is needed when the code is build with + @code -fvisibility="hidden" @endcode + to ensure that all instances of (b) are visible to the linker. + + What is duplicated in the binary is @em code, in (b). + + The linker will make sure only one instance + of all the (b) methods is used. + + (c) is a singleton implemented inside a method. + + This is very desirable, because: + + - the C++ compiler guarantees that construction + of the variable (c) is thread safe. + + - constructors for (c) singletons are executed in code path order, + or not at all if the singleton is never used. + + @section OTHER_SINGLETON + + For other platforms, header only singletons are not supported at this +point. + + @section CODING_PATTERN + + The coding pattern to use in the source code is as follows + + @verbatim + class Foo + { + OPENTELEMETRY_API_SINGLETON + T& get_singleton() + { + static T singleton; + return singleton; + } + }; + @endverbatim +*/ + +/* clang-format on */ + +#if defined(__clang__) + +# define OPENTELEMETRY_API_SINGLETON __attribute__((visibility("default"))) +# define OPENTELEMETRY_LOCAL_SYMBOL __attribute__((visibility("hidden"))) + +#elif defined(__GNUC__) + +# define OPENTELEMETRY_API_SINGLETON __attribute__((visibility("default"))) +# define OPENTELEMETRY_LOCAL_SYMBOL __attribute__((visibility("hidden"))) + +#else + +/* Add support for other compilers here. */ + +# define OPENTELEMETRY_API_SINGLETON +# define OPENTELEMETRY_LOCAL_SYMBOL + +#endif + +// +// Atomic wrappers based on compiler intrinsics for memory read/write. +// The tailing number is read/write length in bits. +// +// N.B. Compiler intrinsic is used because the usage of C++ standard library is restricted in the +// OpenTelemetry C++ API. +// +#if defined(__GNUC__) + +# define OPENTELEMETRY_ATOMIC_READ_8(ptr) __atomic_load_n(ptr, __ATOMIC_SEQ_CST) +# define OPENTELEMETRY_ATOMIC_WRITE_8(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_SEQ_CST) + +#elif defined(_MSC_VER) + +# include + +# define OPENTELEMETRY_ATOMIC_READ_8(ptr) \ + static_cast(_InterlockedCompareExchange8(reinterpret_cast(ptr), 0, 0)) +# define OPENTELEMETRY_ATOMIC_WRITE_8(ptr, value) \ + _InterlockedExchange8(reinterpret_cast(ptr), static_cast(value)) + +#else +# error port atomics read/write for the current platform +#endif + +/* clang-format on */ +// +// The if/elif order matters here. If both OPENTELEMETRY_BUILD_IMPORT_DLL and +// OPENTELEMETRY_BUILD_EXPORT_DLL are defined, the former takes precedence. +// +// TODO: consider define OPENTELEMETRY_EXPORT for cygwin/gcc, see below link. +// https://gcc.gnu.org/wiki/Visibility#How_to_use_the_new_C.2B-.2B-_visibility_support +// +#if defined(_MSC_VER) && defined(OPENTELEMETRY_BUILD_IMPORT_DLL) + +# define OPENTELEMETRY_EXPORT __declspec(dllimport) + +#elif defined(_MSC_VER) && defined(OPENTELEMETRY_BUILD_EXPORT_DLL) + +# define OPENTELEMETRY_EXPORT __declspec(dllexport) + +#else + +// +// build OpenTelemetry as static library or not on Windows. +// +# define OPENTELEMETRY_EXPORT + +#endif + +// OPENTELEMETRY_HAVE_EXCEPTIONS +// +// Checks whether the compiler both supports and enables exceptions. Many +// compilers support a "no exceptions" mode that disables exceptions. 
+// +// Generally, when OPENTELEMETRY_HAVE_EXCEPTIONS is not defined: +// +// * Code using `throw` and `try` may not compile. +// * The `noexcept` specifier will still compile and behave as normal. +// * The `noexcept` operator may still return `false`. +// +// For further details, consult the compiler's documentation. +#ifndef OPENTELEMETRY_HAVE_EXCEPTIONS +# if defined(__clang__) && ((__clang_major__ * 100) + __clang_minor__) < 306 +// Clang < 3.6 +// http://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html#the-exceptions-macro +# if defined(__EXCEPTIONS) && OPENTELEMETRY_HAVE_FEATURE(cxx_exceptions) +# define OPENTELEMETRY_HAVE_EXCEPTIONS 1 +# endif // defined(__EXCEPTIONS) && OPENTELEMETRY_HAVE_FEATURE(cxx_exceptions) +# elif OPENTELEMETRY_HAVE_FEATURE(cxx_exceptions) +# define OPENTELEMETRY_HAVE_EXCEPTIONS 1 +// Handle remaining special cases and default to exceptions being supported. +# elif !(defined(__GNUC__) && !defined(__EXCEPTIONS) && !defined(__cpp_exceptions)) && \ + !(defined(_MSC_VER) && !defined(_CPPUNWIND)) +# define OPENTELEMETRY_HAVE_EXCEPTIONS 1 +# endif +#endif +#ifndef OPENTELEMETRY_HAVE_EXCEPTIONS +# define OPENTELEMETRY_HAVE_EXCEPTIONS 0 +#endif + +/* + OPENTELEMETRY_ATTRIBUTE_LIFETIME_BOUND indicates that a resource owned by a function + parameter or implicit object parameter is retained by the return value of the + annotated function (or, for a parameter of a constructor, in the value of the + constructed object). This attribute causes warnings to be produced if a + temporary object does not live long enough. + + When applied to a reference parameter, the referenced object is assumed to be + retained by the return value of the function. When applied to a non-reference + parameter (for example, a pointer or a class type), all temporaries + referenced by the parameter are assumed to be retained by the return value of + the function. + + See also the upstream documentation: + https://clang.llvm.org/docs/AttributeReference.html#lifetimebound +*/ +#ifndef OPENTELEMETRY_ATTRIBUTE_LIFETIME_BOUND +# if OPENTELEMETRY_HAVE_CPP_ATTRIBUTE(clang::lifetimebound) +# define OPENTELEMETRY_ATTRIBUTE_LIFETIME_BOUND [[clang::lifetimebound]] +# elif OPENTELEMETRY_HAVE_ATTRIBUTE(lifetimebound) +# define OPENTELEMETRY_ATTRIBUTE_LIFETIME_BOUND __attribute__((lifetimebound)) +# else +# define OPENTELEMETRY_ATTRIBUTE_LIFETIME_BOUND +# endif +#endif + +// OPENTELEMETRY_HAVE_MEMORY_SANITIZER +// +// MemorySanitizer (MSan) is a detector of uninitialized reads. It consists of +// a compiler instrumentation module and a run-time library. +#ifndef OPENTELEMETRY_HAVE_MEMORY_SANITIZER +# if !defined(__native_client__) && OPENTELEMETRY_HAVE_FEATURE(memory_sanitizer) +# define OPENTELEMETRY_HAVE_MEMORY_SANITIZER 1 +# else +# define OPENTELEMETRY_HAVE_MEMORY_SANITIZER 0 +# endif +#endif + +#if OPENTELEMETRY_HAVE_MEMORY_SANITIZER && OPENTELEMETRY_HAVE_ATTRIBUTE(no_sanitize_memory) +# define OPENTELEMETRY_SANITIZER_NO_MEMORY \ + __attribute__((no_sanitize_memory)) // __attribute__((no_sanitize("memory"))) +#else +# define OPENTELEMETRY_SANITIZER_NO_MEMORY +#endif + +// OPENTELEMETRY_HAVE_THREAD_SANITIZER +// +// ThreadSanitizer (TSan) is a fast data race detector. 
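// The thread, address and hardware-address sanitizer blocks below follow the
// same pattern as the memory sanitizer block above: detect the sanitizer
// through GCC's __SANITIZE_*__ macros and/or Clang's __has_feature, then
// expose a no_sanitize-style attribute macro so individual functions can opt
// out of instrumentation.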
+#ifndef OPENTELEMETRY_HAVE_THREAD_SANITIZER +# if defined(__SANITIZE_THREAD__) +# define OPENTELEMETRY_HAVE_THREAD_SANITIZER 1 +# elif OPENTELEMETRY_HAVE_FEATURE(thread_sanitizer) +# define OPENTELEMETRY_HAVE_THREAD_SANITIZER 1 +# else +# define OPENTELEMETRY_HAVE_THREAD_SANITIZER 0 +# endif +#endif + +#if OPENTELEMETRY_HAVE_THREAD_SANITIZER && OPENTELEMETRY_HAVE_ATTRIBUTE(no_sanitize_thread) +# define OPENTELEMETRY_SANITIZER_NO_THREAD \ + __attribute__((no_sanitize_thread)) // __attribute__((no_sanitize("thread"))) +#else +# define OPENTELEMETRY_SANITIZER_NO_THREAD +#endif + +// OPENTELEMETRY_HAVE_ADDRESS_SANITIZER +// +// AddressSanitizer (ASan) is a fast memory error detector. +#ifndef OPENTELEMETRY_HAVE_ADDRESS_SANITIZER +# if defined(__SANITIZE_ADDRESS__) +# define OPENTELEMETRY_HAVE_ADDRESS_SANITIZER 1 +# elif OPENTELEMETRY_HAVE_FEATURE(address_sanitizer) +# define OPENTELEMETRY_HAVE_ADDRESS_SANITIZER 1 +# else +# define OPENTELEMETRY_HAVE_ADDRESS_SANITIZER 0 +# endif +#endif + +// OPENTELEMETRY_HAVE_HWADDRESS_SANITIZER +// +// Hardware-Assisted AddressSanitizer (or HWASAN) is even faster than asan +// memory error detector which can use CPU features like ARM TBI, Intel LAM or +// AMD UAI. +#ifndef OPENTELEMETRY_HAVE_HWADDRESS_SANITIZER +# if defined(__SANITIZE_HWADDRESS__) +# define OPENTELEMETRY_HAVE_HWADDRESS_SANITIZER 1 +# elif OPENTELEMETRY_HAVE_FEATURE(hwaddress_sanitizer) +# define OPENTELEMETRY_HAVE_HWADDRESS_SANITIZER 1 +# else +# define OPENTELEMETRY_HAVE_HWADDRESS_SANITIZER 0 +# endif +#endif + +#if OPENTELEMETRY_HAVE_ADDRESS_SANITIZER && OPENTELEMETRY_HAVE_ATTRIBUTE(no_sanitize_address) +# define OPENTELEMETRY_SANITIZER_NO_ADDRESS \ + __attribute__((no_sanitize_address)) // __attribute__((no_sanitize("address"))) +#elif OPENTELEMETRY_HAVE_ADDRESS_SANITIZER && defined(_MSC_VER) && _MSC_VER >= 1928 +# define OPENTELEMETRY_SANITIZER_NO_ADDRESS __declspec(no_sanitize_address) +#elif OPENTELEMETRY_HAVE_HWADDRESS_SANITIZER && OPENTELEMETRY_HAVE_ATTRIBUTE(no_sanitize) +# define OPENTELEMETRY_SANITIZER_NO_ADDRESS __attribute__((no_sanitize("hwaddress"))) +#else +# define OPENTELEMETRY_SANITIZER_NO_ADDRESS +#endif diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/spin_lock_mutex.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/spin_lock_mutex.h new file mode 100644 index 000000000..7031fa4d2 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/spin_lock_mutex.h @@ -0,0 +1,133 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include +#include + +#include "opentelemetry/version.h" + +#if defined(_MSC_VER) +# define _WINSOCKAPI_ // stops including winsock.h +# include +#elif defined(__i386__) || defined(__x86_64__) +# if defined(__clang__) +# include +# elif defined(__INTEL_COMPILER) +# include +# endif +#endif + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace common +{ + +constexpr int SPINLOCK_FAST_ITERATIONS = 100; +constexpr int SPINLOCK_SLEEP_MS = 1; + +/** + * A Mutex which uses atomic flags and spin-locks instead of halting threads. + * + * This mutex uses an incremental back-off strategy with the following phases: + * 1. A tight spin-lock loop (pending: using hardware PAUSE/YIELD instructions) + * 2. A loop where the current thread yields control after checking the lock. + * 3. Issuing a thread-sleep call before starting back in phase 1. + * + * This is meant to give a good balance of perofrmance and CPU consumption in + * practice. 
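 *
 * Because the class satisfies BasicLockable (see below), the usual way to use
 * it is through a scoped guard. A minimal sketch, assuming <mutex> is
 * included and using hypothetical names:
 *
 *   SpinLockMutex counters_lock;
 *   int counter = 0;
 *
 *   void increment()
 *   {
 *     std::lock_guard<SpinLockMutex> guard{counters_lock};
 *     ++counter;
 *   }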
+ *
+ * This class implements the `BasicLockable` specification:
+ * https://en.cppreference.com/w/cpp/named_req/BasicLockable
+ */
+class SpinLockMutex
+{
+public:
+  SpinLockMutex() noexcept {}
+  ~SpinLockMutex() noexcept = default;
+  SpinLockMutex(const SpinLockMutex &) = delete;
+  SpinLockMutex &operator=(const SpinLockMutex &) = delete;
+
+  static inline void fast_yield() noexcept
+  {
+// Issue a Pause/Yield instruction while spinning.
+#if defined(_MSC_VER)
+    YieldProcessor();
+#elif defined(__i386__) || defined(__x86_64__)
+#  if defined(__clang__) || defined(__INTEL_COMPILER)
+    _mm_pause();
+#  else
+    __builtin_ia32_pause();
+#  endif
+#elif defined(__armel__) || defined(__ARMEL__)
+    asm volatile("nop" ::: "memory");
+#elif defined(__arm__) || defined(__aarch64__)  // arm big endian / arm64
+    __asm__ __volatile__("yield" ::: "memory");
+#else
+    // TODO: Issue PAUSE/YIELD on other architectures.
+#endif
+  }
+
+  /**
+   * Attempts to lock the mutex. Returns immediately with `true` (success) or `false` (failure).
+   */
+  bool try_lock() noexcept
+  {
+    return !flag_.load(std::memory_order_relaxed) &&
+           !flag_.exchange(true, std::memory_order_acquire);
+  }
+
+  /**
+   * Blocks until a lock can be obtained for the current thread.
+   *
+   * This mutex will spin the current CPU waiting for the lock to be available. This can have
+   * decent performance in scenarios where there is low lock contention and lock-holders achieve
+   * their work quickly. It degrades in scenarios where locked tasks take a long time.
+   */
+  void lock() noexcept
+  {
+    for (;;)
+    {
+      // Try once
+      if (!flag_.exchange(true, std::memory_order_acquire))
+      {
+        return;
+      }
+      // Spin-Fast (goal ~10ns)
+      for (std::size_t i = 0; i < SPINLOCK_FAST_ITERATIONS; ++i)
+      {
+        if (try_lock())
+        {
+          return;
+        }
+        fast_yield();
+      }
+      // Yield then try again (goal ~100ns)
+      std::this_thread::yield();
+      if (try_lock())
+      {
+        return;
+      }
+      // Sleep and then start the whole process again. (goal ~1000ns)
+      std::this_thread::sleep_for(std::chrono::milliseconds(SPINLOCK_SLEEP_MS));
+    }
+    return;
+  }
+  /** Releases the lock held by the execution agent. Throws no exceptions.
*/ + void unlock() noexcept { flag_.store(false, std::memory_order_release); } + +private: + std::atomic flag_{false}; +}; + +} // namespace common +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/string_util.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/string_util.h new file mode 100644 index 000000000..273a65272 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/string_util.h @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace common +{ + +class StringUtil +{ +public: + static nostd::string_view Trim(nostd::string_view str, size_t left, size_t right) noexcept + { + while (left <= right && isspace(str[left])) + { + left++; + } + while (left <= right && isspace(str[right])) + { + right--; + } + return str.substr(left, 1 + right - left); + } + + static nostd::string_view Trim(nostd::string_view str) noexcept + { + if (str.empty()) + { + return str; + } + + return Trim(str, 0, str.size() - 1); + } +}; + +} // namespace common + +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/timestamp.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/timestamp.h new file mode 100644 index 000000000..f7c79b8b5 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/common/timestamp.h @@ -0,0 +1,206 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include + +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace common +{ +/** + * @brief A timepoint relative to the system clock epoch. + * + * This is used for marking the beginning and end of an operation. + */ +class SystemTimestamp +{ +public: + /** + * @brief Initializes a system timestamp pointing to the start of the epoch. + */ + SystemTimestamp() noexcept : nanos_since_epoch_{0} {} + + /** + * @brief Initializes a system timestamp from a duration. + * + * @param time_since_epoch Time elapsed since the beginning of the epoch. + */ + template + explicit SystemTimestamp(const std::chrono::duration &time_since_epoch) noexcept + : nanos_since_epoch_{static_cast( + std::chrono::duration_cast(time_since_epoch).count())} + {} + + /** + * @brief Initializes a system timestamp based on a point in time. + * + * @param time_point A point in time. + */ + /*implicit*/ SystemTimestamp(const std::chrono::system_clock::time_point &time_point) noexcept + : SystemTimestamp{time_point.time_since_epoch()} + {} + + /** + * @brief Returns a time point for the time stamp. + * + * @return A time point corresponding to the time stamp. + */ + operator std::chrono::system_clock::time_point() const noexcept + { + return std::chrono::system_clock::time_point{ + std::chrono::duration_cast( + std::chrono::nanoseconds{nanos_since_epoch_})}; + } + + /** + * @brief Returns the nanoseconds since the beginning of the epoch. + * + * @return Elapsed nanoseconds since the beginning of the epoch for this timestamp. + */ + std::chrono::nanoseconds time_since_epoch() const noexcept + { + return std::chrono::nanoseconds{nanos_since_epoch_}; + } + + /** + * @brief Compare two steady time stamps. + * + * @return true if the two time stamps are equal. 
+ */ + bool operator==(const SystemTimestamp &other) const noexcept + { + return nanos_since_epoch_ == other.nanos_since_epoch_; + } + + /** + * @brief Compare two steady time stamps for inequality. + * + * @return true if the two time stamps are not equal. + */ + bool operator!=(const SystemTimestamp &other) const noexcept + { + return nanos_since_epoch_ != other.nanos_since_epoch_; + } + +private: + int64_t nanos_since_epoch_; +}; + +/** + * @brief A timepoint relative to the monotonic clock epoch + * + * This is used for calculating the duration of an operation. + */ +class SteadyTimestamp +{ +public: + /** + * @brief Initializes a monotonic timestamp pointing to the start of the epoch. + */ + SteadyTimestamp() noexcept : nanos_since_epoch_{0} {} + + /** + * @brief Initializes a monotonic timestamp from a duration. + * + * @param time_since_epoch Time elapsed since the beginning of the epoch. + */ + template + explicit SteadyTimestamp(const std::chrono::duration &time_since_epoch) noexcept + : nanos_since_epoch_{static_cast( + std::chrono::duration_cast(time_since_epoch).count())} + {} + + /** + * @brief Initializes a monotonic timestamp based on a point in time. + * + * @param time_point A point in time. + */ + /*implicit*/ SteadyTimestamp(const std::chrono::steady_clock::time_point &time_point) noexcept + : SteadyTimestamp{time_point.time_since_epoch()} + {} + + /** + * @brief Returns a time point for the time stamp. + * + * @return A time point corresponding to the time stamp. + */ + operator std::chrono::steady_clock::time_point() const noexcept + { + return std::chrono::steady_clock::time_point{ + std::chrono::duration_cast( + std::chrono::nanoseconds{nanos_since_epoch_})}; + } + + /** + * @brief Returns the nanoseconds since the beginning of the epoch. + * + * @return Elapsed nanoseconds since the beginning of the epoch for this timestamp. + */ + std::chrono::nanoseconds time_since_epoch() const noexcept + { + return std::chrono::nanoseconds{nanos_since_epoch_}; + } + + /** + * @brief Compare two steady time stamps. + * + * @return true if the two time stamps are equal. + */ + bool operator==(const SteadyTimestamp &other) const noexcept + { + return nanos_since_epoch_ == other.nanos_since_epoch_; + } + + /** + * @brief Compare two steady time stamps for inequality. + * + * @return true if the two time stamps are not equal. + */ + bool operator!=(const SteadyTimestamp &other) const noexcept + { + return nanos_since_epoch_ != other.nanos_since_epoch_; + } + +private: + int64_t nanos_since_epoch_; +}; + +class DurationUtil +{ +public: + template + static std::chrono::duration AdjustWaitForTimeout( + std::chrono::duration timeout, + std::chrono::duration indefinite_value) noexcept + { + // Do not call now() when this duration is max value, now() may have a expensive cost. + if (timeout == (std::chrono::duration::max)()) + { + return indefinite_value; + } + + // std::future::wait_for, std::this_thread::sleep_for, and std::condition_variable::wait_for + // may use steady_clock or system_clock.We need make sure now() + timeout do not overflow. 
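    // (Illustrative magnitude, not from the upstream header: with a
    // nanosecond-resolution clock, a request such as
    // std::chrono::hours(24 * 365 * 1000) cannot be added to now() without
    // overflowing the clock's time_point range; the two checks below return
    // indefinite_value for such values instead of computing a wrapped-around
    // deadline.)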
+ auto max_timeout = std::chrono::duration_cast>( + (std::chrono::steady_clock::time_point::max)() - std::chrono::steady_clock::now()); + if (timeout >= max_timeout) + { + return indefinite_value; + } + max_timeout = std::chrono::duration_cast>( + (std::chrono::system_clock::time_point::max)() - std::chrono::system_clock::now()); + if (timeout >= max_timeout) + { + return indefinite_value; + } + + return timeout; + } +}; + +} // namespace common +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/config.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/config.h new file mode 100644 index 000000000..cb52f3b5d --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/config.h @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include // IWYU pragma: keep + +#if defined(OPENTELEMETRY_ABI_VERSION_NO) && OPENTELEMETRY_ABI_VERSION_NO >= 2 +# error \ + "opentelemetry/config.h is removed in ABI version 2 and later. Please use opentelemetry/version.h instead." +#else +# if defined(__clang__) || defined(__GNUC__) +# pragma GCC warning \ + "opentelemetry/config.h is deprecated. Please use opentelemetry/version.h instead." +# elif defined(_MSC_VER) +# pragma message( \ + "[WARNING]: opentelemetry/config.h is deprecated. Please use opentelemetry/version.h instead.") +# endif +#endif diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/context/context.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/context/context.h new file mode 100644 index 000000000..924036efa --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/context/context.h @@ -0,0 +1,171 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include + +#include "opentelemetry/context/context_value.h" +#include "opentelemetry/nostd/shared_ptr.h" +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/nostd/variant.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace context +{ + +// The context class provides a context identifier. Is built as a linked list +// of DataList nodes and each context holds a shared_ptr to a place within +// the list that determines which keys and values it has access to. All that +// come before and none that come after. +class Context +{ + +public: + Context() = default; + // Creates a context object from a map of keys and identifiers, this will + // hold a shared_ptr to the head of the DataList linked list + template + Context(const T &keys_and_values) noexcept + : head_{nostd::shared_ptr{new DataList(keys_and_values)}} + {} + + // Creates a context object from a key and value, this will + // hold a shared_ptr to the head of the DataList linked list + Context(nostd::string_view key, ContextValue value) noexcept + : head_{nostd::shared_ptr{new DataList(key, value)}} + {} + + // Accepts a new iterable and then returns a new context that + // contains the new key and value data. It attaches the + // exisiting list to the end of the new list. + template + Context SetValues(T &values) noexcept + { + Context context = Context(values); + nostd::shared_ptr last = context.head_; + while (last->next_ != nullptr) + { + last = last->next_; + } + last->next_ = head_; + return context; + } + + // Accepts a new iterable and then returns a new context that + // contains the new key and value data. 
It attaches the + // exisiting list to the end of the new list. + Context SetValue(nostd::string_view key, ContextValue value) noexcept + { + Context context = Context(key, value); + context.head_->next_ = head_; + return context; + } + + // Returns the value associated with the passed in key. + context::ContextValue GetValue(const nostd::string_view key) const noexcept + { + for (DataList *data = head_.get(); data != nullptr; data = data->next_.get()) + { + if (key.size() == data->key_length_) + { + if (std::memcmp(key.data(), data->key_, data->key_length_) == 0) + { + return data->value_; + } + } + } + return ContextValue{}; + } + + // Checks for key and returns true if found + bool HasKey(const nostd::string_view key) const noexcept + { + return !nostd::holds_alternative(GetValue(key)); + } + + bool operator==(const Context &other) const noexcept { return (head_ == other.head_); } + +private: + // A linked list to contain the keys and values of this context node + struct DataList + { + char *key_ = nullptr; + + nostd::shared_ptr next_{nullptr}; + + size_t key_length_ = 0UL; + + ContextValue value_; + + DataList() = default; + + // Builds a data list off of a key and value iterable and returns the head + template + DataList(const T &keys_and_vals) + { + bool first = true; + auto *node = this; + for (auto &iter : keys_and_vals) + { + if (first) + { + *node = DataList(iter.first, iter.second); + first = false; + } + else + { + node->next_ = nostd::shared_ptr(new DataList(iter.first, iter.second)); + node = node->next_.get(); + } + } + } + + // Builds a data list with just a key and value, so it will just be the head + // and returns that head. + DataList(nostd::string_view key, const ContextValue &value) + { + key_ = new char[key.size()]; + key_length_ = key.size(); + std::memcpy(key_, key.data(), key.size() * sizeof(char)); + next_ = nostd::shared_ptr{nullptr}; + value_ = value; + } + + DataList(const DataList &other) + : key_(new char[other.key_length_]), + next_(other.next_), + key_length_(other.key_length_), + value_(other.value_) + { + std::memcpy(key_, other.key_, other.key_length_ * sizeof(char)); + } + + DataList &operator=(DataList &&other) noexcept + { + key_length_ = other.key_length_; + value_ = std::move(other.value_); + next_ = std::move(other.next_); + + key_ = other.key_; + other.key_ = nullptr; + + return *this; + } + + ~DataList() + { + if (key_ != nullptr) + { + delete[] key_; + } + } + }; + + // Head of the list which holds the keys and values of this context + nostd::shared_ptr head_; +}; +} // namespace context +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/context/context_value.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/context/context_value.h new file mode 100644 index 000000000..27bd40fb0 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/context/context_value.h @@ -0,0 +1,35 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include + +#include "opentelemetry/nostd/shared_ptr.h" +#include "opentelemetry/nostd/variant.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace baggage +{ +class Baggage; +} // namespace baggage + +namespace trace +{ +class Span; +class SpanContext; +} // namespace trace + +namespace context +{ +using ContextValue = nostd::variant, + nostd::shared_ptr, + nostd::shared_ptr>; +} // namespace context +OPENTELEMETRY_END_NAMESPACE diff --git 
a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/context/propagation/composite_propagator.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/context/propagation/composite_propagator.h new file mode 100644 index 000000000..f7afe23bf --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/context/propagation/composite_propagator.h @@ -0,0 +1,93 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include + +#include "opentelemetry/context/propagation/text_map_propagator.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace context +{ +namespace propagation +{ + +class CompositePropagator : public TextMapPropagator +{ +public: + CompositePropagator(std::vector> propagators) + : propagators_(std::move(propagators)) + {} + + /** + * Run each of the configured propagators with the given context and carrier. + * Propagators are run in the order they are configured, so if multiple + * propagators write the same carrier key, the propagator later in the list + * will "win". + * + * @param carrier Carrier into which context will be injected + * @param context Context to inject + * + */ + + void Inject(TextMapCarrier &carrier, const context::Context &context) noexcept override + { + for (auto &p : propagators_) + { + p->Inject(carrier, context); + } + } + + /** + * Run each of the configured propagators with the given context and carrier. + * Propagators are run in the order they are configured, so if multiple + * propagators write the same context key, the propagator later in the list + * will "win". + * + * @param carrier Carrier from which to extract context + * @param context Context to add values to + */ + context::Context Extract(const TextMapCarrier &carrier, + context::Context &context) noexcept override + { + auto first = true; + context::Context tmp_context; + for (auto &p : propagators_) + { + if (first) + { + tmp_context = p->Extract(carrier, context); + first = false; + } + else + { + tmp_context = p->Extract(carrier, tmp_context); + } + } + return propagators_.size() ? 
tmp_context : context; + } + + /** + * Invoke callback with fields set to carrier by `inject` method for all the + * configured propagators + * Returns true if all invocation return true + */ + bool Fields(nostd::function_ref callback) const noexcept override + { + bool status = true; + for (auto &p : propagators_) + { + status = status && p->Fields(callback); + } + return status; + } + +private: + std::vector> propagators_; +}; +} // namespace propagation +} // namespace context +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/context/propagation/global_propagator.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/context/propagation/global_propagator.h new file mode 100644 index 000000000..85293202f --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/context/propagation/global_propagator.h @@ -0,0 +1,57 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include + +#include "opentelemetry/context/propagation/noop_propagator.h" + +#include "opentelemetry/common/macros.h" +#include "opentelemetry/common/spin_lock_mutex.h" +#include "opentelemetry/nostd/shared_ptr.h" + +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace context +{ +namespace propagation +{ + +class TextMapPropagator; + +/* Stores the singleton TextMapPropagator */ + +class OPENTELEMETRY_EXPORT GlobalTextMapPropagator +{ +public: + static nostd::shared_ptr GetGlobalPropagator() noexcept + { + std::lock_guard guard(GetLock()); + return nostd::shared_ptr(GetPropagator()); + } + + static void SetGlobalPropagator(const nostd::shared_ptr &prop) noexcept + { + std::lock_guard guard(GetLock()); + GetPropagator() = prop; + } + +private: + OPENTELEMETRY_API_SINGLETON static nostd::shared_ptr &GetPropagator() noexcept + { + static nostd::shared_ptr propagator(new NoOpPropagator()); + return propagator; + } + + OPENTELEMETRY_API_SINGLETON static common::SpinLockMutex &GetLock() noexcept + { + static common::SpinLockMutex lock; + return lock; + } +}; + +} // namespace propagation +} // namespace context +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/context/propagation/noop_propagator.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/context/propagation/noop_propagator.h new file mode 100644 index 000000000..4e1b30c17 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/context/propagation/noop_propagator.h @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "opentelemetry/context/propagation/text_map_propagator.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace context +{ +namespace propagation +{ + +/** + * No-op implementation TextMapPropagator + */ +class NoOpPropagator : public TextMapPropagator +{ +public: + /** Noop extract function does nothing and returns the input context */ + context::Context Extract(const TextMapCarrier & /*carrier*/, + context::Context &context) noexcept override + { + return context; + } + + /** Noop inject function does nothing */ + void Inject(TextMapCarrier & /*carrier*/, + const context::Context & /* context */) noexcept override + {} + + bool Fields(nostd::function_ref /* callback */) const noexcept override + { + return true; + } +}; +} // namespace propagation +} // namespace context +OPENTELEMETRY_END_NAMESPACE diff --git 
a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/context/propagation/text_map_propagator.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/context/propagation/text_map_propagator.h new file mode 100644 index 000000000..21bc29361 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/context/propagation/text_map_propagator.h @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "opentelemetry/context/context.h" +#include "opentelemetry/nostd/function_ref.h" +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace context +{ +namespace propagation +{ + +// TextMapCarrier is the storage medium used by TextMapPropagator. +class TextMapCarrier +{ +public: + // returns the value associated with the passed key. + virtual nostd::string_view Get(nostd::string_view key) const noexcept = 0; + + // stores the key-value pair. + virtual void Set(nostd::string_view key, nostd::string_view value) noexcept = 0; + + /* list of all the keys in the carrier. + By default, it returns true without invoking callback */ + virtual bool Keys(nostd::function_ref /* callback */) const noexcept + { + return true; + } + virtual ~TextMapCarrier() = default; +}; + +// The TextMapPropagator class provides an interface that enables extracting and injecting +// context into carriers that travel in-band across process boundaries. HTTP frameworks and clients +// can integrate with TextMapPropagator by providing the object containing the +// headers, and a getter and setter function for the extraction and +// injection of values, respectively. + +class TextMapPropagator +{ +public: + // Returns the context that is stored in the carrier with the TextMapCarrier as extractor. + virtual context::Context Extract(const TextMapCarrier &carrier, + context::Context &context) noexcept = 0; + + // Sets the context for carrier with self defined rules. + virtual void Inject(TextMapCarrier &carrier, const context::Context &context) noexcept = 0; + + // Gets the fields set in the carrier by the `inject` method + virtual bool Fields(nostd::function_ref callback) const noexcept = 0; + + virtual ~TextMapPropagator() = default; +}; +} // namespace propagation +} // namespace context +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/context/runtime_context.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/context/runtime_context.h new file mode 100644 index 000000000..e3f88254c --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/context/runtime_context.h @@ -0,0 +1,340 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include + +#include "opentelemetry/common/macros.h" +#include "opentelemetry/context/context.h" +#include "opentelemetry/context/context_value.h" +#include "opentelemetry/nostd/shared_ptr.h" +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/nostd/unique_ptr.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace context +{ +// The Token object provides is returned when attaching objects to the +// RuntimeContext object and is associated with a context object, and +// can be provided to the RuntimeContext Detach method to remove the +// associated context from the RuntimeContext. 
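// Note that ~Token() calls RuntimeContext::Detach(*this) (defined further
// down in this header), so the nostd::unique_ptr<Token> returned by
// RuntimeContext::Attach() behaves like an RAII guard: the attached context
// is detached again automatically when the token is destroyed.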
+class Token +{ +public: + bool operator==(const Context &other) const noexcept { return context_ == other; } + + ~Token() noexcept; + +private: + friend class RuntimeContextStorage; + + // A constructor that sets the token's Context object to the + // one that was passed in. + Token(const Context &context) : context_(context) {} + + const Context context_; +}; + +/** + * RuntimeContextStorage is used by RuntimeContext to store Context frames. + * + * Custom context management strategies can be implemented by deriving from + * this class and passing an initialized RuntimeContextStorage object to + * RuntimeContext::SetRuntimeContextStorage. + */ +class OPENTELEMETRY_EXPORT RuntimeContextStorage +{ +public: + /** + * Return the current context. + * @return the current context + */ + virtual Context GetCurrent() noexcept = 0; + + /** + * Set the current context. + * @param the new current context + * @return a token for the new current context. This never returns a nullptr. + */ + virtual nostd::unique_ptr Attach(const Context &context) noexcept = 0; + + /** + * Detach the context related to the given token. + * @param token a token related to a context + * @return true if the context could be detached + */ + virtual bool Detach(Token &token) noexcept = 0; + + virtual ~RuntimeContextStorage() {} + +protected: + nostd::unique_ptr CreateToken(const Context &context) noexcept + { + return nostd::unique_ptr(new Token(context)); + } +}; + +/** + * Construct and return the default RuntimeContextStorage + * @return a ThreadLocalContextStorage + */ +static RuntimeContextStorage *GetDefaultStorage() noexcept; + +// Provides a wrapper for propagating the context object globally. +// +// By default, a thread-local runtime context storage is used. +class OPENTELEMETRY_EXPORT RuntimeContext +{ +public: + // Return the current context. + static Context GetCurrent() noexcept { return GetRuntimeContextStorage()->GetCurrent(); } + + // Sets the current 'Context' object. Returns a token + // that can be used to reset to the previous Context. + static nostd::unique_ptr Attach(const Context &context) noexcept + { + return GetRuntimeContextStorage()->Attach(context); + } + + // Resets the context to a previous value stored in the + // passed in token. Returns true if successful, false otherwise + static bool Detach(Token &token) noexcept { return GetRuntimeContextStorage()->Detach(token); } + + // Sets the Key and Value into the passed in context or if a context is not + // passed in, the RuntimeContext. + // Should be used to SetValues to the current RuntimeContext, is essentially + // equivalent to RuntimeContext::GetCurrent().SetValue(key,value). Keep in + // mind that the current RuntimeContext will not be changed, and the new + // context will be returned. + static Context SetValue(nostd::string_view key, + const ContextValue &value, + Context *context = nullptr) noexcept + { + Context temp_context; + if (context == nullptr) + { + temp_context = GetCurrent(); + } + else + { + temp_context = *context; + } + return temp_context.SetValue(key, value); + } + + // Returns the value associated with the passed in key and either the + // passed in context* or the runtime context if a context is not passed in. + // Should be used to get values from the current RuntimeContext, is + // essentially equivalent to RuntimeContext::GetCurrent().GetValue(key). 
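  // A minimal usage sketch (not part of the upstream header; the key name and
  // the bool payload are made up for illustration):
  //
  //   using opentelemetry::context::RuntimeContext;
  //
  //   auto ctx   = RuntimeContext::GetCurrent().SetValue("tenant.premium", true);
  //   auto token = RuntimeContext::Attach(ctx);   // make ctx the current context
  //   auto value = RuntimeContext::GetValue("tenant.premium");  // ContextValue variant
  //   // ...token's destructor detaches ctx when it goes out of scope.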
+ static ContextValue GetValue(nostd::string_view key, Context *context = nullptr) noexcept + { + Context temp_context; + if (context == nullptr) + { + temp_context = GetCurrent(); + } + else + { + temp_context = *context; + } + return temp_context.GetValue(key); + } + + /** + * Provide a custom runtime context storage. + * + * This provides a possibility to override the default thread-local runtime + * context storage. This has to be set before any spans are created by the + * application, otherwise the behavior is undefined. + * + * @param storage a custom runtime context storage + */ + static void SetRuntimeContextStorage( + const nostd::shared_ptr &storage) noexcept + { + GetStorage() = storage; + } + + /** + * Provide a pointer to const runtime context storage. + * + * The returned pointer can only be used for extending the lifetime of the runtime context + * storage. + * + */ + static nostd::shared_ptr GetConstRuntimeContextStorage() noexcept + { + return GetRuntimeContextStorage(); + } + +private: + static nostd::shared_ptr GetRuntimeContextStorage() noexcept + { + return GetStorage(); + } + + OPENTELEMETRY_API_SINGLETON static nostd::shared_ptr &GetStorage() noexcept + { + static nostd::shared_ptr context(GetDefaultStorage()); + return context; + } +}; + +inline Token::~Token() noexcept +{ + context::RuntimeContext::Detach(*this); +} + +// The ThreadLocalContextStorage class is a derived class from +// RuntimeContextStorage and provides a wrapper for propagating context through +// cpp thread locally. This file must be included to use the RuntimeContext +// class if another implementation has not been registered. +class ThreadLocalContextStorage : public RuntimeContextStorage +{ +public: + ThreadLocalContextStorage() noexcept = default; + + // Return the current context. + Context GetCurrent() noexcept override { return GetStack().Top(); } + + // Resets the context to the value previous to the passed in token. This will + // also detach all child contexts of the passed in token. + // Returns true if successful, false otherwise. + bool Detach(Token &token) noexcept override + { + // In most cases, the context to be detached is on the top of the stack. + if (token == GetStack().Top()) + { + GetStack().Pop(); + return true; + } + + if (!GetStack().Contains(token)) + { + return false; + } + + while (!(token == GetStack().Top())) + { + GetStack().Pop(); + } + + GetStack().Pop(); + + return true; + } + + // Sets the current 'Context' object. Returns a token + // that can be used to reset to the previous Context. + nostd::unique_ptr Attach(const Context &context) noexcept override + { + GetStack().Push(context); + return CreateToken(context); + } + +private: + // A nested class to store the attached contexts in a stack. + class Stack + { + friend class ThreadLocalContextStorage; + + Stack() noexcept : size_(0), capacity_(0), base_(nullptr) {} + + // Pops the top Context off the stack. + void Pop() noexcept + { + if (size_ == 0) + { + return; + } + // Store empty Context before decrementing `size`, to ensure + // the shared_ptr object (if stored in prev context object ) are released. + // The stack is not resized, and the unused memory would be reutilised + // for subsequent context storage. + base_[size_ - 1] = Context(); + size_ -= 1; + } + + bool Contains(const Token &token) const noexcept + { + for (size_t pos = size_; pos > 0; --pos) + { + if (token == base_[pos - 1]) + { + return true; + } + } + + return false; + } + + // Returns the Context at the top of the stack. 
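    // (When the stack is empty this returns a default-constructed, empty
    // Context rather than reading past base_.)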
+ Context Top() const noexcept + { + if (size_ == 0) + { + return Context(); + } + return base_[size_ - 1]; + } + + // Pushes the passed in context pointer to the top of the stack + // and resizes if necessary. + void Push(const Context &context) noexcept + { + size_++; + if (size_ > capacity_) + { + Resize(size_ * 2); + } + base_[size_ - 1] = context; + } + + // Reallocates the storage array to the pass in new capacity size. + void Resize(size_t new_capacity) noexcept + { + size_t old_size = size_ - 1; + if (new_capacity == 0) + { + new_capacity = 2; + } + Context *temp = new Context[new_capacity]; + if (base_ != nullptr) + { + // vs2015 does not like this construct considering it unsafe: + // - std::copy(base_, base_ + old_size, temp); + // Ref. + // https://stackoverflow.com/questions/12270224/xutility2227-warning-c4996-std-copy-impl + for (size_t i = 0; i < (std::min)(old_size, new_capacity); i++) + { + temp[i] = base_[i]; + } + delete[] base_; + } + base_ = temp; + capacity_ = new_capacity; + } + + ~Stack() noexcept { delete[] base_; } + + size_t size_; + size_t capacity_; + Context *base_; + }; + + OPENTELEMETRY_API_SINGLETON Stack &GetStack() + { + static thread_local Stack stack_ = Stack(); + return stack_; + } +}; + +static RuntimeContextStorage *GetDefaultStorage() noexcept +{ + return new ThreadLocalContextStorage(); +} +} // namespace context +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/detail/preprocessor.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/detail/preprocessor.h new file mode 100644 index 000000000..acdf02eb0 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/detail/preprocessor.h @@ -0,0 +1,25 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// NOTE - code within detail namespace implements internal details, and not part +// of the public interface. + +#pragma once + +#define OPENTELEMETRY_STRINGIFY(S) OPENTELEMETRY_STRINGIFY_(S) +#define OPENTELEMETRY_STRINGIFY_(S) #S + +#define OPENTELEMETRY_CONCAT(A, B) OPENTELEMETRY_CONCAT_(A, B) +#define OPENTELEMETRY_CONCAT_(A, B) A##B + +// Import the C++20 feature-test macros +#ifdef __has_include +# if __has_include() +# include +# endif +#elif defined(_MSC_VER) && ((defined(__cplusplus) && __cplusplus >= 202002L) || \ + (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)) +# if _MSC_VER >= 1922 +# include +# endif +#endif diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/event_id.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/event_id.h new file mode 100644 index 000000000..65306ef79 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/event_id.h @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include + +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/nostd/unique_ptr.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace logs +{ + +/** + * EventId class which acts the Id of the event with an optional name. 
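 *
 * For illustration only (these ids and names are hypothetical, not from the
 * upstream header), an EventId can be constructed with or without a name:
 *
 *   opentelemetry::logs::EventId checkout_started{1001, "checkout.started"};
 *   opentelemetry::logs::EventId checkout_failed{1002};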
+ */ +class EventId +{ +public: + EventId(int64_t id, nostd::string_view name) noexcept + : id_{id}, name_{nostd::unique_ptr{new char[name.length() + 1]}} + { + std::copy(name.begin(), name.end(), name_.get()); + name_.get()[name.length()] = 0; + } + + EventId(int64_t id) noexcept : id_{id}, name_{nullptr} {} + +public: + int64_t id_; + nostd::unique_ptr name_; +}; + +} // namespace logs +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/event_logger.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/event_logger.h new file mode 100644 index 000000000..bcc103562 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/event_logger.h @@ -0,0 +1,82 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "opentelemetry/logs/log_record.h" +#include "opentelemetry/logs/logger.h" +#include "opentelemetry/logs/logger_type_traits.h" +#include "opentelemetry/nostd/shared_ptr.h" +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/nostd/unique_ptr.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace logs +{ +#if OPENTELEMETRY_ABI_VERSION_NO < 2 +/** + * Handles event log record creation. + **/ +class OPENTELEMETRY_DEPRECATED EventLogger +{ +public: + virtual ~EventLogger() = default; + + /* Returns the name of the logger */ + virtual const nostd::string_view GetName() noexcept = 0; + + /* Returns the delegate logger of this event logger */ + virtual nostd::shared_ptr GetDelegateLogger() noexcept = 0; + + /** + * Emit a event Log Record object + * + * @param event_name Event name + * @param log_record Log record + */ + virtual void EmitEvent(nostd::string_view event_name, + nostd::unique_ptr &&log_record) noexcept = 0; + + /** + * Emit a event Log Record object with arguments + * + * @param event_name Event name + * @tparam args Arguments which can be used to set data of log record by type. + * Severity -> severity, severity_text + * string_view -> body + * AttributeValue -> body + * SpanContext -> span_id,tace_id and trace_flags + * SpanId -> span_id + * TraceId -> tace_id + * TraceFlags -> trace_flags + * SystemTimestamp -> timestamp + * system_clock::time_point -> timestamp + * KeyValueIterable -> attributes + * Key value iterable container -> attributes + * span> -> attributes(return type of MakeAttributes) + */ + template + void EmitEvent(nostd::string_view event_name, ArgumentType &&...args) + { + nostd::shared_ptr delegate_logger = GetDelegateLogger(); + if (!delegate_logger) + { + return; + } + nostd::unique_ptr log_record = delegate_logger->CreateLogRecord(); + + IgnoreTraitResult(detail::LogRecordSetterTrait::type>::Set( + log_record.get(), std::forward(args))...); + + EmitEvent(event_name, std::move(log_record)); + } + +private: + template + void IgnoreTraitResult(ValueType &&...) 
+ {} +}; +#endif +} // namespace logs +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/event_logger_provider.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/event_logger_provider.h new file mode 100644 index 000000000..9eea0ca23 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/event_logger_provider.h @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "opentelemetry/nostd/shared_ptr.h" +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace logs +{ + +class EventLogger; +class Logger; + +#if OPENTELEMETRY_ABI_VERSION_NO < 2 +/** + * Creates new EventLogger instances. + */ +class OPENTELEMETRY_DEPRECATED EventLoggerProvider +{ +public: + virtual ~EventLoggerProvider() = default; + + /** + * Creates a named EventLogger instance. + * + */ + + virtual nostd::shared_ptr CreateEventLogger( + nostd::shared_ptr delegate_logger, + nostd::string_view event_domain) noexcept = 0; +}; +#endif +} // namespace logs +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/log_record.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/log_record.h new file mode 100644 index 000000000..81dec568e --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/log_record.h @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "opentelemetry/common/attribute_value.h" +#include "opentelemetry/common/timestamp.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace trace +{ +class SpanId; +class TraceId; +class TraceFlags; +} // namespace trace + +namespace logs +{ + +enum class Severity : uint8_t; + +/** + * Maintains a representation of a log in a format that can be processed by a recorder. + * + * This class is thread-compatible. + */ +class LogRecord +{ +public: + virtual ~LogRecord() = default; + + /** + * Set the timestamp for this log. + * @param timestamp the timestamp to set + */ + virtual void SetTimestamp(common::SystemTimestamp timestamp) noexcept = 0; + + /** + * Set the observed timestamp for this log. + * @param timestamp the timestamp to set + */ + virtual void SetObservedTimestamp(common::SystemTimestamp timestamp) noexcept = 0; + + /** + * Set the severity for this log. + * @param severity the severity of the event + */ + virtual void SetSeverity(logs::Severity severity) noexcept = 0; + + /** + * Set body field for this log. + * @param message the body to set + */ + virtual void SetBody(const common::AttributeValue &message) noexcept = 0; + + /** + * Set an attribute of a log. + * @param key the name of the attribute + * @param value the attribute value + */ + virtual void SetAttribute(nostd::string_view key, + const common::AttributeValue &value) noexcept = 0; + + /** + * Set the Event Id. + * @param id The event id to set + * @param name Optional event name to set + */ + // TODO: mark this as pure virtual once all exporters have been updated + virtual void SetEventId(int64_t id, nostd::string_view name = {}) noexcept = 0; + + /** + * Set the trace id for this log. + * @param trace_id the trace id to set + */ + virtual void SetTraceId(const trace::TraceId &trace_id) noexcept = 0; + + /** + * Set the span id for this log. 
+ * @param span_id the span id to set + */ + virtual void SetSpanId(const trace::SpanId &span_id) noexcept = 0; + + /** + * Inject trace_flags for this log. + * @param trace_flags the trace flags to set + */ + virtual void SetTraceFlags(const trace::TraceFlags &trace_flags) noexcept = 0; +}; +} // namespace logs +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/logger.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/logger.h new file mode 100644 index 000000000..52f403b0a --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/logger.h @@ -0,0 +1,492 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "opentelemetry/logs/logger_type_traits.h" +#include "opentelemetry/logs/severity.h" +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/nostd/unique_ptr.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace common +{ +class KeyValueIterable; +} // namespace common + +namespace logs +{ + +class EventId; +class LogRecord; + +/** + * Handles log record creation. + **/ +class Logger +{ +public: + virtual ~Logger() = default; + + /* Returns the name of the logger */ + virtual const nostd::string_view GetName() noexcept = 0; + + /** + * Create a Log Record object + * + * @return nostd::unique_ptr + */ + virtual nostd::unique_ptr CreateLogRecord() noexcept = 0; + + /** + * Emit a Log Record object + * + * @param log_record Log record + */ + virtual void EmitLogRecord(nostd::unique_ptr &&log_record) noexcept = 0; + + /** + * Emit a Log Record object with arguments + * + * @param log_record Log record + * @param args Arguments which can be used to set data of log record by type. + * Severity -> severity, severity_text + * string_view -> body + * AttributeValue -> body + * SpanContext -> span_id,trace_id and trace_flags + * SpanId -> span_id + * TraceId -> trace_id + * TraceFlags -> trace_flags + * SystemTimestamp -> timestamp + * system_clock::time_point -> timestamp + * KeyValueIterable -> attributes + * Key value iterable container -> attributes + * span> -> attributes(return type of MakeAttributes) + */ + template + void EmitLogRecord(nostd::unique_ptr &&log_record, ArgumentType &&...args) + { + if (!log_record) + { + return; + } + + // + // Keep the parameter pack unpacking order from left to right because left + // ones are usually more important like severity and event_id than the + // attributes. The left to right unpack order could pass the more important + // data to processors to avoid caching and memory allocating. + // +#if __cplusplus <= 201402L + // C++14 does not support fold expressions for parameter pack expansion. + int dummy[] = {(detail::LogRecordSetterTrait::type>::Set( + log_record.get(), std::forward(args)), + 0)...}; + IgnoreTraitResult(dummy); +#else + IgnoreTraitResult((detail::LogRecordSetterTrait::type>::Set( + log_record.get(), std::forward(args)), + ...)); +#endif + + EmitLogRecord(std::move(log_record)); + } + + /** + * Emit a Log Record object with arguments + * + * @param args Arguments which can be used to set data of log record by type. 
+ * Severity -> severity, severity_text + * string_view -> body + * AttributeValue -> body + * SpanContext -> span_id,trace_id and trace_flags + * SpanId -> span_id + * TraceId -> trace_id + * TraceFlags -> trace_flags + * SystemTimestamp -> timestamp + * system_clock::time_point -> timestamp + * KeyValueIterable -> attributes + * Key value iterable container -> attributes + * span> -> attributes(return type of MakeAttributes) + */ + template + void EmitLogRecord(ArgumentType &&...args) + { + nostd::unique_ptr log_record = CreateLogRecord(); + + EmitLogRecord(std::move(log_record), std::forward(args)...); + } + + /** + * Writes a log with a severity of trace. + * @param args Arguments which can be used to set data of log record by type. + * string_view -> body + * AttributeValue -> body + * SpanContext -> span_id,trace_id and trace_flags + * SpanId -> span_id + * TraceId -> trace_id + * TraceFlags -> trace_flags + * SystemTimestamp -> timestamp + * system_clock::time_point -> timestamp + * KeyValueIterable -> attributes + * Key value iterable container -> attributes + * span> -> attributes(return type of MakeAttributes) + */ + template + void Trace(ArgumentType &&...args) noexcept + { + static_assert( + !detail::LogRecordHasType::type...>::value, + "Severity is already set."); + this->EmitLogRecord(Severity::kTrace, std::forward(args)...); + } + + /** + * Writes a log with a severity of debug. + * @param args Arguments which can be used to set data of log record by type. + * string_view -> body + * AttributeValue -> body + * SpanContext -> span_id,trace_id and trace_flags + * SpanId -> span_id + * TraceId -> trace_id + * TraceFlags -> trace_flags + * SystemTimestamp -> timestamp + * system_clock::time_point -> timestamp + * KeyValueIterable -> attributes + * Key value iterable container -> attributes + * span> -> attributes(return type of MakeAttributes) + */ + template + void Debug(ArgumentType &&...args) noexcept + { + static_assert( + !detail::LogRecordHasType::type...>::value, + "Severity is already set."); + this->EmitLogRecord(Severity::kDebug, std::forward(args)...); + } + + /** + * Writes a log with a severity of info. + * @param args Arguments which can be used to set data of log record by type. + * string_view -> body + * AttributeValue -> body + * SpanContext -> span_id,trace_id and trace_flags + * SpanId -> span_id + * TraceId -> trace_id + * TraceFlags -> trace_flags + * SystemTimestamp -> timestamp + * system_clock::time_point -> timestamp + * KeyValueIterable -> attributes + * Key value iterable container -> attributes + * span> -> attributes(return type of MakeAttributes) + */ + template + void Info(ArgumentType &&...args) noexcept + { + static_assert( + !detail::LogRecordHasType::type...>::value, + "Severity is already set."); + this->EmitLogRecord(Severity::kInfo, std::forward(args)...); + } + + /** + * Writes a log with a severity of warn. + * @param args Arguments which can be used to set data of log record by type. 
+ * string_view -> body + * AttributeValue -> body + * SpanContext -> span_id,trace_id and trace_flags + * SpanId -> span_id + * TraceId -> trace_id + * TraceFlags -> trace_flags + * SystemTimestamp -> timestamp + * system_clock::time_point -> timestamp + * KeyValueIterable -> attributes + * Key value iterable container -> attributes + * span> -> attributes(return type of MakeAttributes) + */ + template + void Warn(ArgumentType &&...args) noexcept + { + static_assert( + !detail::LogRecordHasType::type...>::value, + "Severity is already set."); + this->EmitLogRecord(Severity::kWarn, std::forward(args)...); + } + + /** + * Writes a log with a severity of error. + * @param args Arguments which can be used to set data of log record by type. + * string_view -> body + * AttributeValue -> body + * SpanContext -> span_id,trace_id and trace_flags + * SpanId -> span_id + * TraceId -> trace_id + * TraceFlags -> trace_flags + * SystemTimestamp -> timestamp + * system_clock::time_point -> timestamp + * KeyValueIterable -> attributes + * Key value iterable container -> attributes + * span> -> attributes(return type of MakeAttributes) + */ + template + void Error(ArgumentType &&...args) noexcept + { + static_assert( + !detail::LogRecordHasType::type...>::value, + "Severity is already set."); + this->EmitLogRecord(Severity::kError, std::forward(args)...); + } + + /** + * Writes a log with a severity of fatal. + * @param args Arguments which can be used to set data of log record by type. + * string_view -> body + * AttributeValue -> body + * SpanContext -> span_id,trace_id and trace_flags + * SpanId -> span_id + * TraceId -> trace_id + * TraceFlags -> trace_flags + * SystemTimestamp -> timestamp + * system_clock::time_point -> timestamp + * KeyValueIterable -> attributes + * Key value iterable container -> attributes + * span> -> attributes(return type of MakeAttributes) + */ + template + void Fatal(ArgumentType &&...args) noexcept + { + static_assert( + !detail::LogRecordHasType::type...>::value, + "Severity is already set."); + this->EmitLogRecord(Severity::kFatal, std::forward(args)...); + } + + // + // OpenTelemetry C++ user-facing Logs API + // + + inline bool Enabled(Severity severity, const EventId &event_id) const noexcept + { + if OPENTELEMETRY_LIKELY_CONDITION (!Enabled(severity)) + { + return false; + } + return EnabledImplementation(severity, event_id); + } + + inline bool Enabled(Severity severity, int64_t event_id) const noexcept + { + if OPENTELEMETRY_LIKELY_CONDITION (!Enabled(severity)) + { + return false; + } + return EnabledImplementation(severity, event_id); + } + + inline bool Enabled(Severity severity) const noexcept + { + return static_cast(severity) >= OPENTELEMETRY_ATOMIC_READ_8(&minimum_severity_); + } + + /** + * Log an event + * + * @severity severity of the log + * @event_id event identifier of the log + * @format an utf-8 string following https://messagetemplates.org/ + * @attributes key value pairs of the log + */ + virtual void Log(Severity severity, + const EventId &event_id, + nostd::string_view format, + const common::KeyValueIterable &attributes) noexcept + { + this->EmitLogRecord(severity, event_id, format, attributes); + } + + virtual void Log(Severity severity, + int64_t event_id, + nostd::string_view format, + const common::KeyValueIterable &attributes) noexcept + { + this->EmitLogRecord(severity, EventId{event_id}, format, attributes); + } + + virtual void Log(Severity severity, + nostd::string_view format, + const common::KeyValueIterable &attributes) noexcept + { 
+ this->EmitLogRecord(severity, format, attributes); + } + + virtual void Log(Severity severity, nostd::string_view message) noexcept + { + this->EmitLogRecord(severity, message); + } + + // Convenient wrappers based on virtual methods Log(). + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#field-severitynumber + + inline void Trace(const EventId &event_id, + nostd::string_view format, + const common::KeyValueIterable &attributes) noexcept + { + this->Log(Severity::kTrace, event_id, format, attributes); + } + + inline void Trace(int64_t event_id, + nostd::string_view format, + const common::KeyValueIterable &attributes) noexcept + { + this->Log(Severity::kTrace, EventId{event_id}, format, attributes); + } + + inline void Trace(nostd::string_view format, const common::KeyValueIterable &attributes) noexcept + { + this->Log(Severity::kTrace, format, attributes); + } + + inline void Trace(nostd::string_view message) noexcept { this->Log(Severity::kTrace, message); } + + inline void Debug(const EventId &event_id, + nostd::string_view format, + const common::KeyValueIterable &attributes) noexcept + { + this->Log(Severity::kDebug, event_id, format, attributes); + } + + inline void Debug(int64_t event_id, + nostd::string_view format, + const common::KeyValueIterable &attributes) noexcept + { + this->Log(Severity::kDebug, EventId{event_id}, format, attributes); + } + + inline void Debug(nostd::string_view format, const common::KeyValueIterable &attributes) noexcept + { + this->Log(Severity::kDebug, format, attributes); + } + + inline void Debug(nostd::string_view message) noexcept { this->Log(Severity::kDebug, message); } + + inline void Info(const EventId &event_id, + nostd::string_view format, + const common::KeyValueIterable &attributes) noexcept + { + this->Log(Severity::kInfo, event_id, format, attributes); + } + + inline void Info(int64_t event_id, + nostd::string_view format, + const common::KeyValueIterable &attributes) noexcept + { + this->Log(Severity::kInfo, EventId{event_id}, format, attributes); + } + + inline void Info(nostd::string_view format, const common::KeyValueIterable &attributes) noexcept + { + this->Log(Severity::kInfo, format, attributes); + } + + inline void Info(nostd::string_view message) noexcept { this->Log(Severity::kInfo, message); } + + inline void Warn(const EventId &event_id, + nostd::string_view format, + const common::KeyValueIterable &attributes) noexcept + { + this->Log(Severity::kWarn, event_id, format, attributes); + } + + inline void Warn(int64_t event_id, + nostd::string_view format, + const common::KeyValueIterable &attributes) noexcept + { + this->Log(Severity::kWarn, EventId{event_id}, format, attributes); + } + + inline void Warn(nostd::string_view format, const common::KeyValueIterable &attributes) noexcept + { + this->Log(Severity::kWarn, format, attributes); + } + + inline void Warn(nostd::string_view message) noexcept { this->Log(Severity::kWarn, message); } + + inline void Error(const EventId &event_id, + nostd::string_view format, + const common::KeyValueIterable &attributes) noexcept + { + this->Log(Severity::kError, event_id, format, attributes); + } + + inline void Error(int64_t event_id, + nostd::string_view format, + const common::KeyValueIterable &attributes) noexcept + { + this->Log(Severity::kError, EventId{event_id}, format, attributes); + } + + inline void Error(nostd::string_view format, const common::KeyValueIterable &attributes) noexcept + { + this->Log(Severity::kError, 
format, attributes); + } + + inline void Error(nostd::string_view message) noexcept { this->Log(Severity::kError, message); } + + inline void Fatal(const EventId &event_id, + nostd::string_view format, + const common::KeyValueIterable &attributes) noexcept + { + this->Log(Severity::kFatal, event_id, format, attributes); + } + + inline void Fatal(int64_t event_id, + nostd::string_view format, + const common::KeyValueIterable &attributes) noexcept + { + this->Log(Severity::kFatal, EventId{event_id}, format, attributes); + } + + inline void Fatal(nostd::string_view format, const common::KeyValueIterable &attributes) noexcept + { + this->Log(Severity::kFatal, format, attributes); + } + + inline void Fatal(nostd::string_view message) noexcept { this->Log(Severity::kFatal, message); } + + // + // End of OpenTelemetry C++ user-facing Log API. + // + +protected: + // TODO: discuss with community about naming for internal methods. + virtual bool EnabledImplementation(Severity /*severity*/, + const EventId & /*event_id*/) const noexcept + { + return false; + } + + virtual bool EnabledImplementation(Severity /*severity*/, int64_t /*event_id*/) const noexcept + { + return false; + } + + void SetMinimumSeverity(uint8_t severity_or_max) noexcept + { + OPENTELEMETRY_ATOMIC_WRITE_8(&minimum_severity_, severity_or_max); + } + +private: + template + void IgnoreTraitResult(ValueType &&...) + {} + + // + // minimum_severity_ can be updated concurrently by multiple threads/cores, so race condition on + // read/write should be handled. And std::atomic can not be used here because it is not ABI + // compatible for OpenTelemetry C++ API. + // + mutable uint8_t minimum_severity_{kMaxSeverity}; +}; +} // namespace logs +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/logger_provider.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/logger_provider.h new file mode 100644 index 000000000..ab1493c5c --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/logger_provider.h @@ -0,0 +1,71 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "opentelemetry/common/key_value_iterable.h" +#include "opentelemetry/common/key_value_iterable_view.h" +#include "opentelemetry/nostd/shared_ptr.h" +#include "opentelemetry/nostd/span.h" +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/nostd/type_traits.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace logs +{ + +class Logger; + +/** + * Creates new Logger instances. + */ +class OPENTELEMETRY_EXPORT LoggerProvider +{ +public: + virtual ~LoggerProvider() = default; + + /** + * Gets or creates a named Logger instance. + * + * Optionally a version can be passed to create a named and versioned Logger + * instance. + * + * Optionally a configuration file name can be passed to create a configuration for + * the Logger instance. 
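Editor's note: the user-facing Logger API above routes argument types onto log record fields and offers per-severity convenience wrappers, while the LoggerProvider below hands out Logger instances. A minimal usage sketch follows; it is not part of the vendored headers, the logger/library names, version and attribute keys are purely illustrative, and it assumes a std::map is accepted as an attribute container as the type-trait mapping above describes.

```cpp
#include <map>
#include <string>

#include "opentelemetry/logs/provider.h"

namespace logs  = opentelemetry::logs;
namespace nostd = opentelemetry::nostd;

void log_startup_example()
{
  // Globally installed LoggerProvider (a no-op provider until an SDK sets one).
  nostd::shared_ptr<logs::LoggerProvider> provider = logs::Provider::GetLoggerProvider();

  // logger_name / library_name / library_version here are illustrative values.
  nostd::shared_ptr<logs::Logger> logger =
      provider->GetLogger("example_logger", "example_library", "1.0.0");

  // Body-only record via the severity convenience wrapper.
  logger->Info("service started");

  // Variadic form: body plus a key/value container that becomes attributes.
  logger->Warn("low disk space",
               std::map<std::string, std::string>{{"mount", "/var/lib"}, {"free_mb", "128"}});
}
```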
+ * + */ + + virtual nostd::shared_ptr GetLogger( + nostd::string_view logger_name, + nostd::string_view library_name = "", + nostd::string_view library_version = "", + nostd::string_view schema_url = "", + const common::KeyValueIterable &attributes = common::NoopKeyValueIterable()) = 0; + + nostd::shared_ptr GetLogger( + nostd::string_view logger_name, + nostd::string_view library_name, + nostd::string_view library_version, + nostd::string_view schema_url, + std::initializer_list> attributes) + { + return GetLogger(logger_name, library_name, library_version, schema_url, + nostd::span>{ + attributes.begin(), attributes.end()}); + } + + template ::value> * = nullptr> + nostd::shared_ptr GetLogger(nostd::string_view logger_name, + nostd::string_view library_name, + nostd::string_view library_version, + nostd::string_view schema_url, + const T &attributes) + { + return GetLogger(logger_name, library_name, library_version, schema_url, + common::KeyValueIterableView(attributes)); + } +}; +} // namespace logs +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/logger_type_traits.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/logger_type_traits.h new file mode 100644 index 000000000..d88a6ffbd --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/logger_type_traits.h @@ -0,0 +1,204 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include +#include + +#include "opentelemetry/common/attribute_value.h" +#include "opentelemetry/common/key_value_iterable.h" +#include "opentelemetry/common/timestamp.h" +#include "opentelemetry/logs/event_id.h" +#include "opentelemetry/logs/log_record.h" +#include "opentelemetry/logs/severity.h" +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/nostd/type_traits.h" +#include "opentelemetry/trace/span_context.h" +#include "opentelemetry/trace/span_id.h" +#include "opentelemetry/trace/trace_flags.h" +#include "opentelemetry/trace/trace_id.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace logs +{ +namespace detail +{ +template +struct LogRecordSetterTrait; + +template <> +struct LogRecordSetterTrait +{ + template + inline static LogRecord *Set(LogRecord *log_record, ArgumentType &&arg) noexcept + { + log_record->SetSeverity(std::forward(arg)); + + return log_record; + } +}; + +template <> +struct LogRecordSetterTrait +{ + template + inline static LogRecord *Set(LogRecord *log_record, ArgumentType &&arg) noexcept + { + log_record->SetEventId(arg.id_, nostd::string_view{arg.name_.get()}); + + return log_record; + } +}; + +template <> +struct LogRecordSetterTrait +{ + template + inline static LogRecord *Set(LogRecord *log_record, ArgumentType &&arg) noexcept + { + log_record->SetSpanId(arg.span_id()); + log_record->SetTraceId(arg.trace_id()); + log_record->SetTraceFlags(arg.trace_flags()); + + return log_record; + } +}; + +template <> +struct LogRecordSetterTrait +{ + template + inline static LogRecord *Set(LogRecord *log_record, ArgumentType &&arg) noexcept + { + log_record->SetSpanId(std::forward(arg)); + + return log_record; + } +}; + +template <> +struct LogRecordSetterTrait +{ + template + inline static LogRecord *Set(LogRecord *log_record, ArgumentType &&arg) noexcept + { + log_record->SetTraceId(std::forward(arg)); + + return log_record; + } +}; + +template <> +struct LogRecordSetterTrait +{ + template + inline static LogRecord *Set(LogRecord *log_record, 
ArgumentType &&arg) noexcept + { + log_record->SetTraceFlags(std::forward(arg)); + + return log_record; + } +}; + +template <> +struct LogRecordSetterTrait +{ + template + inline static LogRecord *Set(LogRecord *log_record, ArgumentType &&arg) noexcept + { + log_record->SetTimestamp(std::forward(arg)); + + return log_record; + } +}; + +template <> +struct LogRecordSetterTrait +{ + template + inline static LogRecord *Set(LogRecord *log_record, ArgumentType &&arg) noexcept + { + log_record->SetTimestamp(common::SystemTimestamp(std::forward(arg))); + + return log_record; + } +}; + +template <> +struct LogRecordSetterTrait +{ + template + inline static LogRecord *Set(LogRecord *log_record, ArgumentType &&arg) noexcept + { + arg.ForEachKeyValue( + [&log_record](nostd::string_view key, common::AttributeValue value) noexcept { + log_record->SetAttribute(key, value); + return true; + }); + + return log_record; + } +}; + +template +struct LogRecordSetterTrait +{ + static_assert(!std::is_same, ValueType>::value && + !std::is_same, ValueType>::value, + "unique_ptr is not allowed, please use std::move()"); + + template ::value || + std::is_convertible::value, + void> * = nullptr> + inline static LogRecord *Set(LogRecord *log_record, ArgumentType &&arg) noexcept + { + log_record->SetBody(std::forward(arg)); + + return log_record; + } + + template ::value, bool> + * = nullptr> + inline static LogRecord *Set(LogRecord *log_record, ArgumentType &&arg) noexcept + { + return LogRecordSetterTrait::Set(log_record, + std::forward(arg)); + } + + template ::value, int> * = + nullptr> + inline static LogRecord *Set(LogRecord *log_record, ArgumentType &&arg) noexcept + { + for (auto &argv : arg) + { + log_record->SetAttribute(argv.first, argv.second); + } + + return log_record; + } +}; + +template +struct LogRecordHasType; + +template +struct LogRecordHasType : public std::false_type +{}; + +template +struct LogRecordHasType + : public std::conditional::value, + std::true_type, + LogRecordHasType>::type +{}; + +} // namespace detail + +} // namespace logs +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/noop.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/noop.h new file mode 100644 index 000000000..24312f061 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/noop.h @@ -0,0 +1,131 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +// Please refer to provider.h for documentation on how to obtain a Logger object. +// +// This file is part of the internal implementation of OpenTelemetry. Nothing in this file should be +// used directly. Please refer to logger.h for documentation on these interfaces. + +#include "opentelemetry/logs/event_logger.h" +#include "opentelemetry/logs/event_logger_provider.h" +#include "opentelemetry/logs/logger.h" +#include "opentelemetry/logs/logger_provider.h" +#include "opentelemetry/nostd/shared_ptr.h" +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/nostd/unique_ptr.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace common +{ +class KeyValueIterable; +} // namespace common + +namespace logs +{ +/** + * No-op implementation of Logger. This class should not be used directly. It should only be + * instantiated using a LoggerProvider's GetLogger() call. 
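Editor's note: the setter traits above are what make the variadic EmitLogRecord overloads work; each argument is dispatched to the matching LogRecord setter by its decayed type. A small sketch of a mixed-argument call (illustrative only; the body text and attribute key are made up):

```cpp
#include <chrono>
#include <map>
#include <string>

#include "opentelemetry/logs/logger.h"

namespace logs = opentelemetry::logs;

void emit_detailed_record_example(logs::Logger &logger)
{
  logger.EmitLogRecord(
      logs::Severity::kError,                                 // -> SetSeverity
      "tcp fallback relay unreachable",                       // -> SetBody
      std::chrono::system_clock::now(),                       // -> SetTimestamp
      std::map<std::string, std::string>{{"peer", "abcd"}});  // -> SetAttribute per pair
}
```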
+ */ +class NoopLogger final : public Logger +{ +public: + const nostd::string_view GetName() noexcept override { return "noop logger"; } + + nostd::unique_ptr CreateLogRecord() noexcept override + { + /* + * Do not return memory shared between threads, + * a `new` + `delete` for each noop record can not be avoided, + * due to the semantic of unique_ptr. + */ + return nostd::unique_ptr(new NoopLogRecord()); + } + + using Logger::EmitLogRecord; + + void EmitLogRecord(nostd::unique_ptr &&) noexcept override {} + +private: + class NoopLogRecord : public LogRecord + { + public: + NoopLogRecord() = default; + ~NoopLogRecord() override = default; + + void SetTimestamp(common::SystemTimestamp /* timestamp */) noexcept override {} + void SetObservedTimestamp(common::SystemTimestamp /* timestamp */) noexcept override {} + void SetSeverity(logs::Severity /* severity */) noexcept override {} + void SetBody(const common::AttributeValue & /* message */) noexcept override {} + void SetAttribute(nostd::string_view /* key */, + const common::AttributeValue & /* value */) noexcept override + {} + void SetEventId(int64_t /* id */, nostd::string_view /* name */) noexcept override {} + void SetTraceId(const trace::TraceId & /* trace_id */) noexcept override {} + void SetSpanId(const trace::SpanId & /* span_id */) noexcept override {} + void SetTraceFlags(const trace::TraceFlags & /* trace_flags */) noexcept override {} + }; +}; + +/** + * No-op implementation of a LoggerProvider. + */ +class NoopLoggerProvider final : public LoggerProvider +{ +public: + NoopLoggerProvider() : logger_{nostd::shared_ptr(new NoopLogger())} {} + + nostd::shared_ptr GetLogger(nostd::string_view /* logger_name */, + nostd::string_view /* library_name */, + nostd::string_view /* library_version */, + nostd::string_view /* schema_url */, + const common::KeyValueIterable & /* attributes */) override + { + return logger_; + } + +private: + nostd::shared_ptr logger_; +}; + +#if OPENTELEMETRY_ABI_VERSION_NO < 2 +class NoopEventLogger final : public EventLogger +{ +public: + NoopEventLogger() : logger_{nostd::shared_ptr(new NoopLogger())} {} + + const nostd::string_view GetName() noexcept override { return "noop event logger"; } + + nostd::shared_ptr GetDelegateLogger() noexcept override { return logger_; } + + void EmitEvent(nostd::string_view, nostd::unique_ptr &&) noexcept override {} + +private: + nostd::shared_ptr logger_; +}; + +/** + * No-op implementation of a EventLoggerProvider. 
+ */ +class NoopEventLoggerProvider final : public EventLoggerProvider +{ +public: + NoopEventLoggerProvider() : event_logger_{nostd::shared_ptr(new NoopEventLogger())} + {} + + nostd::shared_ptr CreateEventLogger( + nostd::shared_ptr /*delegate_logger*/, + nostd::string_view /*event_domain*/) noexcept override + { + return event_logger_; + } + +private: + nostd::shared_ptr event_logger_; +}; +#endif + +} // namespace logs +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/provider.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/provider.h new file mode 100644 index 000000000..0c1b4deea --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/provider.h @@ -0,0 +1,100 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include + +#include "opentelemetry/common/macros.h" +#include "opentelemetry/common/spin_lock_mutex.h" +#include "opentelemetry/logs/noop.h" +#include "opentelemetry/nostd/shared_ptr.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace logs +{ + +#if OPENTELEMETRY_ABI_VERSION_NO < 2 +class EventLoggerProvider; +#endif +class LoggerProvider; + +/** + * Stores the singleton global LoggerProvider. + */ +class OPENTELEMETRY_EXPORT Provider +{ +public: + /** + * Returns the singleton LoggerProvider. + * + * By default, a no-op LoggerProvider is returned. This will never return a + * nullptr LoggerProvider. + */ + static nostd::shared_ptr GetLoggerProvider() noexcept + { + std::lock_guard guard(GetLock()); + return nostd::shared_ptr(GetProvider()); + } + + /** + * Changes the singleton LoggerProvider. + */ + static void SetLoggerProvider(const nostd::shared_ptr &tp) noexcept + { + std::lock_guard guard(GetLock()); + GetProvider() = tp; + } + +#if OPENTELEMETRY_ABI_VERSION_NO < 2 + /** + * Returns the singleton EventLoggerProvider. + * + * By default, a no-op EventLoggerProvider is returned. This will never return a + * nullptr EventLoggerProvider. + */ + OPENTELEMETRY_DEPRECATED static nostd::shared_ptr + GetEventLoggerProvider() noexcept + { + std::lock_guard guard(GetLock()); + return nostd::shared_ptr(GetEventProvider()); + } + + /** + * Changes the singleton EventLoggerProvider. 
+ */ + OPENTELEMETRY_DEPRECATED static void SetEventLoggerProvider( + const nostd::shared_ptr &tp) noexcept + { + std::lock_guard guard(GetLock()); + GetEventProvider() = tp; + } +#endif + +private: + OPENTELEMETRY_API_SINGLETON static nostd::shared_ptr &GetProvider() noexcept + { + static nostd::shared_ptr provider(new NoopLoggerProvider); + return provider; + } + +#if OPENTELEMETRY_ABI_VERSION_NO < 2 + OPENTELEMETRY_DEPRECATED + OPENTELEMETRY_API_SINGLETON static nostd::shared_ptr & + GetEventProvider() noexcept + { + static nostd::shared_ptr provider(new NoopEventLoggerProvider); + return provider; + } +#endif + + OPENTELEMETRY_API_SINGLETON static common::SpinLockMutex &GetLock() noexcept + { + static common::SpinLockMutex lock; + return lock; + } +}; + +} // namespace logs +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/severity.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/severity.h new file mode 100644 index 000000000..f4d961615 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/logs/severity.h @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include + +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace logs +{ + +/** + * Severity Levels assigned to log events, based on Log Data Model, + * with the addition of kInvalid (mapped to a severity number of 0). + * + * See + * https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#field-severitynumber + */ +enum class Severity : uint8_t +{ + kInvalid = 0, + kTrace = 1, + kTrace2 = 2, + kTrace3 = 3, + kTrace4 = 4, + kDebug = 5, + kDebug2 = 6, + kDebug3 = 7, + kDebug4 = 8, + kInfo = 9, + kInfo2 = 10, + kInfo3 = 11, + kInfo4 = 12, + kWarn = 13, + kWarn2 = 14, + kWarn3 = 15, + kWarn4 = 16, + kError = 17, + kError2 = 18, + kError3 = 19, + kError4 = 20, + kFatal = 21, + kFatal2 = 22, + kFatal3 = 23, + kFatal4 = 24 +}; + +const uint8_t kMaxSeverity = 255; + +/** + * Mapping of the severity enum above, to a severity text string (in all caps). + * This severity text can be printed out by exporters. Capital letters follow the + * spec naming convention. + * + * Included to follow the specification's recommendation to print both + * severity number and text in each log record. 
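Editor's note: the table declared just below maps each severity number to its text form. A small sketch of printing both, as the data model recommends; it assumes the enum value stays inside the 0..24 range the table covers and is not part of the vendored header.

```cpp
#include <cstddef>
#include <cstdio>

#include "opentelemetry/logs/severity.h"

namespace logs = opentelemetry::logs;

void print_severity_example(logs::Severity severity)
{
  const std::size_t number = static_cast<std::size_t>(severity);
  if (number < 25)
  {
    const auto text = logs::SeverityNumToText[number];
    std::printf("severity=%zu text=%.*s\n", number,
                static_cast<int>(text.size()), text.data());
  }
}
```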
+ */ +const nostd::string_view SeverityNumToText[25] = { + "INVALID", "TRACE", "TRACE2", "TRACE3", "TRACE4", "DEBUG", "DEBUG2", "DEBUG3", "DEBUG4", + "INFO", "INFO2", "INFO3", "INFO4", "WARN", "WARN2", "WARN3", "WARN4", "ERROR", + "ERROR2", "ERROR3", "ERROR4", "FATAL", "FATAL2", "FATAL3", "FATAL4"}; + +} // namespace logs +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/metrics/async_instruments.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/metrics/async_instruments.h new file mode 100644 index 000000000..f73eb2f51 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/metrics/async_instruments.h @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "opentelemetry/metrics/observer_result.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace metrics +{ + +using ObservableCallbackPtr = void (*)(ObserverResult, void *); + +class ObservableInstrument +{ +public: + ObservableInstrument() = default; + virtual ~ObservableInstrument() = default; + + /** + * Sets up a function that will be called whenever a metric collection is initiated. + */ + virtual void AddCallback(ObservableCallbackPtr, void *state) noexcept = 0; + + /** + * Remove a function that was configured to be called whenever a metric collection is initiated. + */ + virtual void RemoveCallback(ObservableCallbackPtr, void *state) noexcept = 0; +}; + +} // namespace metrics +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/metrics/meter.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/metrics/meter.h new file mode 100644 index 000000000..59cfb8782 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/metrics/meter.h @@ -0,0 +1,174 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "opentelemetry/nostd/shared_ptr.h" +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/nostd/unique_ptr.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace metrics +{ + +template +class Counter; + +template +class Histogram; + +template +class UpDownCounter; + +template +class Gauge; + +class ObservableInstrument; + +/** + * Handles instrument creation and provides a facility for batch recording. + * + * This class provides methods to create new metric instruments, record a + * batch of values to a specified set of instruments, and collect + * measurements from all instruments. + * + */ +class Meter +{ +public: + virtual ~Meter() = default; + + /** + * Creates a Counter with the passed characteristics and returns a unique_ptr to that Counter. + * + * @param name the name of the new Counter. + * @param description a brief description of what the Counter is used for. + * @param unit the unit of metric values following https://unitsofmeasure.org/ucum.html. + * @return a shared pointer to the created Counter. 
+ */ + + virtual nostd::unique_ptr> CreateUInt64Counter( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept = 0; + + virtual nostd::unique_ptr> CreateDoubleCounter( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept = 0; + + /** + * Creates a Asynchronous (Observable) counter with the passed characteristics and returns a + * shared_ptr to that Observable Counter + * + * @param name the name of the new Observable Counter. + * @param description a brief description of what the Observable Counter is used for. + * @param unit the unit of metric values following https://unitsofmeasure.org/ucum.html. + */ + virtual nostd::shared_ptr CreateInt64ObservableCounter( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept = 0; + + virtual nostd::shared_ptr CreateDoubleObservableCounter( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept = 0; + + /** + * Creates a Histogram with the passed characteristics and returns a unique_ptr to that Histogram. + * + * @param name the name of the new Histogram. + * @param description a brief description of what the Histogram is used for. + * @param unit the unit of metric values following https://unitsofmeasure.org/ucum.html. + * @return a shared pointer to the created Histogram. + */ + virtual nostd::unique_ptr> CreateUInt64Histogram( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept = 0; + + virtual nostd::unique_ptr> CreateDoubleHistogram( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept = 0; + +#if OPENTELEMETRY_ABI_VERSION_NO >= 2 + /** + * Creates a Gauge with the passed characteristics and returns a unique_ptr to that Gauge. + * + * @param name the name of the new Gauge. + * @param description a brief description of what the Gauge is used for. + * @param unit the unit of metric values following https://unitsofmeasure.org/ucum.html. + * @return a unique pointer to the created Gauge. + */ + + virtual nostd::unique_ptr> CreateInt64Gauge( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept = 0; + + virtual nostd::unique_ptr> CreateDoubleGauge( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept = 0; +#endif + + /** + * Creates a Asynchronous (Observable) Gauge with the passed characteristics and returns a + * shared_ptr to that Observable Gauge + * + * @param name the name of the new Observable Gauge. + * @param description a brief description of what the Observable Gauge is used for. + * @param unit the unit of metric values following https://unitsofmeasure.org/ucum.html. + */ + virtual nostd::shared_ptr CreateInt64ObservableGauge( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept = 0; + + virtual nostd::shared_ptr CreateDoubleObservableGauge( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept = 0; + + /** + * Creates an UpDownCounter with the passed characteristics and returns a unique_ptr to that + * UpDownCounter. + * + * @param name the name of the new UpDownCounter. + * @param description a brief description of what the UpDownCounter is used for. 
+ * @param unit the unit of metric values following https://unitsofmeasure.org/ucum.html. + * @return a shared pointer to the created UpDownCounter. + */ + virtual nostd::unique_ptr> CreateInt64UpDownCounter( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept = 0; + + virtual nostd::unique_ptr> CreateDoubleUpDownCounter( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept = 0; + + /** + * Creates a Asynchronous (Observable) UpDownCounter with the passed characteristics and returns + * a shared_ptr to that Observable UpDownCounter + * + * @param name the name of the new Observable UpDownCounter. + * @param description a brief description of what the Observable UpDownCounter is used for. + * @param unit the unit of metric values following https://unitsofmeasure.org/ucum.html. + */ + virtual nostd::shared_ptr CreateInt64ObservableUpDownCounter( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept = 0; + + virtual nostd::shared_ptr CreateDoubleObservableUpDownCounter( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept = 0; +}; +} // namespace metrics +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/metrics/meter_provider.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/metrics/meter_provider.h new file mode 100644 index 000000000..fca960bb8 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/metrics/meter_provider.h @@ -0,0 +1,148 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "opentelemetry/common/key_value_iterable.h" +#include "opentelemetry/common/key_value_iterable_view.h" +#include "opentelemetry/nostd/shared_ptr.h" +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/nostd/type_traits.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace metrics +{ + +class Meter; + +/** + * Creates new Meter instances. + */ +class MeterProvider +{ +public: + virtual ~MeterProvider() = default; + +#if OPENTELEMETRY_ABI_VERSION_NO >= 2 + + /** + * Gets or creates a named Meter instance (ABI). + * + * @since ABI_VERSION 2 + * + * @param[in] name Meter instrumentation scope + * @param[in] version Instrumentation scope version + * @param[in] schema_url Instrumentation scope schema URL + * @param[in] attributes Instrumentation scope attributes (optional, may be nullptr) + */ + virtual nostd::shared_ptr GetMeter( + nostd::string_view name, + nostd::string_view version, + nostd::string_view schema_url, + const common::KeyValueIterable *attributes) noexcept = 0; + + /** + * Gets or creates a named Meter instance (API helper). + * + * @since ABI_VERSION 2 + * + * @param[in] name Meter instrumentation scope + * @param[in] version Instrumentation scope version, optional + * @param[in] schema_url Instrumentation scope schema URL, optional + */ + nostd::shared_ptr GetMeter(nostd::string_view name, + nostd::string_view version = "", + nostd::string_view schema_url = "") + { + return GetMeter(name, version, schema_url, nullptr); + } + + /** + * Gets or creates a named Meter instance (API helper). 
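Editor's note: a sketch tying the Meter factory methods above to the MeterProvider below. It obtains a meter from the global metrics::Provider singleton (declared later in this diff), creates two synchronous instruments, and records values; instrument names, units and attribute keys are illustrative, and the map-based attribute overloads are assumed to behave as documented in sync_instruments.h.

```cpp
#include <cstdint>
#include <map>
#include <string>

#include "opentelemetry/context/context.h"
#include "opentelemetry/metrics/provider.h"
#include "opentelemetry/metrics/sync_instruments.h"

namespace metrics = opentelemetry::metrics;
namespace nostd   = opentelemetry::nostd;

void record_packet_example(uint64_t packet_bytes)
{
  // Global MeterProvider; a NoopMeterProvider until an SDK provider is installed.
  auto provider = metrics::Provider::GetMeterProvider();

  // Scope name and version are illustrative.
  nostd::shared_ptr<metrics::Meter> meter = provider->GetMeter("example_scope", "1.0.0");

  // Synchronous instruments come back as unique_ptr; real code would cache them
  // instead of re-creating them for every measurement.
  auto packets = meter->CreateUInt64Counter("example.packets", "packets handled", "{packet}");
  auto sizes   = meter->CreateUInt64Histogram("example.packet.size", "packet size", "By");

  packets->Add(1, std::map<std::string, std::string>{{"direction", "rx"}});
  sizes->Record(packet_bytes, std::map<std::string, std::string>{{"direction", "rx"}},
                opentelemetry::context::Context{});
}
```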
+ * + * @since ABI_VERSION 2 + * + * @param[in] name Meter instrumentation scope + * @param[in] version Instrumentation scope version + * @param[in] schema_url Instrumentation scope schema URL + * @param[in] attributes Instrumentation scope attributes + */ + nostd::shared_ptr GetMeter( + nostd::string_view name, + nostd::string_view version, + nostd::string_view schema_url, + std::initializer_list> attributes) + { + /* Build a container from std::initializer_list. */ + nostd::span> attributes_span{ + attributes.begin(), attributes.end()}; + + /* Build a view on the container. */ + common::KeyValueIterableView< + nostd::span>> + iterable_attributes{attributes_span}; + + /* Add attributes using the view. */ + return GetMeter(name, version, schema_url, &iterable_attributes); + } + + /** + * Gets or creates a named Meter instance (API helper). + * + * @since ABI_VERSION 2 + * + * @param[in] name Meter instrumentation scope + * @param[in] version Instrumentation scope version + * @param[in] schema_url Instrumentation scope schema URL + * @param[in] attributes Instrumentation scope attributes container + */ + template ::value> * = nullptr> + nostd::shared_ptr GetMeter(nostd::string_view name, + nostd::string_view version, + nostd::string_view schema_url, + const T &attributes) + { + /* Build a view on the container. */ + common::KeyValueIterableView iterable_attributes(attributes); + + /* Add attributes using the view. */ + return GetMeter(name, version, schema_url, &iterable_attributes); + } + +#else + /** + * Gets or creates a named Meter instance (ABI) + * + * @since ABI_VERSION 1 + * + * @param[in] name Meter instrumentation scope + * @param[in] version Instrumentation scope version, optional + * @param[in] schema_url Instrumentation scope schema URL, optional + */ + virtual nostd::shared_ptr GetMeter(nostd::string_view name, + nostd::string_view version = "", + nostd::string_view schema_url = "") noexcept = 0; +#endif + +#if OPENTELEMETRY_ABI_VERSION_NO >= 2 + /** + * Remove a named Meter instance (ABI). 
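Editor's note: when built with ABI version 2, the initializer_list helper shown above lets callers attach instrumentation-scope attributes without constructing a KeyValueIterable themselves. A sketch; the scope name, version, schema URL and attribute values are illustrative.

```cpp
#include "opentelemetry/metrics/meter_provider.h"
#include "opentelemetry/nostd/shared_ptr.h"

namespace metrics = opentelemetry::metrics;
namespace nostd   = opentelemetry::nostd;

#if OPENTELEMETRY_ABI_VERSION_NO >= 2
nostd::shared_ptr<metrics::Meter> get_scoped_meter_example(metrics::MeterProvider &provider)
{
  // The initializer_list overload wraps these pairs into a KeyValueIterableView
  // before calling the pure-virtual GetMeter.
  return provider.GetMeter("example_scope", "1.0.0", "https://example.com/schema",
                           {{"deployment.environment", "test"}});
}
#endif
```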
+ * + * This API is experimental, see + * https://github.com/open-telemetry/opentelemetry-specification/issues/2232 + * + * @since ABI_VERSION 2 + * + * @param[in] name Meter instrumentation scope + * @param[in] version Instrumentation scope version, optional + * @param[in] schema_url Instrumentation scope schema URL, optional + */ + virtual void RemoveMeter(nostd::string_view name, + nostd::string_view version = "", + nostd::string_view schema_url = "") noexcept = 0; +#endif +}; +} // namespace metrics +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/metrics/noop.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/metrics/noop.h new file mode 100644 index 000000000..1d508b938 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/metrics/noop.h @@ -0,0 +1,271 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "opentelemetry/metrics/async_instruments.h" +#include "opentelemetry/metrics/meter.h" +#include "opentelemetry/metrics/meter_provider.h" +#include "opentelemetry/metrics/observer_result.h" +#include "opentelemetry/metrics/sync_instruments.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace metrics +{ + +template +class NoopCounter : public Counter +{ +public: + NoopCounter(nostd::string_view /* name */, + nostd::string_view /* description */, + nostd::string_view /* unit */) noexcept + {} + void Add(T /* value */) noexcept override {} + void Add(T /* value */, const context::Context & /* context */) noexcept override {} + void Add(T /* value */, const common::KeyValueIterable & /* attributes */) noexcept override {} + void Add(T /* value */, + const common::KeyValueIterable & /* attributes */, + const context::Context & /* context */) noexcept override + {} +}; + +template +class NoopHistogram : public Histogram +{ +public: + NoopHistogram(nostd::string_view /* name */, + nostd::string_view /* description */, + nostd::string_view /* unit */) noexcept + {} + void Record(T /* value */, const context::Context & /* context */) noexcept override {} + void Record(T /* value */, + const common::KeyValueIterable & /* attributes */, + const context::Context & /* context */) noexcept override + {} +#if OPENTELEMETRY_ABI_VERSION_NO >= 2 + void Record(T /*value*/, + const opentelemetry::common::KeyValueIterable & /*attributes*/) noexcept override + {} + + void Record(T /*value*/) noexcept override {} +#endif +}; + +template +class NoopUpDownCounter : public UpDownCounter +{ +public: + NoopUpDownCounter(nostd::string_view /* name */, + nostd::string_view /* description */, + nostd::string_view /* unit */) noexcept + {} + ~NoopUpDownCounter() override = default; + void Add(T /* value */) noexcept override {} + void Add(T /* value */, const context::Context & /* context */) noexcept override {} + void Add(T /* value */, const common::KeyValueIterable & /* attributes */) noexcept override {} + void Add(T /* value */, + const common::KeyValueIterable & /* attributes */, + const context::Context & /* context */) noexcept override + {} +}; + +#if OPENTELEMETRY_ABI_VERSION_NO >= 2 +template +class NoopGauge : public Gauge +{ +public: + NoopGauge(nostd::string_view /* name */, + nostd::string_view /* description */, + nostd::string_view /* unit */) noexcept + {} + ~NoopGauge() override = default; + void Record(T /* value */) noexcept override {} + void Record(T /* value */, const context::Context & /* context */) noexcept override {} + void 
Record(T /* value */, const common::KeyValueIterable & /* attributes */) noexcept override {} + void Record(T /* value */, + const common::KeyValueIterable & /* attributes */, + const context::Context & /* context */) noexcept override + {} +}; +#endif + +class NoopObservableInstrument : public ObservableInstrument +{ +public: + NoopObservableInstrument(nostd::string_view /* name */, + nostd::string_view /* description */, + nostd::string_view /* unit */) noexcept + {} + + void AddCallback(ObservableCallbackPtr, void * /* state */) noexcept override {} + void RemoveCallback(ObservableCallbackPtr, void * /* state */) noexcept override {} +}; + +/** + * No-op implementation of Meter. + */ +class NoopMeter final : public Meter +{ +public: + nostd::unique_ptr> CreateUInt64Counter( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept override + { + return nostd::unique_ptr>{new NoopCounter(name, description, unit)}; + } + + nostd::unique_ptr> CreateDoubleCounter( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept override + { + return nostd::unique_ptr>{new NoopCounter(name, description, unit)}; + } + + nostd::shared_ptr CreateInt64ObservableCounter( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept override + { + return nostd::shared_ptr( + new NoopObservableInstrument(name, description, unit)); + } + + nostd::shared_ptr CreateDoubleObservableCounter( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept override + { + return nostd::shared_ptr( + new NoopObservableInstrument(name, description, unit)); + } + + nostd::unique_ptr> CreateUInt64Histogram( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept override + { + return nostd::unique_ptr>{ + new NoopHistogram(name, description, unit)}; + } + + nostd::unique_ptr> CreateDoubleHistogram( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept override + { + return nostd::unique_ptr>{new NoopHistogram(name, description, unit)}; + } + +#if OPENTELEMETRY_ABI_VERSION_NO >= 2 + nostd::unique_ptr> CreateInt64Gauge(nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept override + { + return nostd::unique_ptr>{new NoopGauge(name, description, unit)}; + } + + nostd::unique_ptr> CreateDoubleGauge(nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept override + { + return nostd::unique_ptr>{new NoopGauge(name, description, unit)}; + } +#endif + + nostd::shared_ptr CreateInt64ObservableGauge( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept override + { + return nostd::shared_ptr( + new NoopObservableInstrument(name, description, unit)); + } + + nostd::shared_ptr CreateDoubleObservableGauge( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept override + { + return nostd::shared_ptr( + new NoopObservableInstrument(name, description, unit)); + } + + nostd::unique_ptr> CreateInt64UpDownCounter( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept override + { + return nostd::unique_ptr>{ + new NoopUpDownCounter(name, description, unit)}; + } + + nostd::unique_ptr> 
CreateDoubleUpDownCounter( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept override + { + return nostd::unique_ptr>{ + new NoopUpDownCounter(name, description, unit)}; + } + + nostd::shared_ptr CreateInt64ObservableUpDownCounter( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept override + { + return nostd::shared_ptr( + new NoopObservableInstrument(name, description, unit)); + } + + nostd::shared_ptr CreateDoubleObservableUpDownCounter( + nostd::string_view name, + nostd::string_view description = "", + nostd::string_view unit = "") noexcept override + { + return nostd::shared_ptr( + new NoopObservableInstrument(name, description, unit)); + } +}; + +/** + * No-op implementation of a MeterProvider. + */ +class NoopMeterProvider final : public MeterProvider +{ +public: + NoopMeterProvider() : meter_{nostd::shared_ptr(new NoopMeter)} {} + +#if OPENTELEMETRY_ABI_VERSION_NO >= 2 + nostd::shared_ptr GetMeter( + nostd::string_view /* name */, + nostd::string_view /* version */, + nostd::string_view /* schema_url */, + const common::KeyValueIterable * /* attributes */) noexcept override + { + return meter_; + } +#else + nostd::shared_ptr GetMeter(nostd::string_view /* name */, + nostd::string_view /* version */, + nostd::string_view /* schema_url */) noexcept override + { + return meter_; + } +#endif + +#if OPENTELEMETRY_ABI_VERSION_NO >= 2 + void RemoveMeter(nostd::string_view /* name */, + nostd::string_view /* version */, + nostd::string_view /* schema_url */) noexcept override + {} +#endif + +private: + nostd::shared_ptr meter_; +}; +} // namespace metrics +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/metrics/observer_result.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/metrics/observer_result.h new file mode 100644 index 000000000..56c42bc20 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/metrics/observer_result.h @@ -0,0 +1,54 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "opentelemetry/common/attribute_value.h" +#include "opentelemetry/common/key_value_iterable_view.h" +#include "opentelemetry/nostd/shared_ptr.h" +#include "opentelemetry/nostd/span.h" +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/nostd/type_traits.h" +#include "opentelemetry/nostd/variant.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace metrics +{ + +/** + * ObserverResultT class is necessary for the callback recording asynchronous + * instrument use. 
+ */ +template +class ObserverResultT +{ + +public: + virtual ~ObserverResultT() = default; + + virtual void Observe(T value) noexcept = 0; + + virtual void Observe(T value, const common::KeyValueIterable &attributes) noexcept = 0; + + template ::value> * = nullptr> + void Observe(T value, const U &attributes) noexcept + { + this->Observe(value, common::KeyValueIterableView{attributes}); + } + + void Observe(T value, + std::initializer_list> + attributes) noexcept + { + this->Observe(value, nostd::span>{ + attributes.begin(), attributes.end()}); + } +}; + +using ObserverResult = nostd::variant>, + nostd::shared_ptr>>; + +} // namespace metrics +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/metrics/provider.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/metrics/provider.h new file mode 100644 index 000000000..20a606a77 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/metrics/provider.h @@ -0,0 +1,60 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include + +#include "opentelemetry/common/spin_lock_mutex.h" +#include "opentelemetry/metrics/meter_provider.h" +#include "opentelemetry/metrics/noop.h" +#include "opentelemetry/nostd/shared_ptr.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace metrics +{ + +/** + * Stores the singleton global MeterProvider. + */ +class Provider +{ +public: + /** + * Returns the singleton MeterProvider. + * + * By default, a no-op MeterProvider is returned. This will never return a + * nullptr MeterProvider. + */ + static nostd::shared_ptr GetMeterProvider() noexcept + { + std::lock_guard guard(GetLock()); + return nostd::shared_ptr(GetProvider()); + } + + /** + * Changes the singleton MeterProvider. + */ + static void SetMeterProvider(const nostd::shared_ptr &tp) noexcept + { + std::lock_guard guard(GetLock()); + GetProvider() = tp; + } + +private: + OPENTELEMETRY_API_SINGLETON static nostd::shared_ptr &GetProvider() noexcept + { + static nostd::shared_ptr provider(new NoopMeterProvider); + return provider; + } + + OPENTELEMETRY_API_SINGLETON static common::SpinLockMutex &GetLock() noexcept + { + static common::SpinLockMutex lock; + return lock; + } +}; + +} // namespace metrics +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/metrics/sync_instruments.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/metrics/sync_instruments.h new file mode 100644 index 000000000..9eaec3352 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/metrics/sync_instruments.h @@ -0,0 +1,328 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "opentelemetry/common/attribute_value.h" +#include "opentelemetry/common/key_value_iterable_view.h" +#include "opentelemetry/context/context.h" +#include "opentelemetry/nostd/span.h" +#include "opentelemetry/nostd/string_view.h" +#include "opentelemetry/nostd/type_traits.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace metrics +{ + +class SynchronousInstrument +{ +public: + SynchronousInstrument() = default; + virtual ~SynchronousInstrument() = default; +}; + +/* A Counter instrument that adds values. */ +template +class Counter : public SynchronousInstrument +{ + +public: + /** + * Record a value + * + * @param value The increment amount. MUST be non-negative. 
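Editor's note: the ObservableInstrument / ObserverResult pair defined above is the pull-style half of the metrics API; a plain function pointer is registered and, at collection time, receives a variant holding either an int64_t or a double observer. A sketch (the callback name, attribute key and observed value are illustrative):

```cpp
#include <cstdint>

#include "opentelemetry/metrics/async_instruments.h"
#include "opentelemetry/metrics/observer_result.h"
#include "opentelemetry/nostd/shared_ptr.h"
#include "opentelemetry/nostd/variant.h"

namespace metrics = opentelemetry::metrics;
namespace nostd   = opentelemetry::nostd;

// Runs whenever a metric reader collects; picks the int64_t observer out of the variant.
void ObserveQueueDepthExample(metrics::ObserverResult result, void * /*state*/)
{
  using Int64Observer = nostd::shared_ptr<metrics::ObserverResultT<int64_t>>;
  if (nostd::holds_alternative<Int64Observer>(result))
  {
    nostd::get<Int64Observer>(result)->Observe(42, {{"queue", "rx"}});
  }
}

void register_callback_example(const nostd::shared_ptr<metrics::ObservableInstrument> &instrument)
{
  instrument->AddCallback(&ObserveQueueDepthExample, nullptr);
}
```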
+ */ + virtual void Add(T value) noexcept = 0; + + /** + * Record a value + * + * @param value The increment amount. MUST be non-negative. + * @param context The explicit context to associate with this measurement. + */ + virtual void Add(T value, const context::Context &context) noexcept = 0; + + /** + * Record a value with a set of attributes. + * + * @param value The increment amount. MUST be non-negative. + * @param attributes A set of attributes to associate with the value. + */ + + virtual void Add(T value, const common::KeyValueIterable &attributes) noexcept = 0; + + /** + * Record a value with a set of attributes. + * + * @param value The increment amount. MUST be non-negative. + * @param attributes A set of attributes to associate with the value. + * @param context The explicit context to associate with this measurement. + */ + virtual void Add(T value, + const common::KeyValueIterable &attributes, + const context::Context &context) noexcept = 0; + + template ::value> * = nullptr> + void Add(T value, const U &attributes) noexcept + { + this->Add(value, common::KeyValueIterableView{attributes}); + } + + template ::value> * = nullptr> + void Add(T value, const U &attributes, const context::Context &context) noexcept + { + this->Add(value, common::KeyValueIterableView{attributes}, context); + } + + void Add(T value, + std::initializer_list> + attributes) noexcept + { + this->Add(value, nostd::span>{ + attributes.begin(), attributes.end()}); + } + + void Add(T value, + std::initializer_list> attributes, + const context::Context &context) noexcept + { + this->Add(value, + nostd::span>{ + attributes.begin(), attributes.end()}, + context); + } +}; + +/** A histogram instrument that records values. */ + +template +class Histogram : public SynchronousInstrument +{ +public: +#if OPENTELEMETRY_ABI_VERSION_NO >= 2 + /** + * @since ABI_VERSION 2 + * Records a value. + * + * @param value The measurement value. MUST be non-negative. + */ + virtual void Record(T value) noexcept = 0; + + /** + * @since ABI_VERSION 2 + * Records a value with a set of attributes. + * + * @param value The measurement value. MUST be non-negative. + * @param attribute A set of attributes to associate with the value. + */ + virtual void Record(T value, const common::KeyValueIterable &attribute) noexcept = 0; + + template ::value> * = nullptr> + void Record(T value, const U &attributes) noexcept + { + this->Record(value, common::KeyValueIterableView{attributes}); + } + + void Record(T value, + std::initializer_list> + attributes) noexcept + { + this->Record(value, nostd::span>{ + attributes.begin(), attributes.end()}); + } +#endif + + /** + * Records a value. + * + * @param value The measurement value. MUST be non-negative. + * @param context The explicit context to associate with this measurement. + */ + virtual void Record(T value, const context::Context &context) noexcept = 0; + + /** + * Records a value with a set of attributes. + * + * @param value The measurement value. MUST be non-negative. + * @param attributes A set of attributes to associate with the value.. + * @param context The explicit context to associate with this measurement. 
+ */ + virtual void Record(T value, + const common::KeyValueIterable &attributes, + const context::Context &context) noexcept = 0; + + template ::value> * = nullptr> + void Record(T value, const U &attributes, const context::Context &context) noexcept + { + this->Record(value, common::KeyValueIterableView{attributes}, context); + } + + void Record( + T value, + std::initializer_list> attributes, + const context::Context &context) noexcept + { + this->Record(value, + nostd::span>{ + attributes.begin(), attributes.end()}, + context); + } +}; + +/** An up-down-counter instrument that adds or reduce values. */ + +template +class UpDownCounter : public SynchronousInstrument +{ +public: + /** + * Record a value. + * + * @param value The increment amount. May be positive, negative or zero. + */ + virtual void Add(T value) noexcept = 0; + + /** + * Record a value. + * + * @param value The increment amount. May be positive, negative or zero. + * @param context The explicit context to associate with this measurement. + */ + virtual void Add(T value, const context::Context &context) noexcept = 0; + + /** + * Record a value with a set of attributes. + * + * @param value The increment amount. May be positive, negative or zero. + * @param attributes A set of attributes to associate with the count. + */ + virtual void Add(T value, const common::KeyValueIterable &attributes) noexcept = 0; + + /** + * Record a value with a set of attributes. + * + * @param value The increment amount. May be positive, negative or zero. + * @param attributes A set of attributes to associate with the count. + * @param context The explicit context to associate with this measurement. + */ + virtual void Add(T value, + const common::KeyValueIterable &attributes, + const context::Context &context) noexcept = 0; + + template ::value> * = nullptr> + void Add(T value, const U &attributes) noexcept + { + this->Add(value, common::KeyValueIterableView{attributes}); + } + + template ::value> * = nullptr> + void Add(T value, const U &attributes, const context::Context &context) noexcept + { + this->Add(value, common::KeyValueIterableView{attributes}, context); + } + + void Add(T value, + std::initializer_list> + attributes) noexcept + { + this->Add(value, nostd::span>{ + attributes.begin(), attributes.end()}); + } + + void Add(T value, + std::initializer_list> attributes, + const context::Context &context) noexcept + { + this->Add(value, + nostd::span>{ + attributes.begin(), attributes.end()}, + context); + } +}; + +#if OPENTELEMETRY_ABI_VERSION_NO >= 2 +/* A Gauge instrument that records values. */ +template +class Gauge : public SynchronousInstrument +{ + +public: + /** + * Record a value + * + * @param value The measurement value. May be positive, negative or zero. + */ + virtual void Record(T value) noexcept = 0; + + /** + * Record a value + * + * @param value The measurement value. May be positive, negative or zero. + * @param context The explicit context to associate with this measurement. + */ + virtual void Record(T value, const context::Context &context) noexcept = 0; + + /** + * Record a value with a set of attributes. + * + * @param value The measurement value. May be positive, negative or zero. + * @param attributes A set of attributes to associate with the value. + */ + + virtual void Record(T value, const common::KeyValueIterable &attributes) noexcept = 0; + + /** + * Record a value with a set of attributes. + * + * @param value The measurement value. May be positive, negative or zero. 
+ * @param attributes A set of attributes to associate with the value. + * @param context The explicit context to associate with this measurement. + */ + virtual void Record(T value, + const common::KeyValueIterable &attributes, + const context::Context &context) noexcept = 0; + + template ::value> * = nullptr> + void Record(T value, const U &attributes) noexcept + { + this->Record(value, common::KeyValueIterableView{attributes}); + } + + template ::value> * = nullptr> + void Record(T value, const U &attributes, const context::Context &context) noexcept + { + this->Record(value, common::KeyValueIterableView{attributes}, context); + } + + void Record(T value, + std::initializer_list> + attributes) noexcept + { + this->Record(value, nostd::span>{ + attributes.begin(), attributes.end()}); + } + + void Record( + T value, + std::initializer_list> attributes, + const context::Context &context) noexcept + { + this->Record(value, + nostd::span>{ + attributes.begin(), attributes.end()}, + context); + } +}; +#endif + +} // namespace metrics +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/all.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/all.h new file mode 100644 index 000000000..deaf3ac6f --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/all.h @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include + +#include "opentelemetry/nostd/utility.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace nostd +{ +namespace detail +{ +template +using all = std::is_same, integer_sequence>; + +} // namespace detail +} // namespace nostd +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/decay.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/decay.h new file mode 100644 index 000000000..a6cb11124 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/decay.h @@ -0,0 +1,16 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include + +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace nostd +{ +template +using decay_t = typename std::decay::type; +} // namespace nostd +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/dependent_type.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/dependent_type.h new file mode 100644 index 000000000..5bba09ff8 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/dependent_type.h @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include + +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace nostd +{ +namespace detail +{ +template +struct dependent_type : T +{}; +} // namespace detail +} // namespace nostd +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/functional.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/functional.h new file mode 100644 index 000000000..0da58dd18 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/functional.h @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once 
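+
+// Illustrative sketch of the synchronous metric instruments (Counter, Histogram,
+// UpDownCounter, Gauge) declared in the metrics instrument header earlier in this
+// diff. Not part of this header; the meter and instrument names are hypothetical:
+//
+//   auto provider  = opentelemetry::metrics::Provider::GetMeterProvider();
+//   auto meter     = provider->GetMeter("demo_meter");
+//   auto counter   = meter->CreateUInt64Counter("requests");
+//   counter->Add(1, {{"route", "/health"}});                     // value + attributes
+//   auto histogram = meter->CreateDoubleHistogram("latency_ms");
+//   histogram->Record(12.5, opentelemetry::context::Context{});  // value + context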
+ +#include + +#include "opentelemetry/version.h" + +#define OPENTELEMETRY_RETURN(...) \ + noexcept(noexcept(__VA_ARGS__))->decltype(__VA_ARGS__) \ + { \ + return __VA_ARGS__; \ + } + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace nostd +{ +namespace detail +{ +struct equal_to +{ + template + inline constexpr auto operator()(Lhs &&lhs, Rhs &&rhs) const + OPENTELEMETRY_RETURN(std::forward(lhs) == std::forward(rhs)) +}; + +struct not_equal_to +{ + template + inline constexpr auto operator()(Lhs &&lhs, Rhs &&rhs) const + OPENTELEMETRY_RETURN(std::forward(lhs) != std::forward(rhs)) +}; + +struct less +{ + template + inline constexpr auto operator()(Lhs &&lhs, Rhs &&rhs) const + OPENTELEMETRY_RETURN(std::forward(lhs) < std::forward(rhs)) +}; + +struct greater +{ + template + inline constexpr auto operator()(Lhs &&lhs, Rhs &&rhs) const + OPENTELEMETRY_RETURN(std::forward(lhs) > std::forward(rhs)) +}; + +struct less_equal +{ + template + inline constexpr auto operator()(Lhs &&lhs, Rhs &&rhs) const + OPENTELEMETRY_RETURN(std::forward(lhs) <= std::forward(rhs)) +}; + +struct greater_equal +{ + template + inline constexpr auto operator()(Lhs &&lhs, Rhs &&rhs) const + OPENTELEMETRY_RETURN(std::forward(lhs) >= std::forward(rhs)) +}; +} // namespace detail +} // namespace nostd +OPENTELEMETRY_END_NAMESPACE + +#undef OPENTELEMETRY_RETURN diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/invoke.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/invoke.h new file mode 100644 index 000000000..55a943ed1 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/invoke.h @@ -0,0 +1,163 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include + +#include "opentelemetry/nostd/detail/decay.h" +#include "opentelemetry/nostd/detail/void.h" +#include "opentelemetry/version.h" + +#define OPENTELEMETRY_RETURN(...) \ + noexcept(noexcept(__VA_ARGS__))->decltype(__VA_ARGS__) \ + { \ + return __VA_ARGS__; \ + } + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace nostd +{ +namespace detail +{ + +template +struct is_reference_wrapper : std::false_type +{}; + +template +struct is_reference_wrapper> : std::true_type +{}; + +template +struct Invoke; + +template <> +struct Invoke +{ + template + inline static constexpr auto invoke(R T::*pmf, Arg &&arg, Args &&...args) + OPENTELEMETRY_RETURN((std::forward(arg).*pmf)(std::forward(args)...)) +}; + +template <> +struct Invoke +{ + template + inline static constexpr auto invoke(R T::*pmf, Arg &&arg, Args &&...args) + OPENTELEMETRY_RETURN((std::forward(arg).get().*pmf)(std::forward(args)...)) +}; + +template <> +struct Invoke +{ + template + inline static constexpr auto invoke(R T::*pmf, Arg &&arg, Args &&...args) + OPENTELEMETRY_RETURN(((*std::forward(arg)).*pmf)(std::forward(args)...)) +}; + +template <> +struct Invoke +{ + template + inline static constexpr auto invoke(R T::*pmo, Arg &&arg) + OPENTELEMETRY_RETURN(std::forward(arg).*pmo) +}; + +template <> +struct Invoke +{ + template + inline static constexpr auto invoke(R T::*pmo, Arg &&arg) + OPENTELEMETRY_RETURN(std::forward(arg).get().*pmo) +}; + +template <> +struct Invoke +{ + template + inline static constexpr auto invoke(R T::*pmo, Arg &&arg) + OPENTELEMETRY_RETURN((*std::forward(arg)).*pmo) +}; + +template +inline constexpr auto invoke_impl(R T::*f, Arg &&arg, Args &&...args) OPENTELEMETRY_RETURN( + Invoke::value, + (std::is_base_of>::value ? 0 + : is_reference_wrapper>::value ? 
1 + : 2)>::invoke(f, + std::forward(arg), + std::forward(args)...)) + +#ifdef _MSC_VER +# pragma warning(push) +# pragma warning(disable : 4100) +#endif + template + inline constexpr auto invoke_impl(F &&f, Args &&...args) + OPENTELEMETRY_RETURN(std::forward(f)(std::forward(args)...)) +#ifdef _MSC_VER +# pragma warning(pop) +#endif +} // namespace detail + +/* clang-format off */ +template +inline constexpr auto invoke(F &&f, Args &&... args) + OPENTELEMETRY_RETURN(detail::invoke_impl(std::forward(f), std::forward(args)...)) + +namespace detail +/* clang-format on */ +{ + + template + struct invoke_result + {}; + + template + struct invoke_result(), std::declval()...))>, + F, Args...> + { + using type = decltype(nostd::invoke(std::declval(), std::declval()...)); + }; + +} // namespace detail + +template +using invoke_result = detail::invoke_result; + +template +using invoke_result_t = typename invoke_result::type; + +namespace detail +{ + +template +struct is_invocable : std::false_type +{}; + +template +struct is_invocable>, F, Args...> : std::true_type +{}; + +template +struct is_invocable_r : std::false_type +{}; + +template +struct is_invocable_r>, R, F, Args...> + : std::is_convertible, R> +{}; + +} // namespace detail + +template +using is_invocable = detail::is_invocable; + +template +using is_invocable_r = detail::is_invocable_r; +} // namespace nostd +OPENTELEMETRY_END_NAMESPACE + +#undef OPENTELEMETRY_RETURN diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/trait.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/trait.h new file mode 100644 index 000000000..8f76fdec8 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/trait.h @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include + +#include "opentelemetry/nostd/type_traits.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace nostd +{ +namespace detail +{ +enum class Trait +{ + TriviallyAvailable, + Available, + Unavailable +}; + +template + class IsTriviallyAvailable, + template + class IsAvailable> +inline constexpr Trait trait() +{ + return IsTriviallyAvailable::value ? Trait::TriviallyAvailable + : IsAvailable::value ? Trait::Available + : Trait::Unavailable; +} + +inline constexpr Trait common_trait_impl(Trait result) +{ + return result; +} + +template +inline constexpr Trait common_trait_impl(Trait result, Trait t, Traits... ts) +{ + return static_cast(t) > static_cast(result) ? common_trait_impl(t, ts...) + : common_trait_impl(result, ts...); +} + +template +inline constexpr Trait common_trait(Traits... 
ts) +{ + return common_trait_impl(Trait::TriviallyAvailable, ts...); +} + +template +struct traits +{ + static constexpr Trait copy_constructible_trait = + common_trait(trait()...); + + static constexpr Trait move_constructible_trait = + common_trait(trait()...); + + static constexpr Trait copy_assignable_trait = + common_trait(copy_constructible_trait, + trait()...); + + static constexpr Trait move_assignable_trait = + common_trait(move_constructible_trait, + trait()...); + + static constexpr Trait destructible_trait = + common_trait(trait()...); +}; +} // namespace detail +} // namespace nostd +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/type_pack_element.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/type_pack_element.h new file mode 100644 index 000000000..280d24e94 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/type_pack_element.h @@ -0,0 +1,53 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include + +#include "opentelemetry/nostd/utility.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace nostd +{ +namespace detail +{ +template +using size_constant = std::integral_constant; + +template +struct indexed_type : size_constant +{ + using type = T; +}; + +template +struct type_pack_element_impl +{ +private: + template + struct set; + + template + struct set> : indexed_type... + {}; + + template + inline static std::enable_if impl(indexed_type); + + inline static std::enable_if impl(...); + +public: + using type = decltype(impl(set>{})); +}; + +template +using type_pack_element = typename type_pack_element_impl::type; + +template +using type_pack_element_t = typename type_pack_element::type; +} // namespace detail +} // namespace nostd +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/valueless.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/valueless.h new file mode 100644 index 000000000..3b2ca7f7c --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/valueless.h @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace nostd +{ +struct valueless_t +{}; +} // namespace nostd +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/variant_alternative.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/variant_alternative.h new file mode 100644 index 000000000..cc0da9c8c --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/variant_alternative.h @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include + +#include "opentelemetry/nostd/detail/type_pack_element.h" +#include "opentelemetry/nostd/detail/variant_fwd.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace nostd +{ +template +struct variant_alternative; + +template +using variant_alternative_t = typename variant_alternative::type; + +template +struct variant_alternative : std::add_const> +{}; + +template +struct variant_alternative : std::add_volatile> +{}; + +template +struct variant_alternative : std::add_cv> +{}; + +template +struct variant_alternative> +{ 
+ static_assert(I < sizeof...(Ts), "index out of bounds in `std::variant_alternative<>`"); + using type = detail::type_pack_element_t; +}; +} // namespace nostd +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/variant_fwd.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/variant_fwd.h new file mode 100644 index 000000000..6bae9659e --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/variant_fwd.h @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace nostd +{ +template +class variant; +} // namespace nostd +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/variant_size.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/variant_size.h new file mode 100644 index 000000000..d8986a222 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/variant_size.h @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include + +#include "opentelemetry/nostd/detail/variant_fwd.h" +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace nostd +{ +template +struct variant_size; + +template +struct variant_size : variant_size +{}; + +template +struct variant_size : variant_size +{}; + +template +struct variant_size : variant_size +{}; + +template +struct variant_size> : std::integral_constant +{}; +} // namespace nostd +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/void.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/void.h new file mode 100644 index 000000000..1b4c3b4f7 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/detail/void.h @@ -0,0 +1,28 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace nostd +{ +namespace detail +{ +template +struct voider +{ + using type = void; +}; +} // namespace detail + +/** + * Back port of std::void_t + * + * Note: voider workaround is required for gcc-4.8 to make SFINAE work + */ +template +using void_t = typename detail::voider::type; +} // namespace nostd +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/function_ref.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/function_ref.h new file mode 100644 index 000000000..6f0c2dd33 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/function_ref.h @@ -0,0 +1,94 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include // IWYU pragma: keep +#include +#include + +#include "opentelemetry/version.h" + +OPENTELEMETRY_BEGIN_NAMESPACE +namespace nostd +{ +template +class function_ref; // IWYU pragma: keep + +/** + * Non-owning function reference that can be used as a more performant + * replacement for std::function when ownership sematics aren't needed. 
+ * + * See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2017/p0792r0.html + * + * Based off of https://stackoverflow.com/a/39087660/4447365 + */ +template +class function_ref +{ + void *callable_ = nullptr; + R (*invoker_)(void *, Args...) = nullptr; + + template + using FunctionPointer = decltype(std::addressof(std::declval())); + + template + void BindTo(F &f) noexcept + { + callable_ = static_cast(std::addressof(f)); + invoker_ = [](void *callable, Args... args) -> R { + return (*static_cast>(callable))(std::forward(args)...); + }; + } + + template + void BindTo(R_in (*f)(Args_in...)) noexcept + { + using F = decltype(f); + if (f == nullptr) + { + return BindTo(nullptr); + } + callable_ = reinterpret_cast(f); + invoker_ = [](void *callable, Args... args) -> R { + return (F(callable))(std::forward(args)...); + }; + } + + void BindTo(std::nullptr_t) noexcept + { + callable_ = nullptr; + invoker_ = nullptr; + } + +public: + template < + class F, + typename std::enable_if::type>::value, + int>::type = 0, + typename std::enable_if< +#if (__cplusplus >= 201703L) || (defined(_MSVC_LANG) && (_MSVC_LANG > 201402)) + // std::result_of deprecated in C++17, removed in C++20 + std::is_convertible::type, R>::value, +#else + // std::result_of since C++11 + std::is_convertible::type, R>::value, +#endif + int>::type = 0> + function_ref(F &&f) + { + BindTo(f); // not forward + } + + function_ref(std::nullptr_t) {} + + function_ref(const function_ref &) noexcept = default; + function_ref(function_ref &&) noexcept = default; + + R operator()(Args... args) const { return invoker_(callable_, std::forward(args)...); } + + explicit operator bool() const { return invoker_; } +}; +} // namespace nostd +OPENTELEMETRY_END_NAMESPACE diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/.clang-format b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/.clang-format new file mode 100644 index 000000000..001170f7e --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/.clang-format @@ -0,0 +1,3 @@ +# Disable formatting for Google Abseil library snapshot +DisableFormat: true +SortIncludes: false diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/README.md b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/README.md new file mode 100644 index 000000000..5dd661971 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/README.md @@ -0,0 +1,4 @@ +# Notes on Abseil Variant implementation + +This is a snapshot of Abseil Variant +`absl::OTABSL_OPTION_NAMESPACE_NAME::variant` from Abseil `v2020-03-03#8`. diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/attributes.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/attributes.h new file mode 100644 index 000000000..72901a84c --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/attributes.h @@ -0,0 +1,621 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This header file defines macros for declaring attributes for functions, +// types, and variables. +// +// These macros are used within Abseil and allow the compiler to optimize, where +// applicable, certain function calls. +// +// This file is used for both C and C++! +// +// Most macros here are exposing GCC or Clang features, and are stubbed out for +// other compilers. +// +// GCC attributes documentation: +// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Function-Attributes.html +// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Variable-Attributes.html +// https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Type-Attributes.html +// +// Most attributes in this file are already supported by GCC 4.7. However, some +// of them are not supported in older version of Clang. Thus, we check +// `__has_attribute()` first. If the check fails, we check if we are on GCC and +// assume the attribute exists on GCC (which is verified on GCC 4.7). +// +// ----------------------------------------------------------------------------- +// Sanitizer Attributes +// ----------------------------------------------------------------------------- +// +// Sanitizer-related attributes are not "defined" in this file (and indeed +// are not defined as such in any file). To utilize the following +// sanitizer-related attributes within your builds, define the following macros +// within your build using a `-D` flag, along with the given value for +// `-fsanitize`: +// +// * `ADDRESS_SANITIZER` + `-fsanitize=address` (Clang, GCC 4.8) +// * `MEMORY_SANITIZER` + `-fsanitize=memory` (Clang-only) +// * `THREAD_SANITIZER + `-fsanitize=thread` (Clang, GCC 4.8+) +// * `UNDEFINED_BEHAVIOR_SANITIZER` + `-fsanitize=undefined` (Clang, GCC 4.9+) +// * `CONTROL_FLOW_INTEGRITY` + -fsanitize=cfi (Clang-only) +// +// Example: +// +// // Enable branches in the Abseil code that are tagged for ASan: +// $ bazel build --copt=-DADDRESS_SANITIZER --copt=-fsanitize=address +// --linkopt=-fsanitize=address *target* +// +// Since these macro names are only supported by GCC and Clang, we only check +// for `__GNUC__` (GCC or Clang) and the above macros. +#ifndef OTABSL_BASE_ATTRIBUTES_H_ +#define OTABSL_BASE_ATTRIBUTES_H_ + +// OTABSL_HAVE_ATTRIBUTE +// +// A function-like feature checking macro that is a wrapper around +// `__has_attribute`, which is defined by GCC 5+ and Clang and evaluates to a +// nonzero constant integer if the attribute is supported or 0 if not. +// +// It evaluates to zero if `__has_attribute` is not defined by the compiler. +// +// GCC: https://gcc.gnu.org/gcc-5/changes.html +// Clang: https://clang.llvm.org/docs/LanguageExtensions.html +#ifdef __has_attribute +#define OTABSL_HAVE_ATTRIBUTE(x) __has_attribute(x) +#else +#define OTABSL_HAVE_ATTRIBUTE(x) 0 +#endif + +// OTABSL_HAVE_CPP_ATTRIBUTE +// +// A function-like feature checking macro that accepts C++11 style attributes. +// It's a wrapper around `__has_cpp_attribute`, defined by ISO C++ SD-6 +// (https://en.cppreference.com/w/cpp/experimental/feature_test). If we don't +// find `__has_cpp_attribute`, will evaluate to 0. 
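+//
+// Illustrative use (a hedged sketch; the same pattern appears further down in
+// this header for OTABSL_ATTRIBUTE_REINITIALIZES):
+//
+//   #if OTABSL_HAVE_CPP_ATTRIBUTE(clang::reinitializes)
+//   #define OTABSL_ATTRIBUTE_REINITIALIZES [[clang::reinitializes]]
+//   #endif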
+#if defined(__cplusplus) && defined(__has_cpp_attribute) +// NOTE: requiring __cplusplus above should not be necessary, but +// works around https://bugs.llvm.org/show_bug.cgi?id=23435. +#define OTABSL_HAVE_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) +#else +#define OTABSL_HAVE_CPP_ATTRIBUTE(x) 0 +#endif + +// ----------------------------------------------------------------------------- +// Function Attributes +// ----------------------------------------------------------------------------- +// +// GCC: https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html +// Clang: https://clang.llvm.org/docs/AttributeReference.html + +// OTABSL_PRINTF_ATTRIBUTE +// OTABSL_SCANF_ATTRIBUTE +// +// Tells the compiler to perform `printf` format string checking if the +// compiler supports it; see the 'format' attribute in +// . +// +// Note: As the GCC manual states, "[s]ince non-static C++ methods +// have an implicit 'this' argument, the arguments of such methods +// should be counted from two, not one." +#if OTABSL_HAVE_ATTRIBUTE(format) || (defined(__GNUC__) && !defined(__clang__)) +#define OTABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) \ + __attribute__((__format__(__printf__, string_index, first_to_check))) +#define OTABSL_SCANF_ATTRIBUTE(string_index, first_to_check) \ + __attribute__((__format__(__scanf__, string_index, first_to_check))) +#else +#define OTABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) +#define OTABSL_SCANF_ATTRIBUTE(string_index, first_to_check) +#endif + +// OTABSL_ATTRIBUTE_ALWAYS_INLINE +// OTABSL_ATTRIBUTE_NOINLINE +// +// Forces functions to either inline or not inline. Introduced in gcc 3.1. +#if OTABSL_HAVE_ATTRIBUTE(always_inline) || \ + (defined(__GNUC__) && !defined(__clang__)) +#define OTABSL_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline)) +#define OTABSL_HAVE_ATTRIBUTE_ALWAYS_INLINE 1 +#else +#define OTABSL_ATTRIBUTE_ALWAYS_INLINE +#endif + +#if OTABSL_HAVE_ATTRIBUTE(noinline) || (defined(__GNUC__) && !defined(__clang__)) +#define OTABSL_ATTRIBUTE_NOINLINE __attribute__((noinline)) +#define OTABSL_HAVE_ATTRIBUTE_NOINLINE 1 +#else +#define OTABSL_ATTRIBUTE_NOINLINE +#endif + +// OTABSL_ATTRIBUTE_NO_TAIL_CALL +// +// Prevents the compiler from optimizing away stack frames for functions which +// end in a call to another function. +#if OTABSL_HAVE_ATTRIBUTE(disable_tail_calls) +#define OTABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1 +#define OTABSL_ATTRIBUTE_NO_TAIL_CALL __attribute__((disable_tail_calls)) +#elif defined(__GNUC__) && !defined(__clang__) +#define OTABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1 +#define OTABSL_ATTRIBUTE_NO_TAIL_CALL \ + __attribute__((optimize("no-optimize-sibling-calls"))) +#else +#define OTABSL_ATTRIBUTE_NO_TAIL_CALL +#define OTABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 0 +#endif + +// OTABSL_ATTRIBUTE_WEAK +// +// Tags a function as weak for the purposes of compilation and linking. +// Weak attributes currently do not work properly in LLVM's Windows backend, +// so disable them there. See https://bugs.llvm.org/show_bug.cgi?id=37598 +// for further information. +// The MinGW compiler doesn't complain about the weak attribute until the link +// step, presumably because Windows doesn't use ELF binaries. 
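+//
+// Illustrative use (a hedged sketch; the symbol name is only an example -- the
+// same pattern is used below by OTABSL_DECLARE_ATTRIBUTE_SECTION_VARS):
+//
+//   extern char __start_mysection[] OTABSL_ATTRIBUTE_WEAK;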
+#if (OTABSL_HAVE_ATTRIBUTE(weak) || \ + (defined(__GNUC__) && !defined(__clang__))) && \ + !(defined(__llvm__) && defined(_WIN32)) && !defined(__MINGW32__) +#undef OTABSL_ATTRIBUTE_WEAK +#define OTABSL_ATTRIBUTE_WEAK __attribute__((weak)) +#define OTABSL_HAVE_ATTRIBUTE_WEAK 1 +#else +#define OTABSL_ATTRIBUTE_WEAK +#define OTABSL_HAVE_ATTRIBUTE_WEAK 0 +#endif + +// OTABSL_ATTRIBUTE_NONNULL +// +// Tells the compiler either (a) that a particular function parameter +// should be a non-null pointer, or (b) that all pointer arguments should +// be non-null. +// +// Note: As the GCC manual states, "[s]ince non-static C++ methods +// have an implicit 'this' argument, the arguments of such methods +// should be counted from two, not one." +// +// Args are indexed starting at 1. +// +// For non-static class member functions, the implicit `this` argument +// is arg 1, and the first explicit argument is arg 2. For static class member +// functions, there is no implicit `this`, and the first explicit argument is +// arg 1. +// +// Example: +// +// /* arg_a cannot be null, but arg_b can */ +// void Function(void* arg_a, void* arg_b) OTABSL_ATTRIBUTE_NONNULL(1); +// +// class C { +// /* arg_a cannot be null, but arg_b can */ +// void Method(void* arg_a, void* arg_b) OTABSL_ATTRIBUTE_NONNULL(2); +// +// /* arg_a cannot be null, but arg_b can */ +// static void StaticMethod(void* arg_a, void* arg_b) +// OTABSL_ATTRIBUTE_NONNULL(1); +// }; +// +// If no arguments are provided, then all pointer arguments should be non-null. +// +// /* No pointer arguments may be null. */ +// void Function(void* arg_a, void* arg_b, int arg_c) OTABSL_ATTRIBUTE_NONNULL(); +// +// NOTE: The GCC nonnull attribute actually accepts a list of arguments, but +// OTABSL_ATTRIBUTE_NONNULL does not. +#if OTABSL_HAVE_ATTRIBUTE(nonnull) || (defined(__GNUC__) && !defined(__clang__)) +#define OTABSL_ATTRIBUTE_NONNULL(arg_index) __attribute__((nonnull(arg_index))) +#else +#define OTABSL_ATTRIBUTE_NONNULL(...) +#endif + +// OTABSL_ATTRIBUTE_NORETURN +// +// Tells the compiler that a given function never returns. +#if OTABSL_HAVE_ATTRIBUTE(noreturn) || (defined(__GNUC__) && !defined(__clang__)) +#define OTABSL_ATTRIBUTE_NORETURN __attribute__((noreturn)) +#elif defined(_MSC_VER) +#define OTABSL_ATTRIBUTE_NORETURN __declspec(noreturn) +#else +#define OTABSL_ATTRIBUTE_NORETURN +#endif + +// OTABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS +// +// Tells the AddressSanitizer (or other memory testing tools) to ignore a given +// function. Useful for cases when a function reads random locations on stack, +// calls _exit from a cloned subprocess, deliberately accesses buffer +// out of bounds or does other scary things with memory. +// NOTE: GCC supports AddressSanitizer(asan) since 4.8. +// https://gcc.gnu.org/gcc-4.8/changes.html +#if defined(__GNUC__) +#define OTABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address)) +#else +#define OTABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS +#endif + +// OTABSL_ATTRIBUTE_NO_SANITIZE_MEMORY +// +// Tells the MemorySanitizer to relax the handling of a given function. All +// "Use of uninitialized value" warnings from such functions will be suppressed, +// and all values loaded from memory will be considered fully initialized. +// This attribute is similar to the ADDRESS_SANITIZER attribute above, but deals +// with initialized-ness rather than addressability issues. +// NOTE: MemorySanitizer(msan) is supported by Clang but not GCC. 
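+//
+// Illustrative use (a hedged sketch; the function name is only an example):
+//
+//   void ParsePossiblyUninitializedInput(const char* data, size_t size)
+//       OTABSL_ATTRIBUTE_NO_SANITIZE_MEMORY;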
+#if defined(__clang__) +#define OTABSL_ATTRIBUTE_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory)) +#else +#define OTABSL_ATTRIBUTE_NO_SANITIZE_MEMORY +#endif + +// OTABSL_ATTRIBUTE_NO_SANITIZE_THREAD +// +// Tells the ThreadSanitizer to not instrument a given function. +// NOTE: GCC supports ThreadSanitizer(tsan) since 4.8. +// https://gcc.gnu.org/gcc-4.8/changes.html +#if defined(__GNUC__) +#define OTABSL_ATTRIBUTE_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread)) +#else +#define OTABSL_ATTRIBUTE_NO_SANITIZE_THREAD +#endif + +// OTABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED +// +// Tells the UndefinedSanitizer to ignore a given function. Useful for cases +// where certain behavior (eg. division by zero) is being used intentionally. +// NOTE: GCC supports UndefinedBehaviorSanitizer(ubsan) since 4.9. +// https://gcc.gnu.org/gcc-4.9/changes.html +#if defined(__GNUC__) && \ + (defined(UNDEFINED_BEHAVIOR_SANITIZER) || defined(ADDRESS_SANITIZER)) +#define OTABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \ + __attribute__((no_sanitize("undefined"))) +#else +#define OTABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED +#endif + +// OTABSL_ATTRIBUTE_NO_SANITIZE_CFI +// +// Tells the ControlFlowIntegrity sanitizer to not instrument a given function. +// See https://clang.llvm.org/docs/ControlFlowIntegrity.html for details. +#if defined(__GNUC__) && defined(CONTROL_FLOW_INTEGRITY) +#define OTABSL_ATTRIBUTE_NO_SANITIZE_CFI __attribute__((no_sanitize("cfi"))) +#else +#define OTABSL_ATTRIBUTE_NO_SANITIZE_CFI +#endif + +// OTABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK +// +// Tells the SafeStack to not instrument a given function. +// See https://clang.llvm.org/docs/SafeStack.html for details. +#if defined(__GNUC__) && defined(SAFESTACK_SANITIZER) +#define OTABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK \ + __attribute__((no_sanitize("safe-stack"))) +#else +#define OTABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK +#endif + +// OTABSL_ATTRIBUTE_RETURNS_NONNULL +// +// Tells the compiler that a particular function never returns a null pointer. +#if OTABSL_HAVE_ATTRIBUTE(returns_nonnull) || \ + (defined(__GNUC__) && \ + (__GNUC__ > 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)) && \ + !defined(__clang__)) +#define OTABSL_ATTRIBUTE_RETURNS_NONNULL __attribute__((returns_nonnull)) +#else +#define OTABSL_ATTRIBUTE_RETURNS_NONNULL +#endif + +// OTABSL_HAVE_ATTRIBUTE_SECTION +// +// Indicates whether labeled sections are supported. Weak symbol support is +// a prerequisite. Labeled sections are not supported on Darwin/iOS. +#ifdef OTABSL_HAVE_ATTRIBUTE_SECTION +#error OTABSL_HAVE_ATTRIBUTE_SECTION cannot be directly set +#elif (OTABSL_HAVE_ATTRIBUTE(section) || \ + (defined(__GNUC__) && !defined(__clang__))) && \ + !defined(__APPLE__) && OTABSL_HAVE_ATTRIBUTE_WEAK +#define OTABSL_HAVE_ATTRIBUTE_SECTION 1 + +// OTABSL_ATTRIBUTE_SECTION +// +// Tells the compiler/linker to put a given function into a section and define +// `__start_ ## name` and `__stop_ ## name` symbols to bracket the section. +// This functionality is supported by GNU linker. Any function annotated with +// `OTABSL_ATTRIBUTE_SECTION` must not be inlined, or it will be placed into +// whatever section its caller is placed into. +// +#ifndef OTABSL_ATTRIBUTE_SECTION +#define OTABSL_ATTRIBUTE_SECTION(name) \ + __attribute__((section(#name))) __attribute__((noinline)) +#endif + + +// OTABSL_ATTRIBUTE_SECTION_VARIABLE +// +// Tells the compiler/linker to put a given variable into a section and define +// `__start_ ## name` and `__stop_ ## name` symbols to bracket the section. 
+// This functionality is supported by GNU linker. +#ifndef OTABSL_ATTRIBUTE_SECTION_VARIABLE +#define OTABSL_ATTRIBUTE_SECTION_VARIABLE(name) __attribute__((section(#name))) +#endif + +// OTABSL_DECLARE_ATTRIBUTE_SECTION_VARS +// +// A weak section declaration to be used as a global declaration +// for OTABSL_ATTRIBUTE_SECTION_START|STOP(name) to compile and link +// even without functions with OTABSL_ATTRIBUTE_SECTION(name). +// OTABSL_DEFINE_ATTRIBUTE_SECTION should be in the exactly one file; it's +// a no-op on ELF but not on Mach-O. +// +#ifndef OTABSL_DECLARE_ATTRIBUTE_SECTION_VARS +#define OTABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \ + extern char __start_##name[] OTABSL_ATTRIBUTE_WEAK; \ + extern char __stop_##name[] OTABSL_ATTRIBUTE_WEAK +#endif +#ifndef OTABSL_DEFINE_ATTRIBUTE_SECTION_VARS +#define OTABSL_INIT_ATTRIBUTE_SECTION_VARS(name) +#define OTABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name) +#endif + +// OTABSL_ATTRIBUTE_SECTION_START +// +// Returns `void*` pointers to start/end of a section of code with +// functions having OTABSL_ATTRIBUTE_SECTION(name). +// Returns 0 if no such functions exist. +// One must OTABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) for this to compile and +// link. +// +#define OTABSL_ATTRIBUTE_SECTION_START(name) \ + (reinterpret_cast(__start_##name)) +#define OTABSL_ATTRIBUTE_SECTION_STOP(name) \ + (reinterpret_cast(__stop_##name)) + +#else // !OTABSL_HAVE_ATTRIBUTE_SECTION + +#define OTABSL_HAVE_ATTRIBUTE_SECTION 0 + +// provide dummy definitions +#define OTABSL_ATTRIBUTE_SECTION(name) +#define OTABSL_ATTRIBUTE_SECTION_VARIABLE(name) +#define OTABSL_INIT_ATTRIBUTE_SECTION_VARS(name) +#define OTABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name) +#define OTABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) +#define OTABSL_ATTRIBUTE_SECTION_START(name) (reinterpret_cast(0)) +#define OTABSL_ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast(0)) + +#endif // OTABSL_ATTRIBUTE_SECTION + +// OTABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC +// +// Support for aligning the stack on 32-bit x86. +#if OTABSL_HAVE_ATTRIBUTE(force_align_arg_pointer) || \ + (defined(__GNUC__) && !defined(__clang__)) +#if defined(__i386__) +#define OTABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC \ + __attribute__((force_align_arg_pointer)) +#define OTABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0) +#elif defined(__x86_64__) +#define OTABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (1) +#define OTABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC +#else // !__i386__ && !__x86_64 +#define OTABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0) +#define OTABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC +#endif // __i386__ +#else +#define OTABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC +#define OTABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0) +#endif + +// OTABSL_MUST_USE_RESULT +// +// Tells the compiler to warn about unused results. +// +// When annotating a function, it must appear as the first part of the +// declaration or definition. The compiler will warn if the return value from +// such a function is unused: +// +// OTABSL_MUST_USE_RESULT Sprocket* AllocateSprocket(); +// AllocateSprocket(); // Triggers a warning. +// +// When annotating a class, it is equivalent to annotating every function which +// returns an instance. +// +// class OTABSL_MUST_USE_RESULT Sprocket {}; +// Sprocket(); // Triggers a warning. +// +// Sprocket MakeSprocket(); +// MakeSprocket(); // Triggers a warning. +// +// Note that references and pointers are not instances: +// +// Sprocket* SprocketPointer(); +// SprocketPointer(); // Does *not* trigger a warning. 
+// +// OTABSL_MUST_USE_RESULT allows using cast-to-void to suppress the unused result +// warning. For that, warn_unused_result is used only for clang but not for gcc. +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66425 +// +// Note: past advice was to place the macro after the argument list. +#if OTABSL_HAVE_ATTRIBUTE(nodiscard) +#define OTABSL_MUST_USE_RESULT [[nodiscard]] +#elif defined(__clang__) && OTABSL_HAVE_ATTRIBUTE(warn_unused_result) +#define OTABSL_MUST_USE_RESULT __attribute__((warn_unused_result)) +#else +#define OTABSL_MUST_USE_RESULT +#endif + +// OTABSL_ATTRIBUTE_HOT, OTABSL_ATTRIBUTE_COLD +// +// Tells GCC that a function is hot or cold. GCC can use this information to +// improve static analysis, i.e. a conditional branch to a cold function +// is likely to be not-taken. +// This annotation is used for function declarations. +// +// Example: +// +// int foo() OTABSL_ATTRIBUTE_HOT; +#if OTABSL_HAVE_ATTRIBUTE(hot) || (defined(__GNUC__) && !defined(__clang__)) +#define OTABSL_ATTRIBUTE_HOT __attribute__((hot)) +#else +#define OTABSL_ATTRIBUTE_HOT +#endif + +#if OTABSL_HAVE_ATTRIBUTE(cold) || (defined(__GNUC__) && !defined(__clang__)) +#define OTABSL_ATTRIBUTE_COLD __attribute__((cold)) +#else +#define OTABSL_ATTRIBUTE_COLD +#endif + +// OTABSL_XRAY_ALWAYS_INSTRUMENT, OTABSL_XRAY_NEVER_INSTRUMENT, OTABSL_XRAY_LOG_ARGS +// +// We define the OTABSL_XRAY_ALWAYS_INSTRUMENT and OTABSL_XRAY_NEVER_INSTRUMENT +// macro used as an attribute to mark functions that must always or never be +// instrumented by XRay. Currently, this is only supported in Clang/LLVM. +// +// For reference on the LLVM XRay instrumentation, see +// http://llvm.org/docs/XRay.html. +// +// A function with the XRAY_ALWAYS_INSTRUMENT macro attribute in its declaration +// will always get the XRay instrumentation sleds. These sleds may introduce +// some binary size and runtime overhead and must be used sparingly. +// +// These attributes only take effect when the following conditions are met: +// +// * The file/target is built in at least C++11 mode, with a Clang compiler +// that supports XRay attributes. +// * The file/target is built with the -fxray-instrument flag set for the +// Clang/LLVM compiler. +// * The function is defined in the translation unit (the compiler honors the +// attribute in either the definition or the declaration, and must match). +// +// There are cases when, even when building with XRay instrumentation, users +// might want to control specifically which functions are instrumented for a +// particular build using special-case lists provided to the compiler. These +// special case lists are provided to Clang via the +// -fxray-always-instrument=... and -fxray-never-instrument=... flags. The +// attributes in source take precedence over these special-case lists. +// +// To disable the XRay attributes at build-time, users may define +// OTABSL_NO_XRAY_ATTRIBUTES. Do NOT define OTABSL_NO_XRAY_ATTRIBUTES on specific +// packages/targets, as this may lead to conflicting definitions of functions at +// link-time. 
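+//
+// Illustrative use (a hedged sketch; the function names are only examples):
+//
+//   OTABSL_XRAY_ALWAYS_INSTRUMENT void HandleRpcRequest();
+//   OTABSL_XRAY_NEVER_INSTRUMENT  void TightInnerLoop();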
+// +#if OTABSL_HAVE_CPP_ATTRIBUTE(clang::xray_always_instrument) && \ + !defined(OTABSL_NO_XRAY_ATTRIBUTES) +#define OTABSL_XRAY_ALWAYS_INSTRUMENT [[clang::xray_always_instrument]] +#define OTABSL_XRAY_NEVER_INSTRUMENT [[clang::xray_never_instrument]] +#if OTABSL_HAVE_CPP_ATTRIBUTE(clang::xray_log_args) +#define OTABSL_XRAY_LOG_ARGS(N) \ + [[clang::xray_always_instrument, clang::xray_log_args(N)]] +#else +#define OTABSL_XRAY_LOG_ARGS(N) [[clang::xray_always_instrument]] +#endif +#else +#define OTABSL_XRAY_ALWAYS_INSTRUMENT +#define OTABSL_XRAY_NEVER_INSTRUMENT +#define OTABSL_XRAY_LOG_ARGS(N) +#endif + +// OTABSL_ATTRIBUTE_REINITIALIZES +// +// Indicates that a member function reinitializes the entire object to a known +// state, independent of the previous state of the object. +// +// The clang-tidy check bugprone-use-after-move allows member functions marked +// with this attribute to be called on objects that have been moved from; +// without the attribute, this would result in a use-after-move warning. +#if OTABSL_HAVE_CPP_ATTRIBUTE(clang::reinitializes) +#define OTABSL_ATTRIBUTE_REINITIALIZES [[clang::reinitializes]] +#else +#define OTABSL_ATTRIBUTE_REINITIALIZES +#endif + +// ----------------------------------------------------------------------------- +// Variable Attributes +// ----------------------------------------------------------------------------- + +// OTABSL_ATTRIBUTE_UNUSED +// +// Prevents the compiler from complaining about variables that appear unused. +#if OTABSL_HAVE_ATTRIBUTE(unused) || (defined(__GNUC__) && !defined(__clang__)) +#undef OTABSL_ATTRIBUTE_UNUSED +#define OTABSL_ATTRIBUTE_UNUSED __attribute__((__unused__)) +#else +#define OTABSL_ATTRIBUTE_UNUSED +#endif + +// OTABSL_ATTRIBUTE_INITIAL_EXEC +// +// Tells the compiler to use "initial-exec" mode for a thread-local variable. +// See http://people.redhat.com/drepper/tls.pdf for the gory details. +#if OTABSL_HAVE_ATTRIBUTE(tls_model) || (defined(__GNUC__) && !defined(__clang__)) +#define OTABSL_ATTRIBUTE_INITIAL_EXEC __attribute__((tls_model("initial-exec"))) +#else +#define OTABSL_ATTRIBUTE_INITIAL_EXEC +#endif + +// OTABSL_ATTRIBUTE_PACKED +// +// Instructs the compiler not to use natural alignment for a tagged data +// structure, but instead to reduce its alignment to 1. This attribute can +// either be applied to members of a structure or to a structure in its +// entirety. Applying this attribute (judiciously) to a structure in its +// entirety to optimize the memory footprint of very commonly-used structs is +// fine. Do not apply this attribute to a structure in its entirety if the +// purpose is to control the offsets of the members in the structure. Instead, +// apply this attribute only to structure members that need it. +// +// When applying OTABSL_ATTRIBUTE_PACKED only to specific structure members the +// natural alignment of structure members not annotated is preserved. Aligned +// member accesses are faster than non-aligned member accesses even if the +// targeted microprocessor supports non-aligned accesses. 
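+//
+// Illustrative use on a single member (a hedged sketch; the struct and member
+// names are only examples):
+//
+//   struct RecordHeader {
+//     char tag;
+//     uint32_t length OTABSL_ATTRIBUTE_PACKED;  // packs directly after `tag`
+//   };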
+#if OTABSL_HAVE_ATTRIBUTE(packed) || (defined(__GNUC__) && !defined(__clang__)) +#define OTABSL_ATTRIBUTE_PACKED __attribute__((__packed__)) +#else +#define OTABSL_ATTRIBUTE_PACKED +#endif + +// OTABSL_ATTRIBUTE_FUNC_ALIGN +// +// Tells the compiler to align the function start at least to certain +// alignment boundary +#if OTABSL_HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__)) +#define OTABSL_ATTRIBUTE_FUNC_ALIGN(bytes) __attribute__((aligned(bytes))) +#else +#define OTABSL_ATTRIBUTE_FUNC_ALIGN(bytes) +#endif + +// OTABSL_CONST_INIT +// +// A variable declaration annotated with the `OTABSL_CONST_INIT` attribute will +// not compile (on supported platforms) unless the variable has a constant +// initializer. This is useful for variables with static and thread storage +// duration, because it guarantees that they will not suffer from the so-called +// "static init order fiasco". Prefer to put this attribute on the most visible +// declaration of the variable, if there's more than one, because code that +// accesses the variable can then use the attribute for optimization. +// +// Example: +// +// class MyClass { +// public: +// OTABSL_CONST_INIT static MyType my_var; +// }; +// +// MyType MyClass::my_var = MakeMyType(...); +// +// Note that this attribute is redundant if the variable is declared constexpr. +#if OTABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization) +#define OTABSL_CONST_INIT [[clang::require_constant_initialization]] +#else +#define OTABSL_CONST_INIT +#endif // OTABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization) + +#endif // OTABSL_BASE_ATTRIBUTES_H_ diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/config.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/config.h new file mode 100644 index 000000000..e0836b9b3 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/config.h @@ -0,0 +1,671 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: config.h +// ----------------------------------------------------------------------------- +// +// This header file defines a set of macros for checking the presence of +// important compiler and platform features. Such macros can be used to +// produce portable code by parameterizing compilation based on the presence or +// lack of a given feature. +// +// We define a "feature" as some interface we wish to program to: for example, +// a library function or system call. A value of `1` indicates support for +// that feature; any other value indicates the feature support is undefined. +// +// Example: +// +// Suppose a programmer wants to write a program that uses the 'mmap()' system +// call. 
The Abseil macro for that feature (`OTABSL_HAVE_MMAP`) allows you to +// selectively include the `mmap.h` header and bracket code using that feature +// in the macro: +// +// #include "absl/base/config.h" +// +// #ifdef OTABSL_HAVE_MMAP +// #include "sys/mman.h" +// #endif //OTABSL_HAVE_MMAP +// +// ... +// #ifdef OTABSL_HAVE_MMAP +// void *ptr = mmap(...); +// ... +// #endif // OTABSL_HAVE_MMAP + +#ifndef OTABSL_BASE_CONFIG_H_ +#define OTABSL_BASE_CONFIG_H_ + +// Included for the __GLIBC__ macro (or similar macros on other systems). +#include + +#ifdef __cplusplus +// Included for __GLIBCXX__, _LIBCPP_VERSION +#include +#endif // __cplusplus + +#if defined(__APPLE__) +// Included for TARGET_OS_IPHONE, __IPHONE_OS_VERSION_MIN_REQUIRED, +// __IPHONE_8_0. +#include +#include +#endif + +#include "options.h" +#include "policy_checks.h" + +// Helper macro to convert a CPP variable to a string literal. +#define OTABSL_INTERNAL_DO_TOKEN_STR(x) #x +#define OTABSL_INTERNAL_TOKEN_STR(x) OTABSL_INTERNAL_DO_TOKEN_STR(x) + +// ----------------------------------------------------------------------------- +// Abseil namespace annotations +// ----------------------------------------------------------------------------- + +// OTABSL_NAMESPACE_BEGIN/OTABSL_NAMESPACE_END +// +// An annotation placed at the beginning/end of each `namespace absl` scope. +// This is used to inject an inline namespace. +// +// The proper way to write Abseil code in the `absl` namespace is: +// +// namespace absl { +// OTABSL_NAMESPACE_BEGIN +// +// void Foo(); // absl::OTABSL_OPTION_NAMESPACE_NAME::Foo(). +// +// OTABSL_NAMESPACE_END +// } // namespace absl +// +// Users of Abseil should not use these macros, because users of Abseil should +// not write `namespace absl {` in their own code for any reason. (Abseil does +// not support forward declarations of its own types, nor does it support +// user-provided specialization of Abseil templates. Code that violates these +// rules may be broken without warning.) +#if !defined(OTABSL_OPTION_NAMESPACE_NAME) +#error options.h is misconfigured. +#endif + +// Check that OTABSL_OPTION_NAMESPACE_NAME is neither "head" nor "" +#if defined(__cplusplus) + +#define OTABSL_INTERNAL_INLINE_NAMESPACE_STR \ + OTABSL_INTERNAL_TOKEN_STR(OTABSL_OPTION_NAMESPACE_NAME) + +static_assert(OTABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != '\0', + "options.h misconfigured: OTABSL_OPTION_NAMESPACE_NAME must " + "not be empty."); +static_assert(OTABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || + OTABSL_INTERNAL_INLINE_NAMESPACE_STR[1] != 'e' || + OTABSL_INTERNAL_INLINE_NAMESPACE_STR[2] != 'a' || + OTABSL_INTERNAL_INLINE_NAMESPACE_STR[3] != 'd' || + OTABSL_INTERNAL_INLINE_NAMESPACE_STR[4] != '\0', + "options.h misconfigured: OTABSL_OPTION_NAMESPACE_NAME must " + "be changed to a new, unique identifier name."); + +#endif + + +#define OTABSL_NAMESPACE_BEGIN namespace OTABSL_OPTION_NAMESPACE_NAME { +#define OTABSL_NAMESPACE_END } + +// ----------------------------------------------------------------------------- +// Compiler Feature Checks +// ----------------------------------------------------------------------------- + +// OTABSL_HAVE_BUILTIN() +// +// Checks whether the compiler supports a Clang Feature Checking Macro, and if +// so, checks whether it supports the provided builtin function "x" where x +// is one of the functions noted in +// https://clang.llvm.org/docs/LanguageExtensions.html +// +// Note: Use this macro to avoid an extra level of #ifdef __has_builtin check. 
+// http://releases.llvm.org/3.3/tools/clang/docs/LanguageExtensions.html +#ifdef __has_builtin +#define OTABSL_HAVE_BUILTIN(x) __has_builtin(x) +#else +#define OTABSL_HAVE_BUILTIN(x) 0 +#endif + +#if defined(__is_identifier) +#define OTABSL_INTERNAL_HAS_KEYWORD(x) !(__is_identifier(x)) +#else +#define OTABSL_INTERNAL_HAS_KEYWORD(x) 0 +#endif + +// OTABSL_HAVE_TLS is defined to 1 when __thread should be supported. +// We assume __thread is supported on Linux when compiled with Clang or compiled +// against libstdc++ with _GLIBCXX_HAVE_TLS defined. +#ifdef OTABSL_HAVE_TLS +#error OTABSL_HAVE_TLS cannot be directly set +#elif defined(__linux__) && (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS)) +#define OTABSL_HAVE_TLS 1 +#endif + +// OTABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE +// +// Checks whether `std::is_trivially_destructible` is supported. +// +// Notes: All supported compilers using libc++ support this feature, as does +// gcc >= 4.8.1 using libstdc++, and Visual Studio. +#ifdef OTABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE +#error OTABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE cannot be directly set +#elif defined(_LIBCPP_VERSION) || \ + (defined(__clang__) && __clang_major__ >= 15) || \ + (!defined(__clang__) && defined(__GNUC__) && defined(__GLIBCXX__) && \ + (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) || \ + defined(_MSC_VER) +#define OTABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE 1 +#endif + +// OTABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE +// +// Checks whether `std::is_trivially_default_constructible` and +// `std::is_trivially_copy_constructible` are supported. + +// OTABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE +// +// Checks whether `std::is_trivially_copy_assignable` is supported. + +// Notes: Clang with libc++ supports these features, as does gcc >= 5.1 with +// either libc++ or libstdc++, and Visual Studio (but not NVCC). +#if defined(OTABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) +#error OTABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set +#elif defined(OTABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE) +#error OTABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot directly set +#elif (defined(__clang__) && defined(_LIBCPP_VERSION)) || \ + (defined(__clang__) && __clang_major__ >= 15) || \ + (!defined(__clang__) && defined(__GNUC__) && \ + (__GNUC__ > 7 || (__GNUC__ == 7 && __GNUC_MINOR__ >= 4)) && \ + (defined(_LIBCPP_VERSION) || defined(__GLIBCXX__))) || \ + (defined(_MSC_VER) && !defined(__NVCC__)) +#define OTABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1 +#define OTABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1 +#endif + +// OTABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE +// +// Checks whether `std::is_trivially_copyable` is supported. +// +// Notes: Clang 15+ with libc++ supports these features, GCC hasn't been tested. +#if defined(OTABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE) +#error OTABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE cannot be directly set +#elif defined(__clang__) && (__clang_major__ >= 15) +#define OTABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE 1 +#endif + +// OTABSL_HAVE_SOURCE_LOCATION_CURRENT +// +// Indicates whether `absl::OTABSL_OPTION_NAMESPACE_NAME::SourceLocation::current()` will return useful +// information in some contexts. +#ifndef OTABSL_HAVE_SOURCE_LOCATION_CURRENT +#if OTABSL_INTERNAL_HAS_KEYWORD(__builtin_LINE) && \ + OTABSL_INTERNAL_HAS_KEYWORD(__builtin_FILE) +#define OTABSL_HAVE_SOURCE_LOCATION_CURRENT 1 +#endif +#endif + +// OTABSL_HAVE_THREAD_LOCAL +// +// Checks whether C++11's `thread_local` storage duration specifier is +// supported. 
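+//
+// Illustrative use (a hedged sketch):
+//
+//   #ifdef OTABSL_HAVE_THREAD_LOCAL
+//   thread_local int per_thread_counter = 0;
+//   #endif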
+#ifdef OTABSL_HAVE_THREAD_LOCAL +#error OTABSL_HAVE_THREAD_LOCAL cannot be directly set +#elif defined(__APPLE__) +// Notes: +// * Xcode's clang did not support `thread_local` until version 8, and +// even then not for all iOS < 9.0. +// * Xcode 9.3 started disallowing `thread_local` for 32-bit iOS simulator +// targeting iOS 9.x. +// * Xcode 10 moves the deployment target check for iOS < 9.0 to link time +// making __has_feature unreliable there. +// +// Otherwise, `__has_feature` is only supported by Clang so it has be inside +// `defined(__APPLE__)` check. +#if __has_feature(cxx_thread_local) && \ + !(TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0) +#define OTABSL_HAVE_THREAD_LOCAL 1 +#endif +#else // !defined(__APPLE__) +#define OTABSL_HAVE_THREAD_LOCAL 1 +#endif + +// There are platforms for which TLS should not be used even though the compiler +// makes it seem like it's supported (Android NDK < r12b for example). +// This is primarily because of linker problems and toolchain misconfiguration: +// Abseil does not intend to support this indefinitely. Currently, the newest +// toolchain that we intend to support that requires this behavior is the +// r11 NDK - allowing for a 5 year support window on that means this option +// is likely to be removed around June of 2021. +// TLS isn't supported until NDK r12b per +// https://developer.android.com/ndk/downloads/revision_history.html +// Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in +// . For NDK < r16, users should define these macros, +// e.g. `-D__NDK_MAJOR__=11 -D__NKD_MINOR__=0` for NDK r11. +#if defined(__ANDROID__) && defined(__clang__) +#if __has_include() +#include +#endif // __has_include() +#if defined(__ANDROID__) && defined(__clang__) && defined(__NDK_MAJOR__) && \ + defined(__NDK_MINOR__) && \ + ((__NDK_MAJOR__ < 12) || ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1))) +#undef OTABSL_HAVE_TLS +#undef OTABSL_HAVE_THREAD_LOCAL +#endif +#endif // defined(__ANDROID__) && defined(__clang__) + +// Emscripten doesn't yet support `thread_local` or `__thread`. +// https://github.com/emscripten-core/emscripten/issues/3502 +#if defined(__EMSCRIPTEN__) +#undef OTABSL_HAVE_TLS +#undef OTABSL_HAVE_THREAD_LOCAL +#endif // defined(__EMSCRIPTEN__) + +// OTABSL_HAVE_INTRINSIC_INT128 +// +// Checks whether the __int128 compiler extension for a 128-bit integral type is +// supported. +// +// Note: __SIZEOF_INT128__ is defined by Clang and GCC when __int128 is +// supported, but we avoid using it in certain cases: +// * On Clang: +// * Building using Clang for Windows, where the Clang runtime library has +// 128-bit support only on LP64 architectures, but Windows is LLP64. +// * On Nvidia's nvcc: +// * nvcc also defines __GNUC__ and __SIZEOF_INT128__, but not all versions +// actually support __int128. +#ifdef OTABSL_HAVE_INTRINSIC_INT128 +#error OTABSL_HAVE_INTRINSIC_INT128 cannot be directly set +#elif defined(__SIZEOF_INT128__) +#if (defined(__clang__) && !defined(_WIN32)) || \ + (defined(__CUDACC__) && __CUDACC_VER_MAJOR__ >= 9) || \ + (defined(__GNUC__) && !defined(__clang__) && !defined(__CUDACC__)) +#define OTABSL_HAVE_INTRINSIC_INT128 1 +#elif defined(__CUDACC__) +// __CUDACC_VER__ is a full version number before CUDA 9, and is defined to a +// string explaining that it has been removed starting with CUDA 9. We use +// nested #ifs because there is no short-circuiting in the preprocessor. +// NOTE: `__CUDACC__` could be undefined while `__CUDACC_VER__` is defined. 
+#if __CUDACC_VER__ >= 70000 +#define OTABSL_HAVE_INTRINSIC_INT128 1 +#endif // __CUDACC_VER__ >= 70000 +#endif // defined(__CUDACC__) +#endif // OTABSL_HAVE_INTRINSIC_INT128 + +// OTABSL_HAVE_EXCEPTIONS +// +// Checks whether the compiler both supports and enables exceptions. Many +// compilers support a "no exceptions" mode that disables exceptions. +// +// Generally, when OTABSL_HAVE_EXCEPTIONS is not defined: +// +// * Code using `throw` and `try` may not compile. +// * The `noexcept` specifier will still compile and behave as normal. +// * The `noexcept` operator may still return `false`. +// +// For further details, consult the compiler's documentation. +#ifdef OTABSL_HAVE_EXCEPTIONS +#error OTABSL_HAVE_EXCEPTIONS cannot be directly set. + +#elif defined(__clang__) + +#if __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 6) +// Clang >= 3.6 +#if __has_feature(cxx_exceptions) +#define OTABSL_HAVE_EXCEPTIONS 1 +#endif // __has_feature(cxx_exceptions) +#else +// Clang < 3.6 +// http://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html#the-exceptions-macro +#if defined(__EXCEPTIONS) && __has_feature(cxx_exceptions) +#define OTABSL_HAVE_EXCEPTIONS 1 +#endif // defined(__EXCEPTIONS) && __has_feature(cxx_exceptions) +#endif // __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 6) + +// Handle remaining special cases and default to exceptions being supported. +#elif !(defined(__GNUC__) && (__GNUC__ < 5) && !defined(__EXCEPTIONS)) && \ + !(defined(__GNUC__) && (__GNUC__ >= 5) && !defined(__cpp_exceptions)) && \ + !(defined(_MSC_VER) && !defined(_CPPUNWIND)) +#define OTABSL_HAVE_EXCEPTIONS 1 +#endif + +// ----------------------------------------------------------------------------- +// Platform Feature Checks +// ----------------------------------------------------------------------------- + +// Currently supported operating systems and associated preprocessor +// symbols: +// +// Linux and Linux-derived __linux__ +// Android __ANDROID__ (implies __linux__) +// Linux (non-Android) __linux__ && !__ANDROID__ +// Darwin (macOS and iOS) __APPLE__ +// Akaros (http://akaros.org) __ros__ +// Windows _WIN32 +// NaCL __native_client__ +// AsmJS __asmjs__ +// WebAssembly __wasm__ +// Fuchsia __Fuchsia__ +// +// Note that since Android defines both __ANDROID__ and __linux__, one +// may probe for either Linux or Android by simply testing for __linux__. + +// OTABSL_HAVE_MMAP +// +// Checks whether the platform has an mmap(2) implementation as defined in +// POSIX.1-2001. +#ifdef OTABSL_HAVE_MMAP +#error OTABSL_HAVE_MMAP cannot be directly set +#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ + defined(__ros__) || defined(__native_client__) || defined(__asmjs__) || \ + defined(__wasm__) || defined(__Fuchsia__) || defined(__sun) || \ + defined(__ASYLO__) +#define OTABSL_HAVE_MMAP 1 +#endif + +// OTABSL_HAVE_PTHREAD_GETSCHEDPARAM +// +// Checks whether the platform implements the pthread_(get|set)schedparam(3) +// functions as defined in POSIX.1-2001. +#ifdef OTABSL_HAVE_PTHREAD_GETSCHEDPARAM +#error OTABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set +#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ + defined(__ros__) +#define OTABSL_HAVE_PTHREAD_GETSCHEDPARAM 1 +#endif + +// OTABSL_HAVE_SCHED_YIELD +// +// Checks whether the platform implements sched_yield(2) as defined in +// POSIX.1-2001. 
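As a usage sketch (not part of this patch; the spin-wait helper is a made-up example), the macro documented here and defined immediately below would typically be consumed like this, with std::this_thread::yield() as the portable fallback:

    #include <thread>
    #if defined(OTABSL_HAVE_SCHED_YIELD)
    #include <sched.h>
    #endif

    // Hypothetical helper: give up the CPU timeslice while spinning on a flag.
    inline void YieldCpu() {
    #if defined(OTABSL_HAVE_SCHED_YIELD)
      sched_yield();               // POSIX.1-2001
    #else
      std::this_thread::yield();   // portable C++11 fallback
    #endif
    }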
+#ifdef OTABSL_HAVE_SCHED_YIELD +#error OTABSL_HAVE_SCHED_YIELD cannot be directly set +#elif defined(__linux__) || defined(__ros__) || defined(__native_client__) +#define OTABSL_HAVE_SCHED_YIELD 1 +#endif + +// OTABSL_HAVE_SEMAPHORE_H +// +// Checks whether the platform supports the header and sem_init(3) +// family of functions as standardized in POSIX.1-2001. +// +// Note: While Apple provides for both iOS and macOS, it is +// explicitly deprecated and will cause build failures if enabled for those +// platforms. We side-step the issue by not defining it here for Apple +// platforms. +#ifdef OTABSL_HAVE_SEMAPHORE_H +#error OTABSL_HAVE_SEMAPHORE_H cannot be directly set +#elif defined(__linux__) || defined(__ros__) +#define OTABSL_HAVE_SEMAPHORE_H 1 +#endif + +// OTABSL_HAVE_ALARM +// +// Checks whether the platform supports the header and alarm(2) +// function as standardized in POSIX.1-2001. +#ifdef OTABSL_HAVE_ALARM +#error OTABSL_HAVE_ALARM cannot be directly set +#elif defined(__GOOGLE_GRTE_VERSION__) +// feature tests for Google's GRTE +#define OTABSL_HAVE_ALARM 1 +#elif defined(__GLIBC__) +// feature test for glibc +#define OTABSL_HAVE_ALARM 1 +#elif defined(_MSC_VER) +// feature tests for Microsoft's library +#elif defined(__MINGW32__) +// mingw32 doesn't provide alarm(2): +// https://osdn.net/projects/mingw/scm/git/mingw-org-wsl/blobs/5.2-trunk/mingwrt/include/unistd.h +// mingw-w64 provides a no-op implementation: +// https://sourceforge.net/p/mingw-w64/mingw-w64/ci/master/tree/mingw-w64-crt/misc/alarm.c +#elif defined(__EMSCRIPTEN__) +// emscripten doesn't support signals +#elif defined(__Fuchsia__) +// Signals don't exist on fuchsia. +#elif defined(__native_client__) +#else +// other standard libraries +#define OTABSL_HAVE_ALARM 1 +#endif + +// OTABSL_IS_LITTLE_ENDIAN +// OTABSL_IS_BIG_ENDIAN +// +// Checks the endianness of the platform. +// +// Notes: uses the built in endian macros provided by GCC (since 4.6) and +// Clang (since 3.2); see +// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html. +// Otherwise, if _WIN32, assume little endian. Otherwise, bail with an error. +#if defined(OTABSL_IS_BIG_ENDIAN) +#error "OTABSL_IS_BIG_ENDIAN cannot be directly set." +#endif +#if defined(OTABSL_IS_LITTLE_ENDIAN) +#error "OTABSL_IS_LITTLE_ENDIAN cannot be directly set." +#endif + +#if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \ + __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) +#define OTABSL_IS_LITTLE_ENDIAN 1 +#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \ + __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +#define OTABSL_IS_BIG_ENDIAN 1 +#elif defined(_WIN32) +#define OTABSL_IS_LITTLE_ENDIAN 1 +#else +#error "absl endian detection needs to be set up for your compiler" +#endif + +// macOS 10.13 and iOS 10.11 don't let you use , , or +// even though the headers exist and are publicly noted to work. See +// https://github.com/abseil/abseil-cpp/issues/207 and +// https://developer.apple.com/documentation/xcode_release_notes/xcode_10_release_notes +// libc++ spells out the availability requirements in the file +// llvm-project/libcxx/include/__config via the #define +// _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS. 
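The endianness macros defined above are usually consumed in small byte-order helpers. A sketch (not part of this patch; the helper name is invented) that decodes a 32-bit little-endian value from a byte buffer:

    #include <cstdint>
    #include <cstring>

    inline uint32_t LoadLittleEndian32(const unsigned char* p) {
    #if defined(OTABSL_IS_LITTLE_ENDIAN)
      uint32_t v;
      std::memcpy(&v, p, sizeof(v));  // host order already matches the wire
      return v;
    #else  // OTABSL_IS_BIG_ENDIAN
      return static_cast<uint32_t>(p[0]) |
             (static_cast<uint32_t>(p[1]) << 8) |
             (static_cast<uint32_t>(p[2]) << 16) |
             (static_cast<uint32_t>(p[3]) << 24);
    #endif
    }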
+#if defined(__APPLE__) && defined(_LIBCPP_VERSION) && \ + ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101400) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 120000) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \ + __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 50000)) +#define OTABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1 +#else +#define OTABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0 +#endif + +// OTABSL_HAVE_STD_ANY +// +// Checks whether C++17 std::any is available by checking whether exists. +#ifdef OTABSL_HAVE_STD_ANY +#error "OTABSL_HAVE_STD_ANY cannot be directly set." +#endif + +#ifdef __has_include +#if __has_include() && __cplusplus >= 201703L && \ + !OTABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE +#define OTABSL_HAVE_STD_ANY 1 +#endif +#endif + +// OTABSL_HAVE_STD_OPTIONAL +// +// Checks whether C++17 std::optional is available. +#ifdef OTABSL_HAVE_STD_OPTIONAL +#error "OTABSL_HAVE_STD_OPTIONAL cannot be directly set." +#endif + +#ifdef __has_include +#if __has_include() && __cplusplus >= 201703L && \ + !OTABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE +#define OTABSL_HAVE_STD_OPTIONAL 1 +#endif +#endif + +// OTABSL_HAVE_STD_VARIANT +// +// Checks whether C++17 std::variant is available. +#ifdef OTABSL_HAVE_STD_VARIANT +#error "OTABSL_HAVE_STD_VARIANT cannot be directly set." +#endif + +#ifdef __has_include +#if __has_include() && __cplusplus >= 201703L && \ + !OTABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE +#define OTABSL_HAVE_STD_VARIANT 1 +#endif +#endif + +// OTABSL_HAVE_STD_STRING_VIEW +// +// Checks whether C++17 std::string_view is available. +#ifdef OTABSL_HAVE_STD_STRING_VIEW +#error "OTABSL_HAVE_STD_STRING_VIEW cannot be directly set." +#endif + +#ifdef __has_include +#if __has_include() && __cplusplus >= 201703L +#define OTABSL_HAVE_STD_STRING_VIEW 1 +#endif +#endif + +// For MSVC, `__has_include` is supported in VS 2017 15.3, which is later than +// the support for , , , . So we use +// _MSC_VER to check whether we have VS 2017 RTM (when , , +// , is implemented) or higher. Also, `__cplusplus` is +// not correctly set by MSVC, so we use `_MSVC_LANG` to check the language +// version. +// TODO(zhangxy): fix tests before enabling aliasing for `std::any`. +#if defined(_MSC_VER) && _MSC_VER >= 1910 && \ + ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || __cplusplus > 201402) +// #define OTABSL_HAVE_STD_ANY 1 +#define OTABSL_HAVE_STD_OPTIONAL 1 +#define OTABSL_HAVE_STD_VARIANT 1 +#define OTABSL_HAVE_STD_STRING_VIEW 1 +#endif + +// OTABSL_USES_STD_ANY +// +// Indicates whether absl::OTABSL_OPTION_NAMESPACE_NAME::any is an alias for std::any. +#if !defined(OTABSL_OPTION_USE_STD_ANY) +#error options.h is misconfigured. +#elif OTABSL_OPTION_USE_STD_ANY == 0 || \ + (OTABSL_OPTION_USE_STD_ANY == 2 && !defined(OTABSL_HAVE_STD_ANY)) +#undef OTABSL_USES_STD_ANY +#elif OTABSL_OPTION_USE_STD_ANY == 1 || \ + (OTABSL_OPTION_USE_STD_ANY == 2 && defined(OTABSL_HAVE_STD_ANY)) +#define OTABSL_USES_STD_ANY 1 +#else +#error options.h is misconfigured. +#endif + +// OTABSL_USES_STD_OPTIONAL +// +// Indicates whether absl::OTABSL_OPTION_NAMESPACE_NAME::optional is an alias for std::optional. +#if !defined(OTABSL_OPTION_USE_STD_OPTIONAL) +#error options.h is misconfigured. 
+#elif OTABSL_OPTION_USE_STD_OPTIONAL == 0 || \ + (OTABSL_OPTION_USE_STD_OPTIONAL == 2 && !defined(OTABSL_HAVE_STD_OPTIONAL)) +#undef OTABSL_USES_STD_OPTIONAL +#elif OTABSL_OPTION_USE_STD_OPTIONAL == 1 || \ + (OTABSL_OPTION_USE_STD_OPTIONAL == 2 && defined(OTABSL_HAVE_STD_OPTIONAL)) +#define OTABSL_USES_STD_OPTIONAL 1 +#else +#error options.h is misconfigured. +#endif + +// OTABSL_USES_STD_VARIANT +// +// Indicates whether absl::OTABSL_OPTION_NAMESPACE_NAME::variant is an alias for std::variant. +#if !defined(OTABSL_OPTION_USE_STD_VARIANT) +#error options.h is misconfigured. +#elif OTABSL_OPTION_USE_STD_VARIANT == 0 || \ + (OTABSL_OPTION_USE_STD_VARIANT == 2 && !defined(OTABSL_HAVE_STD_VARIANT)) +#undef OTABSL_USES_STD_VARIANT +#elif OTABSL_OPTION_USE_STD_VARIANT == 1 || \ + (OTABSL_OPTION_USE_STD_VARIANT == 2 && defined(OTABSL_HAVE_STD_VARIANT)) +#define OTABSL_USES_STD_VARIANT 1 +#else +#error options.h is misconfigured. +#endif + +// OTABSL_USES_STD_STRING_VIEW +// +// Indicates whether absl::OTABSL_OPTION_NAMESPACE_NAME::string_view is an alias for std::string_view. +#if !defined(OTABSL_OPTION_USE_STD_STRING_VIEW) +#error options.h is misconfigured. +#elif OTABSL_OPTION_USE_STD_STRING_VIEW == 0 || \ + (OTABSL_OPTION_USE_STD_STRING_VIEW == 2 && \ + !defined(OTABSL_HAVE_STD_STRING_VIEW)) +#undef OTABSL_USES_STD_STRING_VIEW +#elif OTABSL_OPTION_USE_STD_STRING_VIEW == 1 || \ + (OTABSL_OPTION_USE_STD_STRING_VIEW == 2 && \ + defined(OTABSL_HAVE_STD_STRING_VIEW)) +#define OTABSL_USES_STD_STRING_VIEW 1 +#else +#error options.h is misconfigured. +#endif + +// In debug mode, MSVC 2017's std::variant throws a EXCEPTION_ACCESS_VIOLATION +// SEH exception from emplace for variant when constructing the +// struct can throw. This defeats some of variant_test and +// variant_exception_safety_test. +#if defined(_MSC_VER) && _MSC_VER >= 1700 && defined(_DEBUG) +#define OTABSL_INTERNAL_MSVC_2017_DBG_MODE +#endif + +// OTABSL_INTERNAL_MANGLED_NS +// OTABSL_INTERNAL_MANGLED_BACKREFERENCE +// +// Internal macros for building up mangled names in our internal fork of CCTZ. +// This implementation detail is only needed and provided for the MSVC build. +// +// These macros both expand to string literals. OTABSL_INTERNAL_MANGLED_NS is +// the mangled spelling of the `absl` namespace, and +// OTABSL_INTERNAL_MANGLED_BACKREFERENCE is a back-reference integer representing +// the proper count to skip past the CCTZ fork namespace names. (This number +// is one larger when there is an inline namespace name to skip.) +#if defined(_MSC_VER) +#define OTABSL_INTERNAL_MANGLED_NS \ + OTABSL_INTERNAL_TOKEN_STR(OTABSL_OPTION_NAMESPACE_NAME) "@absl" +#define OTABSL_INTERNAL_MANGLED_BACKREFERENCE "6" +#endif + +#undef OTABSL_INTERNAL_HAS_KEYWORD + +// OTABSL_DLL +// +// When building Abseil as a DLL, this macro expands to `__declspec(dllexport)` +// so we can annotate symbols appropriately as being exported. When used in +// headers consuming a DLL, this macro expands to `__declspec(dllimport)` so +// that consumers know the symbol is defined inside the DLL. In all other cases, +// the macro expands to nothing. 
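As a sketch of intended use (not part of this patch; `MyApi` and the free function are hypothetical), the OTABSL_DLL macro described here and defined just below is placed on declarations in headers shared between the DLL build and its consumers:

    // In a public header of the DLL:
    class OTABSL_DLL MyApi {
     public:
      int Version() const;
    };

    OTABSL_DLL int GlobalEntryPoint();

    // When the library itself is compiled with OTABSL_BUILD_DLL defined, the
    // macro expands to __declspec(dllexport); in MSVC consumers it expands to
    // __declspec(dllimport); on other toolchains it expands to nothing.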
+#if defined(_MSC_VER) +#if defined(OTABSL_BUILD_DLL) +#define OTABSL_DLL __declspec(dllexport) +#elif 1 +#define OTABSL_DLL __declspec(dllimport) +#else +#define OTABSL_DLL +#endif +#else +#define OTABSL_DLL +#endif // defined(_MSC_VER) + +#endif // OTABSL_BASE_CONFIG_H_ diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/internal/identity.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/internal/identity.h new file mode 100644 index 000000000..4afba3179 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/internal/identity.h @@ -0,0 +1,37 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef OTABSL_BASE_INTERNAL_IDENTITY_H_ +#define OTABSL_BASE_INTERNAL_IDENTITY_H_ + +#include "../config.h" + +namespace absl { +OTABSL_NAMESPACE_BEGIN +namespace internal { + +template +struct identity { + typedef T type; +}; + +template +using identity_t = typename identity::type; + +} // namespace internal +OTABSL_NAMESPACE_END +} // namespace absl + +#endif // OTABSL_BASE_INTERNAL_IDENTITY_H_ diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/internal/inline_variable.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/internal/inline_variable.h new file mode 100644 index 000000000..dd66e9f22 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/internal/inline_variable.h @@ -0,0 +1,107 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef OTABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_ +#define OTABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_ + +#include + +#include "identity.h" + +// File: +// This file define a macro that allows the creation of or emulation of C++17 +// inline variables based on whether or not the feature is supported. + +//////////////////////////////////////////////////////////////////////////////// +// Macro: OTABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) +// +// Description: +// Expands to the equivalent of an inline constexpr instance of the specified +// `type` and `name`, initialized to the value `init`. If the compiler being +// used is detected as supporting actual inline variables as a language +// feature, then the macro expands to an actual inline variable definition. 
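The identity/identity_t helpers added above are commonly used to place a template parameter into a non-deduced context, which is also why the inline-variable macros below route their types through identity_t. A hypothetical sketch (not part of this patch; Assign is an invented example):

    // T is deduced only from `dst`; the second parameter's type names T
    // through a nested-name-specifier, so it is a non-deduced context and the
    // argument converts to T instead of driving deduction.
    template <typename T>
    void Assign(T& dst,
                typename absl::OTABSL_OPTION_NAMESPACE_NAME::internal::identity<T>::type src) {
      dst = src;
    }

    // Usage: int x = 0; Assign(x, 3.9);  // T = int; 3.9 converts to 3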
+// +// Requires: +// `type` is a type that is usable in an extern variable declaration. +// +// Requires: `name` is a valid identifier +// +// Requires: +// `init` is an expression that can be used in the following definition: +// constexpr type name = init; +// +// Usage: +// +// // Equivalent to: `inline constexpr size_t variant_npos = -1;` +// OTABSL_INTERNAL_INLINE_CONSTEXPR(size_t, variant_npos, -1); +// +// Differences in implementation: +// For a direct, language-level inline variable, decltype(name) will be the +// type that was specified along with const qualification, whereas for +// emulated inline variables, decltype(name) may be different (in practice +// it will likely be a reference type). +//////////////////////////////////////////////////////////////////////////////// + +#ifdef __cpp_inline_variables + +// Clang's -Wmissing-variable-declarations option erroneously warned that +// inline constexpr objects need to be pre-declared. This has now been fixed, +// but we will need to support this workaround for people building with older +// versions of clang. +// +// Bug: https://bugs.llvm.org/show_bug.cgi?id=35862 +// +// Note: +// identity_t is used here so that the const and name are in the +// appropriate place for pointer types, reference types, function pointer +// types, etc.. +#if defined(__clang__) +#define OTABSL_INTERNAL_EXTERN_DECL(type, name) \ + extern const ::absl::OTABSL_OPTION_NAMESPACE_NAME::internal::identity_t name; +#else // Otherwise, just define the macro to do nothing. +#define OTABSL_INTERNAL_EXTERN_DECL(type, name) +#endif // defined(__clang__) + +// See above comment at top of file for details. +#define OTABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) \ + OTABSL_INTERNAL_EXTERN_DECL(type, name) \ + inline constexpr ::absl::OTABSL_OPTION_NAMESPACE_NAME::internal::identity_t name = init + +#else + +// See above comment at top of file for details. +// +// Note: +// identity_t is used here so that the const and name are in the +// appropriate place for pointer types, reference types, function pointer +// types, etc.. +#define OTABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init) \ + template \ + struct AbslInternalInlineVariableHolder##name { \ + static constexpr ::absl::OTABSL_OPTION_NAMESPACE_NAME::internal::identity_t kInstance = init; \ + }; \ + \ + template \ + constexpr ::absl::OTABSL_OPTION_NAMESPACE_NAME::internal::identity_t \ + AbslInternalInlineVariableHolder##name::kInstance; \ + \ + static constexpr const ::absl::OTABSL_OPTION_NAMESPACE_NAME::internal::identity_t& \ + name = /* NOLINT */ \ + AbslInternalInlineVariableHolder##name<>::kInstance; \ + static_assert(sizeof(void (*)(decltype(name))) != 0, \ + "Silence unused variable warnings.") + +#endif // __cpp_inline_variables + +#endif // OTABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_ diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/internal/invoke.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/internal/invoke.h new file mode 100644 index 000000000..c37f43cfc --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/internal/invoke.h @@ -0,0 +1,188 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// absl::OTABSL_OPTION_NAMESPACE_NAME::base_internal::Invoke(f, args...) is an implementation of +// INVOKE(f, args...) from section [func.require] of the C++ standard. +// +// [func.require] +// Define INVOKE (f, t1, t2, ..., tN) as follows: +// 1. (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T +// and t1 is an object of type T or a reference to an object of type T or a +// reference to an object of a type derived from T; +// 2. ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a +// class T and t1 is not one of the types described in the previous item; +// 3. t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is +// an object of type T or a reference to an object of type T or a reference +// to an object of a type derived from T; +// 4. (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1 +// is not one of the types described in the previous item; +// 5. f(t1, t2, ..., tN) in all other cases. +// +// The implementation is SFINAE-friendly: substitution failure within Invoke() +// isn't an error. + +#ifndef OTABSL_BASE_INTERNAL_INVOKE_H_ +#define OTABSL_BASE_INTERNAL_INVOKE_H_ + +#include +#include +#include + +#include "../../meta/type_traits.h" + +// The following code is internal implementation detail. See the comment at the +// top of this file for the API documentation. + +namespace absl { +OTABSL_NAMESPACE_BEGIN +namespace base_internal { + +// The five classes below each implement one of the clauses from the definition +// of INVOKE. The inner class template Accept checks whether the +// clause is applicable; static function template Invoke(f, args...) does the +// invocation. +// +// By separating the clause selection logic from invocation we make sure that +// Invoke() does exactly what the standard says. + +template +struct StrippedAccept { + template + struct Accept : Derived::template AcceptImpl::type>::type...> {}; +}; + +// (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T +// and t1 is an object of type T or a reference to an object of type T or a +// reference to an object of a type derived from T. +struct MemFunAndRef : StrippedAccept { + template + struct AcceptImpl : std::false_type {}; + + template + struct AcceptImpl + : std::integral_constant::value && + absl::OTABSL_OPTION_NAMESPACE_NAME::is_function::value> { + }; + + template + static decltype((std::declval().* + std::declval())(std::declval()...)) + Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) { + return (std::forward(obj).* + std::forward(mem_fun))(std::forward(args)...); + } +}; + +// ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a +// class T and t1 is not one of the types described in the previous item. 
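Before the remaining clauses below, a caller-side sketch (not part of this patch; `Widget` is a made-up type) of which clause base_internal::Invoke() ends up selecting:

    #include <cstddef>
    #include <string>

    struct Widget {
      std::string name;
      std::size_t Size() const { return name.size(); }
    };

    void InvokeDemo() {
      namespace bi = absl::OTABSL_OPTION_NAMESPACE_NAME::base_internal;
      Widget w{"example"};
      Widget* pw = &w;

      auto a = bi::Invoke(&Widget::Size, w);    // clause 1: mem-fn + reference
      auto b = bi::Invoke(&Widget::Size, pw);   // clause 2: mem-fn + pointer
      auto c = bi::Invoke(&Widget::name, w);    // clause 3: data member + reference
      auto d = bi::Invoke([](int x) { return x + 1; }, 41);  // clause 5: plain callable
      (void)a; (void)b; (void)c; (void)d;
    }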
+struct MemFunAndPtr : StrippedAccept { + template + struct AcceptImpl : std::false_type {}; + + template + struct AcceptImpl + : std::integral_constant::value && + absl::OTABSL_OPTION_NAMESPACE_NAME::is_function::value> { + }; + + template + static decltype(((*std::declval()).* + std::declval())(std::declval()...)) + Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... args) { + return ((*std::forward(ptr)).* + std::forward(mem_fun))(std::forward(args)...); + } +}; + +// t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is +// an object of type T or a reference to an object of type T or a reference +// to an object of a type derived from T. +struct DataMemAndRef : StrippedAccept { + template + struct AcceptImpl : std::false_type {}; + + template + struct AcceptImpl + : std::integral_constant::value && + !absl::OTABSL_OPTION_NAMESPACE_NAME::is_function::value> {}; + + template + static decltype(std::declval().*std::declval()) Invoke( + DataMem&& data_mem, Ref&& ref) { + return std::forward(ref).*std::forward(data_mem); + } +}; + +// (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1 +// is not one of the types described in the previous item. +struct DataMemAndPtr : StrippedAccept { + template + struct AcceptImpl : std::false_type {}; + + template + struct AcceptImpl + : std::integral_constant::value && + !absl::OTABSL_OPTION_NAMESPACE_NAME::is_function::value> {}; + + template + static decltype((*std::declval()).*std::declval()) Invoke( + DataMem&& data_mem, Ptr&& ptr) { + return (*std::forward(ptr)).*std::forward(data_mem); + } +}; + +// f(t1, t2, ..., tN) in all other cases. +struct Callable { + // Callable doesn't have Accept because it's the last clause that gets picked + // when none of the previous clauses are applicable. + template + static decltype(std::declval()(std::declval()...)) Invoke( + F&& f, Args&&... args) { + return std::forward(f)(std::forward(args)...); + } +}; + +// Resolves to the first matching clause. +template +struct Invoker { + typedef typename std::conditional< + MemFunAndRef::Accept::value, MemFunAndRef, + typename std::conditional< + MemFunAndPtr::Accept::value, MemFunAndPtr, + typename std::conditional< + DataMemAndRef::Accept::value, DataMemAndRef, + typename std::conditional::value, + DataMemAndPtr, Callable>::type>::type>:: + type>::type type; +}; + +// The result type of Invoke. +template +using InvokeT = decltype(Invoker::type::Invoke( + std::declval(), std::declval()...)); + +// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section +// [func.require] of the C++ standard. +template +InvokeT Invoke(F&& f, Args&&... args) { + return Invoker::type::Invoke(std::forward(f), + std::forward(args)...); +} + +} // namespace base_internal +OTABSL_NAMESPACE_END +} // namespace absl + +#endif // OTABSL_BASE_INTERNAL_INVOKE_H_ diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/macros.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/macros.h new file mode 100644 index 000000000..707c375ed --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/macros.h @@ -0,0 +1,220 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: macros.h +// ----------------------------------------------------------------------------- +// +// This header file defines the set of language macros used within Abseil code. +// For the set of macros used to determine supported compilers and platforms, +// see absl/base/config.h instead. +// +// This code is compiled directly on many platforms, including client +// platforms like Windows, Mac, and embedded systems. Before making +// any changes here, make sure that you're not breaking any platforms. + +#ifndef OTABSL_BASE_MACROS_H_ +#define OTABSL_BASE_MACROS_H_ + +#include +#include + +#include "attributes.h" +#include "optimization.h" +#include "port.h" + +// OTABSL_ARRAYSIZE() +// +// Returns the number of elements in an array as a compile-time constant, which +// can be used in defining new arrays. If you use this macro on a pointer by +// mistake, you will get a compile-time error. +#define OTABSL_ARRAYSIZE(array) \ + (sizeof(::absl::OTABSL_OPTION_NAMESPACE_NAME::macros_internal::ArraySizeHelper(array))) + +namespace absl { +OTABSL_NAMESPACE_BEGIN +namespace macros_internal { +// Note: this internal template function declaration is used by OTABSL_ARRAYSIZE. +// The function doesn't need a definition, as we only use its type. +template +auto ArraySizeHelper(const T (&array)[N]) -> char (&)[N]; +} // namespace macros_internal +OTABSL_NAMESPACE_END +} // namespace absl + +// kLinkerInitialized +// +// An enum used only as a constructor argument to indicate that a variable has +// static storage duration, and that the constructor should do nothing to its +// state. Use of this macro indicates to the reader that it is legal to +// declare a static instance of the class, provided the constructor is given +// the absl::OTABSL_OPTION_NAMESPACE_NAME::base_internal::kLinkerInitialized argument. +// +// Normally, it is unsafe to declare a static variable that has a constructor or +// a destructor because invocation order is undefined. However, if the type can +// be zero-initialized (which the loader does for static variables) into a valid +// state and the type's destructor does not affect storage, then a constructor +// for static initialization can be declared. +// +// Example: +// // Declaration +// explicit MyClass(absl::OTABSL_OPTION_NAMESPACE_NAME::base_internal:LinkerInitialized x) {} +// +// // Invocation +// static MyClass my_global(absl::OTABSL_OPTION_NAMESPACE_NAME::base_internal::kLinkerInitialized); +namespace absl { +OTABSL_NAMESPACE_BEGIN +namespace base_internal { +enum LinkerInitialized { + kLinkerInitialized = 0, +}; +} // namespace base_internal +OTABSL_NAMESPACE_END +} // namespace absl + +// OTABSL_FALLTHROUGH_INTENDED +// +// Annotates implicit fall-through between switch labels, allowing a case to +// indicate intentional fallthrough and turn off warnings about any lack of a +// `break` statement. 
The OTABSL_FALLTHROUGH_INTENDED macro should be followed by +// a semicolon and can be used in most places where `break` can, provided that +// no statements exist between it and the next switch label. +// +// Example: +// +// switch (x) { +// case 40: +// case 41: +// if (truth_is_out_there) { +// ++x; +// OTABSL_FALLTHROUGH_INTENDED; // Use instead of/along with annotations +// // in comments +// } else { +// return x; +// } +// case 42: +// ... +// +// Notes: when compiled with clang in C++11 mode, the OTABSL_FALLTHROUGH_INTENDED +// macro is expanded to the [[clang::fallthrough]] attribute, which is analysed +// when performing switch labels fall-through diagnostic +// (`-Wimplicit-fallthrough`). See clang documentation on language extensions +// for details: +// https://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough +// +// When used with unsupported compilers, the OTABSL_FALLTHROUGH_INTENDED macro +// has no effect on diagnostics. In any case this macro has no effect on runtime +// behavior and performance of code. +#ifdef OTABSL_FALLTHROUGH_INTENDED +#error "OTABSL_FALLTHROUGH_INTENDED should not be defined." +#endif + +// TODO(zhangxy): Use c++17 standard [[fallthrough]] macro, when supported. +#if defined(__clang__) && defined(__has_warning) +#if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough") +#define OTABSL_FALLTHROUGH_INTENDED [[clang::fallthrough]] +#endif +#elif defined(__GNUC__) && __GNUC__ >= 7 +#define OTABSL_FALLTHROUGH_INTENDED [[gnu::fallthrough]] +#endif + +#ifndef OTABSL_FALLTHROUGH_INTENDED +#define OTABSL_FALLTHROUGH_INTENDED \ + do { \ + } while (0) +#endif + +// OTABSL_DEPRECATED() +// +// Marks a deprecated class, struct, enum, function, method and variable +// declarations. The macro argument is used as a custom diagnostic message (e.g. +// suggestion of a better alternative). +// +// Examples: +// +// class OTABSL_DEPRECATED("Use Bar instead") Foo {...}; +// +// OTABSL_DEPRECATED("Use Baz() instead") void Bar() {...} +// +// template +// OTABSL_DEPRECATED("Use DoThat() instead") +// void DoThis(); +// +// Every usage of a deprecated entity will trigger a warning when compiled with +// clang's `-Wdeprecated-declarations` option. This option is turned off by +// default, but the warnings will be reported by clang-tidy. +#if defined(__clang__) && __cplusplus >= 201103L +#define OTABSL_DEPRECATED(message) __attribute__((deprecated(message))) +#endif + +#ifndef OTABSL_DEPRECATED +#define OTABSL_DEPRECATED(message) +#endif + +// OTABSL_BAD_CALL_IF() +// +// Used on a function overload to trap bad calls: any call that matches the +// overload will cause a compile-time error. This macro uses a clang-specific +// "enable_if" attribute, as described at +// https://clang.llvm.org/docs/AttributeReference.html#enable-if +// +// Overloads which use this macro should be bracketed by +// `#ifdef OTABSL_BAD_CALL_IF`. +// +// Example: +// +// int isdigit(int c); +// #ifdef OTABSL_BAD_CALL_IF +// int isdigit(int c) +// OTABSL_BAD_CALL_IF(c <= -1 || c > 255, +// "'c' must have the value of an unsigned char or EOF"); +// #endif // OTABSL_BAD_CALL_IF +#if OTABSL_HAVE_ATTRIBUTE(enable_if) +#define OTABSL_BAD_CALL_IF(expr, msg) \ + __attribute__((enable_if(expr, "Bad call trap"), unavailable(msg))) +#endif + +// OTABSL_ASSERT() +// +// In C++11, `assert` can't be used portably within constexpr functions. +// OTABSL_ASSERT functions as a runtime assert but works in C++11 constexpr +// functions. 
Example: +// +// constexpr double Divide(double a, double b) { +// return OTABSL_ASSERT(b != 0), a / b; +// } +// +// This macro is inspired by +// https://akrzemi1.wordpress.com/2017/05/18/asserts-in-constexpr-functions/ +#if defined(NDEBUG) +#define OTABSL_ASSERT(expr) \ + (false ? static_cast(expr) : static_cast(0)) +#else +#define OTABSL_ASSERT(expr) \ + (OTABSL_PREDICT_TRUE((expr)) ? static_cast(0) \ + : [] { assert(false && #expr); }()) // NOLINT +#endif + +#ifdef OTABSL_HAVE_EXCEPTIONS +#define OTABSL_INTERNAL_TRY try +#define OTABSL_INTERNAL_CATCH_ANY catch (...) +#define OTABSL_INTERNAL_RETHROW do { throw; } while (false) +#else // OTABSL_HAVE_EXCEPTIONS +#define OTABSL_INTERNAL_TRY if (true) +#define OTABSL_INTERNAL_CATCH_ANY else if (false) +#define OTABSL_INTERNAL_RETHROW do {} while (false) +#endif // OTABSL_HAVE_EXCEPTIONS + +#endif // OTABSL_BASE_MACROS_H_ diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/optimization.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/optimization.h new file mode 100644 index 000000000..69713654a --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/optimization.h @@ -0,0 +1,181 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: optimization.h +// ----------------------------------------------------------------------------- +// +// This header file defines portable macros for performance optimization. + +#ifndef OTABSL_BASE_OPTIMIZATION_H_ +#define OTABSL_BASE_OPTIMIZATION_H_ + +#include "config.h" + +// OTABSL_BLOCK_TAIL_CALL_OPTIMIZATION +// +// Instructs the compiler to avoid optimizing tail-call recursion. Use of this +// macro is useful when you wish to preserve the existing function order within +// a stack trace for logging, debugging, or profiling purposes. +// +// Example: +// +// int f() { +// int result = g(); +// OTABSL_BLOCK_TAIL_CALL_OPTIMIZATION(); +// return result; +// } +#if defined(__pnacl__) +#define OTABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; } +#elif defined(__clang__) +// Clang will not tail call given inline volatile assembly. +#define OTABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("") +#elif defined(__GNUC__) +// GCC will not tail call given inline volatile assembly. +#define OTABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("") +#elif defined(_MSC_VER) +#include +// The __nop() intrinsic blocks the optimisation. +#define OTABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __nop() +#else +#define OTABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; } +#endif + +// OTABSL_CACHELINE_SIZE +// +// Explicitly defines the size of the L1 cache for purposes of alignment. 
+// Setting the cacheline size allows you to specify that certain objects be +// aligned on a cacheline boundary with `OTABSL_CACHELINE_ALIGNED` declarations. +// (See below.) +// +// NOTE: this macro should be replaced with the following C++17 features, when +// those are generally available: +// +// * `std::hardware_constructive_interference_size` +// * `std::hardware_destructive_interference_size` +// +// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html +// for more information. +#if defined(__GNUC__) +// Cache line alignment +#if defined(__i386__) || defined(__x86_64__) +#define OTABSL_CACHELINE_SIZE 64 +#elif defined(__powerpc64__) +#define OTABSL_CACHELINE_SIZE 128 +#elif defined(__aarch64__) +// We would need to read special register ctr_el0 to find out L1 dcache size. +// This value is a good estimate based on a real aarch64 machine. +#define OTABSL_CACHELINE_SIZE 64 +#elif defined(__arm__) +// Cache line sizes for ARM: These values are not strictly correct since +// cache line sizes depend on implementations, not architectures. There +// are even implementations with cache line sizes configurable at boot +// time. +#if defined(__ARM_ARCH_5T__) +#define OTABSL_CACHELINE_SIZE 32 +#elif defined(__ARM_ARCH_7A__) +#define OTABSL_CACHELINE_SIZE 64 +#endif +#endif + +#ifndef OTABSL_CACHELINE_SIZE +// A reasonable default guess. Note that overestimates tend to waste more +// space, while underestimates tend to waste more time. +#define OTABSL_CACHELINE_SIZE 64 +#endif + +// OTABSL_CACHELINE_ALIGNED +// +// Indicates that the declared object be cache aligned using +// `OTABSL_CACHELINE_SIZE` (see above). Cacheline aligning objects allows you to +// load a set of related objects in the L1 cache for performance improvements. +// Cacheline aligning objects properly allows constructive memory sharing and +// prevents destructive (or "false") memory sharing. +// +// NOTE: this macro should be replaced with usage of `alignas()` using +// `std::hardware_constructive_interference_size` and/or +// `std::hardware_destructive_interference_size` when available within C++17. +// +// See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html +// for more information. +// +// On some compilers, `OTABSL_CACHELINE_ALIGNED` expands to an `__attribute__` +// or `__declspec` attribute. For compilers where this is not known to work, +// the macro expands to nothing. +// +// No further guarantees are made here. The result of applying the macro +// to variables and types is always implementation-defined. +// +// WARNING: It is easy to use this attribute incorrectly, even to the point +// of causing bugs that are difficult to diagnose, crash, etc. It does not +// of itself guarantee that objects are aligned to a cache line. +// +// NOTE: Some compilers are picky about the locations of annotations such as +// this attribute, so prefer to put it at the beginning of your declaration. +// For example, +// +// OTABSL_CACHELINE_ALIGNED static Foo* foo = ... +// +// class OTABSL_CACHELINE_ALIGNED Bar { ... +// +// Recommendations: +// +// 1) Consult compiler documentation; this comment is not kept in sync as +// toolchains evolve. +// 2) Verify your use has the intended effect. This often requires inspecting +// the generated machine code. +// 3) Prefer applying this attribute to individual variables. Avoid +// applying it to types. This tends to localize the effect. 
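A sketch of the intended use of the macro defined just below (not part of this patch; the two counters are hypothetical): aligning individual hot variables so that data written by different threads does not share a cache line.

    #include <atomic>
    #include <cstdint>

    // Written by the producer thread and the consumer thread respectively;
    // cache-line alignment keeps them from falsely sharing a line.
    OTABSL_CACHELINE_ALIGNED static std::atomic<uint64_t> g_produced{0};
    OTABSL_CACHELINE_ALIGNED static std::atomic<uint64_t> g_consumed{0};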
+#define OTABSL_CACHELINE_ALIGNED __attribute__((aligned(OTABSL_CACHELINE_SIZE))) +#elif defined(_MSC_VER) +#define OTABSL_CACHELINE_SIZE 64 +#define OTABSL_CACHELINE_ALIGNED __declspec(align(OTABSL_CACHELINE_SIZE)) +#else +#define OTABSL_CACHELINE_SIZE 64 +#define OTABSL_CACHELINE_ALIGNED +#endif + +// OTABSL_PREDICT_TRUE, OTABSL_PREDICT_FALSE +// +// Enables the compiler to prioritize compilation using static analysis for +// likely paths within a boolean branch. +// +// Example: +// +// if (OTABSL_PREDICT_TRUE(expression)) { +// return result; // Faster if more likely +// } else { +// return 0; +// } +// +// Compilers can use the information that a certain branch is not likely to be +// taken (for instance, a CHECK failure) to optimize for the common case in +// the absence of better information (ie. compiling gcc with `-fprofile-arcs`). +// +// Recommendation: Modern CPUs dynamically predict branch execution paths, +// typically with accuracy greater than 97%. As a result, annotating every +// branch in a codebase is likely counterproductive; however, annotating +// specific branches that are both hot and consistently mispredicted is likely +// to yield performance improvements. +#if OTABSL_HAVE_BUILTIN(__builtin_expect) || \ + (defined(__GNUC__) && !defined(__clang__)) +#define OTABSL_PREDICT_FALSE(x) (__builtin_expect(x, 0)) +#define OTABSL_PREDICT_TRUE(x) (__builtin_expect(false || (x), true)) +#else +#define OTABSL_PREDICT_FALSE(x) (x) +#define OTABSL_PREDICT_TRUE(x) (x) +#endif + +#endif // OTABSL_BASE_OPTIMIZATION_H_ diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/options.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/options.h new file mode 100644 index 000000000..c4b00a3d3 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/options.h @@ -0,0 +1,216 @@ +#ifndef OTABSL_BASE_OPTIONS_H_ +#define OTABSL_BASE_OPTIONS_H_ + +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: options.h +// ----------------------------------------------------------------------------- +// +// This file contains Abseil configuration options for setting specific +// implementations instead of letting Abseil determine which implementation to +// use at compile-time. Setting these options may be useful for package or build +// managers who wish to guarantee ABI stability within binary builds (which are +// otherwise difficult to enforce). +// +// *** IMPORTANT NOTICE FOR PACKAGE MANAGERS: It is important that +// maintainers of package managers who wish to package Abseil read and +// understand this file! *** +// +// Abseil contains a number of possible configuration endpoints, based on +// parameters such as the detected platform, language version, or command-line +// flags used to invoke the underlying binary. 
As is the case with all +// libraries, binaries which contain Abseil code must ensure that separate +// packages use the same compiled copy of Abseil to avoid a diamond dependency +// problem, which can occur if two packages built with different Abseil +// configuration settings are linked together. Diamond dependency problems in +// C++ may manifest as violations to the One Definition Rule (ODR) (resulting in +// linker errors), or undefined behavior (resulting in crashes). +// +// Diamond dependency problems can be avoided if all packages utilize the same +// exact version of Abseil. Building from source code with the same compilation +// parameters is the easiest way to avoid such dependency problems. However, for +// package managers who cannot control such compilation parameters, we are +// providing the file to allow you to inject ABI (Application Binary Interface) +// stability across builds. Settings options in this file will neither change +// API nor ABI, providing a stable copy of Abseil between packages. +// +// Care must be taken to keep options within these configurations isolated +// from any other dynamic settings, such as command-line flags which could alter +// these options. This file is provided specifically to help build and package +// managers provide a stable copy of Abseil within their libraries and binaries; +// other developers should not have need to alter the contents of this file. +// +// ----------------------------------------------------------------------------- +// Usage +// ----------------------------------------------------------------------------- +// +// For any particular package release, set the appropriate definitions within +// this file to whatever value makes the most sense for your package(s). Note +// that, by default, most of these options, at the moment, affect the +// implementation of types; future options may affect other implementation +// details. +// +// NOTE: the defaults within this file all assume that Abseil can select the +// proper Abseil implementation at compile-time, which will not be sufficient +// to guarantee ABI stability to package managers. + +// Include a standard library header to allow configuration based on the +// standard library in use. 
+// Using C++20 feature-test macros when possible, otherwise fall back to +// ciso646/iso646.h.There are warnings when including ciso646 in C++17 mode +#ifdef __has_include +# if __has_include() +# include +# endif +#elif defined(_MSC_VER) && \ + ((defined(__cplusplus) && __cplusplus >= 202002L) || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)) +# if _MSC_VER >= 1922 +# include +# endif +#else +# if defined(__GNUC__) && !defined(__clang__) && !defined(__apple_build_version__) +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wcpp" +# elif defined(__clang__) || defined(__apple_build_version__) +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wcpp" +# endif +# include +# if defined(__GNUC__) && !defined(__clang__) && !defined(__apple_build_version__) +# pragma GCC diagnostic pop +# elif defined(__clang__) || defined(__apple_build_version__) +# pragma clang diagnostic pop +# endif +#endif + +// ----------------------------------------------------------------------------- +// Type Compatibility Options +// ----------------------------------------------------------------------------- +// +// OTABSL_OPTION_USE_STD_ANY +// +// This option controls whether absl::OTABSL_OPTION_NAMESPACE_NAME::any is implemented as an alias to +// std::any, or as an independent implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use an alias to std::any. This requires that all code +// using Abseil is built in C++17 mode or later. +// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if a working std::any is available. This option is +// useful when you are building your entire program, including all of its +// dependencies, from source. It should not be used otherwise -- for example, +// if you are distributing Abseil in a binary package manager -- since in +// mode 2, absl::OTABSL_OPTION_NAMESPACE_NAME::any will name a different type, with a different mangled name +// and binary layout, depending on the compiler flags passed by the end user. +// For more info, see https://abseil.io/about/design/dropin-types. +// +// User code should not inspect this macro. To check in the preprocessor if +// absl::OTABSL_OPTION_NAMESPACE_NAME::any is a typedef of std::any, use the feature macro OTABSL_USES_STD_ANY. + +#define OTABSL_OPTION_USE_STD_ANY 0 + + +// OTABSL_OPTION_USE_STD_OPTIONAL +// +// This option controls whether absl::OTABSL_OPTION_NAMESPACE_NAME::optional is implemented as an alias to +// std::optional, or as an independent implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use an alias to std::optional. This requires that all +// code using Abseil is built in C++17 mode or later. +// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if a working std::optional is available. This option +// is useful when you are building your program from source. It should not be +// used otherwise -- for example, if you are distributing Abseil in a binary +// package manager -- since in mode 2, absl::OTABSL_OPTION_NAMESPACE_NAME::optional will name a different +// type, with a different mangled name and binary layout, depending on the +// compiler flags passed by the end user. 
For more info, see +// https://abseil.io/about/design/dropin-types. + +// User code should not inspect this macro. To check in the preprocessor if +// absl::OTABSL_OPTION_NAMESPACE_NAME::optional is a typedef of std::optional, use the feature macro +// OTABSL_USES_STD_OPTIONAL. + +#define OTABSL_OPTION_USE_STD_OPTIONAL 0 + + +// OTABSL_OPTION_USE_STD_STRING_VIEW +// +// This option controls whether absl::OTABSL_OPTION_NAMESPACE_NAME::string_view is implemented as an alias to +// std::string_view, or as an independent implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use an alias to std::string_view. This requires that +// all code using Abseil is built in C++17 mode or later. +// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if a working std::string_view is available. This +// option is useful when you are building your program from source. It should +// not be used otherwise -- for example, if you are distributing Abseil in a +// binary package manager -- since in mode 2, absl::OTABSL_OPTION_NAMESPACE_NAME::string_view will name a +// different type, with a different mangled name and binary layout, depending on +// the compiler flags passed by the end user. For more info, see +// https://abseil.io/about/design/dropin-types. +// +// User code should not inspect this macro. To check in the preprocessor if +// absl::OTABSL_OPTION_NAMESPACE_NAME::string_view is a typedef of std::string_view, use the feature macro +// OTABSL_USES_STD_STRING_VIEW. + +#define OTABSL_OPTION_USE_STD_STRING_VIEW 0 + +// OTABSL_OPTION_USE_STD_VARIANT +// +// This option controls whether absl::OTABSL_OPTION_NAMESPACE_NAME::variant is implemented as an alias to +// std::variant, or as an independent implementation. +// +// A value of 0 means to use Abseil's implementation. This requires only C++11 +// support, and is expected to work on every toolchain we support. +// +// A value of 1 means to use an alias to std::variant. This requires that all +// code using Abseil is built in C++17 mode or later. +// +// A value of 2 means to detect the C++ version being used to compile Abseil, +// and use an alias only if a working std::variant is available. This option +// is useful when you are building your program from source. It should not be +// used otherwise -- for example, if you are distributing Abseil in a binary +// package manager -- since in mode 2, absl::OTABSL_OPTION_NAMESPACE_NAME::variant will name a different +// type, with a different mangled name and binary layout, depending on the +// compiler flags passed by the end user. For more info, see +// https://abseil.io/about/design/dropin-types. +// +// User code should not inspect this macro. To check in the preprocessor if +// absl::OTABSL_OPTION_NAMESPACE_NAME::variant is a typedef of std::variant, use the feature macro +// OTABSL_USES_STD_VARIANT. + +#define OTABSL_OPTION_USE_STD_VARIANT 0 + + +// OTABSL_OPTION_NAMESPACE_NAME +// +// All codes in otabsl are under OTABSL_OPTION_NAMESPACE_NAME, we do not use inline namespace to avoid +// conlict with external Abseil. 
+ +#define OTABSL_OPTION_NAMESPACE_NAME otel_v1 + +#endif // OTABSL_BASE_OPTIONS_H_ diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/policy_checks.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/policy_checks.h new file mode 100644 index 000000000..f86c264fc --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/policy_checks.h @@ -0,0 +1,115 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: policy_checks.h +// ----------------------------------------------------------------------------- +// +// This header enforces a minimum set of policies at build time, such as the +// supported compiler and library versions. Unsupported configurations are +// reported with `#error`. This enforcement is best effort, so successfully +// compiling this header does not guarantee a supported configuration. + +#ifndef OTABSL_BASE_POLICY_CHECKS_H_ +#define OTABSL_BASE_POLICY_CHECKS_H_ + +// Included for the __GLIBC_PREREQ macro used below. +#include + +// Included for the _STLPORT_VERSION macro used below. +#if defined(__cplusplus) +#include +#endif + +// ----------------------------------------------------------------------------- +// Operating System Check +// ----------------------------------------------------------------------------- + +#if defined(__CYGWIN__) +#error "Cygwin is not supported." +#endif + +// ----------------------------------------------------------------------------- +// Compiler Check +// ----------------------------------------------------------------------------- + +#if 0 /* FIXME: MG */ +// We support MSVC++ 14.0 update 2 and later. +// This minimum will go up. +#if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023918 && !defined(__clang__) +#error "This package requires Visual Studio 2015 Update 2 or higher." +#endif +#endif + +// We support gcc 4.7 and later. +// This minimum will go up. +#if defined(__GNUC__) && !defined(__clang__) +#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 7) +#error "This package requires gcc 4.7 or higher." +#endif +#endif + +// We support Apple Xcode clang 4.2.1 (version 421.11.65) and later. +// This corresponds to Apple Xcode version 4.5. +// This minimum will go up. +#if defined(__apple_build_version__) && __apple_build_version__ < 4211165 +#error "This package requires __apple_build_version__ of 4211165 or higher." +#endif + +// ----------------------------------------------------------------------------- +// C++ Version Check +// ----------------------------------------------------------------------------- + +// Enforce C++11 as the minimum. +#if defined(_MSVC_LANG) +#if _MSVC_LANG < 201103L +#error "C++ versions less than C++11 are not supported." 
+#endif // _MSVC_LANG < 201103L +#elif defined(__cplusplus) +#if __cplusplus < 201103L +#error "C++ versions less than C++11 are not supported." +#endif // __cplusplus < 201103L +#endif + +// ----------------------------------------------------------------------------- +// Standard Library Check +// ----------------------------------------------------------------------------- + +#if defined(_STLPORT_VERSION) +#error "STLPort is not supported." +#endif + +// ----------------------------------------------------------------------------- +// `char` Size Check +// ----------------------------------------------------------------------------- + +// Abseil currently assumes CHAR_BIT == 8. If you would like to use Abseil on a +// platform where this is not the case, please provide us with the details about +// your platform so we can consider relaxing this requirement. +#if CHAR_BIT != 8 +#error "Abseil assumes CHAR_BIT == 8." +#endif + +// ----------------------------------------------------------------------------- +// `int` Size Check +// ----------------------------------------------------------------------------- + +// Abseil currently assumes that an int is 4 bytes. If you would like to use +// Abseil on a platform where this is not the case, please provide us with the +// details about your platform so we can consider relaxing this requirement. +#if INT_MAX < 2147483647 +#error "Abseil assumes that int is at least 4 bytes. " +#endif + +#endif // OTABSL_BASE_POLICY_CHECKS_H_ diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/port.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/port.h new file mode 100644 index 000000000..aaba551b5 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/base/port.h @@ -0,0 +1,26 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This files is a forwarding header for other headers containing various +// portability macros and functions. +// This file is used for both C and C++! + +#ifndef OTABSL_BASE_PORT_H_ +#define OTABSL_BASE_PORT_H_ + +#include "attributes.h" +#include "config.h" +#include "optimization.h" + +#endif // OTABSL_BASE_PORT_H_ diff --git a/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/meta/type_traits.h b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/meta/type_traits.h new file mode 100644 index 000000000..dbd034887 --- /dev/null +++ b/ext/opentelemetry-cpp-1.21.0/api/include/opentelemetry/nostd/internal/absl/meta/type_traits.h @@ -0,0 +1,779 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// type_traits.h
+// -----------------------------------------------------------------------------
+//
+// This file contains C++11-compatible versions of standard <type_traits> API
+// functions for determining the characteristics of types. Such traits can
+// support type inference, classification, and transformation, as well as
+// make it easier to write templates based on generic type behavior.
+//
+// See https://en.cppreference.com/w/cpp/header/type_traits
+//
+// WARNING: use of many of the constructs in this header will count as "complex
+// template metaprogramming", so before proceeding, please carefully consider
+// https://google.github.io/styleguide/cppguide.html#Template_metaprogramming
+//
+// WARNING: using template metaprogramming to detect or depend on API
+// features is brittle and not guaranteed. Neither the standard library nor
+// Abseil provides any guarantee that APIs are stable in the face of template
+// metaprogramming. Use with caution.
+#ifndef OTABSL_META_TYPE_TRAITS_H_
+#define OTABSL_META_TYPE_TRAITS_H_
+
+#include <stddef.h>
+#include <functional>
+#include <type_traits>
+
+#include "../base/config.h"
+
+// MSVC constructibility traits do not detect destructor properties and so our
+// implementations should not use them as a source-of-truth.
+#if defined(_MSC_VER) && !defined(__clang__) && !defined(__GNUC__)
+#define OTABSL_META_INTERNAL_STD_CONSTRUCTION_TRAITS_DONT_CHECK_DESTRUCTION 1
+#endif
+
+namespace absl {
+OTABSL_NAMESPACE_BEGIN
+
+// Defined and documented later on in this file.
+template <typename T>
+struct is_trivially_destructible;
+
+// Defined and documented later on in this file.
+template <typename T>
+struct is_trivially_move_assignable;
+
+namespace type_traits_internal {
+
+// Silence MSVC warnings about the destructor being defined as deleted.
+#if defined(_MSC_VER) && !defined(__GNUC__)
+#pragma warning(push)
+#pragma warning(disable : 4624)
+#endif // defined(_MSC_VER) && !defined(__GNUC__)
+
+template <class T>
+union SingleMemberUnion {
+  T t;
+};
+
+// Restore the state of the destructor warning that was silenced above.
+#if defined(_MSC_VER) && !defined(__GNUC__)
+#pragma warning(pop)
+#endif // defined(_MSC_VER) && !defined(__GNUC__)
+
+template <class T>
+struct IsTriviallyMoveConstructibleObject
+    : std::integral_constant<
+          bool, std::is_move_constructible<
+                    type_traits_internal::SingleMemberUnion<T>>::value &&
+                    absl::OTABSL_OPTION_NAMESPACE_NAME::is_trivially_destructible<T>::value> {};
+
+template <class T>
+struct IsTriviallyCopyConstructibleObject
+    : std::integral_constant<
+          bool, std::is_copy_constructible<
+                    type_traits_internal::SingleMemberUnion<T>>::value &&
+                    absl::OTABSL_OPTION_NAMESPACE_NAME::is_trivially_destructible<T>::value> {};
+
+template <typename T>
+struct IsTriviallyMoveAssignableReference : std::false_type {};
+
+template <typename T>
+struct IsTriviallyMoveAssignableReference<T&>
+    : absl::OTABSL_OPTION_NAMESPACE_NAME::is_trivially_move_assignable<T>::type {};
+
+template <typename T>
+struct IsTriviallyMoveAssignableReference<T&&>
+    : absl::OTABSL_OPTION_NAMESPACE_NAME::is_trivially_move_assignable<T>::type {};
+
+template <typename... Ts>
+struct VoidTImpl {
+  using type = void;
+};
+
+////////////////////////////////
+// Library Fundamentals V2 TS //
+////////////////////////////////
+
+// NOTE: The `is_detected` family of templates here differ from the library
+// fundamentals specification in that for library fundamentals, `Op<Args...>` is
+// evaluated as soon as the type `is_detected<Op, Args...>` undergoes
+// substitution, regardless of whether or not the `::value` is accessed. That
+// is inconsistent with all other standard traits and prevents lazy evaluation
+// in larger contexts (such as if the `is_detected` check is a trailing argument
+// of a `conjunction`. This implementation opts to instead be lazy in the same
+// way that the standard traits are (this "defect" of the detection idiom
+// specifications has been reported).
+
+template <class Enabler, template <class...> class Op, class... Args>
+struct is_detected_impl {
+  using type = std::false_type;
+};
+
+template <template <class...> class Op, class... Args>
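The `VoidTImpl`/`is_detected_impl` machinery above appears to be the classic C++ detection idiom: a primary template that yields `std::false_type`, plus a void_t-style partial specialization that yields `std::true_type` only when `Op<Args...>` is well-formed. The following is a rough, self-contained sketch of the same idea, not the vendored header's code; the names `value_type_t` and `detected` are illustrative, and C++17's `std::void_t` is used for brevity.

#include <type_traits>
#include <vector>

// Archetype: well-formed only when T exposes a nested `value_type`.
template <class T>
using value_type_t = typename T::value_type;

// Primary template: substituting Op<Args...> failed, so detection is false.
template <class, template <class...> class Op, class... Args>
struct detected : std::false_type {};

// Partial specialization: chosen when Op<Args...> is well-formed, so detection is true.
template <template <class...> class Op, class... Args>
struct detected<std::void_t<Op<Args...>>, Op, Args...> : std::true_type {};

template <template <class...> class Op, class... Args>
using is_detected = detected<void, Op, Args...>;

static_assert(is_detected<value_type_t, std::vector<int>>::value, "");
static_assert(!is_detected<value_type_t, int>::value, "");

The vendored header presumably prefers the struct-based `VoidTImpl` over an alias template because pre-C++17 compilers do not reliably treat unused parameters of an alias as a SFINAE context (CWG 1558), and because `::value` is then only evaluated on instantiation, the lazy behavior the NOTE above describes.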