1 change: 1 addition & 0 deletions build.zig
@@ -715,6 +715,7 @@ fn addInternalImports(b: *Build, mod: *Module, opts: *BunBuildOptions) void {
.{ .file = "bun-error/index.js", .enable = opts.shouldEmbedCode() },
.{ .file = "bun-error/bun-error.css", .enable = opts.shouldEmbedCode() },
.{ .file = "fallback-decoder.js", .enable = opts.shouldEmbedCode() },
.{ .file = "feedback.js", .enable = opts.shouldEmbedCode() },
.{ .file = "node-fallbacks/react-refresh.js", .enable = opts.shouldEmbedCode() },
.{ .file = "node-fallbacks/assert.js", .enable = opts.shouldEmbedCode() },
.{ .file = "node-fallbacks/buffer.js", .enable = opts.shouldEmbedCode() },
24 changes: 24 additions & 0 deletions src/cli/run_command.zig
@@ -1352,6 +1352,30 @@ pub const RunCommand = struct {
}
const passthrough = ctx.passthrough; // unclear why passthrough is an escaped string, it should probably be []const []const u8 and allow its users to escape it.

// Check if this is the feedback command
if (bun.strings.eqlComptime(target_name, "feedback")) {
    // Get the embedded feedback.js code
    const code = if (bun.Environment.codegen_embed)
        @embedFile("codegen/feedback.js")
    else
        bun.runtimeEmbedFile(.codegen, "feedback.js");

    // Set up eval to run the feedback code
    ctx.runtime_options.eval.script = code;

    // Ensure passthrough arguments are available to the eval script
    // The passthrough already contains the arguments after "feedback"
    // ctx.passthrough is already set correctly from the CLI parsing

    // Run as eval
    const trigger = bun.pathLiteral("/[eval]");
    var entry_point_buf: [bun.MAX_PATH_BYTES + trigger.len]u8 = undefined;
    const cwd = try std.posix.getcwd(&entry_point_buf);
    @memcpy(entry_point_buf[cwd.len..][0..trigger.len], trigger);
    try Run.boot(ctx, entry_point_buf[0 .. cwd.len + trigger.len], null);
    return true;
}

var try_fast_run = false;
var skip_script_check = false;
if (target_name.len > 0 and target_name[0] == '.') {
173 changes: 173 additions & 0 deletions src/js/eval/feedback.ts
@@ -0,0 +1,173 @@
// Output banner immediately before any requires
console.error("\nbun feedback - Send feedback to the Bun team\n");

const { readFileSync, existsSync, writeFileSync, fstatSync } = require("fs");
const { join } = require("path");

const VERSION = process.versions.bun || "unknown";
const OS = process.platform;
const ARCH = process.arch;

// Check if stdin is readable (not /dev/null or closed)
function isStdinReadable() {
  try {
    // Get file descriptor 0 (stdin)
    const stats = fstatSync(0);
    // Check if it's a regular file and has size 0 (like /dev/null)
Comment on lines +14 to +16

💡 Verification agent

🧩 Analysis chain

Fix: Incorrect file descriptor stats check on Windows

The logic for checking if stdin is /dev/null won't work correctly on Windows. When stats.isFile() returns true with size === 0, this could match legitimate empty files, not just /dev/null. Additionally, on Windows, /dev/null doesn't exist - it's NUL instead.

Apply this diff to handle both Unix and Windows correctly:

 function isStdinReadable() {
   try {
     // Get file descriptor 0 (stdin)
     const stats = fstatSync(0);
-    // Check if it's a regular file and has size 0 (like /dev/null)
-    // or if it's not a character device/pipe/socket
-    if (stats.isFile() && stats.size === 0) {
+    // On Unix: /dev/null is a character device with specific properties
+    // On Windows: NUL doesn't have a file descriptor, so this will throw
+    if (process.platform === "win32") {
+      // Windows doesn't have /dev/null, it has NUL
+      // This check will be handled by the catch block
+      return true;
+    }
+    // Check if it's a regular file with size 0 or if it's the null device
+    if (stats.isCharacterDevice() && stats.rdev === 259) { // Unix /dev/null
       return false;
     }
+    if (stats.isFile() && stats.size === 0 && stats.dev === 0) {
+      return false;
+    }
     return true;
   } catch {
     return false;
   }
 }

Let me verify if this is the correct approach for detecting /dev/null across platforms:


🏁 Script executed:

#!/bin/bash
# Check how Bun handles stdin detection in other parts of the codebase
rg -n "fstatSync.*0\)|fstat.*stdin" --type=ts --type=js

Length of output: 421


🏁 Script executed:

#!/bin/bash
# Look for existing /dev/null or NUL detection patterns
rg -n "(/dev/null|\\bnul\\b|isCharacterDevice)" --type=ts --type=js -i

Length of output: 6831


Fix: robust cross‑platform stdin (/dev/null) detection

Do not use stats.isFile() && stats.size === 0 or an unconditional process.platform === "win32" early-return — that misidentifies empty files as /dev/null and the proposed Windows shortcut is incorrect. Replace with platform‑agnostic checks:

  • If stats.isFIFO() or stats.isSocket() → stdin is piped/readable (return true).
  • If stats.isCharacterDevice() → return !!process.stdin?.isTTY (tty = interactive/readable; non‑tty char device is likely /dev/null).
  • If stats.isFile() → handle empty‑file semantics explicitly (empty file != automatically treat as /dev/null; use stats.size > 0 only if you require non‑empty input).

Location: src/js/eval/feedback.ts (around the fstatSync(0) check). Note: the repo already maps devNull for Windows in src/js/os.ts — don’t short‑circuit win32 to true.
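
As a rough, non-authoritative illustration, here is a minimal sketch of the checks described above, assuming Node-style fstatSync(0) and process.stdin.isTTY semantics as used elsewhere in this file; it is not a committable patch:

const { fstatSync } = require("fs");

function isStdinReadable() {
  try {
    const stats = fstatSync(0);
    // Piped input (FIFO) or a socket is readable.
    if (stats.isFIFO() || stats.isSocket()) return true;
    // A TTY character device is interactive; a non-TTY character device is most
    // likely the null device (/dev/null on Unix, NUL on Windows).
    if (stats.isCharacterDevice()) return !!process.stdin?.isTTY;
    // A regular file redirected to stdin only counts if it has content
    // (assumption: the caller requires non-empty input).
    if (stats.isFile()) return stats.size > 0;
    return true;
  } catch {
    // fstat failed (closed or invalid fd): treat stdin as not readable.
    return false;
  }
}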

🤖 Prompt for AI Agents
In src/js/eval/feedback.ts around lines 14 to 16, the current stdin detection
using stats.isFile() && stats.size === 0 and a win32 early-return is incorrect
and misclassifies empty files as /dev/null; replace with platform-agnostic
checks: treat stats.isFIFO() or stats.isSocket() as piped/readable (return
true), treat stats.isCharacterDevice() by returning !!process.stdin?.isTTY (TTY
= interactive/readable; non-tty char device likely /dev/null), and if
stats.isFile() handle empty-file explicitly (do not automatically treat size ===
0 as /dev/null; only consider stats.size > 0 when you require non-empty input).
Do not special-case process.platform === "win32" here because Windows devNull is
already mapped in src/js/os.ts.

    // or if it's not a character device/pipe/socket
    if (stats.isFile() && stats.size === 0) {
      return false;
    }
    return true;
  } catch {
    return false;
  }
}

async function getEmail() {
  const bunInstall = process.env.BUN_INSTALL;

  // Check for saved email
  if (bunInstall) {
    const feedbackPath = join(bunInstall, "feedback");
    if (existsSync(feedbackPath)) {
      const savedEmail = readFileSync(feedbackPath, "utf8").trim();
      if (savedEmail) {
        return savedEmail;
      }
    }
  }

  // Try to get email from git config
  let defaultEmail = "";
  try {
    const result = Bun.spawnSync(["git", "config", "user.email"], {
      stdout: "pipe",
      stderr: "ignore",
    });
    if (result.exitCode === 0 && result.stdout) {
      defaultEmail = result.stdout.toString().trim();
    }
  } catch {}

  // If stdin is not readable (e.g., /dev/null), return default or empty
  if (!isStdinReadable()) {
    return defaultEmail || "";
  }

  // Prompt for email
  process.stderr.write(`? Email address${defaultEmail ? ` (${defaultEmail})` : ""}: `);

  const decoder = new TextDecoder();
  for await (const chunk of Bun.stdin.stream()) {
    const line = decoder.decode(chunk).trim();
    const email = line || defaultEmail;

    // Save email if BUN_INSTALL is set
    if (bunInstall && email) {
      const feedbackPath = join(bunInstall, "feedback");
      try {
        writeFileSync(feedbackPath, email);
      } catch {}
    }

    return email;
  }

  return defaultEmail;
}

async function getBody() {
  // Get args from process.argv
  // process.argv[0] = bun executable, process.argv[1+] = actual args
  const args = process.argv.slice(1);

Comment on lines +83 to +84

💡 Verification agent

🧩 Analysis chain

Incorrect argv slicing for embedded eval scripts

When running as an embedded eval script, process.argv[0] will be the bun executable, and process.argv[1] will be "feedback". The actual user arguments start at index 2, not 1.

Apply this diff to correctly handle arguments:

   // Get args from process.argv
-  // process.argv[0] = bun executable, process.argv[1+] = actual args
-  const args = process.argv.slice(1);
+  // When running as eval: process.argv[0] = bun executable, process.argv[1] = "feedback", process.argv[2+] = user args
+  const args = process.argv.slice(2);

Let me verify how process.argv works for eval scripts in Bun:


🏁 Script executed:

#!/bin/bash
# Search for how other eval scripts handle process.argv
rg -n "process\.argv.*slice" src/js/eval/ --type=ts

Length of output: 64


🏁 Script executed:

#!/bin/bash
# Look for documentation or tests about eval script argv handling
rg -n "eval.*process\.argv|argv.*eval" test/ --type=ts --type=js

Length of output: 683


Incorrect argv slicing for embedded eval scripts — use slice(2)

When running as an embedded eval, process.argv[0]=bun and process.argv[1]="feedback", so user arguments start at process.argv[2]; slice(1) wrongly includes "feedback". Change to slice(2).

File: src/js/eval/feedback.ts (around lines 83–84)

  // Get args from process.argv
-  // process.argv[0] = bun executable, process.argv[1+] = actual args
-  const args = process.argv.slice(1);
+  // When running as eval: process.argv[0] = bun executable, process.argv[1] = "feedback", process.argv[2+] = user args
+  const args = process.argv.slice(2);
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
-  const args = process.argv.slice(1);
+  // Get args from process.argv
+  // When running as eval: process.argv[0] = bun executable, process.argv[1] = "feedback", process.argv[2+] = user args
+  const args = process.argv.slice(2);
🤖 Prompt for AI Agents
In src/js/eval/feedback.ts around lines 83 to 84, process.argv is being sliced
with slice(1) which incorrectly includes the script name ("feedback") for
embedded eval runs; change the slicing to process.argv.slice(2) so user-provided
arguments start at argv[2]; update any downstream uses that assume the first
element is a user arg if necessary.

  // If we have positional arguments, use them
  if (args.length > 0) {
    return args.join(" ");
  }

  // Check if stdin is readable
  if (!isStdinReadable()) {
    return "";
  }

  // If stdin is not a TTY, read from pipe
  if (!process.stdin.isTTY) {
    const chunks = [];
    for await (const chunk of Bun.stdin.stream()) {
      chunks.push(chunk);
    }
    const buffer = Buffer.concat(chunks);
    return buffer.toString("utf8").trim();
  }

  // Otherwise prompt for message
  process.stderr.write("? Feedback message (Press Enter to submit): ");

  const decoder = new TextDecoder();
  for await (const chunk of Bun.stdin.stream()) {
    return decoder.decode(chunk).trim();
  }

  return "";
}

async function sendFeedback(email, body) {
  const url = process.env.BUN_FEEDBACK_URL || "https://bun.com/api/v1/feedback";

  const payload = JSON.stringify({
    os: OS,
    cpu: ARCH,
    version: VERSION,
    body,
    email,
  });

  // Show progress
  process.stderr.write("Sending feedback...");

  try {
    const response = await fetch(url, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: payload,
    });

    process.stderr.write("\r\x1b[K"); // Clear the line
Comment on lines +128 to +139

🛠️ Refactor suggestion

⚠️ Potential issue

Add timeout and better error handling for network requests

The fetch request lacks a timeout, which could cause the command to hang indefinitely on network issues. Also, the error clearing could leave artifacts on some terminals.

Apply this diff to add timeout and improve error handling:

   // Show progress
   process.stderr.write("Sending feedback...");
 
   try {
+    const controller = new AbortController();
+    const timeoutId = setTimeout(() => controller.abort(), 30000); // 30 second timeout
+    
     const response = await fetch(url, {
       method: "POST",
       headers: {
         "Content-Type": "application/json",
       },
       body: payload,
+      signal: controller.signal,
     });
+    
+    clearTimeout(timeoutId);
 
     process.stderr.write("\r\x1b[K"); // Clear the line

Also, consider handling specific error cases:

   } catch (error) {
     process.stderr.write("\r\x1b[K"); // Clear the line
-    console.error(`\nerror: Failed to send feedback: ${error?.message || error}\n`);
+    const errorMessage = error?.name === 'AbortError' 
+      ? 'Request timed out'
+      : error?.message || String(error);
+    console.error(`\nerror: Failed to send feedback: ${errorMessage}\n`);
     process.exit(1);
   }
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
-  process.stderr.write("Sending feedback...");
-  try {
-    const response = await fetch(url, {
-      method: "POST",
-      headers: {
-        "Content-Type": "application/json",
-      },
-      body: payload,
-    });
-    process.stderr.write("\r\x1b[K"); // Clear the line
+  process.stderr.write("Sending feedback...");
+  try {
+    const controller = new AbortController();
+    const timeoutId = setTimeout(() => controller.abort(), 30000); // 30 second timeout
+    const response = await fetch(url, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+      },
+      body: payload,
+      signal: controller.signal,
+    });
+    clearTimeout(timeoutId);
+    process.stderr.write("\r\x1b[K"); // Clear the line
+  } catch (error) {
+    process.stderr.write("\r\x1b[K"); // Clear the line
+    const errorMessage =
+      error?.name === "AbortError" ? "Request timed out" : error?.message || String(error);
+    console.error(`\nerror: Failed to send feedback: ${errorMessage}\n`);
+    process.exit(1);
+  }
🤖 Prompt for AI Agents
In src/js/eval/feedback.ts around lines 128 to 139, the fetch call needs a
timeout and more robust error handling: wrap the fetch with an AbortController,
start a timer (e.g., 10s) that calls controller.abort(), pass controller.signal
to fetch, and clear the timer after fetch resolves; move the progress
line-clearing into a finally block so it always runs; in the catch block handle
AbortError (request timed out) separately from other network errors and treat
non-OK HTTP responses explicitly (check response.ok and throw or log a
descriptive error), and ensure any error logging cleans the progress line (use
process.stderr.write("\r\x1b[K") before writing the error) so terminals don’t
show artifacts.
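
For reference, a non-authoritative sketch loosely following this prompt (AbortController plus a timer, explicit response.ok handling, and the progress line cleared before any message is printed); the sendFeedbackWithTimeout name, the 10-second timeout, and the error wording are assumptions for illustration, not part of the PR:

async function sendFeedbackWithTimeout(email, body) {
  const url = process.env.BUN_FEEDBACK_URL || "https://bun.com/api/v1/feedback";
  const payload = JSON.stringify({ os: OS, cpu: ARCH, version: VERSION, body, email });

  process.stderr.write("Sending feedback...");
  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), 10_000); // assumed 10s timeout

  try {
    const response = await fetch(url, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: payload,
      signal: controller.signal,
    });
    process.stderr.write("\r\x1b[K"); // clear the progress line before printing anything
    if (!response.ok) {
      console.error(`\nerror: Failed to send feedback (status code: ${response.status})\n`);
      process.exit(1);
    }
    console.error("\n✓ Thank you for your feedback!\n");
  } catch (error) {
    process.stderr.write("\r\x1b[K"); // clear the progress line before the error
    const message = error?.name === "AbortError" ? "Request timed out" : error?.message || String(error);
    console.error(`\nerror: Failed to send feedback: ${message}\n`);
    process.exit(1);
  } finally {
    clearTimeout(timeoutId); // only reached on the success path before exit
  }
}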


    if (response.ok) {
      console.error("\n✓ Thank you for your feedback!\n");
    } else {
      console.error(`\nerror: Failed to send feedback (status code: ${response.status})\n`);
      process.exit(1);
    }
  } catch (error) {
    process.stderr.write("\r\x1b[K"); // Clear the line
    console.error(`\nerror: Failed to send feedback: ${error?.message || error}\n`);
    process.exit(1);
  }
}

(async () => {
  try {
    // Get email
    const email = await getEmail();

    // Get feedback body
    const body = await getBody();

    if (!body) {
      console.error("error: No feedback message provided");
      process.exit(1);
    }

    // Send feedback
    await sendFeedback(email, body);
  } catch (err) {
    console.error("error: Unexpected error in feedback command:", err);
    process.exit(1);
  }
})();