Android 12 init (4): Child Process Reaping and Service Restart Analysis

龚奕
2023-12-01

The article is hosted on Gitee (Android Notes) and mirrored to CSDN.
This analysis is based on Android 12.

While init is running, child processes and services will inevitably exit, and init has to handle this explicitly:

  • Terminated child processes must be reaped to avoid leaving zombie processes behind.
  • Non-oneshot services must be brought up again so that an abnormal exit does not leave them permanently down.

Handling child process exits

init listens for the SIGCHLD signal to learn that a child process has terminated and then takes the appropriate actions.
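
Instead of doing the work inside an asynchronous signal handler, init turns the signal into a file descriptor event. Below is a minimal standalone sketch of the same signalfd-plus-epoll pattern (not init code): block SIGCHLD, create a signalfd for it, and watch that fd from an epoll loop so child exits are handled in the main loop.

/// minimal illustration, not part of AOSP
#include <sys/epoll.h>
#include <sys/signalfd.h>
#include <sys/wait.h>
#include <signal.h>
#include <unistd.h>
#include <cstdio>

int main() {
    sigset_t mask;
    sigemptyset(&mask);
    sigaddset(&mask, SIGCHLD);
    sigprocmask(SIG_BLOCK, &mask, nullptr);      // block SIGCHLD before creating the signalfd

    int sfd = signalfd(-1, &mask, SFD_CLOEXEC);  // blocked signals become readable on this fd

    int epfd = epoll_create1(EPOLL_CLOEXEC);
    epoll_event ev{};
    ev.events = EPOLLIN;
    ev.data.fd = sfd;
    epoll_ctl(epfd, EPOLL_CTL_ADD, sfd, &ev);    // watch the signalfd from the event loop

    if (fork() == 0) _exit(0);                   // a child that exits immediately

    epoll_event out{};
    if (epoll_wait(epfd, &out, 1, -1) == 1) {
        signalfd_siginfo si{};
        if (read(sfd, &si, sizeof(si)) == static_cast<ssize_t>(sizeof(si))) {  // consume the pending SIGCHLD
            printf("signal %u from pid %u\n", si.ssi_signo, si.ssi_pid);
        }
        while (waitpid(-1, nullptr, WNOHANG) > 0) {}  // reap every exited child
    }
    return 0;
}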

InstallSignalFdHandler

Initializes signal handling and registers the listener for child process termination.

/// @system/core/init/init.cpp
static void InstallSignalFdHandler(Epoll* epoll) {
    // Applying SA_NOCLDSTOP to a defaulted SIGCHLD handler prevents the signalfd from receiving
    // SIGCHLD when a child process stops or continues (b/77867680#comment9).
    const struct sigaction act { .sa_handler = SIG_DFL, .sa_flags = SA_NOCLDSTOP };
    sigaction(SIGCHLD, &act, nullptr);  // with SA_NOCLDSTOP set, process stop/continue events are not delivered

    sigset_t mask;
    sigemptyset(&mask);
    sigaddset(&mask, SIGCHLD);

    if (!IsRebootCapable()) { // without the CAP_SYS_BOOT capability, init cannot reboot the system
        // If init does not have the CAP_SYS_BOOT capability, it is running in a container.
        // In that case, receiving SIGTERM will cause the system to shut down.
        sigaddset(&mask, SIGTERM); // add SIGTERM to the signal set
    }

    if (sigprocmask(SIG_BLOCK, &mask, nullptr) == -1) { // block these signals so they are delivered through the signalfd instead
        PLOG(FATAL) << "failed to block signals";
    }

    // Register a handler to unblock signals in the child processes.
    // UnblockSignals runs in the child's context before fork() returns, so child processes do not block these signals
    const int result = pthread_atfork(nullptr, nullptr, &UnblockSignals);
    if (result != 0) {
        LOG(FATAL) << "Failed to register a fork handler: " << strerror(result);
    }

    signal_fd = signalfd(-1, &mask, SFD_CLOEXEC); // create an fd from which the blocked signals can be read
    if (signal_fd == -1) {
        PLOG(FATAL) << "failed to create signalfd";
    }
    // let epoll watch signal_fd for newly arrived signals
    if (auto result = epoll->RegisterHandler(signal_fd, HandleSignalFd); !result.ok()) {
        LOG(FATAL) << result.error();
    }
}

UnblockSignals

This function runs in the child process, so by default children do not block these signals.

/// @system/core/init/init.cpp
static void UnblockSignals() {
    const struct sigaction act { .sa_handler = SIG_DFL };
    sigaction(SIGCHLD, &act, nullptr);

    sigset_t mask;
    sigemptyset(&mask);
    sigaddset(&mask, SIGCHLD);
    sigaddset(&mask, SIGTERM);

    if (sigprocmask(SIG_UNBLOCK, &mask, nullptr) == -1) {
        PLOG(FATAL) << "failed to unblock signals for PID " << getpid();
    }
}

When epoll sees an event on signal_fd, i.e. one of the relevant signals has arrived, HandleSignalFd is invoked to process it.

HandleSignalFd

/// system/core/init/init.cpp
static void HandleSignalFd() {
    signalfd_siginfo siginfo;
    // read the signal information from signal_fd
    ssize_t bytes_read = TEMP_FAILURE_RETRY(read(signal_fd, &siginfo, sizeof(siginfo)));
    if (bytes_read != sizeof(siginfo)) {
        PLOG(ERROR) << "Failed to read siginfo from signal_fd";
        return;
    }

    switch (siginfo.ssi_signo) {
        case SIGCHLD: // a child process terminated
            ReapAnyOutstandingChildren();
            break;
        case SIGTERM: // signal 15, the default signal sent by the kill command
            HandleSigtermSignal(siginfo);
            break;
        default:
            PLOG(ERROR) << "signal_fd: received unexpected signal " << siginfo.ssi_signo;
            break;
    }
}

Handling SIGCHLD calls ReapAnyOutstandingChildren, which reaps every terminated child process.

ReapAnyOutstandingChildren

/// @system/core/init/sigchld_handler.cpp
void ReapAnyOutstandingChildren() {
    while (ReapOneProcess() != 0) { // loop until every terminated process (exited or killed by a signal) has been handled
    }
}

ReapOneProcess

This function does the following:

  • Calls waitid to find a terminated process and later reaps it (a standalone sketch of this peek-then-reap pattern follows the code below).
  • Logs why the process died: killed by a signal or exited via exit().
  • For a service process, calls its Reap function to clean up state and handle the restart and the onrestart commands.
/// @system/core/init/sigchld_handler.cpp
static pid_t ReapOneProcess() {
    siginfo_t siginfo = {};
    // This returns a zombie pid or informs us that there are no zombies left to be reaped.
    // It does NOT reap the pid; that is done below.
    if (TEMP_FAILURE_RETRY(waitid(P_ALL, 0, &siginfo, WEXITED | WNOHANG | WNOWAIT)) != 0) {
        PLOG(ERROR) << "waitid failed";
        return 0;
    }

    auto pid = siginfo.si_pid;
    if (pid == 0) return 0;

    // At this point we know we have a zombie pid, so we use this scopeguard to reap the pid
    // whenever the function returns from this point forward.
    // We do NOT want to reap the zombie earlier as in Service::Reap(), we kill(-pid, ...) and we
    // want the pid to remain valid throughout that (and potentially future) usages.
    auto reaper = make_scope_guard([pid] { TEMP_FAILURE_RETRY(waitpid(pid, nullptr, WNOHANG)); });

    std::string name;
    std::string wait_string;
    Service* service = nullptr;

    if (SubcontextChildReap(pid)) { // handle a Subcontext process exit; it is restarted unless a shutdown is in progress
        name = "Subcontext";
    } else {
        // check whether the pid belongs to a service, e.g. surfaceflinger
        service = ServiceList::GetInstance().FindService(pid, &Service::pid);

        if (service) { // the pid belongs to a known service
            name = StringPrintf("Service '%s' (pid %d)", service->name().c_str(), pid);
            if (service->flags() & SVC_EXEC) { // an executable launched via 'exec' or 'exec_start'
                auto exec_duration = boot_clock::now() - service->time_started();
                auto exec_duration_ms =
                    std::chrono::duration_cast<std::chrono::milliseconds>(exec_duration).count();
                wait_string = StringPrintf(" waiting took %f seconds", exec_duration_ms / 1000.0f);
            } else if (service->flags() & SVC_ONESHOT) { // a oneshot service
                auto exec_duration = boot_clock::now() - service->time_started();
                auto exec_duration_ms =
                        std::chrono::duration_cast<std::chrono::milliseconds>(exec_duration)
                                .count();
                wait_string = StringPrintf(" oneshot service took %f seconds in background",
                                           exec_duration_ms / 1000.0f);
            }
        } else {
            name = StringPrintf("Untracked pid %d", pid); // 非服务进程,未追踪的进程退出
        }
    }

    if (siginfo.si_code == CLD_EXITED) { // the process called exit()
        LOG(INFO) << name << " exited with status " << siginfo.si_status << wait_string;
    } else { // the process was killed by a signal
        LOG(INFO) << name << " received signal " << siginfo.si_status << wait_string;
    }

    if (!service) return pid;

    service->Reap(siginfo); // Reap cleans up and arranges the restart of non-oneshot services

    if (service->flags() & SVC_TEMPORARY) { // a temporary service started via 'exec'
        ServiceList::GetInstance().RemoveService(*service);
    }

    return pid;
}
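
The WNOWAIT "peek first, reap later" pattern above can be shown in isolation. A minimal sketch (not init code): waitid observes the zombie without reaping it, so the pid stays valid (e.g. for kill(-pid, ...) as init does) until waitpid finally collects it.

/// minimal illustration, not part of AOSP
#include <sys/wait.h>
#include <signal.h>
#include <unistd.h>
#include <cstdio>

int main() {
    pid_t child = fork();
    if (child == 0) _exit(7);                    // the child exits right away

    siginfo_t si = {};
    // Peek at the zombie without reaping it (WNOWAIT); the pid remains valid afterwards.
    if (waitid(P_ALL, 0, &si, WEXITED | WNOWAIT) == 0) {
        printf("pid %d exited with status %d\n", si.si_pid, si.si_status);
        waitpid(si.si_pid, nullptr, 0);          // now actually reap the zombie
    }
    return 0;
}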

Service::Reap

  • Kills every process in the service's process group.
  • Removes the socket files the service created.
  • Runs the reap_callbacks_, e.g. a start-failure callback registered earlier.
  • If a critical service keeps exiting (more than 4 times within a 4-minute window), reboots into the bootloader.
  • Marks the service SVC_RESTARTING; the actual restart happens in HandleProcessActions.
  • Executes the onrestart commands.
  • Notifies the service state change.
/// @system/core/init/service.cpp
  void Service::Reap(const siginfo_t& siginfo) {
      if (!(flags_ & SVC_ONESHOT) || (flags_ & SVC_RESTART)) { // not a oneshot service, or one marked for restart
          KillProcessGroup(SIGKILL, false); // the service died: kill every process in its process group; the second argument is report_oneshot
      } else {
          // Legacy behavior from ~2007 until Android R: this else branch did not exist and we did not
          // kill the process group in this case.
          if (SelinuxGetVendorAndroidVersion() >= __ANDROID_API_R__) { // also kill the process group of a oneshot service
              // The new behavior in Android R is to kill these process groups in all cases.  The
              // 'true' parameter instructions KillProcessGroup() to report a warning message where it
              // detects a difference in behavior has occurred.
              KillProcessGroup(SIGKILL, true);
          }
      }

      // Remove any socket resources we may have created.
      for (const auto& socket : sockets_) { // remove the socket files this service created
          auto path = ANDROID_SOCKET_DIR "/" + socket.name;
          unlink(path.c_str());
      }

      for (const auto& f : reap_callbacks_) { // run the callbacks registered via AddReapCallback
          f(siginfo);
      }

      if ((siginfo.si_code != CLD_EXITED || siginfo.si_status != 0) && on_failure_reboot_target_) {
          LOG(ERROR) << "Service with 'reboot_on_failure' option failed, shutting down system.";
          trigger_shutdown(*on_failure_reboot_target_); // a service with the reboot_on_failure option triggers a shutdown/reboot when it exits abnormally
      }

      if (flags_ & SVC_EXEC) UnSetExec();  // reset the is_exec_service_running_ flag

      if (flags_ & SVC_TEMPORARY) return; // a temporary (exec) service: nothing more to do

      pid_ = 0;
      flags_ &= (~SVC_RUNNING);
      start_order_ = 0;

      // Oneshot processes go into the disabled state on exit,
      // except when manually restarted.
      // Services marked SVC_RESTART are being restarted: StopOrReset() sets the flag and kills the process, and the restart happens here after the process is reaped.
      if ((flags_ & SVC_ONESHOT) && !(flags_ & SVC_RESTART) && !(flags_ & SVC_RESET)) {
          flags_ |= SVC_DISABLED; // a oneshot service goes to the disabled state
      }

      // Disabled and reset processes do not get restarted automatically.
      if (flags_ & (SVC_DISABLED | SVC_RESET))  { // disabled and reset services are not restarted
          NotifyStateChange("stopped");
          return;
      }

  #if INIT_FULL_SOURCES
      static bool is_apex_updatable = android::sysprop::ApexProperties::updatable().value_or(false);
  #else
      static bool is_apex_updatable = false;
  #endif
      const bool is_process_updatable = !use_bootstrap_ns_ && is_apex_updatable;

      // If we crash > 4 times in 'fatal_crash_window_' minutes or before boot_completed,
      // reboot into bootloader or set crashing property
      boot_clock::time_point now = boot_clock::now();
      // a critical or updatable (e.g. APEX) service that is not already marked for restart
      if (((flags_ & SVC_CRITICAL) || is_process_updatable) && !(flags_ & SVC_RESTART)) {
          bool boot_completed = GetBoolProperty("sys.boot_completed", false);
          if (now < time_crashed_ + fatal_crash_window_ || !boot_completed) { // within the crash window, or boot has not completed yet
              if (++crash_count_ > 4) {
                  auto exit_reason = boot_completed ?
                      "in " + std::to_string(fatal_crash_window_.count()) + " minutes" :
                      "before boot completed";
                  if (flags_ & SVC_CRITICAL) { // a critical service that crashes more than 4 times within the window (4 minutes by default) or before boot completes reboots into the bootloader
                      if (!GetBoolProperty("init.svc_debug.no_fatal." + name_, false)) {
                          // Aborts into 'fatal_reboot_target_'.
                          SetFatalRebootTarget(fatal_reboot_target_);
                          LOG(FATAL) << "critical process '" << name_ << "' exited 4 times "
                                     << exit_reason;
                      }
                  } else { // a non-critical service just logs an error and records it in properties
                      LOG(ERROR) << "process with updatable components '" << name_
                                 << "' exited 4 times " << exit_reason;
                      // Notifies update_verifier and apexd
                      SetProperty("sys.init.updatable_crashing_process_name", name_);
                      SetProperty("sys.init.updatable_crashing", "1");
                  }
              }
          } else { // outside the window: reset the crash time and count
              time_crashed_ = now;
              crash_count_ = 1;
          }
      }

      flags_ &= (~SVC_RESTART);
      flags_ |= SVC_RESTARTING; // note this flag: it is what drives the service restart

      // Execute all onrestart commands for this service.
      onrestart_.ExecuteAllCommands(); // execute every onrestart command configured in the rc file

      NotifyStateChange("restarting");
      return;
}

Service::KillProcessGroup

void Service::KillProcessGroup(int signal, bool report_oneshot) {
    // If we've already seen a successful result from killProcessGroup*(), then we have removed
    // the cgroup already and calling these functions a second time will simply result in an error.
    // This is true regardless of which signal was sent.
    // These functions handle their own logging, so no additional logging is needed.
    if (!process_cgroup_empty_) {
        LOG(INFO) << "Sending signal " << signal << " to service '" << name_ << "' (pid " << pid_
                  << ") process group...";
        int max_processes = 0;
        int r;
        if (signal == SIGTERM) {
            r = killProcessGroupOnce(proc_attr_.uid, pid_, signal, &max_processes);
        } else {
            r = killProcessGroup(proc_attr_.uid, pid_, signal, &max_processes);
        }

        if (report_oneshot && max_processes > 0) {
            LOG(WARNING)
                    << "Killed " << max_processes
                    << " additional processes from a oneshot process group for service '" << name_
                    << "'. This is new behavior, previously child processes would not be killed in "
                       "this case.";
        }

        if (r == 0) process_cgroup_empty_ = true;
    }

    if (oom_score_adjust_ != DEFAULT_OOM_SCORE_ADJUST) {
        LmkdUnregister(name_, pid_); // unregister the process from lmkd
    }
}

The two killProcessGroup variants above are implemented as follows:

/// @system/core/libprocessgroup/processgroup.cpp
int killProcessGroup(uid_t uid, int initialPid, int signal, int* max_processes) {
    // internally calls DoKillProcessGroupOnce to kill the process group
    return KillProcessGroup(uid, initialPid, signal, 40 /*retries*/, max_processes);
}

int killProcessGroupOnce(uid_t uid, int initialPid, int signal, int* max_processes) {
    return KillProcessGroup(uid, initialPid, signal, 0 /*retries*/, max_processes);
}

See cgroups.json for the cgroup configuration:

/// @system/core/libprocessgroup/profiles/cgroups.json
{
  "Cgroups": [
    {
      "Controller": "blkio",
      "Path": "/dev/blkio",
      "Mode": "0755",
      "UID": "system",
      "GID": "system"
    },
    {
      "Controller": "cpu",
      "Path": "/dev/cpuctl",
      "Mode": "0755",
      "UID": "system",
      "GID": "system"
    },
    {
      "Controller": "cpuset",
      "Path": "/dev/cpuset",
      "Mode": "0755",
      "UID": "system",
      "GID": "system"
    },
    {
      "Controller": "memory",
      "Path": "/dev/memcg",
      "Mode": "0700",
      "UID": "root",
      "GID": "system",
      "Optional": true
    }
  ],
  "Cgroups2": {
    "Path": "/sys/fs/cgroup",
    "Mode": "0755",
    "UID": "system",
    "GID": "system",
    "Controllers": [
      {
        "Controller": "freezer",
        "Path": ".",
        "Mode": "0755",
        "UID": "system",
        "GID": "system"
      }
    ]
  }
}

KillProcessGroup

/// @system/core/libprocessgroup/processgroup.cpp
static int KillProcessGroup(uid_t uid, int initialPid, int signal, int retries,
                            int* max_processes) {
    std::string hierarchy_root_path;
    // get the cgroup v2 root path, i.e. /sys/fs/cgroup
    CgroupGetControllerPath(CGROUPV2_CONTROLLER_NAME, &hierarchy_root_path);
    const char* cgroup = hierarchy_root_path.c_str();

    std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
    if (max_processes != nullptr) {
        *max_processes = 0;
    }

    int retry = retries;
    int processes;
    while ((processes = DoKillProcessGroupOnce(cgroup, uid, initialPid, signal)) > 0) { // kill the process group
        if (max_processes != nullptr && processes > *max_processes) {
            *max_processes = processes;
        }
        LOG(VERBOSE) << "Killed " << processes << " processes for processgroup " << initialPid;
        if (retry > 0) {
            std::this_thread::sleep_for(5ms);
            --retry;
        } else {
            break;
        }
    }

    if (processes < 0) {
        PLOG(ERROR) << "Error encountered killing process cgroup uid " << uid << " pid " << initialPid;
        return -1;
    }

    std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
    auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();

    // We only calculate the number of 'processes' when killing the processes.
    // In the retries == 0 case, we only kill the processes once and therefore
    // will not have waited then recalculated how many processes are remaining
    // after the first signals have been sent.
    // Logging anything regarding the number of 'processes' here does not make sense.

    if (processes == 0) { // zero after the retries means no processes remain to be killed
        if (retries > 0) {
            LOG(INFO) << "Successfully killed process cgroup uid " << uid << " pid " << initialPid
                      << " in " << static_cast<int>(ms) << "ms";
        }

        int err = RemoveProcessGroup(cgroup, uid, initialPid, retries); // remove the process group directory

        if (isMemoryCgroupSupported() && UsePerAppMemcg()) { // also remove the corresponding memory cgroup directory
            std::string memory_path;
            CgroupGetControllerPath("memory", &memory_path);
            memory_path += "/apps";
            if (RemoveProcessGroup(memory_path.c_str(), uid, initialPid, retries)) return -1;
        }

        return err;
    } else {
        if (retries > 0) {
            LOG(ERROR) << "Failed to kill process cgroup uid " << uid << " pid " << initialPid
                       << " in " << static_cast<int>(ms) << "ms, " << processes
                       << " processes remain";
        }
        return -1;
    }
}

DoKillProcessGroupOnce

// Returns number of processes killed on success
// Returns 0 if there are no processes in the process cgroup left to kill
// Returns -1 on error
static int DoKillProcessGroupOnce(const char* cgroup, uid_t uid, int initialPid, int signal) {
    // the path is /sys/fs/cgroup/uid_%d/pid_%d/cgroup.procs
    auto path = ConvertUidPidToPath(cgroup, uid, initialPid) + PROCESSGROUP_CGROUP_PROCS_FILE;
    std::unique_ptr<FILE, decltype(&fclose)> fd(fopen(path.c_str(), "re"), fclose);
    if (!fd) {
        if (errno == ENOENT) {
            // This happens when process is already dead
            return 0;
        }
        PLOG(WARNING) << "Failed to open process cgroup uid " << uid << " pid " << initialPid;
        return -1;
    }

    // We separate all of the pids in the cgroup into those pids that are also the leaders of
    // process groups (stored in the pgids set) and those that are not (stored in the pids set).
    std::set<pid_t> pgids;
    pgids.emplace(initialPid);
    std::set<pid_t> pids;

    pid_t pid;
    int processes = 0;
    while (fscanf(fd.get(), "%d\n", &pid) == 1 && pid >= 0) { // read every pid listed in cgroup.procs
        processes++;
        if (pid == 0) {
            // Should never happen...  but if it does, trying to kill this
            // will boomerang right back and kill us!  Let's not let that happen.
            LOG(WARNING) << "Yikes, we've been told to kill pid 0!  How about we don't do that?";
            continue;
        }
        pid_t pgid = getpgid(pid);
        if (pgid == -1) PLOG(ERROR) << "getpgid(" << pid << ") failed";
        if (pgid == pid) { // record process group leaders; the whole group is killed later
            pgids.emplace(pid);
        } else {
            pids.emplace(pid);
        }
    }

    // Erase all pids that will be killed when we kill the process groups.
    for (auto it = pids.begin(); it != pids.end();) {
        pid_t pgid = getpgid(*it);
        if (pgids.count(pgid) == 1) { // drop pids that belong to one of the groups: killing the group already covers them
            it = pids.erase(it);
        } else {
            ++it;
        }
    }

    // Kill all process groups.
    for (const auto pgid : pgids) { // kill each process group
        LOG(VERBOSE) << "Killing process group " << -pgid << " in uid " << uid
                     << " as part of process cgroup " << initialPid;

        if (kill(-pgid, signal) == -1 && errno != ESRCH) {
            PLOG(WARNING) << "kill(" << -pgid << ", " << signal << ") failed";
        }
    }

    // Kill remaining pids.
    for (const auto pid : pids) { // kill the remaining individual pids
        LOG(VERBOSE) << "Killing pid " << pid << " in uid " << uid << " as part of process cgroup "
                     << initialPid;

        if (kill(pid, signal) == -1 && errno != ESRCH) {
            PLOG(WARNING) << "kill(" << pid << ", " << signal << ") failed";
        }
    }

    return feof(fd.get()) ? processes : -1;
}
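
The kill(-pgid, signal) calls above rely on the convention that a negative pid addresses an entire process group. A minimal standalone sketch of that mechanism (not init code):

/// minimal illustration, not part of AOSP
#include <signal.h>
#include <sys/wait.h>
#include <unistd.h>

int main() {
    pid_t child = fork();
    if (child == 0) {
        setpgid(0, 0);               // the child becomes leader of a new process group
        pause();                     // wait until a signal arrives
        _exit(0);
    }
    setpgid(child, child);           // set the group from the parent too, to avoid a race
    kill(-child, SIGTERM);           // negative pid: signal the whole group led by 'child'
    waitpid(child, nullptr, 0);      // reap the child
    return 0;
}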

Service::NotifyStateChange

void Service::NotifyStateChange(const std::string& new_state) const {
    if ((flags_ & SVC_TEMPORARY) != 0) {
        // Services created by 'exec' are temporary and don't have properties tracking their state.
        return;
    }

    std::string prop_name = "init.svc." + name_;
    SetProperty(prop_name, new_state);  // publish the state in a property, e.g. [init.svc.zygote]: [running]

    if (new_state == "running") { // 运行态设置启动时间
        uint64_t start_ns = time_started_.time_since_epoch().count();
        std::string boottime_property = "ro.boottime." + name_; // [ro.boottime.zygote]: [7240451464]
        if (GetProperty(boottime_property, "").empty()) {  // only set it the first time
            SetProperty(boottime_property, std::to_string(start_ns));
        }
    }

    // init.svc_debug_pid.* properties are only for tests, and should not be used
    // on device for security checks.
    std::string pid_property = "init.svc_debug_pid." + name_;
    if (new_state == "running") { // 运行时记录pid, [init.svc_debug_pid.zygote]: [364]
        SetProperty(pid_property, std::to_string(pid_));
    } else if (new_state == "stopped") { // 停止时移除pid记录
        SetProperty(pid_property, "");
    }
}
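
As a usage note, other processes and tests can observe the init.svc.* properties set here. A minimal sketch (not init code) of a client waiting for a state change through libbase's property helpers; the service name audioserver is only an example:

/// minimal illustration, not part of AOSP; assumes a platform component linking libbase
#include <android-base/properties.h>
#include <chrono>
#include <iostream>

int main() {
    using namespace std::chrono_literals;
    // The property published by NotifyStateChange(), e.g. "running" / "stopped" / "restarting".
    std::string state = android::base::GetProperty("init.svc.audioserver", "");
    std::cout << "audioserver state: " << state << std::endl;

    // Block until init reports the service as running again (e.g. after a restart).
    if (android::base::WaitForProperty("init.svc.audioserver", "running", 10s)) {
        std::cout << "audioserver is running" << std::endl;
    }
    return 0;
}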

Execution flow of the service onrestart commands

After a service exits, its onrestart commands are executed to restart related services. Below is zygote's rc entry; when zygote restarts, audioserver and several other services are restarted as well:

service zygote /system/bin/app_process64 -Xzygote /system/bin --zygote --start-system-server
    class main
    priority -20
    user root
    group root readproc reserved_disk
    socket zygote stream 660 root system
    socket usap_pool_primary stream 660 root system
    onrestart exec_background - system system -- /system/bin/vdc volume abort_fuse
    onrestart write /sys/power/state on
    onrestart restart audioserver   # note these onrestart commands
    onrestart restart cameraserver
    onrestart restart media
    onrestart restart media.tuner
    onrestart restart netd
    onrestart restart wificond
    task_profiles ProcessCapacityHigh
    critical window=${zygote.critical_window.minute:-off} target=zygote-fatal

The following analyzes the 'onrestart restart <service>' flow.

onrestart_.ExecuteAllCommands

onrestart_ is an Action whose commands were added while the service's onrestart lines were parsed. ExecuteAllCommands iterates over them and executes each one.

/// @system/core/init/action.cpp
void Action::ExecuteAllCommands() const {
    for (const auto& c : commands_) {
        ExecuteCommand(c); // execute each command in turn
    }
}

Each onrestart line runs a builtin command. For restarting a service the keyword is restart, and its handler is do_restart, which ExecuteCommand invokes. For example:

onrestart restart audioserver   # this will restart the audioserver service
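
How the restart keyword reaches do_restart: init keeps a map from builtin command keywords to handler functions (in builtins.cpp) and looks the keyword up when executing a command. A simplified, hypothetical sketch of that kind of dispatch (the names and types below are illustrative only, not the actual init definitions):

/// hypothetical illustration, not the actual init code
#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <vector>

using Args = std::vector<std::string>;

// Illustrative stand-in for init's do_restart builtin.
static void do_restart_demo(const Args& args) {
    std::cout << "would restart service: " << args[1] << std::endl;
}

int main() {
    // keyword -> handler, analogous to init's builtin function map
    std::map<std::string, std::function<void(const Args&)>> builtins = {
            {"restart", do_restart_demo},
    };

    // "onrestart restart audioserver" eventually executes this command:
    Args cmd = {"restart", "audioserver"};
    builtins.at(cmd[0])(cmd);
    return 0;
}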

do_restart

This function restarts a single service:

/// @system/core/init/builtins.cpp
static Result<void> do_restart(const BuiltinArguments& args) {
    Service* svc = ServiceList::GetInstance().FindService(args[1]); // first check whether the target service exists
    if (!svc) return Error() << "service " << args[1] << " not found";
    svc->Restart(); // call the service's Restart function
    return {};
}

Service::Restart

void Service::Restart() {
    if (flags_ & SVC_RUNNING) { // the service is running: stop it and let it be restarted
        /* Stop, wait, then start the service. */
        StopOrReset(SVC_RESTART);
    } else if (!(flags_ & SVC_RESTARTING)) { // not already in the middle of a restart
        /* Just start the service since it's not running. */
        if (auto result = Start(); !result.ok()) { // just call Start
            LOG(ERROR) << "Could not restart '" << name_ << "': " << result.error();
        }
    } /* else: Service is restarting anyways. */
}

If the service is still running, StopOrReset is called to restart it.

Service::StopOrReset

Here the how argument is SVC_RESTART. StopOrReset marks the service with SVC_RESTART and then kills the process; when the process is reaped, Service::Reap performs the steps that lead to the service being restarted.

// The how field should be either SVC_DISABLED, SVC_RESET, or SVC_RESTART.
void Service::StopOrReset(int how) {
    // The service is still SVC_RUNNING until its process exits, but if it has
    // already exited it shouldn't attempt a restart yet.
    flags_ &= ~(SVC_RESTARTING | SVC_DISABLED_START); // clear flags that conflict with the requested action

    if ((how != SVC_DISABLED) && (how != SVC_RESET) && (how != SVC_RESTART)) { // how must be one of these three values
        // An illegal flag: default to SVC_DISABLED.
        how = SVC_DISABLED; // illegal value: fall back to disabled
    }

    // If the service has not yet started, prevent it from auto-starting with its class.
    if (how == SVC_RESET) {
        flags_ |= (flags_ & SVC_RC_DISABLED) ? SVC_DISABLED : SVC_RESET;
    } else {
        flags_ |= how; // not a reset: record the flag
    }
    // Make sure it's in right status when a restart immediately follow a
    // stop/reset or vice versa.
    if (how == SVC_RESTART) {
        flags_ &= (~(SVC_DISABLED | SVC_RESET));
    } else {
        flags_ &= (~SVC_RESTART);
    }

    if (pid_) { // the process exists: kill its process group
        KillProcessGroup(SIGKILL);
        NotifyStateChange("stopping"); // 更新进程状态
    } else {
        NotifyStateChange("stopped");
    }
}

If the service is not running yet, Start is called to launch it.

Service::Start

  • Delays the start of an updatable service if the configs from APEXes have not all been loaded yet.
  • Creates the sockets the service declares and opens the files it declares.
  • Forks a child process and executes the service binary via execv.
  • Creates and initializes the process group (cgroup).
  • Registers the process with lmkd.
  • Notifies the service state change.
Result<void> Service::Start() {
    auto reboot_on_failure = make_scope_guard([this] { // runs if the start fails
        if (on_failure_reboot_target_) {
            trigger_shutdown(*on_failure_reboot_target_);
        }
    });

    if (is_updatable() && !ServiceList::GetInstance().IsServicesUpdated()) {
        ServiceList::GetInstance().DelayService(*this);  // defer the start
        return Error() << "Cannot start an updatable service '" << name_
                       << "' before configs from APEXes are all loaded. "
                       << "Queued for execution.";
    }

    bool disabled = (flags_ & (SVC_DISABLED | SVC_RESET));
    // Starting a service removes it from the disabled or reset state and
    // immediately takes it out of the restarting state if it was in there.
    flags_ &= (~(SVC_DISABLED|SVC_RESTARTING|SVC_RESET|SVC_RESTART|SVC_DISABLED_START));

    // Running processes require no additional work --- if they're in the
    // process of exiting, we've ensured that they will immediately restart
    // on exit, unless they are ONESHOT. For ONESHOT service, if it's in
    // stopping status, we just set SVC_RESTART flag so it will get restarted
    // in Reap().
    if (flags_ & SVC_RUNNING) {
        if ((flags_ & SVC_ONESHOT) && disabled) {
            flags_ |= SVC_RESTART;
        }
        // It is not an error to try to start a service that is already running.
        reboot_on_failure.Disable();
        return {};
    }

    bool needs_console = (flags_ & SVC_CONSOLE);
    if (needs_console) {
        if (proc_attr_.console.empty()) {
            proc_attr_.console = "/dev/" + GetProperty("ro.boot.console", "console");
        }

        // Make sure that open call succeeds to ensure a console driver is
        // properly registered for the device node
        int console_fd = open(proc_attr_.console.c_str(), O_RDWR | O_CLOEXEC);
        if (console_fd < 0) {
            flags_ |= SVC_DISABLED;
            return ErrnoError() << "Couldn't open console '" << proc_attr_.console << "'";
        }
        close(console_fd);
    }

    struct stat sb;
    if (stat(args_[0].c_str(), &sb) == -1) { // check that the executable exists
        flags_ |= SVC_DISABLED;
        return ErrnoError() << "Cannot find '" << args_[0] << "'";
    }

    std::string scon;
    if (!seclabel_.empty()) { // use the seclabel configured in the rc file
        scon = seclabel_;
    } else {
        auto result = ComputeContextFromExecutable(args_[0]);
        if (!result.ok()) {
            return result.error();
        }
        scon = *result;
    }

    // APEXd is always started in the "current" namespace because it is the process to set up
    // the current namespace.
    const bool is_apexd = args_[0] == "/system/bin/apexd";

    if (!IsDefaultMountNamespaceReady() && !is_apexd) {
        // If this service is started before APEXes and corresponding linker configuration
        // get available, mark it as pre-apexd one. Note that this marking is
        // permanent. So for example, if the service is re-launched e.g., due
        // to crash), it is still recognized as pre-apexd... for consistency.
        use_bootstrap_ns_ = true;
    }

    // For pre-apexd services, override mount namespace as "bootstrap" one before starting.
    // Note: "ueventd" is supposed to be run in "default" mount namespace even if it's pre-apexd
    // to support loading firmwares from APEXes.
    std::optional<MountNamespace> override_mount_namespace;
    if (name_ == "ueventd") {
        override_mount_namespace = NS_DEFAULT;
    } else if (use_bootstrap_ns_) {
        override_mount_namespace = NS_BOOTSTRAP;
    }

    post_data_ = ServiceList::GetInstance().IsPostData();

    LOG(INFO) << "starting service '" << name_ << "'...";

    std::vector<Descriptor> descriptors;
    for (const auto& socket : sockets_) { // create the sockets declared by the service
        if (auto result = socket.Create(scon); result.ok()) {
            descriptors.emplace_back(std::move(*result));
        } else {
            LOG(INFO) << "Could not create socket '" << socket.name << "': " << result.error();
        }
    }

    for (const auto& file : files_) { // open the files declared via 'file', e.g. file /dev/kmsg w
        if (auto result = file.Create(); result.ok()) {
            descriptors.emplace_back(std::move(*result));
        } else {
            LOG(INFO) << "Could not open file '" << file.name << "': " << result.error();
        }
    }
    // fork the service process
    pid_t pid = -1;
    if (namespaces_.flags) {
        pid = clone(nullptr, nullptr, namespaces_.flags | SIGCHLD, nullptr);
    } else {
        pid = fork();
    }

    if (pid == 0) { // in the newly created child: initialize, then execv
        umask(077);

        if (auto result = EnterNamespaces(namespaces_, name_, override_mount_namespace);
            !result.ok()) {
            LOG(FATAL) << "Service '" << name_
                       << "' failed to set up namespaces: " << result.error();
        }

        for (const auto& [key, value] : environment_vars_) {
            setenv(key.c_str(), value.c_str(), 1);
        }

        for (const auto& descriptor : descriptors) {
            descriptor.Publish(); // clears FD_CLOEXEC so the descriptor survives execv
        }
        // handle writepid, e.g. writepid /dev/cpuset/foreground/tasks
        if (auto result = WritePidToFiles(&writepid_files_); !result.ok()) {
            LOG(ERROR) << "failed to write pid to files: " << result.error();
        }

        if (task_profiles_.size() > 0 && !SetTaskProfiles(getpid(), task_profiles_)) {
            LOG(ERROR) << "failed to set task profiles";
        }

        // As requested, set our gid, supplemental gids, uid, context, and
        // priority. Aborts on failure.
        SetProcessAttributesAndCaps();

        if (!ExpandArgsAndExecv(args_, sigstop_)) { // expand the arguments and execv
            PLOG(ERROR) << "cannot execv('" << args_[0]
                        << "'). See the 'Debugging init' section of init's README.md for tips";
        }

        _exit(127);
    }

    if (pid < 0) { // fork failed
        pid_ = 0;
        return ErrnoError() << "Failed to fork";
    }

    if (oom_score_adjust_ != DEFAULT_OOM_SCORE_ADJUST) { // write oom_score_adj
        std::string oom_str = std::to_string(oom_score_adjust_);
        std::string oom_file = StringPrintf("/proc/%d/oom_score_adj", pid);
        if (!WriteStringToFile(oom_str, oom_file)) {
            PLOG(ERROR) << "couldn't write oom_score_adj";
        }
    }

    time_started_ = boot_clock::now();
    pid_ = pid;
    flags_ |= SVC_RUNNING;
    start_order_ = next_start_order_++;
    process_cgroup_empty_ = false;

    bool use_memcg = swappiness_ != -1 || soft_limit_in_bytes_ != -1 || limit_in_bytes_ != -1 ||
                      limit_percent_ != -1 || !limit_property_.empty();
    errno = -createProcessGroup(proc_attr_.uid, pid_, use_memcg); // create the process group (cgroup)
    if (errno != 0) {
        PLOG(ERROR) << "createProcessGroup(" << proc_attr_.uid << ", " << pid_
                    << ") failed for service '" << name_ << "'";
    } else if (use_memcg) {
        if (swappiness_ != -1) {
            // set swappiness, configured via memcg.swappiness in the rc file
            if (!setProcessGroupSwappiness(proc_attr_.uid, pid_, swappiness_)) {
                PLOG(ERROR) << "setProcessGroupSwappiness failed";
            }
        }

        if (soft_limit_in_bytes_ != -1) { // configured via memcg.soft_limit_in_bytes in the rc file
            if (!setProcessGroupSoftLimit(proc_attr_.uid, pid_, soft_limit_in_bytes_)) {
                PLOG(ERROR) << "setProcessGroupSoftLimit failed";
            }
        }

        size_t computed_limit_in_bytes = limit_in_bytes_; // memcg.limit_in_bytes
        if (limit_percent_ != -1) { // memcg.limit_percent
            long page_size = sysconf(_SC_PAGESIZE);
            long num_pages = sysconf(_SC_PHYS_PAGES);
            if (page_size > 0 && num_pages > 0) {
                size_t max_mem = SIZE_MAX;
                if (size_t(num_pages) < SIZE_MAX / size_t(page_size)) {
                    max_mem = size_t(num_pages) * size_t(page_size);
                }
                computed_limit_in_bytes =
                        std::min(computed_limit_in_bytes, max_mem / 100 * limit_percent_);
            }
        }

        if (!limit_property_.empty()) {
            // This ends up overwriting computed_limit_in_bytes but only if the
            // property is defined.
            computed_limit_in_bytes = android::base::GetUintProperty(
                    limit_property_, computed_limit_in_bytes, SIZE_MAX);
        }

        if (computed_limit_in_bytes != size_t(-1)) {
            if (!setProcessGroupLimit(proc_attr_.uid, pid_, computed_limit_in_bytes)) { // set the memory limit
                PLOG(ERROR) << "setProcessGroupLimit failed";
            }
        }
    }

    if (oom_score_adjust_ != DEFAULT_OOM_SCORE_ADJUST) {
        LmkdRegister(name_, proc_attr_.uid, pid_, oom_score_adjust_); // register the process with lmkd
    }

    NotifyStateChange("running"); // 通知服务运行起来了。
    reboot_on_failure.Disable();
    return {};
}

At this point the child process reaping flow is complete.

Restarting the service itself

Above, the service process was reaped (waitid/waitpid) and Reap dealt with the service state and the onrestart commands, but the service itself has not been restarted yet. That work is actually done in SecondStageMain.

SecondStageMain: the event-processing loop

Below is init's main loop, which handles the relevant events.

/// @system/core/init/main.cpp
int SecondStageMain(int argc, char** argv) {
...
// Restore prio before main loop
setpriority(PRIO_PROCESS, 0, 0);
while (true) {
    // By default, sleep until something happens. Compute the epoll timeout.
    auto epoll_timeout = std::optional<std::chrono::milliseconds>{};

    ...
    if (!IsShuttingDown()) { // not shutting down; if a service needs restarting, the timeout is recomputed accordingly
        auto next_process_action_time = HandleProcessActions(); // handle process-related actions

        // If there's a process that needs restarting, wake up in time for that.
        if (next_process_action_time) { // compute the next wake-up time
            epoll_timeout = std::chrono::ceil<std::chrono::milliseconds>(
                    *next_process_action_time - boot_clock::now());
            if (*epoll_timeout < 0ms) epoll_timeout = 0ms;
        }
    }
    ...
}
return 0;
}

HandleProcessActions

  • Handles service timeouts.
  • Handles service restarts.
static std::optional<boot_clock::time_point> HandleProcessActions() {
    std::optional<boot_clock::time_point> next_process_action_time; // the time of the next action to handle
    for (const auto& s : ServiceList::GetInstance()) {
        if ((s->flags() & SVC_RUNNING) && s->timeout_period()) { // check for a timeout, configured via 'timeout_period <seconds>'
            auto timeout_time = s->time_started() + *s->timeout_period();
            if (boot_clock::now() > timeout_time) { // the configured timeout has elapsed
                s->Timeout(); // Timeout() will kill the service process
            } else {
                if (!next_process_action_time || timeout_time < *next_process_action_time) { // the timeout is earlier than the next action time
                    next_process_action_time = timeout_time; // use the timeout as the next action time
                }
            }
        }

        if (!(s->flags() & SVC_RESTARTING)) continue;
        // services marked SVC_RESTARTING (set in Service::Reap) are restarted here
        auto restart_time = s->time_started() + s->restart_period(); // earliest allowed restart: last start time + restart_period
        if (boot_clock::now() > restart_time) { // more than restart_period (5s by default) since the last start, to avoid restarting too often
            if (auto result = s->Start(); !result.ok()) { // start the service via Start()
                LOG(ERROR) << "Could not restart process '" << s->name() << "': " << result.error();
            }
        } else {
            if (!next_process_action_time || restart_time < *next_process_action_time) { // the restart time is earlier than the next action time
                next_process_action_time = restart_time; // use the restart time as the next action time
            }
        }
    }
    return next_process_action_time;
}

In the flow above, the service is again started via Service::Start, as analyzed earlier.
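
As a concrete timing example: if a service marked SVC_RESTARTING last started 2 seconds ago and restart_period is the default 5 seconds, restart_time lies 3 seconds in the future; HandleProcessActions returns that time, the main loop sets its epoll timeout to roughly 3 seconds, wakes up, and the next HandleProcessActions pass calls Start() on the service.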
